Column schema of this dataset preview (name, dtype, and value or length range):

| column | dtype | values / lengths |
|---|---|---|
| id | int32 | 0 to 252k |
| repo | string | 7 to 55 chars |
| path | string | 4 to 127 chars |
| func_name | string | 1 to 88 chars |
| original_string | string | 75 to 19.8k chars |
| language | string | 1 distinct value |
| code | string | 75 to 19.8k chars |
| code_tokens | list | |
| docstring | string | 3 to 17.3k chars |
| docstring_tokens | list | |
| sha | string | 40 chars |
| url | string | 87 to 242 chars |

id: 8,500 | repo: trehn/termdown | path: termdown.py | func_name: graceful_ctrlc
original_string:
```python
def graceful_ctrlc(func):
"""
Makes the decorated function exit with code 1 on CTRL+C.
"""
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except KeyboardInterrupt:
exit(1)
return wrapper
```
language: python
code: identical to original_string above (duplicate omitted)
code_tokens:
[
"def",
"graceful_ctrlc",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"KeyboardInterrupt",
":",
"exit",
"(",
"1",
")",
"return",
"wrapper"
]
docstring: Makes the decorated function exit with code 1 on CTRL+C.
docstring_tokens:
[
"Makes",
"the",
"decorated",
"function",
"exit",
"with",
"code",
"1",
"on",
"CTRL",
"+",
"C",
"."
]
sha: aa0c4e39d9864fd1466ef9d76947fb93d0cf5be2
url: https://github.com/trehn/termdown/blob/aa0c4e39d9864fd1466ef9d76947fb93d0cf5be2/termdown.py#L144-L154
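
The decorator in this record is self-contained apart from `wraps`, so a small usage sketch is easy to give. Everything below is illustrative only: the `countdown` function is invented for the demo and is not part of termdown, and `sys.exit(1)` stands in for the bare `exit(1)` used in the original.

```python
import sys
import time
from functools import wraps

def graceful_ctrlc(func):
    """Make the decorated function exit with status 1 on CTRL+C."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except KeyboardInterrupt:
            sys.exit(1)
    return wrapper

@graceful_ctrlc
def countdown(seconds):
    # Hypothetical demo function, not part of termdown.
    for remaining in range(seconds, 0, -1):
        print(remaining)
        time.sleep(1)

if __name__ == "__main__":
    countdown(5)  # pressing CTRL+C exits with status 1 instead of a traceback
```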

id: 8,501 | repo: trehn/termdown | path: termdown.py | func_name: pad_to_size
original_string:
```python
def pad_to_size(text, x, y):
"""
Adds whitespace to text to center it within a frame of the given
dimensions.
"""
input_lines = text.rstrip().split("\n")
longest_input_line = max(map(len, input_lines))
number_of_input_lines = len(input_lines)
x = max(x, longest_input_line)
y = max(y, number_of_input_lines)
output = ""
padding_top = int((y - number_of_input_lines) / 2)
padding_bottom = y - number_of_input_lines - padding_top
padding_left = int((x - longest_input_line) / 2)
output += padding_top * (" " * x + "\n")
for line in input_lines:
output += padding_left * " " + line + " " * (x - padding_left - len(line)) + "\n"
output += padding_bottom * (" " * x + "\n")
return output
```
language: python
code: identical to original_string above (duplicate omitted)
code_tokens:
[
"def",
"pad_to_size",
"(",
"text",
",",
"x",
",",
"y",
")",
":",
"input_lines",
"=",
"text",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"longest_input_line",
"=",
"max",
"(",
"map",
"(",
"len",
",",
"input_lines",
")",
")",
"number_of_input_lines",
"=",
"len",
"(",
"input_lines",
")",
"x",
"=",
"max",
"(",
"x",
",",
"longest_input_line",
")",
"y",
"=",
"max",
"(",
"y",
",",
"number_of_input_lines",
")",
"output",
"=",
"\"\"",
"padding_top",
"=",
"int",
"(",
"(",
"y",
"-",
"number_of_input_lines",
")",
"/",
"2",
")",
"padding_bottom",
"=",
"y",
"-",
"number_of_input_lines",
"-",
"padding_top",
"padding_left",
"=",
"int",
"(",
"(",
"x",
"-",
"longest_input_line",
")",
"/",
"2",
")",
"output",
"+=",
"padding_top",
"*",
"(",
"\" \"",
"*",
"x",
"+",
"\"\\n\"",
")",
"for",
"line",
"in",
"input_lines",
":",
"output",
"+=",
"padding_left",
"*",
"\" \"",
"+",
"line",
"+",
"\" \"",
"*",
"(",
"x",
"-",
"padding_left",
"-",
"len",
"(",
"line",
")",
")",
"+",
"\"\\n\"",
"output",
"+=",
"padding_bottom",
"*",
"(",
"\" \"",
"*",
"x",
"+",
"\"\\n\"",
")",
"return",
"output"
]
docstring: Adds whitespace to text to center it within a frame of the given dimensions.
docstring_tokens:
[
"Adds",
"whitespace",
"to",
"text",
"to",
"center",
"it",
"within",
"a",
"frame",
"of",
"the",
"given",
"dimensions",
"."
]
sha: aa0c4e39d9864fd1466ef9d76947fb93d0cf5be2
url: https://github.com/trehn/termdown/blob/aa0c4e39d9864fd1466ef9d76947fb93d0cf5be2/termdown.py#L177-L198
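
As a quick sanity check of the centering arithmetic above, the function can be exercised directly; the import assumes the termdown package (which ships the termdown.py module shown here) is installed, e.g. via `pip install termdown`.

```python
from termdown import pad_to_size  # assumes termdown is installed

# Center a 5-character string in an 11x3 frame.
framed = pad_to_size("12:34", 11, 3)
print(repr(framed))
# Expected: one blank 11-column line, the text padded to 11 columns, one blank line:
# '           \n   12:34   \n           \n'
```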

id: 8,502 | repo: trehn/termdown | path: termdown.py | func_name: parse_timestr
original_string:
```python
def parse_timestr(timestr):
"""
Parse a string describing a point in time.
"""
timedelta_secs = parse_timedelta(timestr)
sync_start = datetime.now()
if timedelta_secs:
target = datetime.now() + timedelta(seconds=timedelta_secs)
elif timestr.isdigit():
target = datetime.now() + timedelta(seconds=int(timestr))
else:
try:
target = parse(timestr)
except:
# unfortunately, dateutil doesn't raise the best exceptions
raise ValueError("Unable to parse '{}'".format(timestr))
# When I do "termdown 10" (the two cases above), I want a
# countdown for the next 10 seconds. Okay. But when I do
# "termdown 23:52", I want a countdown that ends at that exact
# moment -- the countdown is related to real time. Thus, I want
# my frames to be drawn at full seconds, so I enforce
# microsecond=0.
sync_start = sync_start.replace(microsecond=0)
try:
# try to convert target to naive local timezone
target = target.astimezone(tz=tz.tzlocal()).replace(tzinfo=None)
except ValueError:
# parse() already returned a naive datetime, all is well
pass
return (sync_start, target)
```
language: python
code: identical to original_string above (duplicate omitted)
code_tokens:
[
"def",
"parse_timestr",
"(",
"timestr",
")",
":",
"timedelta_secs",
"=",
"parse_timedelta",
"(",
"timestr",
")",
"sync_start",
"=",
"datetime",
".",
"now",
"(",
")",
"if",
"timedelta_secs",
":",
"target",
"=",
"datetime",
".",
"now",
"(",
")",
"+",
"timedelta",
"(",
"seconds",
"=",
"timedelta_secs",
")",
"elif",
"timestr",
".",
"isdigit",
"(",
")",
":",
"target",
"=",
"datetime",
".",
"now",
"(",
")",
"+",
"timedelta",
"(",
"seconds",
"=",
"int",
"(",
"timestr",
")",
")",
"else",
":",
"try",
":",
"target",
"=",
"parse",
"(",
"timestr",
")",
"except",
":",
"# unfortunately, dateutil doesn't raise the best exceptions",
"raise",
"ValueError",
"(",
"\"Unable to parse '{}'\"",
".",
"format",
"(",
"timestr",
")",
")",
"# When I do \"termdown 10\" (the two cases above), I want a",
"# countdown for the next 10 seconds. Okay. But when I do",
"# \"termdown 23:52\", I want a countdown that ends at that exact",
"# moment -- the countdown is related to real time. Thus, I want",
"# my frames to be drawn at full seconds, so I enforce",
"# microsecond=0.",
"sync_start",
"=",
"sync_start",
".",
"replace",
"(",
"microsecond",
"=",
"0",
")",
"try",
":",
"# try to convert target to naive local timezone",
"target",
"=",
"target",
".",
"astimezone",
"(",
"tz",
"=",
"tz",
".",
"tzlocal",
"(",
")",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")",
"except",
"ValueError",
":",
"# parse() already returned a naive datetime, all is well",
"pass",
"return",
"(",
"sync_start",
",",
"target",
")"
]
docstring: Parse a string describing a point in time.
docstring_tokens:
[
"Parse",
"a",
"string",
"describing",
"a",
"point",
"in",
"time",
"."
]
sha: aa0c4e39d9864fd1466ef9d76947fb93d0cf5be2
url: https://github.com/trehn/termdown/blob/aa0c4e39d9864fd1466ef9d76947fb93d0cf5be2/termdown.py#L201-L232
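
The two cases discussed in the function's comments (a bare number of seconds versus a wall-clock time) can be observed from the outside with a short, hedged demo. It assumes the termdown package and python-dateutil are installed, and it only checks rough magnitudes, not exact timestamps.

```python
from termdown import parse_timestr  # assumes termdown is installed

# A bare number is treated as a countdown length in seconds.
sync_start, target = parse_timestr("10")
print((target - sync_start).total_seconds())  # roughly 10 (plus sub-second truncation)

# sync_start is truncated to the whole second so frames align with real time.
print(sync_start.microsecond)  # 0
```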

id: 8,503 | repo: trehn/termdown | path: termdown.py | func_name: parse_timedelta
original_string:
```python
def parse_timedelta(deltastr):
"""
Parse a string describing a period of time.
"""
matches = TIMEDELTA_REGEX.match(deltastr)
if not matches:
return None
components = {}
for name, value in matches.groupdict().items():
if value:
components[name] = int(value)
for period, hours in (('days', 24), ('years', 8766)):
if period in components:
components['hours'] = components.get('hours', 0) + \
components[period] * hours
del components[period]
return int(timedelta(**components).total_seconds())
```
language: python
code: identical to original_string above (duplicate omitted)
code_tokens:
[
"def",
"parse_timedelta",
"(",
"deltastr",
")",
":",
"matches",
"=",
"TIMEDELTA_REGEX",
".",
"match",
"(",
"deltastr",
")",
"if",
"not",
"matches",
":",
"return",
"None",
"components",
"=",
"{",
"}",
"for",
"name",
",",
"value",
"in",
"matches",
".",
"groupdict",
"(",
")",
".",
"items",
"(",
")",
":",
"if",
"value",
":",
"components",
"[",
"name",
"]",
"=",
"int",
"(",
"value",
")",
"for",
"period",
",",
"hours",
"in",
"(",
"(",
"'days'",
",",
"24",
")",
",",
"(",
"'years'",
",",
"8766",
")",
")",
":",
"if",
"period",
"in",
"components",
":",
"components",
"[",
"'hours'",
"]",
"=",
"components",
".",
"get",
"(",
"'hours'",
",",
"0",
")",
"+",
"components",
"[",
"period",
"]",
"*",
"hours",
"del",
"components",
"[",
"period",
"]",
"return",
"int",
"(",
"timedelta",
"(",
"*",
"*",
"components",
")",
".",
"total_seconds",
"(",
")",
")"
]
docstring: Parse a string describing a period of time.
docstring_tokens:
[
"Parse",
"a",
"string",
"describing",
"a",
"period",
"of",
"time",
"."
]
sha: aa0c4e39d9864fd1466ef9d76947fb93d0cf5be2
url: https://github.com/trehn/termdown/blob/aa0c4e39d9864fd1466ef9d76947fb93d0cf5be2/termdown.py#L235-L251
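
parse_timedelta depends on a module-level TIMEDELTA_REGEX that is not part of this record. The sketch below substitutes a deliberately simplified stand-in pattern (the real termdown pattern is richer), purely so the groupdict handling and the days/years folding can be run in isolation.

```python
import re
from datetime import timedelta

# Simplified stand-in for termdown's TIMEDELTA_REGEX; it only accepts compact
# forms such as "2d", "1h30m" or "45s", with the named groups the function expects.
TIMEDELTA_REGEX = re.compile(
    r'^((?P<years>\d+)y)?((?P<days>\d+)d)?((?P<hours>\d+)h)?'
    r'((?P<minutes>\d+)m)?((?P<seconds>\d+)s)?$'
)

def parse_timedelta(deltastr):
    matches = TIMEDELTA_REGEX.match(deltastr)
    if not matches:
        return None
    components = {name: int(value)
                  for name, value in matches.groupdict().items() if value}
    # timedelta() has no 'years' keyword, so days and years are folded into hours.
    for period, hours in (('days', 24), ('years', 8766)):
        if period in components:
            components['hours'] = components.get('hours', 0) + components[period] * hours
            del components[period]
    return int(timedelta(**components).total_seconds())

print(parse_timedelta("1h30m"))  # 5400
print(parse_timedelta("2d"))     # 172800
```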

id: 8,504 | repo: wakatime/wakatime | path: wakatime/packages/ntlm_auth/session_security.py | func_name: SessionSecurity._verify_signature
original_string:
```python
def _verify_signature(self, message, signature):
"""
Will verify that the signature received from the server matches up with the expected signature
computed locally. Will throw an exception if they do not match
@param message: The message data that is received from the server
@param signature: The signature of the message received from the server
"""
if self.negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY:
actual_checksum = signature[4:12]
actual_seq_num = struct.unpack("<I", signature[12:16])[0]
else:
actual_checksum = signature[8:12]
actual_seq_num = struct.unpack("<I", signature[12:16])[0]
expected_signature = calc_signature(message, self.negotiate_flags, self.incoming_signing_key, self.incoming_seq_num, self.incoming_handle)
expected_checksum = expected_signature.checksum
expected_seq_num = struct.unpack("<I", expected_signature.seq_num)[0]
if actual_checksum != expected_checksum:
raise Exception("The signature checksum does not match, message has been altered")
if actual_seq_num != expected_seq_num:
raise Exception("The signature sequence number does not match up, message not received in the correct sequence")
self.incoming_seq_num += 1
```
language: python
code: identical to original_string above (duplicate omitted)
code_tokens:
[
"def",
"_verify_signature",
"(",
"self",
",",
"message",
",",
"signature",
")",
":",
"if",
"self",
".",
"negotiate_flags",
"&",
"NegotiateFlags",
".",
"NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY",
":",
"actual_checksum",
"=",
"signature",
"[",
"4",
":",
"12",
"]",
"actual_seq_num",
"=",
"struct",
".",
"unpack",
"(",
"\"<I\"",
",",
"signature",
"[",
"12",
":",
"16",
"]",
")",
"[",
"0",
"]",
"else",
":",
"actual_checksum",
"=",
"signature",
"[",
"8",
":",
"12",
"]",
"actual_seq_num",
"=",
"struct",
".",
"unpack",
"(",
"\"<I\"",
",",
"signature",
"[",
"12",
":",
"16",
"]",
")",
"[",
"0",
"]",
"expected_signature",
"=",
"calc_signature",
"(",
"message",
",",
"self",
".",
"negotiate_flags",
",",
"self",
".",
"incoming_signing_key",
",",
"self",
".",
"incoming_seq_num",
",",
"self",
".",
"incoming_handle",
")",
"expected_checksum",
"=",
"expected_signature",
".",
"checksum",
"expected_seq_num",
"=",
"struct",
".",
"unpack",
"(",
"\"<I\"",
",",
"expected_signature",
".",
"seq_num",
")",
"[",
"0",
"]",
"if",
"actual_checksum",
"!=",
"expected_checksum",
":",
"raise",
"Exception",
"(",
"\"The signature checksum does not match, message has been altered\"",
")",
"if",
"actual_seq_num",
"!=",
"expected_seq_num",
":",
"raise",
"Exception",
"(",
"\"The signature sequence number does not match up, message not received in the correct sequence\"",
")",
"self",
".",
"incoming_seq_num",
"+=",
"1"
]
docstring:
Will verify that the signature received from the server matches up with the expected signature
computed locally. Will throw an exception if they do not match
@param message: The message data that is received from the server
@param signature: The signature of the message received from the server
docstring_tokens:
[
"Will",
"verify",
"that",
"the",
"signature",
"received",
"from",
"the",
"server",
"matches",
"up",
"with",
"the",
"expected",
"signature",
"computed",
"locally",
".",
"Will",
"throw",
"an",
"exception",
"if",
"they",
"do",
"not",
"match"
]
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/ntlm_auth/session_security.py#L201-L226

id: 8,505 | repo: wakatime/wakatime | path: wakatime/packages/pygments/regexopt.py | func_name: regex_opt_inner
original_string:
```python
def regex_opt_inner(strings, open_paren):
"""Return a regex that matches any string in the sorted list of strings."""
close_paren = open_paren and ')' or ''
# print strings, repr(open_paren)
if not strings:
# print '-> nothing left'
return ''
first = strings[0]
if len(strings) == 1:
# print '-> only 1 string'
return open_paren + escape(first) + close_paren
if not first:
# print '-> first string empty'
return open_paren + regex_opt_inner(strings[1:], '(?:') \
+ '?' + close_paren
if len(first) == 1:
# multiple one-char strings? make a charset
oneletter = []
rest = []
for s in strings:
if len(s) == 1:
oneletter.append(s)
else:
rest.append(s)
if len(oneletter) > 1: # do we have more than one oneletter string?
if rest:
# print '-> 1-character + rest'
return open_paren + regex_opt_inner(rest, '') + '|' \
+ make_charset(oneletter) + close_paren
# print '-> only 1-character'
return open_paren + make_charset(oneletter) + close_paren
prefix = commonprefix(strings)
if prefix:
plen = len(prefix)
# we have a prefix for all strings
# print '-> prefix:', prefix
return open_paren + escape(prefix) \
+ regex_opt_inner([s[plen:] for s in strings], '(?:') \
+ close_paren
# is there a suffix?
strings_rev = [s[::-1] for s in strings]
suffix = commonprefix(strings_rev)
if suffix:
slen = len(suffix)
# print '-> suffix:', suffix[::-1]
return open_paren \
+ regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \
+ escape(suffix[::-1]) + close_paren
# recurse on common 1-string prefixes
# print '-> last resort'
return open_paren + \
'|'.join(regex_opt_inner(list(group[1]), '')
for group in groupby(strings, lambda s: s[0] == first[0])) \
+ close_paren
```
language: python
code: identical to original_string above (duplicate omitted)
code_tokens:
[
"def",
"regex_opt_inner",
"(",
"strings",
",",
"open_paren",
")",
":",
"close_paren",
"=",
"open_paren",
"and",
"')'",
"or",
"''",
"# print strings, repr(open_paren)",
"if",
"not",
"strings",
":",
"# print '-> nothing left'",
"return",
"''",
"first",
"=",
"strings",
"[",
"0",
"]",
"if",
"len",
"(",
"strings",
")",
"==",
"1",
":",
"# print '-> only 1 string'",
"return",
"open_paren",
"+",
"escape",
"(",
"first",
")",
"+",
"close_paren",
"if",
"not",
"first",
":",
"# print '-> first string empty'",
"return",
"open_paren",
"+",
"regex_opt_inner",
"(",
"strings",
"[",
"1",
":",
"]",
",",
"'(?:'",
")",
"+",
"'?'",
"+",
"close_paren",
"if",
"len",
"(",
"first",
")",
"==",
"1",
":",
"# multiple one-char strings? make a charset",
"oneletter",
"=",
"[",
"]",
"rest",
"=",
"[",
"]",
"for",
"s",
"in",
"strings",
":",
"if",
"len",
"(",
"s",
")",
"==",
"1",
":",
"oneletter",
".",
"append",
"(",
"s",
")",
"else",
":",
"rest",
".",
"append",
"(",
"s",
")",
"if",
"len",
"(",
"oneletter",
")",
">",
"1",
":",
"# do we have more than one oneletter string?",
"if",
"rest",
":",
"# print '-> 1-character + rest'",
"return",
"open_paren",
"+",
"regex_opt_inner",
"(",
"rest",
",",
"''",
")",
"+",
"'|'",
"+",
"make_charset",
"(",
"oneletter",
")",
"+",
"close_paren",
"# print '-> only 1-character'",
"return",
"open_paren",
"+",
"make_charset",
"(",
"oneletter",
")",
"+",
"close_paren",
"prefix",
"=",
"commonprefix",
"(",
"strings",
")",
"if",
"prefix",
":",
"plen",
"=",
"len",
"(",
"prefix",
")",
"# we have a prefix for all strings",
"# print '-> prefix:', prefix",
"return",
"open_paren",
"+",
"escape",
"(",
"prefix",
")",
"+",
"regex_opt_inner",
"(",
"[",
"s",
"[",
"plen",
":",
"]",
"for",
"s",
"in",
"strings",
"]",
",",
"'(?:'",
")",
"+",
"close_paren",
"# is there a suffix?",
"strings_rev",
"=",
"[",
"s",
"[",
":",
":",
"-",
"1",
"]",
"for",
"s",
"in",
"strings",
"]",
"suffix",
"=",
"commonprefix",
"(",
"strings_rev",
")",
"if",
"suffix",
":",
"slen",
"=",
"len",
"(",
"suffix",
")",
"# print '-> suffix:', suffix[::-1]",
"return",
"open_paren",
"+",
"regex_opt_inner",
"(",
"sorted",
"(",
"s",
"[",
":",
"-",
"slen",
"]",
"for",
"s",
"in",
"strings",
")",
",",
"'(?:'",
")",
"+",
"escape",
"(",
"suffix",
"[",
":",
":",
"-",
"1",
"]",
")",
"+",
"close_paren",
"# recurse on common 1-string prefixes",
"# print '-> last resort'",
"return",
"open_paren",
"+",
"'|'",
".",
"join",
"(",
"regex_opt_inner",
"(",
"list",
"(",
"group",
"[",
"1",
"]",
")",
",",
"''",
")",
"for",
"group",
"in",
"groupby",
"(",
"strings",
",",
"lambda",
"s",
":",
"s",
"[",
"0",
"]",
"==",
"first",
"[",
"0",
"]",
")",
")",
"+",
"close_paren"
]
docstring: Return a regex that matches any string in the sorted list of strings.
docstring_tokens:
[
"Return",
"a",
"regex",
"that",
"matches",
"any",
"string",
"in",
"the",
"sorted",
"list",
"of",
"strings",
"."
]
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/regexopt.py#L27-L80

id: 8,506 | repo: wakatime/wakatime | path: wakatime/packages/pygments/regexopt.py | func_name: regex_opt
original_string:
```python
def regex_opt(strings, prefix='', suffix=''):
"""Return a compiled regex that matches any string in the given list.
The strings to match must be literal strings, not regexes. They will be
regex-escaped.
*prefix* and *suffix* are pre- and appended to the final regex.
"""
strings = sorted(strings)
return prefix + regex_opt_inner(strings, '(') + suffix
```
language: python
code: identical to original_string above (duplicate omitted)
code_tokens:
[
"def",
"regex_opt",
"(",
"strings",
",",
"prefix",
"=",
"''",
",",
"suffix",
"=",
"''",
")",
":",
"strings",
"=",
"sorted",
"(",
"strings",
")",
"return",
"prefix",
"+",
"regex_opt_inner",
"(",
"strings",
",",
"'('",
")",
"+",
"suffix"
]
docstring:
Return a compiled regex that matches any string in the given list.
The strings to match must be literal strings, not regexes. They will be
regex-escaped.
*prefix* and *suffix* are pre- and appended to the final regex.
docstring_tokens:
[
"Return",
"a",
"compiled",
"regex",
"that",
"matches",
"any",
"string",
"in",
"the",
"given",
"list",
"."
]
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/regexopt.py#L83-L92
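
Because regex_opt and regex_opt_inner ship with Pygments itself, the pair from the last two records can be exercised directly. The exact regex text produced may differ between Pygments versions, so this hedged demo only checks matching behaviour.

```python
import re
from pygments.regexopt import regex_opt

# Build one alternation regex for a handful of keywords, anchored on word boundaries.
pattern = regex_opt(['if', 'elif', 'else', 'for'], prefix=r'\b', suffix=r'\b')
keyword_re = re.compile(pattern)

print(bool(keyword_re.search('elif x:')))    # True
print(bool(keyword_re.search('elsewhere')))  # False: the \b suffix rejects a partial match
```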

id: 8,507 | repo: wakatime/wakatime | path: wakatime/packages/pytz/__init__.py | func_name: open_resource
original_string:
```python
def open_resource(name):
"""Open a resource from the zoneinfo subdir for reading.
Uses the pkg_resources module if available and no standard file
found at the calculated location.
"""
name_parts = name.lstrip('/').split('/')
for part in name_parts:
if part == os.path.pardir or os.path.sep in part:
raise ValueError('Bad path segment: %r' % part)
filename = os.path.join(os.path.dirname(__file__),
'zoneinfo', *name_parts)
if not os.path.exists(filename):
# http://bugs.launchpad.net/bugs/383171 - we avoid using this
# unless absolutely necessary to help when a broken version of
# pkg_resources is installed.
try:
from pkg_resources import resource_stream
except ImportError:
resource_stream = None
if resource_stream is not None:
return resource_stream(__name__, 'zoneinfo/' + name)
return open(filename, 'rb')
```
language: python
code: identical to original_string above (duplicate omitted)
code_tokens:
[
"def",
"open_resource",
"(",
"name",
")",
":",
"name_parts",
"=",
"name",
".",
"lstrip",
"(",
"'/'",
")",
".",
"split",
"(",
"'/'",
")",
"for",
"part",
"in",
"name_parts",
":",
"if",
"part",
"==",
"os",
".",
"path",
".",
"pardir",
"or",
"os",
".",
"path",
".",
"sep",
"in",
"part",
":",
"raise",
"ValueError",
"(",
"'Bad path segment: %r'",
"%",
"part",
")",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'zoneinfo'",
",",
"*",
"name_parts",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"# http://bugs.launchpad.net/bugs/383171 - we avoid using this",
"# unless absolutely necessary to help when a broken version of",
"# pkg_resources is installed.",
"try",
":",
"from",
"pkg_resources",
"import",
"resource_stream",
"except",
"ImportError",
":",
"resource_stream",
"=",
"None",
"if",
"resource_stream",
"is",
"not",
"None",
":",
"return",
"resource_stream",
"(",
"__name__",
",",
"'zoneinfo/'",
"+",
"name",
")",
"return",
"open",
"(",
"filename",
",",
"'rb'",
")"
]
docstring:
Open a resource from the zoneinfo subdir for reading.
Uses the pkg_resources module if available and no standard file
found at the calculated location.
docstring_tokens:
[
"Open",
"a",
"resource",
"from",
"the",
"zoneinfo",
"subdir",
"for",
"reading",
"."
]
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pytz/__init__.py#L74-L97
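
open_resource is part of pytz's internal data loading, but it is a plain module-level function, so it can be called directly to read the raw zoneinfo data for a zone. This is a hedged sketch assuming a normal pytz installation with its bundled zoneinfo files on disk.

```python
import pytz

# The compiled zoneinfo files bundled with pytz start with the TZif magic bytes.
with pytz.open_resource('Europe/Berlin') as f:
    print(f.read(4))  # b'TZif'
```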

id: 8,508 | repo: wakatime/wakatime | path: wakatime/packages/pytz/__init__.py | func_name: FixedOffset
original_string:
```python
def FixedOffset(offset, _tzinfos = {}):
"""return a fixed-offset timezone based off a number of minutes.
>>> one = FixedOffset(-330)
>>> one
pytz.FixedOffset(-330)
>>> one.utcoffset(datetime.datetime.now())
datetime.timedelta(-1, 66600)
>>> one.dst(datetime.datetime.now())
datetime.timedelta(0)
>>> two = FixedOffset(1380)
>>> two
pytz.FixedOffset(1380)
>>> two.utcoffset(datetime.datetime.now())
datetime.timedelta(0, 82800)
>>> two.dst(datetime.datetime.now())
datetime.timedelta(0)
The datetime.timedelta must be between the range of -1 and 1 day,
non-inclusive.
>>> FixedOffset(1440)
Traceback (most recent call last):
...
ValueError: ('absolute offset is too large', 1440)
>>> FixedOffset(-1440)
Traceback (most recent call last):
...
ValueError: ('absolute offset is too large', -1440)
An offset of 0 is special-cased to return UTC.
>>> FixedOffset(0) is UTC
True
There should always be only one instance of a FixedOffset per timedelta.
This should be true for multiple creation calls.
>>> FixedOffset(-330) is one
True
>>> FixedOffset(1380) is two
True
It should also be true for pickling.
>>> import pickle
>>> pickle.loads(pickle.dumps(one)) is one
True
>>> pickle.loads(pickle.dumps(two)) is two
True
"""
if offset == 0:
return UTC
info = _tzinfos.get(offset)
if info is None:
# We haven't seen this one before. we need to save it.
# Use setdefault to avoid a race condition and make sure we have
# only one
info = _tzinfos.setdefault(offset, _FixedOffset(offset))
return info
```
language: python
code: identical to original_string above (duplicate omitted)
code_tokens:
[
"def",
"FixedOffset",
"(",
"offset",
",",
"_tzinfos",
"=",
"{",
"}",
")",
":",
"if",
"offset",
"==",
"0",
":",
"return",
"UTC",
"info",
"=",
"_tzinfos",
".",
"get",
"(",
"offset",
")",
"if",
"info",
"is",
"None",
":",
"# We haven't seen this one before. we need to save it.",
"# Use setdefault to avoid a race condition and make sure we have",
"# only one",
"info",
"=",
"_tzinfos",
".",
"setdefault",
"(",
"offset",
",",
"_FixedOffset",
"(",
"offset",
")",
")",
"return",
"info"
]
docstring:
return a fixed-offset timezone based off a number of minutes.
>>> one = FixedOffset(-330)
>>> one
pytz.FixedOffset(-330)
>>> one.utcoffset(datetime.datetime.now())
datetime.timedelta(-1, 66600)
>>> one.dst(datetime.datetime.now())
datetime.timedelta(0)
>>> two = FixedOffset(1380)
>>> two
pytz.FixedOffset(1380)
>>> two.utcoffset(datetime.datetime.now())
datetime.timedelta(0, 82800)
>>> two.dst(datetime.datetime.now())
datetime.timedelta(0)
The datetime.timedelta must be between the range of -1 and 1 day,
non-inclusive.
>>> FixedOffset(1440)
Traceback (most recent call last):
...
ValueError: ('absolute offset is too large', 1440)
>>> FixedOffset(-1440)
Traceback (most recent call last):
...
ValueError: ('absolute offset is too large', -1440)
An offset of 0 is special-cased to return UTC.
>>> FixedOffset(0) is UTC
True
There should always be only one instance of a FixedOffset per timedelta.
This should be true for multiple creation calls.
>>> FixedOffset(-330) is one
True
>>> FixedOffset(1380) is two
True
It should also be true for pickling.
>>> import pickle
>>> pickle.loads(pickle.dumps(one)) is one
True
>>> pickle.loads(pickle.dumps(two)) is two
True
docstring_tokens:
[
"return",
"a",
"fixed",
"-",
"offset",
"timezone",
"based",
"off",
"a",
"number",
"of",
"minutes",
"."
]
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pytz/__init__.py#L415-L479

id: 8,509 | repo: wakatime/wakatime | path: wakatime/arguments.py | func_name: boolean_or_list
original_string:
```python
def boolean_or_list(config_name, args, configs, alternative_names=[]):
"""Get a boolean or list of regexes from args and configs."""
# when argument flag present, set to wildcard regex
for key in alternative_names + [config_name]:
if hasattr(args, key) and getattr(args, key):
setattr(args, config_name, ['.*'])
return
setattr(args, config_name, [])
option = None
alternative_names.insert(0, config_name)
for key in alternative_names:
if configs.has_option('settings', key):
option = configs.get('settings', key)
break
if option is not None:
if option.strip().lower() == 'true':
setattr(args, config_name, ['.*'])
elif option.strip().lower() != 'false':
for pattern in option.split("\n"):
if pattern.strip() != '':
getattr(args, config_name).append(pattern)
```
language: python
code: identical to original_string above (duplicate omitted)
code_tokens:
[
"def",
"boolean_or_list",
"(",
"config_name",
",",
"args",
",",
"configs",
",",
"alternative_names",
"=",
"[",
"]",
")",
":",
"# when argument flag present, set to wildcard regex",
"for",
"key",
"in",
"alternative_names",
"+",
"[",
"config_name",
"]",
":",
"if",
"hasattr",
"(",
"args",
",",
"key",
")",
"and",
"getattr",
"(",
"args",
",",
"key",
")",
":",
"setattr",
"(",
"args",
",",
"config_name",
",",
"[",
"'.*'",
"]",
")",
"return",
"setattr",
"(",
"args",
",",
"config_name",
",",
"[",
"]",
")",
"option",
"=",
"None",
"alternative_names",
".",
"insert",
"(",
"0",
",",
"config_name",
")",
"for",
"key",
"in",
"alternative_names",
":",
"if",
"configs",
".",
"has_option",
"(",
"'settings'",
",",
"key",
")",
":",
"option",
"=",
"configs",
".",
"get",
"(",
"'settings'",
",",
"key",
")",
"break",
"if",
"option",
"is",
"not",
"None",
":",
"if",
"option",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"==",
"'true'",
":",
"setattr",
"(",
"args",
",",
"config_name",
",",
"[",
"'.*'",
"]",
")",
"elif",
"option",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"!=",
"'false'",
":",
"for",
"pattern",
"in",
"option",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"if",
"pattern",
".",
"strip",
"(",
")",
"!=",
"''",
":",
"getattr",
"(",
"args",
",",
"config_name",
")",
".",
"append",
"(",
"pattern",
")"
]
docstring: Get a boolean or list of regexes from args and configs.
docstring_tokens:
[
"Get",
"a",
"boolean",
"or",
"list",
"of",
"regexes",
"from",
"args",
"and",
"configs",
"."
]
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/arguments.py#L340-L364
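
A hedged harness for the function above: the import path simply mirrors the file path shown in this record (wakatime/arguments.py) and is not guaranteed, and the [settings] section name comes from the function body. Note also the mutable default for alternative_names, which the function mutates via insert(0, ...); repeated calls that rely on the default therefore share one growing list.

```python
from argparse import Namespace
from configparser import ConfigParser

from wakatime.arguments import boolean_or_list  # assumed import path

configs = ConfigParser()
configs.read_string(
    "[settings]\n"
    "hide_file_names = secret.*\n"
    "    ^/tmp/.*\n"  # indented continuation line -> newline-separated value
)

args = Namespace(hide_file_names=False)  # flag not set on the command line
boolean_or_list('hide_file_names', args, configs)
print(args.hide_file_names)  # ['secret.*', '^/tmp/.*']
```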

id: 8,510 | repo: wakatime/wakatime | path: wakatime/packages/pygments/formatters/latex.py | func_name: LatexFormatter.get_style_defs
original_string:
```python
def get_style_defs(self, arg=''):
"""
Return the command sequences needed to define the commands
used to format text in the verbatim environment. ``arg`` is ignored.
"""
cp = self.commandprefix
styles = []
for name, definition in iteritems(self.cmd2def):
styles.append(r'\expandafter\def\csname %s@tok@%s\endcsname{%s}' %
(cp, name, definition))
return STYLE_TEMPLATE % {'cp': self.commandprefix,
'styles': '\n'.join(styles)}
```
language: python
code: identical to original_string above (duplicate omitted)
code_tokens:
[
"def",
"get_style_defs",
"(",
"self",
",",
"arg",
"=",
"''",
")",
":",
"cp",
"=",
"self",
".",
"commandprefix",
"styles",
"=",
"[",
"]",
"for",
"name",
",",
"definition",
"in",
"iteritems",
"(",
"self",
".",
"cmd2def",
")",
":",
"styles",
".",
"append",
"(",
"r'\\expandafter\\def\\csname %s@tok@%s\\endcsname{%s}'",
"%",
"(",
"cp",
",",
"name",
",",
"definition",
")",
")",
"return",
"STYLE_TEMPLATE",
"%",
"{",
"'cp'",
":",
"self",
".",
"commandprefix",
",",
"'styles'",
":",
"'\\n'",
".",
"join",
"(",
"styles",
")",
"}"
]
docstring: Return the command sequences needed to define the commands used to format text in the verbatim environment. ``arg`` is ignored.
docstring_tokens:
[
"Return",
"the",
"command",
"sequences",
"needed",
"to",
"define",
"the",
"commands",
"used",
"to",
"format",
"text",
"in",
"the",
"verbatim",
"environment",
".",
"arg",
"is",
"ignored",
"."
]
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/latex.py#L318-L329
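
Since this is the stock Pygments LatexFormatter, the output of get_style_defs can be inspected directly; 'PY' is the default commandprefix, and the number of \csname definitions simply reflects how many token styles the chosen style defines. A hedged check:

```python
from pygments.formatters import LatexFormatter

defs = LatexFormatter(style='default').get_style_defs()
print(r'\expandafter\def\csname PY@tok@' in defs)  # True
print(defs.count(r'\expandafter\def\csname'))      # one line per defined token style
```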

id: 8,511 | repo: wakatime/wakatime | path: wakatime/packages/pygments/formatters/__init__.py | func_name: _fn_matches
original_string:
```python
def _fn_matches(fn, glob):
"""Return whether the supplied file name fn matches pattern filename."""
if glob not in _pattern_cache:
pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
return pattern.match(fn)
return _pattern_cache[glob].match(fn)
```
language: python
code: identical to original_string above (duplicate omitted)
code_tokens:
[
"def",
"_fn_matches",
"(",
"fn",
",",
"glob",
")",
":",
"if",
"glob",
"not",
"in",
"_pattern_cache",
":",
"pattern",
"=",
"_pattern_cache",
"[",
"glob",
"]",
"=",
"re",
".",
"compile",
"(",
"fnmatch",
".",
"translate",
"(",
"glob",
")",
")",
"return",
"pattern",
".",
"match",
"(",
"fn",
")",
"return",
"_pattern_cache",
"[",
"glob",
"]",
".",
"match",
"(",
"fn",
")"
]
docstring: Return whether the supplied file name fn matches pattern filename.
docstring_tokens:
[
"Return",
"whether",
"the",
"supplied",
"file",
"name",
"fn",
"matches",
"pattern",
"filename",
"."
]
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/__init__.py#L29-L34

id: 8,512 | repo: wakatime/wakatime | path: wakatime/packages/pygments/formatters/__init__.py | func_name: get_all_formatters
original_string:
```python
def get_all_formatters():
"""Return a generator for all formatter classes."""
# NB: this returns formatter classes, not info like get_all_lexers().
for info in itervalues(FORMATTERS):
if info[1] not in _formatter_cache:
_load_formatters(info[0])
yield _formatter_cache[info[1]]
for _, formatter in find_plugin_formatters():
yield formatter
```
language: python
code: identical to original_string above (duplicate omitted)
code_tokens:
[
"def",
"get_all_formatters",
"(",
")",
":",
"# NB: this returns formatter classes, not info like get_all_lexers().",
"for",
"info",
"in",
"itervalues",
"(",
"FORMATTERS",
")",
":",
"if",
"info",
"[",
"1",
"]",
"not",
"in",
"_formatter_cache",
":",
"_load_formatters",
"(",
"info",
"[",
"0",
"]",
")",
"yield",
"_formatter_cache",
"[",
"info",
"[",
"1",
"]",
"]",
"for",
"_",
",",
"formatter",
"in",
"find_plugin_formatters",
"(",
")",
":",
"yield",
"formatter"
]
docstring: Return a generator for all formatter classes.
docstring_tokens:
[
"Return",
"a",
"generator",
"for",
"all",
"formatter",
"classes",
"."
]
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/__init__.py#L45-L53
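
A short demo of the lazy loading in the record above: iterating the generator imports formatter modules on demand and yields classes, so names are available without instantiating anything. Exact counts and names depend on the installed Pygments version.

```python
from pygments.formatters import get_all_formatters

names = sorted(cls.name for cls in get_all_formatters())
print(len(names))  # a few dozen in a stock install
print(names[:5])   # version-dependent list of formatter display names
```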

id: 8,513 | repo: wakatime/wakatime | path: wakatime/packages/pygments/formatters/__init__.py | func_name: find_formatter_class
original_string:
```python
def find_formatter_class(alias):
"""Lookup a formatter by alias.
Returns None if not found.
"""
for module_name, name, aliases, _, _ in itervalues(FORMATTERS):
if alias in aliases:
if name not in _formatter_cache:
_load_formatters(module_name)
return _formatter_cache[name]
for _, cls in find_plugin_formatters():
if alias in cls.aliases:
return cls
```
language: python
code: identical to original_string above (duplicate omitted)
code_tokens:
[
"def",
"find_formatter_class",
"(",
"alias",
")",
":",
"for",
"module_name",
",",
"name",
",",
"aliases",
",",
"_",
",",
"_",
"in",
"itervalues",
"(",
"FORMATTERS",
")",
":",
"if",
"alias",
"in",
"aliases",
":",
"if",
"name",
"not",
"in",
"_formatter_cache",
":",
"_load_formatters",
"(",
"module_name",
")",
"return",
"_formatter_cache",
"[",
"name",
"]",
"for",
"_",
",",
"cls",
"in",
"find_plugin_formatters",
"(",
")",
":",
"if",
"alias",
"in",
"cls",
".",
"aliases",
":",
"return",
"cls"
]
docstring: Lookup a formatter by alias. Returns None if not found.
docstring_tokens:
[
"Lookup",
"a",
"formatter",
"by",
"alias",
"."
]
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/__init__.py#L56-L68

id: 8,514 | repo: wakatime/wakatime | path: wakatime/packages/pygments/formatters/__init__.py | func_name: get_formatter_by_name
original_string:
```python
def get_formatter_by_name(_alias, **options):
"""Lookup and instantiate a formatter by alias.
Raises ClassNotFound if not found.
"""
cls = find_formatter_class(_alias)
if cls is None:
raise ClassNotFound("no formatter found for name %r" % _alias)
return cls(**options)
```
language: python
code: identical to original_string above (duplicate omitted)
code_tokens:
[
"def",
"get_formatter_by_name",
"(",
"_alias",
",",
"*",
"*",
"options",
")",
":",
"cls",
"=",
"find_formatter_class",
"(",
"_alias",
")",
"if",
"cls",
"is",
"None",
":",
"raise",
"ClassNotFound",
"(",
"\"no formatter found for name %r\"",
"%",
"_alias",
")",
"return",
"cls",
"(",
"*",
"*",
"options",
")"
]
docstring: Lookup and instantiate a formatter by alias. Raises ClassNotFound if not found.
docstring_tokens:
[
"Lookup",
"and",
"instantiate",
"a",
"formatter",
"by",
"alias",
"."
]
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/__init__.py#L71-L79
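
The two lookup helpers in the last two records differ only in how misses are reported: find_formatter_class returns None, while get_formatter_by_name raises ClassNotFound. A hedged demo against a stock Pygments install:

```python
from pygments.formatters import find_formatter_class, get_formatter_by_name
from pygments.util import ClassNotFound

print(find_formatter_class('html'))           # the HtmlFormatter class
print(find_formatter_class('no-such-alias'))  # None

try:
    get_formatter_by_name('no-such-alias')
except ClassNotFound as exc:
    print(exc)                                # no formatter found for name 'no-such-alias'

formatter = get_formatter_by_name('html', linenos=True)  # options go to the constructor
print(type(formatter).__name__)               # HtmlFormatter
```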

id: 8,515 | repo: wakatime/wakatime | path: wakatime/packages/pygments/formatters/__init__.py | func_name: load_formatter_from_file
original_string:
```python
def load_formatter_from_file(filename, formattername="CustomFormatter",
**options):
"""Load a formatter from a file.
This method expects a file located relative to the current working
directory, which contains a class named CustomFormatter. By default,
it expects the Formatter to be named CustomFormatter; you can specify
your own class name as the second argument to this function.
Users should be very careful with the input, because this method
is equivalent to running eval on the input file.
Raises ClassNotFound if there are any problems importing the Formatter.
.. versionadded:: 2.2
"""
try:
# This empty dict will contain the namespace for the exec'd file
custom_namespace = {}
exec(open(filename, 'rb').read(), custom_namespace)
# Retrieve the class `formattername` from that namespace
if formattername not in custom_namespace:
raise ClassNotFound('no valid %s class found in %s' %
(formattername, filename))
formatter_class = custom_namespace[formattername]
# And finally instantiate it with the options
return formatter_class(**options)
except IOError as err:
raise ClassNotFound('cannot read %s' % filename)
except ClassNotFound as err:
raise
except Exception as err:
raise ClassNotFound('error when loading custom formatter: %s' % err)
```
language: python
code: identical to original_string above (duplicate omitted)
code_tokens:
[
"def",
"load_formatter_from_file",
"(",
"filename",
",",
"formattername",
"=",
"\"CustomFormatter\"",
",",
"*",
"*",
"options",
")",
":",
"try",
":",
"# This empty dict will contain the namespace for the exec'd file",
"custom_namespace",
"=",
"{",
"}",
"exec",
"(",
"open",
"(",
"filename",
",",
"'rb'",
")",
".",
"read",
"(",
")",
",",
"custom_namespace",
")",
"# Retrieve the class `formattername` from that namespace",
"if",
"formattername",
"not",
"in",
"custom_namespace",
":",
"raise",
"ClassNotFound",
"(",
"'no valid %s class found in %s'",
"%",
"(",
"formattername",
",",
"filename",
")",
")",
"formatter_class",
"=",
"custom_namespace",
"[",
"formattername",
"]",
"# And finally instantiate it with the options",
"return",
"formatter_class",
"(",
"*",
"*",
"options",
")",
"except",
"IOError",
"as",
"err",
":",
"raise",
"ClassNotFound",
"(",
"'cannot read %s'",
"%",
"filename",
")",
"except",
"ClassNotFound",
"as",
"err",
":",
"raise",
"except",
"Exception",
"as",
"err",
":",
"raise",
"ClassNotFound",
"(",
"'error when loading custom formatter: %s'",
"%",
"err",
")"
]
docstring:
Load a formatter from a file.
This method expects a file located relative to the current working
directory, which contains a class named CustomFormatter. By default,
it expects the Formatter to be named CustomFormatter; you can specify
your own class name as the second argument to this function.
Users should be very careful with the input, because this method
is equivalent to running eval on the input file.
Raises ClassNotFound if there are any problems importing the Formatter.
.. versionadded:: 2.2
docstring_tokens:
[
"Load",
"a",
"formatter",
"from",
"a",
"file",
"."
]
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/__init__.py#L82-L114

id: 8,516 | repo: wakatime/wakatime | path: wakatime/packages/pygments/formatters/__init__.py | func_name: get_formatter_for_filename
original_string:
```python
def get_formatter_for_filename(fn, **options):
"""Lookup and instantiate a formatter by filename pattern.
Raises ClassNotFound if not found.
"""
fn = basename(fn)
for modname, name, _, filenames, _ in itervalues(FORMATTERS):
for filename in filenames:
if _fn_matches(fn, filename):
if name not in _formatter_cache:
_load_formatters(modname)
return _formatter_cache[name](**options)
for cls in find_plugin_formatters():
for filename in cls.filenames:
if _fn_matches(fn, filename):
return cls(**options)
raise ClassNotFound("no formatter found for file name %r" % fn)
```
language: python
code: identical to original_string above (duplicate omitted)
code_tokens:
[
"def",
"get_formatter_for_filename",
"(",
"fn",
",",
"*",
"*",
"options",
")",
":",
"fn",
"=",
"basename",
"(",
"fn",
")",
"for",
"modname",
",",
"name",
",",
"_",
",",
"filenames",
",",
"_",
"in",
"itervalues",
"(",
"FORMATTERS",
")",
":",
"for",
"filename",
"in",
"filenames",
":",
"if",
"_fn_matches",
"(",
"fn",
",",
"filename",
")",
":",
"if",
"name",
"not",
"in",
"_formatter_cache",
":",
"_load_formatters",
"(",
"modname",
")",
"return",
"_formatter_cache",
"[",
"name",
"]",
"(",
"*",
"*",
"options",
")",
"for",
"cls",
"in",
"find_plugin_formatters",
"(",
")",
":",
"for",
"filename",
"in",
"cls",
".",
"filenames",
":",
"if",
"_fn_matches",
"(",
"fn",
",",
"filename",
")",
":",
"return",
"cls",
"(",
"*",
"*",
"options",
")",
"raise",
"ClassNotFound",
"(",
"\"no formatter found for file name %r\"",
"%",
"fn",
")"
]
docstring: Lookup and instantiate a formatter by filename pattern. Raises ClassNotFound if not found.
docstring_tokens:
[
"Lookup",
"and",
"instantiate",
"a",
"formatter",
"by",
"filename",
"pattern",
"."
]
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/__init__.py#L117-L133
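
The filename variant goes through the same cache but matches against each formatter's filename globs via _fn_matches (record 8,511). A hedged demo:

```python
from pygments.formatters import get_formatter_for_filename
from pygments.util import ClassNotFound

print(type(get_formatter_for_filename('report.html')).__name__)  # HtmlFormatter
print(type(get_formatter_for_filename('notes.tex')).__name__)    # LatexFormatter

try:
    get_formatter_for_filename('notes.unknown-ext')
except ClassNotFound as exc:
    print(exc)  # no formatter found for file name 'notes.unknown-ext'
```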

id: 8,517 | repo: wakatime/wakatime | path: wakatime/packages/pygments/lexers/textfmts.py | func_name: HttpLexer.get_tokens_unprocessed
original_string:
```python
def get_tokens_unprocessed(self, text, stack=('root',)):
"""Reset the content-type state."""
self.content_type = None
return RegexLexer.get_tokens_unprocessed(self, text, stack)
```
language: python
code: identical to original_string above (duplicate omitted)
code_tokens:
[
"def",
"get_tokens_unprocessed",
"(",
"self",
",",
"text",
",",
"stack",
"=",
"(",
"'root'",
",",
")",
")",
":",
"self",
".",
"content_type",
"=",
"None",
"return",
"RegexLexer",
".",
"get_tokens_unprocessed",
"(",
"self",
",",
"text",
",",
"stack",
")"
]
docstring: Reset the content-type state.
docstring_tokens:
[
"Reset",
"the",
"content",
"-",
"type",
"state",
"."
]
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/textfmts.py#L125-L128

id: 8,518 | repo: wakatime/wakatime | path: wakatime/packages/pygments/lexers/__init__.py | func_name: find_lexer_class
original_string:
```python
def find_lexer_class(name):
"""Lookup a lexer class by name.
Return None if not found.
"""
if name in _lexer_cache:
return _lexer_cache[name]
# lookup builtin lexers
for module_name, lname, aliases, _, _ in itervalues(LEXERS):
if name == lname:
_load_lexers(module_name)
return _lexer_cache[name]
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if cls.name == name:
return cls
```
language: python
code: identical to original_string above (duplicate omitted)
code_tokens:
[
"def",
"find_lexer_class",
"(",
"name",
")",
":",
"if",
"name",
"in",
"_lexer_cache",
":",
"return",
"_lexer_cache",
"[",
"name",
"]",
"# lookup builtin lexers",
"for",
"module_name",
",",
"lname",
",",
"aliases",
",",
"_",
",",
"_",
"in",
"itervalues",
"(",
"LEXERS",
")",
":",
"if",
"name",
"==",
"lname",
":",
"_load_lexers",
"(",
"module_name",
")",
"return",
"_lexer_cache",
"[",
"name",
"]",
"# continue with lexers from setuptools entrypoints",
"for",
"cls",
"in",
"find_plugin_lexers",
"(",
")",
":",
"if",
"cls",
".",
"name",
"==",
"name",
":",
"return",
"cls"
]
docstring: Lookup a lexer class by name. Return None if not found.
docstring_tokens:
[
"Lookup",
"a",
"lexer",
"class",
"by",
"name",
"."
]
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/__init__.py#L57-L72

id: 8,519 | repo: wakatime/wakatime | path: wakatime/packages/pygments/lexers/__init__.py | func_name: find_lexer_class_by_name
original_string:
```python
def find_lexer_class_by_name(_alias):
"""Lookup a lexer class by alias.
Like `get_lexer_by_name`, but does not instantiate the class.
.. versionadded:: 2.2
"""
if not _alias:
raise ClassNotFound('no lexer for alias %r found' % _alias)
# lookup builtin lexers
for module_name, name, aliases, _, _ in itervalues(LEXERS):
if _alias.lower() in aliases:
if name not in _lexer_cache:
_load_lexers(module_name)
return _lexer_cache[name]
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if _alias.lower() in cls.aliases:
return cls
raise ClassNotFound('no lexer for alias %r found' % _alias)
```
language: python
code: identical to original_string above (duplicate omitted)
code_tokens:
[
"def",
"find_lexer_class_by_name",
"(",
"_alias",
")",
":",
"if",
"not",
"_alias",
":",
"raise",
"ClassNotFound",
"(",
"'no lexer for alias %r found'",
"%",
"_alias",
")",
"# lookup builtin lexers",
"for",
"module_name",
",",
"name",
",",
"aliases",
",",
"_",
",",
"_",
"in",
"itervalues",
"(",
"LEXERS",
")",
":",
"if",
"_alias",
".",
"lower",
"(",
")",
"in",
"aliases",
":",
"if",
"name",
"not",
"in",
"_lexer_cache",
":",
"_load_lexers",
"(",
"module_name",
")",
"return",
"_lexer_cache",
"[",
"name",
"]",
"# continue with lexers from setuptools entrypoints",
"for",
"cls",
"in",
"find_plugin_lexers",
"(",
")",
":",
"if",
"_alias",
".",
"lower",
"(",
")",
"in",
"cls",
".",
"aliases",
":",
"return",
"cls",
"raise",
"ClassNotFound",
"(",
"'no lexer for alias %r found'",
"%",
"_alias",
")"
]
docstring:
Lookup a lexer class by alias.
Like `get_lexer_by_name`, but does not instantiate the class.
.. versionadded:: 2.2
docstring_tokens:
[
"Lookup",
"a",
"lexer",
"class",
"by",
"alias",
"."
]
sha: 74519ace04e8472f3a3993269963732b9946a01d
url: https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/__init__.py#L75-L94
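
These two lexer lookups mirror the formatter helpers above: one matches the human-readable name and returns None on a miss, the other matches an alias and raises ClassNotFound. A hedged demo (Pygments 2.2 or newer for find_lexer_class_by_name):

```python
from pygments.lexers import find_lexer_class, find_lexer_class_by_name

print(find_lexer_class('Python'))        # the PythonLexer class (lookup by name)
print(find_lexer_class('No Such Name'))  # None

print(find_lexer_class_by_name('py'))    # PythonLexer again (lookup by alias)
```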

id: 8,520 | repo: wakatime/wakatime | path: wakatime/packages/pygments/lexers/__init__.py | func_name: load_lexer_from_file
original_string:
```python
def load_lexer_from_file(filename, lexername="CustomLexer", **options):
"""Load a lexer from a file.
This method expects a file located relative to the current working
directory, which contains a Lexer class. By default, it expects the
Lexer to be name CustomLexer; you can specify your own class name
as the second argument to this function.
Users should be very careful with the input, because this method
is equivalent to running eval on the input file.
Raises ClassNotFound if there are any problems importing the Lexer.
.. versionadded:: 2.2
"""
try:
# This empty dict will contain the namespace for the exec'd file
custom_namespace = {}
exec(open(filename, 'rb').read(), custom_namespace)
# Retrieve the class `lexername` from that namespace
if lexername not in custom_namespace:
raise ClassNotFound('no valid %s class found in %s' %
(lexername, filename))
lexer_class = custom_namespace[lexername]
# And finally instantiate it with the options
return lexer_class(**options)
except IOError as err:
raise ClassNotFound('cannot read %s' % filename)
except ClassNotFound as err:
raise
except Exception as err:
raise ClassNotFound('error when loading custom lexer: %s' % err)
```
language: python
code: identical to original_string above (duplicate omitted)
code_tokens:
[
"def",
"load_lexer_from_file",
"(",
"filename",
",",
"lexername",
"=",
"\"CustomLexer\"",
",",
"*",
"*",
"options",
")",
":",
"try",
":",
"# This empty dict will contain the namespace for the exec'd file",
"custom_namespace",
"=",
"{",
"}",
"exec",
"(",
"open",
"(",
"filename",
",",
"'rb'",
")",
".",
"read",
"(",
")",
",",
"custom_namespace",
")",
"# Retrieve the class `lexername` from that namespace",
"if",
"lexername",
"not",
"in",
"custom_namespace",
":",
"raise",
"ClassNotFound",
"(",
"'no valid %s class found in %s'",
"%",
"(",
"lexername",
",",
"filename",
")",
")",
"lexer_class",
"=",
"custom_namespace",
"[",
"lexername",
"]",
"# And finally instantiate it with the options",
"return",
"lexer_class",
"(",
"*",
"*",
"options",
")",
"except",
"IOError",
"as",
"err",
":",
"raise",
"ClassNotFound",
"(",
"'cannot read %s'",
"%",
"filename",
")",
"except",
"ClassNotFound",
"as",
"err",
":",
"raise",
"except",
"Exception",
"as",
"err",
":",
"raise",
"ClassNotFound",
"(",
"'error when loading custom lexer: %s'",
"%",
"err",
")"
] |
Load a lexer from a file.
This method expects a file located relative to the current working
directory, which contains a Lexer class. By default, it expects the
Lexer to be name CustomLexer; you can specify your own class name
as the second argument to this function.
Users should be very careful with the input, because this method
is equivalent to running eval on the input file.
Raises ClassNotFound if there are any problems importing the Lexer.
.. versionadded:: 2.2
|
[
"Load",
"a",
"lexer",
"from",
"a",
"file",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/__init__.py#L118-L149
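A hedged sketch of load_lexer_from_file; the mylexer.py module described in the comment is hypothetical, and because the loader execs the file verbatim it should only ever be pointed at trusted code.

# mylexer.py (hypothetical, trusted file in the working directory) contains:
#     from pygments.lexer import RegexLexer
#     from pygments.token import Text
#     class CustomLexer(RegexLexer):
#         name = 'Custom'
#         tokens = {'root': [(r'.+\n?', Text)]}
from pygments.lexers import load_lexer_from_file

# CustomLexer is the default class name; pass lexername= for another class,
# and any extra keyword arguments are forwarded to the lexer constructor.
lexer = load_lexer_from_file('mylexer.py', stripall=True)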
|
8,521
|
wakatime/wakatime
|
wakatime/packages/pygments/lexers/__init__.py
|
get_lexer_for_mimetype
|
def get_lexer_for_mimetype(_mime, **options):
"""Get a lexer for a mimetype.
Raises ClassNotFound if not found.
"""
for modname, name, _, _, mimetypes in itervalues(LEXERS):
if _mime in mimetypes:
if name not in _lexer_cache:
_load_lexers(modname)
return _lexer_cache[name](**options)
for cls in find_plugin_lexers():
if _mime in cls.mimetypes:
return cls(**options)
raise ClassNotFound('no lexer for mimetype %r found' % _mime)
|
python
|
def get_lexer_for_mimetype(_mime, **options):
"""Get a lexer for a mimetype.
Raises ClassNotFound if not found.
"""
for modname, name, _, _, mimetypes in itervalues(LEXERS):
if _mime in mimetypes:
if name not in _lexer_cache:
_load_lexers(modname)
return _lexer_cache[name](**options)
for cls in find_plugin_lexers():
if _mime in cls.mimetypes:
return cls(**options)
raise ClassNotFound('no lexer for mimetype %r found' % _mime)
|
[
"def",
"get_lexer_for_mimetype",
"(",
"_mime",
",",
"*",
"*",
"options",
")",
":",
"for",
"modname",
",",
"name",
",",
"_",
",",
"_",
",",
"mimetypes",
"in",
"itervalues",
"(",
"LEXERS",
")",
":",
"if",
"_mime",
"in",
"mimetypes",
":",
"if",
"name",
"not",
"in",
"_lexer_cache",
":",
"_load_lexers",
"(",
"modname",
")",
"return",
"_lexer_cache",
"[",
"name",
"]",
"(",
"*",
"*",
"options",
")",
"for",
"cls",
"in",
"find_plugin_lexers",
"(",
")",
":",
"if",
"_mime",
"in",
"cls",
".",
"mimetypes",
":",
"return",
"cls",
"(",
"*",
"*",
"options",
")",
"raise",
"ClassNotFound",
"(",
"'no lexer for mimetype %r found'",
"%",
"_mime",
")"
] |
Get a lexer for a mimetype.
Raises ClassNotFound if not found.
|
[
"Get",
"a",
"lexer",
"for",
"a",
"mimetype",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/__init__.py#L209-L222
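For contrast with the alias lookup, a short sketch of the mimetype path; text/x-python is a standard Pygments mimetype and is assumed here rather than taken from this record.

from pygments.lexers import get_lexer_for_mimetype
from pygments.util import ClassNotFound

lexer = get_lexer_for_mimetype('text/x-python', stripnl=False)
print(lexer.name)                   # Python

try:
    get_lexer_for_mimetype('application/x-unknown')
except ClassNotFound:
    pass                            # unknown mimetypes raise; None is never returned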
|
8,522
|
wakatime/wakatime
|
wakatime/packages/pygments/lexers/__init__.py
|
_iter_lexerclasses
|
def _iter_lexerclasses(plugins=True):
"""Return an iterator over all lexer classes."""
for key in sorted(LEXERS):
module_name, name = LEXERS[key][:2]
if name not in _lexer_cache:
_load_lexers(module_name)
yield _lexer_cache[name]
if plugins:
for lexer in find_plugin_lexers():
yield lexer
|
python
|
def _iter_lexerclasses(plugins=True):
"""Return an iterator over all lexer classes."""
for key in sorted(LEXERS):
module_name, name = LEXERS[key][:2]
if name not in _lexer_cache:
_load_lexers(module_name)
yield _lexer_cache[name]
if plugins:
for lexer in find_plugin_lexers():
yield lexer
|
[
"def",
"_iter_lexerclasses",
"(",
"plugins",
"=",
"True",
")",
":",
"for",
"key",
"in",
"sorted",
"(",
"LEXERS",
")",
":",
"module_name",
",",
"name",
"=",
"LEXERS",
"[",
"key",
"]",
"[",
":",
"2",
"]",
"if",
"name",
"not",
"in",
"_lexer_cache",
":",
"_load_lexers",
"(",
"module_name",
")",
"yield",
"_lexer_cache",
"[",
"name",
"]",
"if",
"plugins",
":",
"for",
"lexer",
"in",
"find_plugin_lexers",
"(",
")",
":",
"yield",
"lexer"
] |
Return an iterator over all lexer classes.
|
[
"Return",
"an",
"iterator",
"over",
"all",
"lexer",
"classes",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/__init__.py#L225-L234
|
8,523
|
wakatime/wakatime
|
wakatime/stats.py
|
get_file_stats
|
def get_file_stats(file_name, entity_type='file', lineno=None, cursorpos=None,
plugin=None, language=None, local_file=None):
"""Returns a hash of information about the entity."""
language = standardize_language(language, plugin)
stats = {
'language': language,
'dependencies': [],
'lines': None,
'lineno': lineno,
'cursorpos': cursorpos,
}
if entity_type == 'file':
lexer = get_lexer(language)
if not language:
language, lexer = guess_language(file_name, local_file)
parser = DependencyParser(local_file or file_name, lexer)
stats.update({
'language': use_root_language(language, lexer),
'dependencies': parser.parse(),
'lines': number_lines_in_file(local_file or file_name),
})
return stats
|
python
|
def get_file_stats(file_name, entity_type='file', lineno=None, cursorpos=None,
plugin=None, language=None, local_file=None):
"""Returns a hash of information about the entity."""
language = standardize_language(language, plugin)
stats = {
'language': language,
'dependencies': [],
'lines': None,
'lineno': lineno,
'cursorpos': cursorpos,
}
if entity_type == 'file':
lexer = get_lexer(language)
if not language:
language, lexer = guess_language(file_name, local_file)
parser = DependencyParser(local_file or file_name, lexer)
stats.update({
'language': use_root_language(language, lexer),
'dependencies': parser.parse(),
'lines': number_lines_in_file(local_file or file_name),
})
return stats
|
[
"def",
"get_file_stats",
"(",
"file_name",
",",
"entity_type",
"=",
"'file'",
",",
"lineno",
"=",
"None",
",",
"cursorpos",
"=",
"None",
",",
"plugin",
"=",
"None",
",",
"language",
"=",
"None",
",",
"local_file",
"=",
"None",
")",
":",
"language",
"=",
"standardize_language",
"(",
"language",
",",
"plugin",
")",
"stats",
"=",
"{",
"'language'",
":",
"language",
",",
"'dependencies'",
":",
"[",
"]",
",",
"'lines'",
":",
"None",
",",
"'lineno'",
":",
"lineno",
",",
"'cursorpos'",
":",
"cursorpos",
",",
"}",
"if",
"entity_type",
"==",
"'file'",
":",
"lexer",
"=",
"get_lexer",
"(",
"language",
")",
"if",
"not",
"language",
":",
"language",
",",
"lexer",
"=",
"guess_language",
"(",
"file_name",
",",
"local_file",
")",
"parser",
"=",
"DependencyParser",
"(",
"local_file",
"or",
"file_name",
",",
"lexer",
")",
"stats",
".",
"update",
"(",
"{",
"'language'",
":",
"use_root_language",
"(",
"language",
",",
"lexer",
")",
",",
"'dependencies'",
":",
"parser",
".",
"parse",
"(",
")",
",",
"'lines'",
":",
"number_lines_in_file",
"(",
"local_file",
"or",
"file_name",
")",
",",
"}",
")",
"return",
"stats"
] |
Returns a hash of information about the entity.
|
[
"Returns",
"a",
"hash",
"of",
"information",
"about",
"the",
"entity",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/stats.py#L43-L67
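A sketch of calling get_file_stats from code that has the wakatime package on its import path; the file name and the editor plugin string are invented, and app.py is assumed to exist on disk so the line count and dependency scan have something to read.

from wakatime.stats import get_file_stats

# entity_type defaults to 'file'; for any other entity type the function
# only echoes back the metadata and skips the lexer and dependency work.
stats = get_file_stats('app.py',
                       lineno=42,
                       cursorpos=7,
                       plugin='vim/8.2 vim-wakatime/9.0.1')
print(stats['language'], stats['lines'], stats['dependencies'])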
|
8,524
|
wakatime/wakatime
|
wakatime/stats.py
|
guess_language
|
def guess_language(file_name, local_file):
"""Guess lexer and language for a file.
Returns a tuple of (language_str, lexer_obj).
"""
lexer = None
language = get_language_from_extension(file_name)
if language:
lexer = get_lexer(language)
else:
lexer = smart_guess_lexer(file_name, local_file)
if lexer:
language = u(lexer.name)
return language, lexer
|
python
|
def guess_language(file_name, local_file):
"""Guess lexer and language for a file.
Returns a tuple of (language_str, lexer_obj).
"""
lexer = None
language = get_language_from_extension(file_name)
if language:
lexer = get_lexer(language)
else:
lexer = smart_guess_lexer(file_name, local_file)
if lexer:
language = u(lexer.name)
return language, lexer
|
[
"def",
"guess_language",
"(",
"file_name",
",",
"local_file",
")",
":",
"lexer",
"=",
"None",
"language",
"=",
"get_language_from_extension",
"(",
"file_name",
")",
"if",
"language",
":",
"lexer",
"=",
"get_lexer",
"(",
"language",
")",
"else",
":",
"lexer",
"=",
"smart_guess_lexer",
"(",
"file_name",
",",
"local_file",
")",
"if",
"lexer",
":",
"language",
"=",
"u",
"(",
"lexer",
".",
"name",
")",
"return",
"language",
",",
"lexer"
] |
Guess lexer and language for a file.
Returns a tuple of (language_str, lexer_obj).
|
[
"Guess",
"lexer",
"and",
"language",
"for",
"a",
"file",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/stats.py#L70-L86
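A small sketch of guess_language with an invented path. The extension and filename table is consulted first; only when it is inconclusive does the more expensive lexer-based guess run.

from wakatime.stats import guess_language

# go.mod is resolved purely by its filename, so no contents are inspected;
# unrecognised extensions fall through to smart_guess_lexer() instead.
language, lexer = guess_language('/tmp/project/go.mod', '/tmp/project/go.mod')
print(language)        # 'Go'
print(lexer)           # matching lexer object, or None if nothing matched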
|
8,525
|
wakatime/wakatime
|
wakatime/stats.py
|
smart_guess_lexer
|
def smart_guess_lexer(file_name, local_file):
"""Guess Pygments lexer for a file.
Looks for a vim modeline in file contents, then compares the accuracy
of that lexer with a second guess. The second guess looks up all lexers
matching the file name, then runs a text analysis for the best choice.
"""
lexer = None
text = get_file_head(file_name)
lexer1, accuracy1 = guess_lexer_using_filename(local_file or file_name, text)
lexer2, accuracy2 = guess_lexer_using_modeline(text)
if lexer1:
lexer = lexer1
if (lexer2 and accuracy2 and
(not accuracy1 or accuracy2 > accuracy1)):
lexer = lexer2
return lexer
|
python
|
def smart_guess_lexer(file_name, local_file):
"""Guess Pygments lexer for a file.
Looks for a vim modeline in file contents, then compares the accuracy
of that lexer with a second guess. The second guess looks up all lexers
matching the file name, then runs a text analysis for the best choice.
"""
lexer = None
text = get_file_head(file_name)
lexer1, accuracy1 = guess_lexer_using_filename(local_file or file_name, text)
lexer2, accuracy2 = guess_lexer_using_modeline(text)
if lexer1:
lexer = lexer1
if (lexer2 and accuracy2 and
(not accuracy1 or accuracy2 > accuracy1)):
lexer = lexer2
return lexer
|
[
"def",
"smart_guess_lexer",
"(",
"file_name",
",",
"local_file",
")",
":",
"lexer",
"=",
"None",
"text",
"=",
"get_file_head",
"(",
"file_name",
")",
"lexer1",
",",
"accuracy1",
"=",
"guess_lexer_using_filename",
"(",
"local_file",
"or",
"file_name",
",",
"text",
")",
"lexer2",
",",
"accuracy2",
"=",
"guess_lexer_using_modeline",
"(",
"text",
")",
"if",
"lexer1",
":",
"lexer",
"=",
"lexer1",
"if",
"(",
"lexer2",
"and",
"accuracy2",
"and",
"(",
"not",
"accuracy1",
"or",
"accuracy2",
">",
"accuracy1",
")",
")",
":",
"lexer",
"=",
"lexer2",
"return",
"lexer"
] |
Guess Pygments lexer for a file.
Looks for a vim modeline in file contents, then compares the accuracy
of that lexer with a second guess. The second guess looks up all lexers
matching the file name, then runs a text analysis for the best choice.
|
[
"Guess",
"Pygments",
"lexer",
"for",
"a",
"file",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/stats.py#L89-L109
|
8,526
|
wakatime/wakatime
|
wakatime/stats.py
|
guess_lexer_using_filename
|
def guess_lexer_using_filename(file_name, text):
"""Guess lexer for given text, limited to lexers for this file's extension.
Returns a tuple of (lexer, accuracy).
"""
lexer, accuracy = None, None
try:
lexer = custom_pygments_guess_lexer_for_filename(file_name, text)
except SkipHeartbeat as ex:
raise SkipHeartbeat(u(ex))
except:
log.traceback(logging.DEBUG)
if lexer is not None:
try:
accuracy = lexer.analyse_text(text)
except:
log.traceback(logging.DEBUG)
return lexer, accuracy
|
python
|
def guess_lexer_using_filename(file_name, text):
"""Guess lexer for given text, limited to lexers for this file's extension.
Returns a tuple of (lexer, accuracy).
"""
lexer, accuracy = None, None
try:
lexer = custom_pygments_guess_lexer_for_filename(file_name, text)
except SkipHeartbeat as ex:
raise SkipHeartbeat(u(ex))
except:
log.traceback(logging.DEBUG)
if lexer is not None:
try:
accuracy = lexer.analyse_text(text)
except:
log.traceback(logging.DEBUG)
return lexer, accuracy
|
[
"def",
"guess_lexer_using_filename",
"(",
"file_name",
",",
"text",
")",
":",
"lexer",
",",
"accuracy",
"=",
"None",
",",
"None",
"try",
":",
"lexer",
"=",
"custom_pygments_guess_lexer_for_filename",
"(",
"file_name",
",",
"text",
")",
"except",
"SkipHeartbeat",
"as",
"ex",
":",
"raise",
"SkipHeartbeat",
"(",
"u",
"(",
"ex",
")",
")",
"except",
":",
"log",
".",
"traceback",
"(",
"logging",
".",
"DEBUG",
")",
"if",
"lexer",
"is",
"not",
"None",
":",
"try",
":",
"accuracy",
"=",
"lexer",
".",
"analyse_text",
"(",
"text",
")",
"except",
":",
"log",
".",
"traceback",
"(",
"logging",
".",
"DEBUG",
")",
"return",
"lexer",
",",
"accuracy"
] |
Guess lexer for given text, limited to lexers for this file's extension.
Returns a tuple of (lexer, accuracy).
|
[
"Guess",
"lexer",
"for",
"given",
"text",
"limited",
"to",
"lexers",
"for",
"this",
"file",
"s",
"extension",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/stats.py#L112-L133
|
8,527
|
wakatime/wakatime
|
wakatime/stats.py
|
guess_lexer_using_modeline
|
def guess_lexer_using_modeline(text):
"""Guess lexer for given text using Vim modeline.
Returns a tuple of (lexer, accuracy).
"""
lexer, accuracy = None, None
file_type = None
try:
file_type = get_filetype_from_buffer(text)
except: # pragma: nocover
log.traceback(logging.DEBUG)
if file_type is not None:
try:
lexer = get_lexer_by_name(file_type)
except ClassNotFound:
log.traceback(logging.DEBUG)
if lexer is not None:
try:
accuracy = lexer.analyse_text(text)
except: # pragma: nocover
log.traceback(logging.DEBUG)
return lexer, accuracy
|
python
|
def guess_lexer_using_modeline(text):
"""Guess lexer for given text using Vim modeline.
Returns a tuple of (lexer, accuracy).
"""
lexer, accuracy = None, None
file_type = None
try:
file_type = get_filetype_from_buffer(text)
except: # pragma: nocover
log.traceback(logging.DEBUG)
if file_type is not None:
try:
lexer = get_lexer_by_name(file_type)
except ClassNotFound:
log.traceback(logging.DEBUG)
if lexer is not None:
try:
accuracy = lexer.analyse_text(text)
except: # pragma: nocover
log.traceback(logging.DEBUG)
return lexer, accuracy
|
[
"def",
"guess_lexer_using_modeline",
"(",
"text",
")",
":",
"lexer",
",",
"accuracy",
"=",
"None",
",",
"None",
"file_type",
"=",
"None",
"try",
":",
"file_type",
"=",
"get_filetype_from_buffer",
"(",
"text",
")",
"except",
":",
"# pragma: nocover",
"log",
".",
"traceback",
"(",
"logging",
".",
"DEBUG",
")",
"if",
"file_type",
"is",
"not",
"None",
":",
"try",
":",
"lexer",
"=",
"get_lexer_by_name",
"(",
"file_type",
")",
"except",
"ClassNotFound",
":",
"log",
".",
"traceback",
"(",
"logging",
".",
"DEBUG",
")",
"if",
"lexer",
"is",
"not",
"None",
":",
"try",
":",
"accuracy",
"=",
"lexer",
".",
"analyse_text",
"(",
"text",
")",
"except",
":",
"# pragma: nocover",
"log",
".",
"traceback",
"(",
"logging",
".",
"DEBUG",
")",
"return",
"lexer",
",",
"accuracy"
] |
Guess lexer for given text using Vim modeline.
Returns a tuple of (lexer, accuracy).
|
[
"Guess",
"lexer",
"for",
"given",
"text",
"using",
"Vim",
"modeline",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/stats.py#L136-L162
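A sketch of the modeline-based guess with an invented buffer; the accuracy value is whatever the chosen lexer's analyse_text reports, so no exact number is assumed.

from wakatime.stats import guess_lexer_using_modeline

text = '#!/usr/bin/env python\n# vim: set ft=python :\nprint("hi")\n'

# The vim modeline names the filetype explicitly, so this path needs no
# file name at all; without a modeline both values come back as None.
lexer, accuracy = guess_lexer_using_modeline(text)
print(lexer, accuracy)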
|
8,528
|
wakatime/wakatime
|
wakatime/stats.py
|
get_language_from_extension
|
def get_language_from_extension(file_name):
"""Returns a matching language for the given file extension.
When guessed_language is 'C', does not restrict to known file extensions.
"""
filepart, extension = os.path.splitext(file_name)
pathpart, filename = os.path.split(file_name)
if filename == 'go.mod':
return 'Go'
if re.match(r'\.h.*$', extension, re.IGNORECASE) or re.match(r'\.c.*$', extension, re.IGNORECASE):
if os.path.exists(u('{0}{1}').format(u(filepart), u('.c'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.C'))):
return 'C'
if os.path.exists(u('{0}{1}').format(u(filepart), u('.m'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.M'))):
return 'Objective-C'
if os.path.exists(u('{0}{1}').format(u(filepart), u('.mm'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.MM'))):
return 'Objective-C++'
available_extensions = extensions_in_same_folder(file_name)
for ext in CppLexer.filenames:
ext = ext.lstrip('*')
if ext in available_extensions:
return 'C++'
if '.c' in available_extensions:
return 'C'
if re.match(r'\.m$', extension, re.IGNORECASE) and (os.path.exists(u('{0}{1}').format(u(filepart), u('.h'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.H')))):
return 'Objective-C'
if re.match(r'\.mm$', extension, re.IGNORECASE) and (os.path.exists(u('{0}{1}').format(u(filepart), u('.h'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.H')))):
return 'Objective-C++'
return None
|
python
|
def get_language_from_extension(file_name):
"""Returns a matching language for the given file extension.
When guessed_language is 'C', does not restrict to known file extensions.
"""
filepart, extension = os.path.splitext(file_name)
pathpart, filename = os.path.split(file_name)
if filename == 'go.mod':
return 'Go'
if re.match(r'\.h.*$', extension, re.IGNORECASE) or re.match(r'\.c.*$', extension, re.IGNORECASE):
if os.path.exists(u('{0}{1}').format(u(filepart), u('.c'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.C'))):
return 'C'
if os.path.exists(u('{0}{1}').format(u(filepart), u('.m'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.M'))):
return 'Objective-C'
if os.path.exists(u('{0}{1}').format(u(filepart), u('.mm'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.MM'))):
return 'Objective-C++'
available_extensions = extensions_in_same_folder(file_name)
for ext in CppLexer.filenames:
ext = ext.lstrip('*')
if ext in available_extensions:
return 'C++'
if '.c' in available_extensions:
return 'C'
if re.match(r'\.m$', extension, re.IGNORECASE) and (os.path.exists(u('{0}{1}').format(u(filepart), u('.h'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.H')))):
return 'Objective-C'
if re.match(r'\.mm$', extension, re.IGNORECASE) and (os.path.exists(u('{0}{1}').format(u(filepart), u('.h'))) or os.path.exists(u('{0}{1}').format(u(filepart), u('.H')))):
return 'Objective-C++'
return None
|
[
"def",
"get_language_from_extension",
"(",
"file_name",
")",
":",
"filepart",
",",
"extension",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"file_name",
")",
"pathpart",
",",
"filename",
"=",
"os",
".",
"path",
".",
"split",
"(",
"file_name",
")",
"if",
"filename",
"==",
"'go.mod'",
":",
"return",
"'Go'",
"if",
"re",
".",
"match",
"(",
"r'\\.h.*$'",
",",
"extension",
",",
"re",
".",
"IGNORECASE",
")",
"or",
"re",
".",
"match",
"(",
"r'\\.c.*$'",
",",
"extension",
",",
"re",
".",
"IGNORECASE",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"u",
"(",
"'{0}{1}'",
")",
".",
"format",
"(",
"u",
"(",
"filepart",
")",
",",
"u",
"(",
"'.c'",
")",
")",
")",
"or",
"os",
".",
"path",
".",
"exists",
"(",
"u",
"(",
"'{0}{1}'",
")",
".",
"format",
"(",
"u",
"(",
"filepart",
")",
",",
"u",
"(",
"'.C'",
")",
")",
")",
":",
"return",
"'C'",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"u",
"(",
"'{0}{1}'",
")",
".",
"format",
"(",
"u",
"(",
"filepart",
")",
",",
"u",
"(",
"'.m'",
")",
")",
")",
"or",
"os",
".",
"path",
".",
"exists",
"(",
"u",
"(",
"'{0}{1}'",
")",
".",
"format",
"(",
"u",
"(",
"filepart",
")",
",",
"u",
"(",
"'.M'",
")",
")",
")",
":",
"return",
"'Objective-C'",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"u",
"(",
"'{0}{1}'",
")",
".",
"format",
"(",
"u",
"(",
"filepart",
")",
",",
"u",
"(",
"'.mm'",
")",
")",
")",
"or",
"os",
".",
"path",
".",
"exists",
"(",
"u",
"(",
"'{0}{1}'",
")",
".",
"format",
"(",
"u",
"(",
"filepart",
")",
",",
"u",
"(",
"'.MM'",
")",
")",
")",
":",
"return",
"'Objective-C++'",
"available_extensions",
"=",
"extensions_in_same_folder",
"(",
"file_name",
")",
"for",
"ext",
"in",
"CppLexer",
".",
"filenames",
":",
"ext",
"=",
"ext",
".",
"lstrip",
"(",
"'*'",
")",
"if",
"ext",
"in",
"available_extensions",
":",
"return",
"'C++'",
"if",
"'.c'",
"in",
"available_extensions",
":",
"return",
"'C'",
"if",
"re",
".",
"match",
"(",
"r'\\.m$'",
",",
"extension",
",",
"re",
".",
"IGNORECASE",
")",
"and",
"(",
"os",
".",
"path",
".",
"exists",
"(",
"u",
"(",
"'{0}{1}'",
")",
".",
"format",
"(",
"u",
"(",
"filepart",
")",
",",
"u",
"(",
"'.h'",
")",
")",
")",
"or",
"os",
".",
"path",
".",
"exists",
"(",
"u",
"(",
"'{0}{1}'",
")",
".",
"format",
"(",
"u",
"(",
"filepart",
")",
",",
"u",
"(",
"'.H'",
")",
")",
")",
")",
":",
"return",
"'Objective-C'",
"if",
"re",
".",
"match",
"(",
"r'\\.mm$'",
",",
"extension",
",",
"re",
".",
"IGNORECASE",
")",
"and",
"(",
"os",
".",
"path",
".",
"exists",
"(",
"u",
"(",
"'{0}{1}'",
")",
".",
"format",
"(",
"u",
"(",
"filepart",
")",
",",
"u",
"(",
"'.h'",
")",
")",
")",
"or",
"os",
".",
"path",
".",
"exists",
"(",
"u",
"(",
"'{0}{1}'",
")",
".",
"format",
"(",
"u",
"(",
"filepart",
")",
",",
"u",
"(",
"'.H'",
")",
")",
")",
")",
":",
"return",
"'Objective-C++'",
"return",
"None"
] |
Returns a matching language for the given file extension.
When guessed_language is 'C', does not restrict to known file extensions.
|
[
"Returns",
"a",
"matching",
"language",
"for",
"the",
"given",
"file",
"extension",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/stats.py#L165-L204
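The header disambiguation above is easiest to see with sibling files; the paths below are invented (and assumed to exist, since the sibling checks list the folder), and the comments simply restate the branches taken by this record's code.

from wakatime.stats import get_language_from_extension

# go.mod is special-cased by file name alone.
print(get_language_from_extension('/src/tool/go.mod'))        # 'Go'

# For /src/app/main.h the answer depends on what sits next to it:
#   main.c or main.C present              -> 'C'
#   main.m or main.M present              -> 'Objective-C'
#   main.mm or main.MM present            -> 'Objective-C++'
#   any C++ extension in the same folder  -> 'C++'
#   a .c file anywhere in the folder      -> 'C'
# and anything else falls through to None, leaving the caller to guess.
print(get_language_from_extension('/src/app/main.h'))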
|
8,529
|
wakatime/wakatime
|
wakatime/stats.py
|
standardize_language
|
def standardize_language(language, plugin):
"""Maps a string to the equivalent Pygments language.
Returns the standardized language string.
"""
if not language:
return None
# standardize language for this plugin
if plugin:
plugin = plugin.split(' ')[-1].split('/')[0].split('-')[0]
standardized = get_language_from_json(language, plugin)
if standardized is not None:
return standardized
# standardize language against default languages
return get_language_from_json(language, 'default')
|
python
|
def standardize_language(language, plugin):
"""Maps a string to the equivalent Pygments language.
Returns the standardized language string.
"""
if not language:
return None
# standardize language for this plugin
if plugin:
plugin = plugin.split(' ')[-1].split('/')[0].split('-')[0]
standardized = get_language_from_json(language, plugin)
if standardized is not None:
return standardized
# standardize language against default languages
return get_language_from_json(language, 'default')
|
[
"def",
"standardize_language",
"(",
"language",
",",
"plugin",
")",
":",
"if",
"not",
"language",
":",
"return",
"None",
"# standardize language for this plugin",
"if",
"plugin",
":",
"plugin",
"=",
"plugin",
".",
"split",
"(",
"' '",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"'/'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'-'",
")",
"[",
"0",
"]",
"standardized",
"=",
"get_language_from_json",
"(",
"language",
",",
"plugin",
")",
"if",
"standardized",
"is",
"not",
"None",
":",
"return",
"standardized",
"# standardize language against default languages",
"return",
"get_language_from_json",
"(",
"language",
",",
"'default'",
")"
] |
Maps a string to the equivalent Pygments language.
Returns the standardized language string.
|
[
"Maps",
"a",
"string",
"to",
"the",
"equivalent",
"Pygments",
"language",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/stats.py#L228-L245
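A sketch of standardize_language with an invented plugin user-agent; the comment traces how that string is reduced to a per-plugin key for the JSON lookup shown in the next record.

from wakatime.stats import standardize_language

# 'vim/8.2 vim-wakatime/9.0.1' -> last token 'vim-wakatime/9.0.1'
#                              -> before the '/': 'vim-wakatime'
#                              -> before the '-': 'vim'
# so the mapping in languages/vim.json is consulted first, and
# languages/default.json is the fallback.
print(standardize_language('typescript', 'vim/8.2 vim-wakatime/9.0.1'))

print(standardize_language(None, 'vim/8.2 vim-wakatime/9.0.1'))   # None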
|
8,530
|
wakatime/wakatime
|
wakatime/stats.py
|
get_language_from_json
|
def get_language_from_json(language, key):
"""Finds the given language in a json file."""
file_name = os.path.join(
os.path.dirname(__file__),
'languages',
'{0}.json').format(key.lower())
if os.path.exists(file_name):
try:
with open(file_name, 'r', encoding='utf-8') as fh:
languages = json.loads(fh.read())
if languages.get(language.lower()):
return languages[language.lower()]
except:
log.traceback(logging.DEBUG)
return None
|
python
|
def get_language_from_json(language, key):
"""Finds the given language in a json file."""
file_name = os.path.join(
os.path.dirname(__file__),
'languages',
'{0}.json').format(key.lower())
if os.path.exists(file_name):
try:
with open(file_name, 'r', encoding='utf-8') as fh:
languages = json.loads(fh.read())
if languages.get(language.lower()):
return languages[language.lower()]
except:
log.traceback(logging.DEBUG)
return None
|
[
"def",
"get_language_from_json",
"(",
"language",
",",
"key",
")",
":",
"file_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'languages'",
",",
"'{0}.json'",
")",
".",
"format",
"(",
"key",
".",
"lower",
"(",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"file_name",
")",
":",
"try",
":",
"with",
"open",
"(",
"file_name",
",",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"fh",
":",
"languages",
"=",
"json",
".",
"loads",
"(",
"fh",
".",
"read",
"(",
")",
")",
"if",
"languages",
".",
"get",
"(",
"language",
".",
"lower",
"(",
")",
")",
":",
"return",
"languages",
"[",
"language",
".",
"lower",
"(",
")",
"]",
"except",
":",
"log",
".",
"traceback",
"(",
"logging",
".",
"DEBUG",
")",
"return",
"None"
] |
Finds the given language in a json file.
|
[
"Finds",
"the",
"given",
"language",
"in",
"a",
"json",
"file",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/stats.py#L268-L285
|
8,531
|
wakatime/wakatime
|
wakatime/stats.py
|
get_file_head
|
def get_file_head(file_name):
"""Returns the first 512000 bytes of the file's contents."""
text = None
try:
with open(file_name, 'r', encoding='utf-8') as fh:
text = fh.read(512000)
except:
try:
with open(file_name, 'r', encoding=sys.getfilesystemencoding()) as fh:
text = fh.read(512000) # pragma: nocover
except:
log.traceback(logging.DEBUG)
return text
|
python
|
def get_file_head(file_name):
"""Returns the first 512000 bytes of the file's contents."""
text = None
try:
with open(file_name, 'r', encoding='utf-8') as fh:
text = fh.read(512000)
except:
try:
with open(file_name, 'r', encoding=sys.getfilesystemencoding()) as fh:
text = fh.read(512000) # pragma: nocover
except:
log.traceback(logging.DEBUG)
return text
|
[
"def",
"get_file_head",
"(",
"file_name",
")",
":",
"text",
"=",
"None",
"try",
":",
"with",
"open",
"(",
"file_name",
",",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"fh",
":",
"text",
"=",
"fh",
".",
"read",
"(",
"512000",
")",
"except",
":",
"try",
":",
"with",
"open",
"(",
"file_name",
",",
"'r'",
",",
"encoding",
"=",
"sys",
".",
"getfilesystemencoding",
"(",
")",
")",
"as",
"fh",
":",
"text",
"=",
"fh",
".",
"read",
"(",
"512000",
")",
"# pragma: nocover",
"except",
":",
"log",
".",
"traceback",
"(",
"logging",
".",
"DEBUG",
")",
"return",
"text"
] |
Returns the first 512000 bytes of the file's contents.
|
[
"Returns",
"the",
"first",
"512000",
"bytes",
"of",
"the",
"file",
"s",
"contents",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/stats.py#L288-L301
|
8,532
|
wakatime/wakatime
|
wakatime/stats.py
|
custom_pygments_guess_lexer_for_filename
|
def custom_pygments_guess_lexer_for_filename(_fn, _text, **options):
"""Overwrite pygments.lexers.guess_lexer_for_filename to customize the
priority of different lexers based on popularity of languages."""
fn = basename(_fn)
primary = {}
matching_lexers = set()
for lexer in _iter_lexerclasses():
for filename in lexer.filenames:
if _fn_matches(fn, filename):
matching_lexers.add(lexer)
primary[lexer] = True
for filename in lexer.alias_filenames:
if _fn_matches(fn, filename):
matching_lexers.add(lexer)
primary[lexer] = False
if not matching_lexers:
raise ClassNotFound('no lexer for filename %r found' % fn)
if len(matching_lexers) == 1:
return matching_lexers.pop()(**options)
result = []
for lexer in matching_lexers:
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
result.append(customize_lexer_priority(_fn, rv, lexer))
matlab = list(filter(lambda x: x[2].name.lower() == 'matlab', result))
if len(matlab) > 0:
objc = list(filter(lambda x: x[2].name.lower() == 'objective-c', result))
if objc and objc[0][0] == matlab[0][0]:
raise SkipHeartbeat('Skipping because not enough language accuracy.')
def type_sort(t):
# sort by:
# - analyse score
# - is primary filename pattern?
# - priority
# - last resort: class name
return (t[0], primary[t[2]], t[1], t[2].__name__)
result.sort(key=type_sort)
return result[-1][2](**options)
|
python
|
def custom_pygments_guess_lexer_for_filename(_fn, _text, **options):
"""Overwrite pygments.lexers.guess_lexer_for_filename to customize the
priority of different lexers based on popularity of languages."""
fn = basename(_fn)
primary = {}
matching_lexers = set()
for lexer in _iter_lexerclasses():
for filename in lexer.filenames:
if _fn_matches(fn, filename):
matching_lexers.add(lexer)
primary[lexer] = True
for filename in lexer.alias_filenames:
if _fn_matches(fn, filename):
matching_lexers.add(lexer)
primary[lexer] = False
if not matching_lexers:
raise ClassNotFound('no lexer for filename %r found' % fn)
if len(matching_lexers) == 1:
return matching_lexers.pop()(**options)
result = []
for lexer in matching_lexers:
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
result.append(customize_lexer_priority(_fn, rv, lexer))
matlab = list(filter(lambda x: x[2].name.lower() == 'matlab', result))
if len(matlab) > 0:
objc = list(filter(lambda x: x[2].name.lower() == 'objective-c', result))
if objc and objc[0][0] == matlab[0][0]:
raise SkipHeartbeat('Skipping because not enough language accuracy.')
def type_sort(t):
# sort by:
# - analyse score
# - is primary filename pattern?
# - priority
# - last resort: class name
return (t[0], primary[t[2]], t[1], t[2].__name__)
result.sort(key=type_sort)
return result[-1][2](**options)
|
[
"def",
"custom_pygments_guess_lexer_for_filename",
"(",
"_fn",
",",
"_text",
",",
"*",
"*",
"options",
")",
":",
"fn",
"=",
"basename",
"(",
"_fn",
")",
"primary",
"=",
"{",
"}",
"matching_lexers",
"=",
"set",
"(",
")",
"for",
"lexer",
"in",
"_iter_lexerclasses",
"(",
")",
":",
"for",
"filename",
"in",
"lexer",
".",
"filenames",
":",
"if",
"_fn_matches",
"(",
"fn",
",",
"filename",
")",
":",
"matching_lexers",
".",
"add",
"(",
"lexer",
")",
"primary",
"[",
"lexer",
"]",
"=",
"True",
"for",
"filename",
"in",
"lexer",
".",
"alias_filenames",
":",
"if",
"_fn_matches",
"(",
"fn",
",",
"filename",
")",
":",
"matching_lexers",
".",
"add",
"(",
"lexer",
")",
"primary",
"[",
"lexer",
"]",
"=",
"False",
"if",
"not",
"matching_lexers",
":",
"raise",
"ClassNotFound",
"(",
"'no lexer for filename %r found'",
"%",
"fn",
")",
"if",
"len",
"(",
"matching_lexers",
")",
"==",
"1",
":",
"return",
"matching_lexers",
".",
"pop",
"(",
")",
"(",
"*",
"*",
"options",
")",
"result",
"=",
"[",
"]",
"for",
"lexer",
"in",
"matching_lexers",
":",
"rv",
"=",
"lexer",
".",
"analyse_text",
"(",
"_text",
")",
"if",
"rv",
"==",
"1.0",
":",
"return",
"lexer",
"(",
"*",
"*",
"options",
")",
"result",
".",
"append",
"(",
"customize_lexer_priority",
"(",
"_fn",
",",
"rv",
",",
"lexer",
")",
")",
"matlab",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
"[",
"2",
"]",
".",
"name",
".",
"lower",
"(",
")",
"==",
"'matlab'",
",",
"result",
")",
")",
"if",
"len",
"(",
"matlab",
")",
">",
"0",
":",
"objc",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
"[",
"2",
"]",
".",
"name",
".",
"lower",
"(",
")",
"==",
"'objective-c'",
",",
"result",
")",
")",
"if",
"objc",
"and",
"objc",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"matlab",
"[",
"0",
"]",
"[",
"0",
"]",
":",
"raise",
"SkipHeartbeat",
"(",
"'Skipping because not enough language accuracy.'",
")",
"def",
"type_sort",
"(",
"t",
")",
":",
"# sort by:",
"# - analyse score",
"# - is primary filename pattern?",
"# - priority",
"# - last resort: class name",
"return",
"(",
"t",
"[",
"0",
"]",
",",
"primary",
"[",
"t",
"[",
"2",
"]",
"]",
",",
"t",
"[",
"1",
"]",
",",
"t",
"[",
"2",
"]",
".",
"__name__",
")",
"result",
".",
"sort",
"(",
"key",
"=",
"type_sort",
")",
"return",
"result",
"[",
"-",
"1",
"]",
"[",
"2",
"]",
"(",
"*",
"*",
"options",
")"
] |
Overwrite pygments.lexers.guess_lexer_for_filename to customize the
priority of different lexers based on popularity of languages.
|
[
"Overwrite",
"pygments",
".",
"lexers",
".",
"guess_lexer_for_filename",
"to",
"customize",
"the",
"priority",
"of",
"different",
"lexers",
"based",
"on",
"popularity",
"of",
"languages",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/stats.py#L304-L346
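A sketch that exercises the customised guess; the path and source text are invented, plot.m is assumed to exist on disk (the priority tweaks list its folder), and importing SkipHeartbeat from wakatime.exceptions is an assumption about where that exception is defined. The deciding detail is the sort key near the end of the record: (analyse score, primary filename match, customised priority, class name), with the largest tuple winning.

from wakatime.stats import custom_pygments_guess_lexer_for_filename
from wakatime.exceptions import SkipHeartbeat

try:
    # Several lexers claim '*.m'; the analyse scores plus the popularity
    # priorities decide between MATLAB and Objective-C for this file.
    lexer = custom_pygments_guess_lexer_for_filename(
        '/src/app/plot.m', 'x = linspace(0, 1);\nplot(x, x.^2)\n')
    print(lexer.name)
except SkipHeartbeat:
    # Raised when MATLAB and Objective-C tie exactly on accuracy.
    pass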
|
8,533
|
wakatime/wakatime
|
wakatime/stats.py
|
customize_lexer_priority
|
def customize_lexer_priority(file_name, accuracy, lexer):
"""Customize lexer priority"""
priority = lexer.priority
lexer_name = lexer.name.lower().replace('sharp', '#')
if lexer_name in LANGUAGES:
priority = LANGUAGES[lexer_name]
elif lexer_name == 'matlab':
available_extensions = extensions_in_same_folder(file_name)
if '.mat' in available_extensions:
accuracy += 0.01
if '.h' not in available_extensions:
accuracy += 0.01
elif lexer_name == 'objective-c':
available_extensions = extensions_in_same_folder(file_name)
if '.mat' in available_extensions:
accuracy -= 0.01
else:
accuracy += 0.01
if '.h' in available_extensions:
accuracy += 0.01
return (accuracy, priority, lexer)
|
python
|
def customize_lexer_priority(file_name, accuracy, lexer):
"""Customize lexer priority"""
priority = lexer.priority
lexer_name = lexer.name.lower().replace('sharp', '#')
if lexer_name in LANGUAGES:
priority = LANGUAGES[lexer_name]
elif lexer_name == 'matlab':
available_extensions = extensions_in_same_folder(file_name)
if '.mat' in available_extensions:
accuracy += 0.01
if '.h' not in available_extensions:
accuracy += 0.01
elif lexer_name == 'objective-c':
available_extensions = extensions_in_same_folder(file_name)
if '.mat' in available_extensions:
accuracy -= 0.01
else:
accuracy += 0.01
if '.h' in available_extensions:
accuracy += 0.01
return (accuracy, priority, lexer)
|
[
"def",
"customize_lexer_priority",
"(",
"file_name",
",",
"accuracy",
",",
"lexer",
")",
":",
"priority",
"=",
"lexer",
".",
"priority",
"lexer_name",
"=",
"lexer",
".",
"name",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"'sharp'",
",",
"'#'",
")",
"if",
"lexer_name",
"in",
"LANGUAGES",
":",
"priority",
"=",
"LANGUAGES",
"[",
"lexer_name",
"]",
"elif",
"lexer_name",
"==",
"'matlab'",
":",
"available_extensions",
"=",
"extensions_in_same_folder",
"(",
"file_name",
")",
"if",
"'.mat'",
"in",
"available_extensions",
":",
"accuracy",
"+=",
"0.01",
"if",
"'.h'",
"not",
"in",
"available_extensions",
":",
"accuracy",
"+=",
"0.01",
"elif",
"lexer_name",
"==",
"'objective-c'",
":",
"available_extensions",
"=",
"extensions_in_same_folder",
"(",
"file_name",
")",
"if",
"'.mat'",
"in",
"available_extensions",
":",
"accuracy",
"-=",
"0.01",
"else",
":",
"accuracy",
"+=",
"0.01",
"if",
"'.h'",
"in",
"available_extensions",
":",
"accuracy",
"+=",
"0.01",
"return",
"(",
"accuracy",
",",
"priority",
",",
"lexer",
")"
] |
Customize lexer priority
|
[
"Customize",
"lexer",
"priority"
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/stats.py#L349-L372
|
8,534
|
wakatime/wakatime
|
wakatime/stats.py
|
extensions_in_same_folder
|
def extensions_in_same_folder(file_name):
"""Returns a list of file extensions from the same folder as file_name."""
directory = os.path.dirname(file_name)
files = os.listdir(directory)
extensions = list(zip(*map(os.path.splitext, files)))[1]
extensions = set([ext.lower() for ext in extensions])
return extensions
|
python
|
def extensions_in_same_folder(file_name):
"""Returns a list of file extensions from the same folder as file_name."""
directory = os.path.dirname(file_name)
files = os.listdir(directory)
extensions = list(zip(*map(os.path.splitext, files)))[1]
extensions = set([ext.lower() for ext in extensions])
return extensions
|
[
"def",
"extensions_in_same_folder",
"(",
"file_name",
")",
":",
"directory",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"file_name",
")",
"files",
"=",
"os",
".",
"listdir",
"(",
"directory",
")",
"extensions",
"=",
"list",
"(",
"zip",
"(",
"*",
"map",
"(",
"os",
".",
"path",
".",
"splitext",
",",
"files",
")",
")",
")",
"[",
"1",
"]",
"extensions",
"=",
"set",
"(",
"[",
"ext",
".",
"lower",
"(",
")",
"for",
"ext",
"in",
"extensions",
"]",
")",
"return",
"extensions"
] |
Returns a list of file extensions from the same folder as file_name.
|
[
"Returns",
"a",
"list",
"of",
"file",
"extensions",
"from",
"the",
"same",
"folder",
"as",
"file_name",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/stats.py#L375-L382
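A tiny sketch of extensions_in_same_folder with an invented directory; note that despite the docstring the function actually returns a set, and that it lists the file's folder, which therefore has to exist.

from wakatime.stats import extensions_in_same_folder

# With /src/app containing main.c, main.h and Makefile, the call returns
# the lower-cased extensions {'', '.c', '.h'}; the empty string comes from
# Makefile, which has no extension.
print(extensions_in_same_folder('/src/app/main.c'))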
|
8,535
|
wakatime/wakatime
|
wakatime/packages/pygments/lexers/scripting.py
|
RexxLexer.analyse_text
|
def analyse_text(text):
"""
Check for inital comment and patterns that distinguish Rexx from other
C-like languages.
"""
if re.search(r'/\*\**\s*rexx', text, re.IGNORECASE):
# Header matches MVS Rexx requirements, this is certainly a Rexx
# script.
return 1.0
elif text.startswith('/*'):
# Header matches general Rexx requirements; the source code might
# still be any language using C comments such as C++, C# or Java.
lowerText = text.lower()
result = sum(weight
for (pattern, weight) in RexxLexer.PATTERNS_AND_WEIGHTS
if pattern.search(lowerText)) + 0.01
return min(result, 1.0)
|
python
|
def analyse_text(text):
"""
Check for inital comment and patterns that distinguish Rexx from other
C-like languages.
"""
if re.search(r'/\*\**\s*rexx', text, re.IGNORECASE):
# Header matches MVS Rexx requirements, this is certainly a Rexx
# script.
return 1.0
elif text.startswith('/*'):
# Header matches general Rexx requirements; the source code might
# still be any language using C comments such as C++, C# or Java.
lowerText = text.lower()
result = sum(weight
for (pattern, weight) in RexxLexer.PATTERNS_AND_WEIGHTS
if pattern.search(lowerText)) + 0.01
return min(result, 1.0)
|
[
"def",
"analyse_text",
"(",
"text",
")",
":",
"if",
"re",
".",
"search",
"(",
"r'/\\*\\**\\s*rexx'",
",",
"text",
",",
"re",
".",
"IGNORECASE",
")",
":",
"# Header matches MVS Rexx requirements, this is certainly a Rexx",
"# script.",
"return",
"1.0",
"elif",
"text",
".",
"startswith",
"(",
"'/*'",
")",
":",
"# Header matches general Rexx requirements; the source code might",
"# still be any language using C comments such as C++, C# or Java.",
"lowerText",
"=",
"text",
".",
"lower",
"(",
")",
"result",
"=",
"sum",
"(",
"weight",
"for",
"(",
"pattern",
",",
"weight",
")",
"in",
"RexxLexer",
".",
"PATTERNS_AND_WEIGHTS",
"if",
"pattern",
".",
"search",
"(",
"lowerText",
")",
")",
"+",
"0.01",
"return",
"min",
"(",
"result",
",",
"1.0",
")"
] |
Check for inital comment and patterns that distinguish Rexx from other
C-like languages.
|
[
"Check",
"for",
"inital",
"comment",
"and",
"patterns",
"that",
"distinguish",
"Rexx",
"from",
"other",
"C",
"-",
"like",
"languages",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/scripting.py#L801-L817
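A short check of the Rexx heuristic with invented snippets; analyse_text can be called on the class itself because Pygments wraps it into a static analyser.

from pygments.lexers.scripting import RexxLexer

# The MVS-style header is treated as conclusive.
print(RexxLexer.analyse_text('/* REXX */\nsay "hello"\n'))          # 1.0

# A bare C-style comment only earns a small, pattern-weighted score.
print(RexxLexer.analyse_text('/* just a comment */\nint x = 0;\n'))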
|
8,536
|
wakatime/wakatime
|
wakatime/packages/pygments/lexers/scripting.py
|
EasytrieveLexer.analyse_text
|
def analyse_text(text):
"""
Perform a structural analysis for basic Easytrieve constructs.
"""
result = 0.0
lines = text.split('\n')
hasEndProc = False
hasHeaderComment = False
hasFile = False
hasJob = False
hasProc = False
hasParm = False
hasReport = False
def isCommentLine(line):
return EasytrieveLexer._COMMENT_LINE_REGEX.match(lines[0]) is not None
def isEmptyLine(line):
return not bool(line.strip())
# Remove possible empty lines and header comments.
while lines and (isEmptyLine(lines[0]) or isCommentLine(lines[0])):
if not isEmptyLine(lines[0]):
hasHeaderComment = True
del lines[0]
if EasytrieveLexer._MACRO_HEADER_REGEX.match(lines[0]):
# Looks like an Easytrieve macro.
result = 0.4
if hasHeaderComment:
result += 0.4
else:
# Scan the source for lines starting with indicators.
for line in lines:
words = line.split()
if (len(words) >= 2):
firstWord = words[0]
if not hasReport:
if not hasJob:
if not hasFile:
if not hasParm:
if firstWord == 'PARM':
hasParm = True
if firstWord == 'FILE':
hasFile = True
if firstWord == 'JOB':
hasJob = True
elif firstWord == 'PROC':
hasProc = True
elif firstWord == 'END-PROC':
hasEndProc = True
elif firstWord == 'REPORT':
hasReport = True
# Weight the findings.
if hasJob and (hasProc == hasEndProc):
if hasHeaderComment:
result += 0.1
if hasParm:
if hasProc:
# Found PARM, JOB and PROC/END-PROC:
# pretty sure this is Easytrieve.
result += 0.8
else:
# Found PARAM and JOB: probably this is Easytrieve
result += 0.5
else:
# Found JOB and possibly other keywords: might be Easytrieve
result += 0.11
if hasParm:
# Note: PARAM is not a proper English word, so this is
# regarded a much better indicator for Easytrieve than
# the other words.
result += 0.2
if hasFile:
result += 0.01
if hasReport:
result += 0.01
assert 0.0 <= result <= 1.0
return result
|
python
|
def analyse_text(text):
"""
Perform a structural analysis for basic Easytrieve constructs.
"""
result = 0.0
lines = text.split('\n')
hasEndProc = False
hasHeaderComment = False
hasFile = False
hasJob = False
hasProc = False
hasParm = False
hasReport = False
def isCommentLine(line):
return EasytrieveLexer._COMMENT_LINE_REGEX.match(lines[0]) is not None
def isEmptyLine(line):
return not bool(line.strip())
# Remove possible empty lines and header comments.
while lines and (isEmptyLine(lines[0]) or isCommentLine(lines[0])):
if not isEmptyLine(lines[0]):
hasHeaderComment = True
del lines[0]
if EasytrieveLexer._MACRO_HEADER_REGEX.match(lines[0]):
# Looks like an Easytrieve macro.
result = 0.4
if hasHeaderComment:
result += 0.4
else:
# Scan the source for lines starting with indicators.
for line in lines:
words = line.split()
if (len(words) >= 2):
firstWord = words[0]
if not hasReport:
if not hasJob:
if not hasFile:
if not hasParm:
if firstWord == 'PARM':
hasParm = True
if firstWord == 'FILE':
hasFile = True
if firstWord == 'JOB':
hasJob = True
elif firstWord == 'PROC':
hasProc = True
elif firstWord == 'END-PROC':
hasEndProc = True
elif firstWord == 'REPORT':
hasReport = True
# Weight the findings.
if hasJob and (hasProc == hasEndProc):
if hasHeaderComment:
result += 0.1
if hasParm:
if hasProc:
# Found PARM, JOB and PROC/END-PROC:
# pretty sure this is Easytrieve.
result += 0.8
else:
# Found PARAM and JOB: probably this is Easytrieve
result += 0.5
else:
# Found JOB and possibly other keywords: might be Easytrieve
result += 0.11
if hasParm:
# Note: PARAM is not a proper English word, so this is
# regarded a much better indicator for Easytrieve than
# the other words.
result += 0.2
if hasFile:
result += 0.01
if hasReport:
result += 0.01
assert 0.0 <= result <= 1.0
return result
|
[
"def",
"analyse_text",
"(",
"text",
")",
":",
"result",
"=",
"0.0",
"lines",
"=",
"text",
".",
"split",
"(",
"'\\n'",
")",
"hasEndProc",
"=",
"False",
"hasHeaderComment",
"=",
"False",
"hasFile",
"=",
"False",
"hasJob",
"=",
"False",
"hasProc",
"=",
"False",
"hasParm",
"=",
"False",
"hasReport",
"=",
"False",
"def",
"isCommentLine",
"(",
"line",
")",
":",
"return",
"EasytrieveLexer",
".",
"_COMMENT_LINE_REGEX",
".",
"match",
"(",
"lines",
"[",
"0",
"]",
")",
"is",
"not",
"None",
"def",
"isEmptyLine",
"(",
"line",
")",
":",
"return",
"not",
"bool",
"(",
"line",
".",
"strip",
"(",
")",
")",
"# Remove possible empty lines and header comments.",
"while",
"lines",
"and",
"(",
"isEmptyLine",
"(",
"lines",
"[",
"0",
"]",
")",
"or",
"isCommentLine",
"(",
"lines",
"[",
"0",
"]",
")",
")",
":",
"if",
"not",
"isEmptyLine",
"(",
"lines",
"[",
"0",
"]",
")",
":",
"hasHeaderComment",
"=",
"True",
"del",
"lines",
"[",
"0",
"]",
"if",
"EasytrieveLexer",
".",
"_MACRO_HEADER_REGEX",
".",
"match",
"(",
"lines",
"[",
"0",
"]",
")",
":",
"# Looks like an Easytrieve macro.",
"result",
"=",
"0.4",
"if",
"hasHeaderComment",
":",
"result",
"+=",
"0.4",
"else",
":",
"# Scan the source for lines starting with indicators.",
"for",
"line",
"in",
"lines",
":",
"words",
"=",
"line",
".",
"split",
"(",
")",
"if",
"(",
"len",
"(",
"words",
")",
">=",
"2",
")",
":",
"firstWord",
"=",
"words",
"[",
"0",
"]",
"if",
"not",
"hasReport",
":",
"if",
"not",
"hasJob",
":",
"if",
"not",
"hasFile",
":",
"if",
"not",
"hasParm",
":",
"if",
"firstWord",
"==",
"'PARM'",
":",
"hasParm",
"=",
"True",
"if",
"firstWord",
"==",
"'FILE'",
":",
"hasFile",
"=",
"True",
"if",
"firstWord",
"==",
"'JOB'",
":",
"hasJob",
"=",
"True",
"elif",
"firstWord",
"==",
"'PROC'",
":",
"hasProc",
"=",
"True",
"elif",
"firstWord",
"==",
"'END-PROC'",
":",
"hasEndProc",
"=",
"True",
"elif",
"firstWord",
"==",
"'REPORT'",
":",
"hasReport",
"=",
"True",
"# Weight the findings.",
"if",
"hasJob",
"and",
"(",
"hasProc",
"==",
"hasEndProc",
")",
":",
"if",
"hasHeaderComment",
":",
"result",
"+=",
"0.1",
"if",
"hasParm",
":",
"if",
"hasProc",
":",
"# Found PARM, JOB and PROC/END-PROC:",
"# pretty sure this is Easytrieve.",
"result",
"+=",
"0.8",
"else",
":",
"# Found PARAM and JOB: probably this is Easytrieve",
"result",
"+=",
"0.5",
"else",
":",
"# Found JOB and possibly other keywords: might be Easytrieve",
"result",
"+=",
"0.11",
"if",
"hasParm",
":",
"# Note: PARAM is not a proper English word, so this is",
"# regarded a much better indicator for Easytrieve than",
"# the other words.",
"result",
"+=",
"0.2",
"if",
"hasFile",
":",
"result",
"+=",
"0.01",
"if",
"hasReport",
":",
"result",
"+=",
"0.01",
"assert",
"0.0",
"<=",
"result",
"<=",
"1.0",
"return",
"result"
] |
Perform a structural analysis for basic Easytrieve constructs.
|
[
"Perform",
"a",
"structural",
"analysis",
"for",
"basic",
"Easytrieve",
"constructs",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/scripting.py#L1059-L1138
|
8,537
|
wakatime/wakatime
|
wakatime/packages/pygments/lexers/scripting.py
|
JclLexer.analyse_text
|
def analyse_text(text):
"""
Recognize JCL job by header.
"""
result = 0.0
lines = text.split('\n')
if len(lines) > 0:
if JclLexer._JOB_HEADER_PATTERN.match(lines[0]):
result = 1.0
assert 0.0 <= result <= 1.0
return result
|
python
|
def analyse_text(text):
"""
Recognize JCL job by header.
"""
result = 0.0
lines = text.split('\n')
if len(lines) > 0:
if JclLexer._JOB_HEADER_PATTERN.match(lines[0]):
result = 1.0
assert 0.0 <= result <= 1.0
return result
|
[
"def",
"analyse_text",
"(",
"text",
")",
":",
"result",
"=",
"0.0",
"lines",
"=",
"text",
".",
"split",
"(",
"'\\n'",
")",
"if",
"len",
"(",
"lines",
")",
">",
"0",
":",
"if",
"JclLexer",
".",
"_JOB_HEADER_PATTERN",
".",
"match",
"(",
"lines",
"[",
"0",
"]",
")",
":",
"result",
"=",
"1.0",
"assert",
"0.0",
"<=",
"result",
"<=",
"1.0",
"return",
"result"
] |
Recognize JCL job by header.
|
[
"Recognize",
"JCL",
"job",
"by",
"header",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/scripting.py#L1212-L1222
|
8,538
|
wakatime/wakatime
|
wakatime/packages/pygments/lexers/asm.py
|
_objdump_lexer_tokens
|
def _objdump_lexer_tokens(asm_lexer):
"""
Common objdump lexer tokens to wrap an ASM lexer.
"""
hex_re = r'[0-9A-Za-z]'
return {
'root': [
# File name & format:
('(.*?)(:)( +file format )(.*?)$',
bygroups(Name.Label, Punctuation, Text, String)),
# Section header
('(Disassembly of section )(.*?)(:)$',
bygroups(Text, Name.Label, Punctuation)),
# Function labels
# (With offset)
('('+hex_re+'+)( )(<)(.*?)([-+])(0[xX][A-Za-z0-9]+)(>:)$',
bygroups(Number.Hex, Text, Punctuation, Name.Function,
Punctuation, Number.Hex, Punctuation)),
# (Without offset)
('('+hex_re+'+)( )(<)(.*?)(>:)$',
bygroups(Number.Hex, Text, Punctuation, Name.Function,
Punctuation)),
# Code line with disassembled instructions
('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)( *\t)([a-zA-Z].*?)$',
bygroups(Text, Name.Label, Text, Number.Hex, Text,
using(asm_lexer))),
# Code line with ascii
('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)( *)(.*?)$',
bygroups(Text, Name.Label, Text, Number.Hex, Text, String)),
# Continued code line, only raw opcodes without disassembled
# instruction
('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)$',
bygroups(Text, Name.Label, Text, Number.Hex)),
# Skipped a few bytes
(r'\t\.\.\.$', Text),
# Relocation line
# (With offset)
(r'(\t\t\t)('+hex_re+r'+:)( )([^\t]+)(\t)(.*?)([-+])(0x'+hex_re+'+)$',
bygroups(Text, Name.Label, Text, Name.Property, Text,
Name.Constant, Punctuation, Number.Hex)),
# (Without offset)
(r'(\t\t\t)('+hex_re+r'+:)( )([^\t]+)(\t)(.*?)$',
bygroups(Text, Name.Label, Text, Name.Property, Text,
Name.Constant)),
(r'[^\n]+\n', Other)
]
}
|
python
|
def _objdump_lexer_tokens(asm_lexer):
"""
Common objdump lexer tokens to wrap an ASM lexer.
"""
hex_re = r'[0-9A-Za-z]'
return {
'root': [
# File name & format:
('(.*?)(:)( +file format )(.*?)$',
bygroups(Name.Label, Punctuation, Text, String)),
# Section header
('(Disassembly of section )(.*?)(:)$',
bygroups(Text, Name.Label, Punctuation)),
# Function labels
# (With offset)
('('+hex_re+'+)( )(<)(.*?)([-+])(0[xX][A-Za-z0-9]+)(>:)$',
bygroups(Number.Hex, Text, Punctuation, Name.Function,
Punctuation, Number.Hex, Punctuation)),
# (Without offset)
('('+hex_re+'+)( )(<)(.*?)(>:)$',
bygroups(Number.Hex, Text, Punctuation, Name.Function,
Punctuation)),
# Code line with disassembled instructions
('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)( *\t)([a-zA-Z].*?)$',
bygroups(Text, Name.Label, Text, Number.Hex, Text,
using(asm_lexer))),
# Code line with ascii
('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)( *)(.*?)$',
bygroups(Text, Name.Label, Text, Number.Hex, Text, String)),
# Continued code line, only raw opcodes without disassembled
# instruction
('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)$',
bygroups(Text, Name.Label, Text, Number.Hex)),
# Skipped a few bytes
(r'\t\.\.\.$', Text),
# Relocation line
# (With offset)
(r'(\t\t\t)('+hex_re+r'+:)( )([^\t]+)(\t)(.*?)([-+])(0x'+hex_re+'+)$',
bygroups(Text, Name.Label, Text, Name.Property, Text,
Name.Constant, Punctuation, Number.Hex)),
# (Without offset)
(r'(\t\t\t)('+hex_re+r'+:)( )([^\t]+)(\t)(.*?)$',
bygroups(Text, Name.Label, Text, Name.Property, Text,
Name.Constant)),
(r'[^\n]+\n', Other)
]
}
|
[
"def",
"_objdump_lexer_tokens",
"(",
"asm_lexer",
")",
":",
"hex_re",
"=",
"r'[0-9A-Za-z]'",
"return",
"{",
"'root'",
":",
"[",
"# File name & format:",
"(",
"'(.*?)(:)( +file format )(.*?)$'",
",",
"bygroups",
"(",
"Name",
".",
"Label",
",",
"Punctuation",
",",
"Text",
",",
"String",
")",
")",
",",
"# Section header",
"(",
"'(Disassembly of section )(.*?)(:)$'",
",",
"bygroups",
"(",
"Text",
",",
"Name",
".",
"Label",
",",
"Punctuation",
")",
")",
",",
"# Function labels",
"# (With offset)",
"(",
"'('",
"+",
"hex_re",
"+",
"'+)( )(<)(.*?)([-+])(0[xX][A-Za-z0-9]+)(>:)$'",
",",
"bygroups",
"(",
"Number",
".",
"Hex",
",",
"Text",
",",
"Punctuation",
",",
"Name",
".",
"Function",
",",
"Punctuation",
",",
"Number",
".",
"Hex",
",",
"Punctuation",
")",
")",
",",
"# (Without offset)",
"(",
"'('",
"+",
"hex_re",
"+",
"'+)( )(<)(.*?)(>:)$'",
",",
"bygroups",
"(",
"Number",
".",
"Hex",
",",
"Text",
",",
"Punctuation",
",",
"Name",
".",
"Function",
",",
"Punctuation",
")",
")",
",",
"# Code line with disassembled instructions",
"(",
"'( *)('",
"+",
"hex_re",
"+",
"r'+:)(\\t)((?:'",
"+",
"hex_re",
"+",
"hex_re",
"+",
"' )+)( *\\t)([a-zA-Z].*?)$'",
",",
"bygroups",
"(",
"Text",
",",
"Name",
".",
"Label",
",",
"Text",
",",
"Number",
".",
"Hex",
",",
"Text",
",",
"using",
"(",
"asm_lexer",
")",
")",
")",
",",
"# Code line with ascii",
"(",
"'( *)('",
"+",
"hex_re",
"+",
"r'+:)(\\t)((?:'",
"+",
"hex_re",
"+",
"hex_re",
"+",
"' )+)( *)(.*?)$'",
",",
"bygroups",
"(",
"Text",
",",
"Name",
".",
"Label",
",",
"Text",
",",
"Number",
".",
"Hex",
",",
"Text",
",",
"String",
")",
")",
",",
"# Continued code line, only raw opcodes without disassembled",
"# instruction",
"(",
"'( *)('",
"+",
"hex_re",
"+",
"r'+:)(\\t)((?:'",
"+",
"hex_re",
"+",
"hex_re",
"+",
"' )+)$'",
",",
"bygroups",
"(",
"Text",
",",
"Name",
".",
"Label",
",",
"Text",
",",
"Number",
".",
"Hex",
")",
")",
",",
"# Skipped a few bytes",
"(",
"r'\\t\\.\\.\\.$'",
",",
"Text",
")",
",",
"# Relocation line",
"# (With offset)",
"(",
"r'(\\t\\t\\t)('",
"+",
"hex_re",
"+",
"r'+:)( )([^\\t]+)(\\t)(.*?)([-+])(0x'",
"+",
"hex_re",
"+",
"'+)$'",
",",
"bygroups",
"(",
"Text",
",",
"Name",
".",
"Label",
",",
"Text",
",",
"Name",
".",
"Property",
",",
"Text",
",",
"Name",
".",
"Constant",
",",
"Punctuation",
",",
"Number",
".",
"Hex",
")",
")",
",",
"# (Without offset)",
"(",
"r'(\\t\\t\\t)('",
"+",
"hex_re",
"+",
"r'+:)( )([^\\t]+)(\\t)(.*?)$'",
",",
"bygroups",
"(",
"Text",
",",
"Name",
".",
"Label",
",",
"Text",
",",
"Name",
".",
"Property",
",",
"Text",
",",
"Name",
".",
"Constant",
")",
")",
",",
"(",
"r'[^\\n]+\\n'",
",",
"Other",
")",
"]",
"}"
] |
Common objdump lexer tokens to wrap an ASM lexer.
|
[
"Common",
"objdump",
"lexer",
"tokens",
"to",
"wrap",
"an",
"ASM",
"lexer",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/asm.py#L100-L146
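These token rules are what the objdump wrapper lexers are built from; below is a hedged sketch of driving one of them through the ordinary highlight API, with an invented scrap of disassembly.

from pygments import highlight
from pygments.lexers.asm import ObjdumpLexer
from pygments.formatters import TerminalFormatter

dump = (
    'hello.o:     file format elf64-x86-64\n'
    '\n'
    'Disassembly of section .text:\n'
    '\n'
    '0000000000000000 <main>:\n'
    '   0:\t55                   \tpush   %rbp\n'
)

# ObjdumpLexer wraps a GAS lexer with the token rules defined above.
print(highlight(dump, ObjdumpLexer(), TerminalFormatter()))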
|
8,539
|
wakatime/wakatime
|
wakatime/packages/urllib3/util/selectors.py
|
BaseSelector.get_key
|
def get_key(self, fileobj):
""" Return the key associated with a registered file object. """
mapping = self.get_map()
if mapping is None:
raise RuntimeError("Selector is closed")
try:
return mapping[fileobj]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
|
python
|
def get_key(self, fileobj):
""" Return the key associated with a registered file object. """
mapping = self.get_map()
if mapping is None:
raise RuntimeError("Selector is closed")
try:
return mapping[fileobj]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
|
[
"def",
"get_key",
"(",
"self",
",",
"fileobj",
")",
":",
"mapping",
"=",
"self",
".",
"get_map",
"(",
")",
"if",
"mapping",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"\"Selector is closed\"",
")",
"try",
":",
"return",
"mapping",
"[",
"fileobj",
"]",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"\"{0!r} is not registered\"",
".",
"format",
"(",
"fileobj",
")",
")"
] |
Return the key associated with a registered file object.
|
[
"Return",
"the",
"key",
"associated",
"with",
"a",
"registered",
"file",
"object",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/urllib3/util/selectors.py#L256-L264
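This record is urllib3's vendored backport of the selectors API; the sketch below uses the standard-library selectors module, which exposes the same get_key behaviour, with a socket pair standing in for any registrable file object.

import selectors
import socket

sel = selectors.DefaultSelector()
a, b = socket.socketpair()

key = sel.register(a, selectors.EVENT_READ, data='demo')
assert sel.get_key(a) == key        # same SelectorKey that register() returned

try:
    sel.get_key(b)                  # never registered
except KeyError as exc:
    print(exc)

sel.close()
a.close()
b.close()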
|
8,540
|
wakatime/wakatime
|
wakatime/packages/pygments/modeline.py
|
get_filetype_from_buffer
|
def get_filetype_from_buffer(buf, max_lines=5):
"""
Scan the buffer for modelines and return filetype if one is found.
"""
lines = buf.splitlines()
for l in lines[-1:-max_lines-1:-1]:
ret = get_filetype_from_line(l)
if ret:
return ret
for i in range(max_lines, -1, -1):
if i < len(lines):
ret = get_filetype_from_line(lines[i])
if ret:
return ret
return None
|
python
|
def get_filetype_from_buffer(buf, max_lines=5):
"""
Scan the buffer for modelines and return filetype if one is found.
"""
lines = buf.splitlines()
for l in lines[-1:-max_lines-1:-1]:
ret = get_filetype_from_line(l)
if ret:
return ret
for i in range(max_lines, -1, -1):
if i < len(lines):
ret = get_filetype_from_line(lines[i])
if ret:
return ret
return None
|
[
"def",
"get_filetype_from_buffer",
"(",
"buf",
",",
"max_lines",
"=",
"5",
")",
":",
"lines",
"=",
"buf",
".",
"splitlines",
"(",
")",
"for",
"l",
"in",
"lines",
"[",
"-",
"1",
":",
"-",
"max_lines",
"-",
"1",
":",
"-",
"1",
"]",
":",
"ret",
"=",
"get_filetype_from_line",
"(",
"l",
")",
"if",
"ret",
":",
"return",
"ret",
"for",
"i",
"in",
"range",
"(",
"max_lines",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"if",
"i",
"<",
"len",
"(",
"lines",
")",
":",
"ret",
"=",
"get_filetype_from_line",
"(",
"lines",
"[",
"i",
"]",
")",
"if",
"ret",
":",
"return",
"ret",
"return",
"None"
] |
Scan the buffer for modelines and return filetype if one is found.
|
[
"Scan",
"the",
"buffer",
"for",
"modelines",
"and",
"return",
"filetype",
"if",
"one",
"is",
"found",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/modeline.py#L29-L44
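A minimal, self-contained sketch of the scan order implemented above: the last max_lines lines are checked first (bottom-up), then the first lines. The modeline regex and get_filetype_from_line below are simplified stand-ins, not Pygments' actual implementation.

# Standalone sketch of the scan order used above: check the last
# `max_lines` lines first, then fall back to the first lines.
import re

_MODELINE = re.compile(r'\bvim:\s*(?:set\s+)?(?:ft|filetype)=(\w+)')

def get_filetype_from_line(line):
    m = _MODELINE.search(line)
    return m.group(1) if m else None

def get_filetype_from_buffer(buf, max_lines=5):
    lines = buf.splitlines()
    for l in lines[-1:-max_lines - 1:-1]:      # tail, scanned bottom-up
        ret = get_filetype_from_line(l)
        if ret:
            return ret
    for i in range(max_lines, -1, -1):         # then the head
        if i < len(lines):
            ret = get_filetype_from_line(lines[i])
            if ret:
                return ret
    return None

print(get_filetype_from_buffer("#!/bin/sh\necho hi\n# vim: set ft=sh:\n"))  # -> sh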
|
8,541
|
wakatime/wakatime
|
wakatime/packages/pygments/formatters/img.py
|
FontManager.get_font
|
def get_font(self, bold, oblique):
"""
Get the font based on bold and italic flags.
"""
if bold and oblique:
return self.fonts['BOLDITALIC']
elif bold:
return self.fonts['BOLD']
elif oblique:
return self.fonts['ITALIC']
else:
return self.fonts['NORMAL']
|
python
|
def get_font(self, bold, oblique):
"""
Get the font based on bold and italic flags.
"""
if bold and oblique:
return self.fonts['BOLDITALIC']
elif bold:
return self.fonts['BOLD']
elif oblique:
return self.fonts['ITALIC']
else:
return self.fonts['NORMAL']
|
[
"def",
"get_font",
"(",
"self",
",",
"bold",
",",
"oblique",
")",
":",
"if",
"bold",
"and",
"oblique",
":",
"return",
"self",
".",
"fonts",
"[",
"'BOLDITALIC'",
"]",
"elif",
"bold",
":",
"return",
"self",
".",
"fonts",
"[",
"'BOLD'",
"]",
"elif",
"oblique",
":",
"return",
"self",
".",
"fonts",
"[",
"'ITALIC'",
"]",
"else",
":",
"return",
"self",
".",
"fonts",
"[",
"'NORMAL'",
"]"
] |
Get the font based on bold and italic flags.
|
[
"Get",
"the",
"font",
"based",
"on",
"bold",
"and",
"italic",
"flags",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/img.py#L199-L210
|
8,542
|
wakatime/wakatime
|
wakatime/packages/pygments/formatters/img.py
|
ImageFormatter._get_char_x
|
def _get_char_x(self, charno):
"""
Get the X coordinate of a character position.
"""
return charno * self.fontw + self.image_pad + self.line_number_width
|
python
|
def _get_char_x(self, charno):
"""
Get the X coordinate of a character position.
"""
return charno * self.fontw + self.image_pad + self.line_number_width
|
[
"def",
"_get_char_x",
"(",
"self",
",",
"charno",
")",
":",
"return",
"charno",
"*",
"self",
".",
"fontw",
"+",
"self",
".",
"image_pad",
"+",
"self",
".",
"line_number_width"
] |
Get the X coordinate of a character position.
|
[
"Get",
"the",
"X",
"coordinate",
"of",
"a",
"character",
"position",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/img.py#L408-L412
|
8,543
|
wakatime/wakatime
|
wakatime/packages/pygments/formatters/img.py
|
ImageFormatter._get_text_pos
|
def _get_text_pos(self, charno, lineno):
"""
Get the actual position for a character and line position.
"""
return self._get_char_x(charno), self._get_line_y(lineno)
|
python
|
def _get_text_pos(self, charno, lineno):
"""
Get the actual position for a character and line position.
"""
return self._get_char_x(charno), self._get_line_y(lineno)
|
[
"def",
"_get_text_pos",
"(",
"self",
",",
"charno",
",",
"lineno",
")",
":",
"return",
"self",
".",
"_get_char_x",
"(",
"charno",
")",
",",
"self",
".",
"_get_line_y",
"(",
"lineno",
")"
] |
Get the actual position for a character and line position.
|
[
"Get",
"the",
"actual",
"position",
"for",
"a",
"character",
"and",
"line",
"position",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/img.py#L414-L418
|
8,544
|
wakatime/wakatime
|
wakatime/packages/pygments/formatters/img.py
|
ImageFormatter._get_image_size
|
def _get_image_size(self, maxcharno, maxlineno):
"""
Get the required image size.
"""
return (self._get_char_x(maxcharno) + self.image_pad,
self._get_line_y(maxlineno + 0) + self.image_pad)
|
python
|
def _get_image_size(self, maxcharno, maxlineno):
"""
Get the required image size.
"""
return (self._get_char_x(maxcharno) + self.image_pad,
self._get_line_y(maxlineno + 0) + self.image_pad)
|
[
"def",
"_get_image_size",
"(",
"self",
",",
"maxcharno",
",",
"maxlineno",
")",
":",
"return",
"(",
"self",
".",
"_get_char_x",
"(",
"maxcharno",
")",
"+",
"self",
".",
"image_pad",
",",
"self",
".",
"_get_line_y",
"(",
"maxlineno",
"+",
"0",
")",
"+",
"self",
".",
"image_pad",
")"
] |
Get the required image size.
|
[
"Get",
"the",
"required",
"image",
"size",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/img.py#L442-L447
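The three coordinate helpers above (_get_char_x, _get_text_pos, _get_image_size) are plain monospace arithmetic. The sketch below uses made-up metrics and a stand-in get_line_y(), since the real _get_line_y method is not reproduced in these records; none of the numbers are ImageFormatter defaults.

# Worked sketch of the monospace geometry used by the helpers above.
fontw, fonth = 8, 14          # pixel size of one character cell (illustrative)
image_pad = 10                # padding around the whole image
line_number_width = 40        # width reserved for the line-number gutter
line_pad = 2                  # extra spacing between lines

def get_char_x(charno):
    return charno * fontw + image_pad + line_number_width

def get_line_y(lineno):       # stand-in for the _get_line_y helper not shown here
    return lineno * (fonth + line_pad) + image_pad

def get_text_pos(charno, lineno):
    return get_char_x(charno), get_line_y(lineno)

def get_image_size(maxcharno, maxlineno):
    return (get_char_x(maxcharno) + image_pad,
            get_line_y(maxlineno) + image_pad)

print(get_text_pos(4, 2))        # -> (82, 42)
print(get_image_size(80, 25))    # -> (700, 420)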
|
8,545
|
wakatime/wakatime
|
wakatime/packages/pygments/formatters/img.py
|
ImageFormatter._draw_linenumber
|
def _draw_linenumber(self, posno, lineno):
"""
Remember a line number drawable to paint later.
"""
self._draw_text(
self._get_linenumber_pos(posno),
str(lineno).rjust(self.line_number_chars),
font=self.fonts.get_font(self.line_number_bold,
self.line_number_italic),
fill=self.line_number_fg,
)
|
python
|
def _draw_linenumber(self, posno, lineno):
"""
Remember a line number drawable to paint later.
"""
self._draw_text(
self._get_linenumber_pos(posno),
str(lineno).rjust(self.line_number_chars),
font=self.fonts.get_font(self.line_number_bold,
self.line_number_italic),
fill=self.line_number_fg,
)
|
[
"def",
"_draw_linenumber",
"(",
"self",
",",
"posno",
",",
"lineno",
")",
":",
"self",
".",
"_draw_text",
"(",
"self",
".",
"_get_linenumber_pos",
"(",
"posno",
")",
",",
"str",
"(",
"lineno",
")",
".",
"rjust",
"(",
"self",
".",
"line_number_chars",
")",
",",
"font",
"=",
"self",
".",
"fonts",
".",
"get_font",
"(",
"self",
".",
"line_number_bold",
",",
"self",
".",
"line_number_italic",
")",
",",
"fill",
"=",
"self",
".",
"line_number_fg",
",",
")"
] |
Remember a line number drawable to paint later.
|
[
"Remember",
"a",
"line",
"number",
"drawable",
"to",
"paint",
"later",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/img.py#L449-L459
|
8,546
|
wakatime/wakatime
|
wakatime/packages/pygments/formatters/img.py
|
ImageFormatter._draw_text
|
def _draw_text(self, pos, text, font, **kw):
"""
Remember a single drawable tuple to paint later.
"""
self.drawables.append((pos, text, font, kw))
|
python
|
def _draw_text(self, pos, text, font, **kw):
"""
Remember a single drawable tuple to paint later.
"""
self.drawables.append((pos, text, font, kw))
|
[
"def",
"_draw_text",
"(",
"self",
",",
"pos",
",",
"text",
",",
"font",
",",
"*",
"*",
"kw",
")",
":",
"self",
".",
"drawables",
".",
"append",
"(",
"(",
"pos",
",",
"text",
",",
"font",
",",
"kw",
")",
")"
] |
Remember a single drawable tuple to paint later.
|
[
"Remember",
"a",
"single",
"drawable",
"tuple",
"to",
"paint",
"later",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/img.py#L461-L465
|
8,547
|
wakatime/wakatime
|
wakatime/packages/pygments/formatters/img.py
|
ImageFormatter._create_drawables
|
def _create_drawables(self, tokensource):
"""
Create drawables for the token content.
"""
lineno = charno = maxcharno = 0
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
style = self.styles[ttype]
# TODO: make sure tab expansion happens earlier in the chain. It
# really ought to be done on the input, as to do it right here is
# quite complex.
value = value.expandtabs(4)
lines = value.splitlines(True)
# print lines
for i, line in enumerate(lines):
temp = line.rstrip('\n')
if temp:
self._draw_text(
self._get_text_pos(charno, lineno),
temp,
font = self._get_style_font(style),
fill = self._get_text_color(style)
)
charno += len(temp)
maxcharno = max(maxcharno, charno)
if line.endswith('\n'):
# add a line for each extra line in the value
charno = 0
lineno += 1
self.maxcharno = maxcharno
self.maxlineno = lineno
|
python
|
def _create_drawables(self, tokensource):
"""
Create drawables for the token content.
"""
lineno = charno = maxcharno = 0
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
style = self.styles[ttype]
# TODO: make sure tab expansion happens earlier in the chain. It
# really ought to be done on the input, as to do it right here is
# quite complex.
value = value.expandtabs(4)
lines = value.splitlines(True)
# print lines
for i, line in enumerate(lines):
temp = line.rstrip('\n')
if temp:
self._draw_text(
self._get_text_pos(charno, lineno),
temp,
font = self._get_style_font(style),
fill = self._get_text_color(style)
)
charno += len(temp)
maxcharno = max(maxcharno, charno)
if line.endswith('\n'):
# add a line for each extra line in the value
charno = 0
lineno += 1
self.maxcharno = maxcharno
self.maxlineno = lineno
|
[
"def",
"_create_drawables",
"(",
"self",
",",
"tokensource",
")",
":",
"lineno",
"=",
"charno",
"=",
"maxcharno",
"=",
"0",
"for",
"ttype",
",",
"value",
"in",
"tokensource",
":",
"while",
"ttype",
"not",
"in",
"self",
".",
"styles",
":",
"ttype",
"=",
"ttype",
".",
"parent",
"style",
"=",
"self",
".",
"styles",
"[",
"ttype",
"]",
"# TODO: make sure tab expansion happens earlier in the chain. It",
"# really ought to be done on the input, as to do it right here is",
"# quite complex.",
"value",
"=",
"value",
".",
"expandtabs",
"(",
"4",
")",
"lines",
"=",
"value",
".",
"splitlines",
"(",
"True",
")",
"# print lines",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"temp",
"=",
"line",
".",
"rstrip",
"(",
"'\\n'",
")",
"if",
"temp",
":",
"self",
".",
"_draw_text",
"(",
"self",
".",
"_get_text_pos",
"(",
"charno",
",",
"lineno",
")",
",",
"temp",
",",
"font",
"=",
"self",
".",
"_get_style_font",
"(",
"style",
")",
",",
"fill",
"=",
"self",
".",
"_get_text_color",
"(",
"style",
")",
")",
"charno",
"+=",
"len",
"(",
"temp",
")",
"maxcharno",
"=",
"max",
"(",
"maxcharno",
",",
"charno",
")",
"if",
"line",
".",
"endswith",
"(",
"'\\n'",
")",
":",
"# add a line for each extra line in the value",
"charno",
"=",
"0",
"lineno",
"+=",
"1",
"self",
".",
"maxcharno",
"=",
"maxcharno",
"self",
".",
"maxlineno",
"=",
"lineno"
] |
Create drawables for the token content.
|
[
"Create",
"drawables",
"for",
"the",
"token",
"content",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/img.py#L467-L498
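A standalone sketch of the charno/lineno bookkeeping in _create_drawables, driven by an invented token stream rather than a real lexer; it shows how a value containing a newline resets the column and advances the line counter.

# Standalone sketch of the loop above, fed with an invented token stream.
tokensource = [
    ("kw", "def "), ("name", "f"), ("punc", "():\n"),
    ("ws", "    "), ("kw", "return"), ("num", " 1\n"),
]

lineno = charno = maxcharno = 0
drawables = []
for ttype, value in tokensource:
    value = value.expandtabs(4)
    for line in value.splitlines(True):       # keepends=True
        text = line.rstrip('\n')
        if text:
            drawables.append(((charno, lineno), text, ttype))
            charno += len(text)
            maxcharno = max(maxcharno, charno)
        if line.endswith('\n'):                # a newline starts a fresh column
            charno = 0
            lineno += 1

print(lineno, maxcharno)    # -> 2 12
print(drawables[0])         # -> ((0, 0), 'def ', 'kw')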
|
8,548
|
wakatime/wakatime
|
wakatime/packages/pygments/formatters/img.py
|
ImageFormatter._draw_line_numbers
|
def _draw_line_numbers(self):
"""
Create drawables for the line numbers.
"""
if not self.line_numbers:
return
for p in xrange(self.maxlineno):
n = p + self.line_number_start
if (n % self.line_number_step) == 0:
self._draw_linenumber(p, n)
|
python
|
def _draw_line_numbers(self):
"""
Create drawables for the line numbers.
"""
if not self.line_numbers:
return
for p in xrange(self.maxlineno):
n = p + self.line_number_start
if (n % self.line_number_step) == 0:
self._draw_linenumber(p, n)
|
[
"def",
"_draw_line_numbers",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"line_numbers",
":",
"return",
"for",
"p",
"in",
"xrange",
"(",
"self",
".",
"maxlineno",
")",
":",
"n",
"=",
"p",
"+",
"self",
".",
"line_number_start",
"if",
"(",
"n",
"%",
"self",
".",
"line_number_step",
")",
"==",
"0",
":",
"self",
".",
"_draw_linenumber",
"(",
"p",
",",
"n",
")"
] |
Create drawables for the line numbers.
|
[
"Create",
"drawables",
"for",
"the",
"line",
"numbers",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/img.py#L500-L509
|
8,549
|
wakatime/wakatime
|
wakatime/packages/pygments/formatters/img.py
|
ImageFormatter._paint_line_number_bg
|
def _paint_line_number_bg(self, im):
"""
Paint the line number background on the image.
"""
if not self.line_numbers:
return
if self.line_number_fg is None:
return
draw = ImageDraw.Draw(im)
recth = im.size[-1]
rectw = self.image_pad + self.line_number_width - self.line_number_pad
draw.rectangle([(0, 0), (rectw, recth)],
fill=self.line_number_bg)
draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
del draw
|
python
|
def _paint_line_number_bg(self, im):
"""
Paint the line number background on the image.
"""
if not self.line_numbers:
return
if self.line_number_fg is None:
return
draw = ImageDraw.Draw(im)
recth = im.size[-1]
rectw = self.image_pad + self.line_number_width - self.line_number_pad
draw.rectangle([(0, 0), (rectw, recth)],
fill=self.line_number_bg)
draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
del draw
|
[
"def",
"_paint_line_number_bg",
"(",
"self",
",",
"im",
")",
":",
"if",
"not",
"self",
".",
"line_numbers",
":",
"return",
"if",
"self",
".",
"line_number_fg",
"is",
"None",
":",
"return",
"draw",
"=",
"ImageDraw",
".",
"Draw",
"(",
"im",
")",
"recth",
"=",
"im",
".",
"size",
"[",
"-",
"1",
"]",
"rectw",
"=",
"self",
".",
"image_pad",
"+",
"self",
".",
"line_number_width",
"-",
"self",
".",
"line_number_pad",
"draw",
".",
"rectangle",
"(",
"[",
"(",
"0",
",",
"0",
")",
",",
"(",
"rectw",
",",
"recth",
")",
"]",
",",
"fill",
"=",
"self",
".",
"line_number_bg",
")",
"draw",
".",
"line",
"(",
"[",
"(",
"rectw",
",",
"0",
")",
",",
"(",
"rectw",
",",
"recth",
")",
"]",
",",
"fill",
"=",
"self",
".",
"line_number_fg",
")",
"del",
"draw"
] |
Paint the line number background on the image.
|
[
"Paint",
"the",
"line",
"number",
"background",
"on",
"the",
"image",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/formatters/img.py#L511-L525
|
8,550
|
wakatime/wakatime
|
wakatime/heartbeat.py
|
Heartbeat.update
|
def update(self, attrs):
"""Return a copy of the current Heartbeat with updated attributes."""
data = self.dict()
data.update(attrs)
heartbeat = Heartbeat(data, self.args, self.configs, _clone=True)
return heartbeat
|
python
|
def update(self, attrs):
"""Return a copy of the current Heartbeat with updated attributes."""
data = self.dict()
data.update(attrs)
heartbeat = Heartbeat(data, self.args, self.configs, _clone=True)
return heartbeat
|
[
"def",
"update",
"(",
"self",
",",
"attrs",
")",
":",
"data",
"=",
"self",
".",
"dict",
"(",
")",
"data",
".",
"update",
"(",
"attrs",
")",
"heartbeat",
"=",
"Heartbeat",
"(",
"data",
",",
"self",
".",
"args",
",",
"self",
".",
"configs",
",",
"_clone",
"=",
"True",
")",
"return",
"heartbeat"
] |
Return a copy of the current Heartbeat with updated attributes.
|
[
"Return",
"a",
"copy",
"of",
"the",
"current",
"Heartbeat",
"with",
"updated",
"attributes",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/heartbeat.py#L130-L136
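A minimal sketch of the copy-and-update pattern used by Heartbeat.update above, on an illustrative Record class rather than wakatime's actual Heartbeat: the original object is left untouched and a new one is built from its dict plus the overrides.

# The Record class is illustrative, not wakatime's Heartbeat.
class Record(object):
    def __init__(self, data):
        self.data = dict(data)

    def dict(self):
        return dict(self.data)

    def update(self, attrs):
        data = self.dict()
        data.update(attrs)
        return Record(data)          # new object, old one unchanged

first = Record({'entity': 'a.py', 'lines': 10})
second = first.update({'lines': 12})
print(first.data['lines'], second.data['lines'])   # -> 10 12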
|
8,551
|
wakatime/wakatime
|
wakatime/heartbeat.py
|
Heartbeat.sanitize
|
def sanitize(self):
"""Removes sensitive data including file names and dependencies.
Returns a Heartbeat.
"""
if not self.args.hide_file_names:
return self
if self.entity is None:
return self
if self.type != 'file':
return self
if self.should_obfuscate_filename():
self._sanitize_metadata()
extension = u(os.path.splitext(self.entity)[1])
self.entity = u('HIDDEN{0}').format(extension)
elif self.should_obfuscate_project():
self._sanitize_metadata()
return self
|
python
|
def sanitize(self):
"""Removes sensitive data including file names and dependencies.
Returns a Heartbeat.
"""
if not self.args.hide_file_names:
return self
if self.entity is None:
return self
if self.type != 'file':
return self
if self.should_obfuscate_filename():
self._sanitize_metadata()
extension = u(os.path.splitext(self.entity)[1])
self.entity = u('HIDDEN{0}').format(extension)
elif self.should_obfuscate_project():
self._sanitize_metadata()
return self
|
[
"def",
"sanitize",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"args",
".",
"hide_file_names",
":",
"return",
"self",
"if",
"self",
".",
"entity",
"is",
"None",
":",
"return",
"self",
"if",
"self",
".",
"type",
"!=",
"'file'",
":",
"return",
"self",
"if",
"self",
".",
"should_obfuscate_filename",
"(",
")",
":",
"self",
".",
"_sanitize_metadata",
"(",
")",
"extension",
"=",
"u",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"self",
".",
"entity",
")",
"[",
"1",
"]",
")",
"self",
".",
"entity",
"=",
"u",
"(",
"'HIDDEN{0}'",
")",
".",
"format",
"(",
"extension",
")",
"elif",
"self",
".",
"should_obfuscate_project",
"(",
")",
":",
"self",
".",
"_sanitize_metadata",
"(",
")",
"return",
"self"
] |
Removes sensitive data including file names and dependencies.
Returns a Heartbeat.
|
[
"Removes",
"sensitive",
"data",
"including",
"file",
"names",
"and",
"dependencies",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/heartbeat.py#L138-L160
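A small sketch of the obfuscation step in sanitize() above: only the file extension survives, the rest of the path is replaced by a fixed marker. The paths are illustrative.

# Sketch of the file-name hiding step: keep the extension, drop the rest.
import os

def hide_filename(entity):
    extension = os.path.splitext(entity)[1]
    return 'HIDDEN{0}'.format(extension)

print(hide_filename('/home/alice/secret/project/api_keys.py'))   # -> HIDDEN.py
print(hide_filename('notes'))                                    # -> HIDDEN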
|
8,552
|
wakatime/wakatime
|
wakatime/heartbeat.py
|
Heartbeat.should_obfuscate_filename
|
def should_obfuscate_filename(self):
"""Returns True if hide_file_names is true or the entity file path
matches one in the list of obfuscated file paths."""
for pattern in self.args.hide_file_names:
try:
compiled = re.compile(pattern, re.IGNORECASE)
if compiled.search(self.entity):
return True
except re.error as ex:
log.warning(u('Regex error ({msg}) for hide_file_names pattern: {pattern}').format(
msg=u(ex),
pattern=u(pattern),
))
return False
|
python
|
def should_obfuscate_filename(self):
"""Returns True if hide_file_names is true or the entity file path
matches one in the list of obfuscated file paths."""
for pattern in self.args.hide_file_names:
try:
compiled = re.compile(pattern, re.IGNORECASE)
if compiled.search(self.entity):
return True
except re.error as ex:
log.warning(u('Regex error ({msg}) for hide_file_names pattern: {pattern}').format(
msg=u(ex),
pattern=u(pattern),
))
return False
|
[
"def",
"should_obfuscate_filename",
"(",
"self",
")",
":",
"for",
"pattern",
"in",
"self",
".",
"args",
".",
"hide_file_names",
":",
"try",
":",
"compiled",
"=",
"re",
".",
"compile",
"(",
"pattern",
",",
"re",
".",
"IGNORECASE",
")",
"if",
"compiled",
".",
"search",
"(",
"self",
".",
"entity",
")",
":",
"return",
"True",
"except",
"re",
".",
"error",
"as",
"ex",
":",
"log",
".",
"warning",
"(",
"u",
"(",
"'Regex error ({msg}) for hide_file_names pattern: {pattern}'",
")",
".",
"format",
"(",
"msg",
"=",
"u",
"(",
"ex",
")",
",",
"pattern",
"=",
"u",
"(",
"pattern",
")",
",",
")",
")",
"return",
"False"
] |
Returns True if hide_file_names is true or the entity file path
matches one in the list of obfuscated file paths.
|
[
"Returns",
"True",
"if",
"hide_file_names",
"is",
"true",
"or",
"the",
"entity",
"file",
"path",
"matches",
"one",
"in",
"the",
"list",
"of",
"obfuscated",
"file",
"paths",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/heartbeat.py#L196-L210
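A standalone sketch of the pattern check in should_obfuscate_filename: each entry of hide_file_names is treated as a case-insensitive regex, and patterns that fail to compile are reported and skipped. The patterns below are invented for illustration, including a deliberately invalid one.

import re

def matches_any(entity, patterns):
    for pattern in patterns:
        try:
            if re.compile(pattern, re.IGNORECASE).search(entity):
                return True
        except re.error as ex:
            print('bad pattern {0!r}: {1}'.format(pattern, ex))
    return False

patterns = [r'[', r'secret', r'\.env$']          # '[' is invalid on purpose
print(matches_any('/srv/app/.env', patterns))    # warns about '[' then prints True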
|
8,553
|
wakatime/wakatime
|
wakatime/heartbeat.py
|
Heartbeat._format_local_file
|
def _format_local_file(self):
"""When args.local_file empty on Windows, tries to map args.entity to a
unc path.
Updates args.local_file in-place without returning anything.
"""
if self.type != 'file':
return
if not self.entity:
return
if not is_win:
return
if self._file_exists():
return
self.args.local_file = self._to_unc_path(self.entity)
|
python
|
def _format_local_file(self):
"""When args.local_file empty on Windows, tries to map args.entity to a
unc path.
Updates args.local_file in-place without returning anything.
"""
if self.type != 'file':
return
if not self.entity:
return
if not is_win:
return
if self._file_exists():
return
self.args.local_file = self._to_unc_path(self.entity)
|
[
"def",
"_format_local_file",
"(",
"self",
")",
":",
"if",
"self",
".",
"type",
"!=",
"'file'",
":",
"return",
"if",
"not",
"self",
".",
"entity",
":",
"return",
"if",
"not",
"is_win",
":",
"return",
"if",
"self",
".",
"_file_exists",
"(",
")",
":",
"return",
"self",
".",
"args",
".",
"local_file",
"=",
"self",
".",
"_to_unc_path",
"(",
"self",
".",
"entity",
")"
] |
When args.local_file empty on Windows, tries to map args.entity to a
unc path.
Updates args.local_file in-place without returning anything.
|
[
"When",
"args",
".",
"local_file",
"empty",
"on",
"Windows",
"tries",
"to",
"map",
"args",
".",
"entity",
"to",
"a",
"unc",
"path",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/heartbeat.py#L242-L261
|
8,554
|
wakatime/wakatime
|
wakatime/packages/ntlm_auth/ntlm.py
|
Ntlm.create_negotiate_message
|
def create_negotiate_message(self, domain_name=None, workstation=None):
"""
Create an NTLM NEGOTIATE_MESSAGE
:param domain_name: The domain name of the user account we are authenticating with, default is None
:param workstation: The workstation we are using to authenticate with, default is None
:return: A base64 encoded string of the NEGOTIATE_MESSAGE
"""
self.negotiate_message = NegotiateMessage(self.negotiate_flags, domain_name, workstation)
return base64.b64encode(self.negotiate_message.get_data())
|
python
|
def create_negotiate_message(self, domain_name=None, workstation=None):
"""
Create an NTLM NEGOTIATE_MESSAGE
:param domain_name: The domain name of the user account we are authenticating with, default is None
:param workstation: The workstation we are using to authenticate with, default is None
:return: A base64 encoded string of the NEGOTIATE_MESSAGE
"""
self.negotiate_message = NegotiateMessage(self.negotiate_flags, domain_name, workstation)
return base64.b64encode(self.negotiate_message.get_data())
|
[
"def",
"create_negotiate_message",
"(",
"self",
",",
"domain_name",
"=",
"None",
",",
"workstation",
"=",
"None",
")",
":",
"self",
".",
"negotiate_message",
"=",
"NegotiateMessage",
"(",
"self",
".",
"negotiate_flags",
",",
"domain_name",
",",
"workstation",
")",
"return",
"base64",
".",
"b64encode",
"(",
"self",
".",
"negotiate_message",
".",
"get_data",
"(",
")",
")"
] |
Create an NTLM NEGOTIATE_MESSAGE
:param domain_name: The domain name of the user account we are authenticating with, default is None
:param workstation: The workstation we are using to authenticate with, default is None
:return: A base64 encoded string of the NEGOTIATE_MESSAGE
|
[
"Create",
"an",
"NTLM",
"NEGOTIATE_MESSAGE"
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/ntlm_auth/ntlm.py#L89-L99
|
8,555
|
wakatime/wakatime
|
wakatime/packages/ntlm_auth/ntlm.py
|
Ntlm.parse_challenge_message
|
def parse_challenge_message(self, msg2):
"""
Parse the NTLM CHALLENGE_MESSAGE from the server and add it to the Ntlm context fields
:param msg2: A base64 encoded string of the CHALLENGE_MESSAGE
"""
msg2 = base64.b64decode(msg2)
self.challenge_message = ChallengeMessage(msg2)
|
python
|
def parse_challenge_message(self, msg2):
"""
Parse the NTLM CHALLENGE_MESSAGE from the server and add it to the Ntlm context fields
:param msg2: A base64 encoded string of the CHALLENGE_MESSAGE
"""
msg2 = base64.b64decode(msg2)
self.challenge_message = ChallengeMessage(msg2)
|
[
"def",
"parse_challenge_message",
"(",
"self",
",",
"msg2",
")",
":",
"msg2",
"=",
"base64",
".",
"b64decode",
"(",
"msg2",
")",
"self",
".",
"challenge_message",
"=",
"ChallengeMessage",
"(",
"msg2",
")"
] |
Parse the NTLM CHALLENGE_MESSAGE from the server and add it to the Ntlm context fields
:param msg2: A base64 encoded string of the CHALLENGE_MESSAGE
|
[
"Parse",
"the",
"NTLM",
"CHALLENGE_MESSAGE",
"from",
"the",
"server",
"and",
"add",
"it",
"to",
"the",
"Ntlm",
"context",
"fields"
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/ntlm_auth/ntlm.py#L101-L108
|
8,556
|
wakatime/wakatime
|
wakatime/packages/ntlm_auth/ntlm.py
|
Ntlm.create_authenticate_message
|
def create_authenticate_message(self, user_name, password, domain_name=None, workstation=None, server_certificate_hash=None):
"""
Create an NTLM AUTHENTICATE_MESSAGE based on the Ntlm context and the previous messages sent and received
:param user_name: The user name of the user we are trying to authenticate with
:param password: The password of the user we are trying to authenticate with
:param domain_name: The domain name of the user account we are authenticated with, default is None
:param workstation: The workstation we are using to authenticate with, default is None
:param server_certificate_hash: The SHA256 hash string of the server certificate (DER encoded) NTLM is authenticating to. Used for Channel
Binding Tokens. If nothing is supplied then the CBT hash will not be sent. See messages.py AuthenticateMessage
for more details
:return: A base64 encoded string of the AUTHENTICATE_MESSAGE
"""
self.authenticate_message = AuthenticateMessage(user_name, password, domain_name, workstation,
self.challenge_message, self.ntlm_compatibility,
server_certificate_hash)
self.authenticate_message.add_mic(self.negotiate_message, self.challenge_message)
# Setups up the session_security context used to sign and seal messages if wanted
if self.negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_SEAL or self.negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_SIGN:
self.session_security = SessionSecurity(struct.unpack("<I", self.authenticate_message.negotiate_flags)[0],
self.authenticate_message.exported_session_key)
return base64.b64encode(self.authenticate_message.get_data())
|
python
|
def create_authenticate_message(self, user_name, password, domain_name=None, workstation=None, server_certificate_hash=None):
"""
Create an NTLM AUTHENTICATE_MESSAGE based on the Ntlm context and the previous messages sent and received
:param user_name: The user name of the user we are trying to authenticate with
:param password: The password of the user we are trying to authenticate with
:param domain_name: The domain name of the user account we are authenticated with, default is None
:param workstation: The workstation we are using to authenticate with, default is None
:param server_certificate_hash: The SHA256 hash string of the server certificate (DER encoded) NTLM is authenticating to. Used for Channel
Binding Tokens. If nothing is supplied then the CBT hash will not be sent. See messages.py AuthenticateMessage
for more details
:return: A base64 encoded string of the AUTHENTICATE_MESSAGE
"""
self.authenticate_message = AuthenticateMessage(user_name, password, domain_name, workstation,
self.challenge_message, self.ntlm_compatibility,
server_certificate_hash)
self.authenticate_message.add_mic(self.negotiate_message, self.challenge_message)
# Setups up the session_security context used to sign and seal messages if wanted
if self.negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_SEAL or self.negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_SIGN:
self.session_security = SessionSecurity(struct.unpack("<I", self.authenticate_message.negotiate_flags)[0],
self.authenticate_message.exported_session_key)
return base64.b64encode(self.authenticate_message.get_data())
|
[
"def",
"create_authenticate_message",
"(",
"self",
",",
"user_name",
",",
"password",
",",
"domain_name",
"=",
"None",
",",
"workstation",
"=",
"None",
",",
"server_certificate_hash",
"=",
"None",
")",
":",
"self",
".",
"authenticate_message",
"=",
"AuthenticateMessage",
"(",
"user_name",
",",
"password",
",",
"domain_name",
",",
"workstation",
",",
"self",
".",
"challenge_message",
",",
"self",
".",
"ntlm_compatibility",
",",
"server_certificate_hash",
")",
"self",
".",
"authenticate_message",
".",
"add_mic",
"(",
"self",
".",
"negotiate_message",
",",
"self",
".",
"challenge_message",
")",
"# Setups up the session_security context used to sign and seal messages if wanted",
"if",
"self",
".",
"negotiate_flags",
"&",
"NegotiateFlags",
".",
"NTLMSSP_NEGOTIATE_SEAL",
"or",
"self",
".",
"negotiate_flags",
"&",
"NegotiateFlags",
".",
"NTLMSSP_NEGOTIATE_SIGN",
":",
"self",
".",
"session_security",
"=",
"SessionSecurity",
"(",
"struct",
".",
"unpack",
"(",
"\"<I\"",
",",
"self",
".",
"authenticate_message",
".",
"negotiate_flags",
")",
"[",
"0",
"]",
",",
"self",
".",
"authenticate_message",
".",
"exported_session_key",
")",
"return",
"base64",
".",
"b64encode",
"(",
"self",
".",
"authenticate_message",
".",
"get_data",
"(",
")",
")"
] |
Create an NTLM AUTHENTICATE_MESSAGE based on the Ntlm context and the previous messages sent and received
:param user_name: The user name of the user we are trying to authenticate with
:param password: The password of the user we are trying to authenticate with
:param domain_name: The domain name of the user account we are authenticated with, default is None
:param workstation: The workstation we are using to authenticate with, default is None
:param server_certificate_hash: The SHA256 hash string of the server certificate (DER encoded) NTLM is authenticating to. Used for Channel
Binding Tokens. If nothing is supplied then the CBT hash will not be sent. See messages.py AuthenticateMessage
for more details
:return: A base64 encoded string of the AUTHENTICATE_MESSAGE
|
[
"Create",
"an",
"NTLM",
"AUTHENTICATE_MESSAGE",
"based",
"on",
"the",
"Ntlm",
"context",
"and",
"the",
"previous",
"messages",
"sent",
"and",
"received"
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/ntlm_auth/ntlm.py#L110-L133
|
8,557
|
wakatime/wakatime
|
wakatime/packages/pygments/__init__.py
|
lex
|
def lex(code, lexer):
"""
Lex ``code`` with ``lexer`` and return an iterable of tokens.
"""
try:
return lexer.get_tokens(code)
except TypeError as err:
if (isinstance(err.args[0], str) and
('unbound method get_tokens' in err.args[0] or
'missing 1 required positional argument' in err.args[0])):
raise TypeError('lex() argument must be a lexer instance, '
'not a class')
raise
|
python
|
def lex(code, lexer):
"""
Lex ``code`` with ``lexer`` and return an iterable of tokens.
"""
try:
return lexer.get_tokens(code)
except TypeError as err:
if (isinstance(err.args[0], str) and
('unbound method get_tokens' in err.args[0] or
'missing 1 required positional argument' in err.args[0])):
raise TypeError('lex() argument must be a lexer instance, '
'not a class')
raise
|
[
"def",
"lex",
"(",
"code",
",",
"lexer",
")",
":",
"try",
":",
"return",
"lexer",
".",
"get_tokens",
"(",
"code",
")",
"except",
"TypeError",
"as",
"err",
":",
"if",
"(",
"isinstance",
"(",
"err",
".",
"args",
"[",
"0",
"]",
",",
"str",
")",
"and",
"(",
"'unbound method get_tokens'",
"in",
"err",
".",
"args",
"[",
"0",
"]",
"or",
"'missing 1 required positional argument'",
"in",
"err",
".",
"args",
"[",
"0",
"]",
")",
")",
":",
"raise",
"TypeError",
"(",
"'lex() argument must be a lexer instance, '",
"'not a class'",
")",
"raise"
] |
Lex ``code`` with ``lexer`` and return an iterable of tokens.
|
[
"Lex",
"code",
"with",
"lexer",
"and",
"return",
"an",
"iterable",
"of",
"tokens",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/__init__.py#L38-L50
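A hedged usage example for lex(): pass a lexer instance and iterate the (token type, value) pairs. The exact token stream shown in the comment is indicative and may differ slightly between Pygments versions.

from pygments import lex
from pygments.lexers import PythonLexer

for ttype, value in lex('x = 1\n', PythonLexer()):
    print(ttype, repr(value))
# e.g. Token.Name 'x', Token.Operator '=', Token.Literal.Number.Integer '1', ...

# Passing the class instead of an instance raises the TypeError handled above:
# lex('x = 1\n', PythonLexer)  ->  TypeError: lex() argument must be a lexer instance, not a class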
|
8,558
|
wakatime/wakatime
|
wakatime/packages/pygments/__init__.py
|
format
|
def format(tokens, formatter, outfile=None): # pylint: disable=redefined-builtin
"""
Format a tokenlist ``tokens`` with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
try:
if not outfile:
realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()
formatter.format(tokens, realoutfile)
return realoutfile.getvalue()
else:
formatter.format(tokens, outfile)
except TypeError as err:
if (isinstance(err.args[0], str) and
('unbound method format' in err.args[0] or
'missing 1 required positional argument' in err.args[0])):
raise TypeError('format() argument must be a formatter instance, '
'not a class')
raise
|
python
|
def format(tokens, formatter, outfile=None): # pylint: disable=redefined-builtin
"""
Format a tokenlist ``tokens`` with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
try:
if not outfile:
realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()
formatter.format(tokens, realoutfile)
return realoutfile.getvalue()
else:
formatter.format(tokens, outfile)
except TypeError as err:
if (isinstance(err.args[0], str) and
('unbound method format' in err.args[0] or
'missing 1 required positional argument' in err.args[0])):
raise TypeError('format() argument must be a formatter instance, '
'not a class')
raise
|
[
"def",
"format",
"(",
"tokens",
",",
"formatter",
",",
"outfile",
"=",
"None",
")",
":",
"# pylint: disable=redefined-builtin",
"try",
":",
"if",
"not",
"outfile",
":",
"realoutfile",
"=",
"getattr",
"(",
"formatter",
",",
"'encoding'",
",",
"None",
")",
"and",
"BytesIO",
"(",
")",
"or",
"StringIO",
"(",
")",
"formatter",
".",
"format",
"(",
"tokens",
",",
"realoutfile",
")",
"return",
"realoutfile",
".",
"getvalue",
"(",
")",
"else",
":",
"formatter",
".",
"format",
"(",
"tokens",
",",
"outfile",
")",
"except",
"TypeError",
"as",
"err",
":",
"if",
"(",
"isinstance",
"(",
"err",
".",
"args",
"[",
"0",
"]",
",",
"str",
")",
"and",
"(",
"'unbound method format'",
"in",
"err",
".",
"args",
"[",
"0",
"]",
"or",
"'missing 1 required positional argument'",
"in",
"err",
".",
"args",
"[",
"0",
"]",
")",
")",
":",
"raise",
"TypeError",
"(",
"'format() argument must be a formatter instance, '",
"'not a class'",
")",
"raise"
] |
Format a tokenlist ``tokens`` with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
|
[
"Format",
"a",
"tokenlist",
"tokens",
"with",
"the",
"formatter",
"formatter",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/__init__.py#L53-L74
|
8,559
|
wakatime/wakatime
|
wakatime/packages/pygments/__init__.py
|
highlight
|
def highlight(code, lexer, formatter, outfile=None):
"""
Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
return format(lex(code, lexer), formatter, outfile)
|
python
|
def highlight(code, lexer, formatter, outfile=None):
"""
Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
return format(lex(code, lexer), formatter, outfile)
|
[
"def",
"highlight",
"(",
"code",
",",
"lexer",
",",
"formatter",
",",
"outfile",
"=",
"None",
")",
":",
"return",
"format",
"(",
"lex",
"(",
"code",
",",
"lexer",
")",
",",
"formatter",
",",
"outfile",
")"
] |
Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
|
[
"Lex",
"code",
"with",
"lexer",
"and",
"format",
"it",
"with",
"the",
"formatter",
"formatter",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/__init__.py#L77-L85
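A hedged end-to-end example for highlight(), combining a lexer instance and a formatter instance to render Python source as HTML; with no outfile the result comes back as a string.

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

source = 'def add(a, b):\n    return a + b\n'
html = highlight(source, PythonLexer(), HtmlFormatter())
print(html[:60])          # starts with '<div class="highlight"><pre>...'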
|
8,560
|
wakatime/wakatime
|
wakatime/configs.py
|
getConfigFile
|
def getConfigFile():
"""Returns the config file location.
If $WAKATIME_HOME env variable is defined, returns
$WAKATIME_HOME/.wakatime.cfg, otherwise ~/.wakatime.cfg.
"""
fileName = '.wakatime.cfg'
home = os.environ.get('WAKATIME_HOME')
if home:
return os.path.join(os.path.expanduser(home), fileName)
return os.path.join(os.path.expanduser('~'), fileName)
|
python
|
def getConfigFile():
"""Returns the config file location.
If $WAKATIME_HOME env variable is defined, returns
$WAKATIME_HOME/.wakatime.cfg, otherwise ~/.wakatime.cfg.
"""
fileName = '.wakatime.cfg'
home = os.environ.get('WAKATIME_HOME')
if home:
return os.path.join(os.path.expanduser(home), fileName)
return os.path.join(os.path.expanduser('~'), fileName)
|
[
"def",
"getConfigFile",
"(",
")",
":",
"fileName",
"=",
"'.wakatime.cfg'",
"home",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'WAKATIME_HOME'",
")",
"if",
"home",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"home",
")",
",",
"fileName",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~'",
")",
",",
"fileName",
")"
] |
Returns the config file location.
If $WAKATIME_HOME env variable is defined, returns
$WAKATIME_HOME/.wakatime.cfg, otherwise ~/.wakatime.cfg.
|
[
"Returns",
"the",
"config",
"file",
"location",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/configs.py#L28-L41
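A self-contained sketch of the lookup order above: $WAKATIME_HOME wins when set, otherwise the home directory is used. The environment value below is illustrative.

import os

def get_config_file(file_name='.wakatime.cfg'):
    home = os.environ.get('WAKATIME_HOME')
    base = os.path.expanduser(home) if home else os.path.expanduser('~')
    return os.path.join(base, file_name)

os.environ['WAKATIME_HOME'] = '/tmp/waka-home'
print(get_config_file())      # -> /tmp/waka-home/.wakatime.cfg
del os.environ['WAKATIME_HOME']
print(get_config_file())      # -> e.g. /home/alice/.wakatime.cfg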
|
8,561
|
wakatime/wakatime
|
wakatime/packages/pygments/filters/__init__.py
|
find_filter_class
|
def find_filter_class(filtername):
"""Lookup a filter by name. Return None if not found."""
if filtername in FILTERS:
return FILTERS[filtername]
for name, cls in find_plugin_filters():
if name == filtername:
return cls
return None
|
python
|
def find_filter_class(filtername):
"""Lookup a filter by name. Return None if not found."""
if filtername in FILTERS:
return FILTERS[filtername]
for name, cls in find_plugin_filters():
if name == filtername:
return cls
return None
|
[
"def",
"find_filter_class",
"(",
"filtername",
")",
":",
"if",
"filtername",
"in",
"FILTERS",
":",
"return",
"FILTERS",
"[",
"filtername",
"]",
"for",
"name",
",",
"cls",
"in",
"find_plugin_filters",
"(",
")",
":",
"if",
"name",
"==",
"filtername",
":",
"return",
"cls",
"return",
"None"
] |
Lookup a filter by name. Return None if not found.
|
[
"Lookup",
"a",
"filter",
"by",
"name",
".",
"Return",
"None",
"if",
"not",
"found",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/filters/__init__.py#L23-L30
|
8,562
|
wakatime/wakatime
|
wakatime/packages/pygments/filters/__init__.py
|
get_filter_by_name
|
def get_filter_by_name(filtername, **options):
"""Return an instantiated filter.
Options are passed to the filter initializer if wanted.
Raise a ClassNotFound if not found.
"""
cls = find_filter_class(filtername)
if cls:
return cls(**options)
else:
raise ClassNotFound('filter %r not found' % filtername)
|
python
|
def get_filter_by_name(filtername, **options):
"""Return an instantiated filter.
Options are passed to the filter initializer if wanted.
Raise a ClassNotFound if not found.
"""
cls = find_filter_class(filtername)
if cls:
return cls(**options)
else:
raise ClassNotFound('filter %r not found' % filtername)
|
[
"def",
"get_filter_by_name",
"(",
"filtername",
",",
"*",
"*",
"options",
")",
":",
"cls",
"=",
"find_filter_class",
"(",
"filtername",
")",
"if",
"cls",
":",
"return",
"cls",
"(",
"*",
"*",
"options",
")",
"else",
":",
"raise",
"ClassNotFound",
"(",
"'filter %r not found'",
"%",
"filtername",
")"
] |
Return an instantiated filter.
Options are passed to the filter initializer if wanted.
Raise a ClassNotFound if not found.
|
[
"Return",
"an",
"instantiated",
"filter",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/filters/__init__.py#L33-L43
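A hedged usage example: look up a built-in Pygments filter by name, attach it to a lexer, and observe the ClassNotFound error for an unknown name. The 'keywordcase' filter and its 'case' option are standard Pygments filters, but the snippet is a usage sketch rather than part of this code base.

from pygments.filters import get_filter_by_name
from pygments.lexers import PythonLexer
from pygments.util import ClassNotFound

lexer = PythonLexer()
lexer.add_filter(get_filter_by_name('keywordcase', case='upper'))
print(''.join(value for _, value in lexer.get_tokens('def f(): pass\n')))
# -> 'DEF f(): PASS\n'

try:
    get_filter_by_name('no-such-filter')
except ClassNotFound as exc:
    print(exc)                # -> filter 'no-such-filter' not found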
|
8,563
|
wakatime/wakatime
|
wakatime/packages/pygments/lexers/templates.py
|
ErbLexer.get_tokens_unprocessed
|
def get_tokens_unprocessed(self, text):
"""
Since ERB doesn't allow "<%" and other tags inside of ruby
blocks we have to use a split approach here that fails for
that too.
"""
tokens = self._block_re.split(text)
tokens.reverse()
state = idx = 0
try:
while True:
# text
if state == 0:
val = tokens.pop()
yield idx, Other, val
idx += len(val)
state = 1
# block starts
elif state == 1:
tag = tokens.pop()
# literals
if tag in ('<%%', '%%>'):
yield idx, Other, tag
idx += 3
state = 0
# comment
elif tag == '<%#':
yield idx, Comment.Preproc, tag
val = tokens.pop()
yield idx + 3, Comment, val
idx += 3 + len(val)
state = 2
# blocks or output
elif tag in ('<%', '<%=', '<%-'):
yield idx, Comment.Preproc, tag
idx += len(tag)
data = tokens.pop()
r_idx = 0
for r_idx, r_token, r_value in \
self.ruby_lexer.get_tokens_unprocessed(data):
yield r_idx + idx, r_token, r_value
idx += len(data)
state = 2
elif tag in ('%>', '-%>'):
yield idx, Error, tag
idx += len(tag)
state = 0
# % raw ruby statements
else:
yield idx, Comment.Preproc, tag[0]
r_idx = 0
for r_idx, r_token, r_value in \
self.ruby_lexer.get_tokens_unprocessed(tag[1:]):
yield idx + 1 + r_idx, r_token, r_value
idx += len(tag)
state = 0
# block ends
elif state == 2:
tag = tokens.pop()
if tag not in ('%>', '-%>'):
yield idx, Other, tag
else:
yield idx, Comment.Preproc, tag
idx += len(tag)
state = 0
except IndexError:
return
|
python
|
def get_tokens_unprocessed(self, text):
"""
Since ERB doesn't allow "<%" and other tags inside of ruby
blocks we have to use a split approach here that fails for
that too.
"""
tokens = self._block_re.split(text)
tokens.reverse()
state = idx = 0
try:
while True:
# text
if state == 0:
val = tokens.pop()
yield idx, Other, val
idx += len(val)
state = 1
# block starts
elif state == 1:
tag = tokens.pop()
# literals
if tag in ('<%%', '%%>'):
yield idx, Other, tag
idx += 3
state = 0
# comment
elif tag == '<%#':
yield idx, Comment.Preproc, tag
val = tokens.pop()
yield idx + 3, Comment, val
idx += 3 + len(val)
state = 2
# blocks or output
elif tag in ('<%', '<%=', '<%-'):
yield idx, Comment.Preproc, tag
idx += len(tag)
data = tokens.pop()
r_idx = 0
for r_idx, r_token, r_value in \
self.ruby_lexer.get_tokens_unprocessed(data):
yield r_idx + idx, r_token, r_value
idx += len(data)
state = 2
elif tag in ('%>', '-%>'):
yield idx, Error, tag
idx += len(tag)
state = 0
# % raw ruby statements
else:
yield idx, Comment.Preproc, tag[0]
r_idx = 0
for r_idx, r_token, r_value in \
self.ruby_lexer.get_tokens_unprocessed(tag[1:]):
yield idx + 1 + r_idx, r_token, r_value
idx += len(tag)
state = 0
# block ends
elif state == 2:
tag = tokens.pop()
if tag not in ('%>', '-%>'):
yield idx, Other, tag
else:
yield idx, Comment.Preproc, tag
idx += len(tag)
state = 0
except IndexError:
return
|
[
"def",
"get_tokens_unprocessed",
"(",
"self",
",",
"text",
")",
":",
"tokens",
"=",
"self",
".",
"_block_re",
".",
"split",
"(",
"text",
")",
"tokens",
".",
"reverse",
"(",
")",
"state",
"=",
"idx",
"=",
"0",
"try",
":",
"while",
"True",
":",
"# text",
"if",
"state",
"==",
"0",
":",
"val",
"=",
"tokens",
".",
"pop",
"(",
")",
"yield",
"idx",
",",
"Other",
",",
"val",
"idx",
"+=",
"len",
"(",
"val",
")",
"state",
"=",
"1",
"# block starts",
"elif",
"state",
"==",
"1",
":",
"tag",
"=",
"tokens",
".",
"pop",
"(",
")",
"# literals",
"if",
"tag",
"in",
"(",
"'<%%'",
",",
"'%%>'",
")",
":",
"yield",
"idx",
",",
"Other",
",",
"tag",
"idx",
"+=",
"3",
"state",
"=",
"0",
"# comment",
"elif",
"tag",
"==",
"'<%#'",
":",
"yield",
"idx",
",",
"Comment",
".",
"Preproc",
",",
"tag",
"val",
"=",
"tokens",
".",
"pop",
"(",
")",
"yield",
"idx",
"+",
"3",
",",
"Comment",
",",
"val",
"idx",
"+=",
"3",
"+",
"len",
"(",
"val",
")",
"state",
"=",
"2",
"# blocks or output",
"elif",
"tag",
"in",
"(",
"'<%'",
",",
"'<%='",
",",
"'<%-'",
")",
":",
"yield",
"idx",
",",
"Comment",
".",
"Preproc",
",",
"tag",
"idx",
"+=",
"len",
"(",
"tag",
")",
"data",
"=",
"tokens",
".",
"pop",
"(",
")",
"r_idx",
"=",
"0",
"for",
"r_idx",
",",
"r_token",
",",
"r_value",
"in",
"self",
".",
"ruby_lexer",
".",
"get_tokens_unprocessed",
"(",
"data",
")",
":",
"yield",
"r_idx",
"+",
"idx",
",",
"r_token",
",",
"r_value",
"idx",
"+=",
"len",
"(",
"data",
")",
"state",
"=",
"2",
"elif",
"tag",
"in",
"(",
"'%>'",
",",
"'-%>'",
")",
":",
"yield",
"idx",
",",
"Error",
",",
"tag",
"idx",
"+=",
"len",
"(",
"tag",
")",
"state",
"=",
"0",
"# % raw ruby statements",
"else",
":",
"yield",
"idx",
",",
"Comment",
".",
"Preproc",
",",
"tag",
"[",
"0",
"]",
"r_idx",
"=",
"0",
"for",
"r_idx",
",",
"r_token",
",",
"r_value",
"in",
"self",
".",
"ruby_lexer",
".",
"get_tokens_unprocessed",
"(",
"tag",
"[",
"1",
":",
"]",
")",
":",
"yield",
"idx",
"+",
"1",
"+",
"r_idx",
",",
"r_token",
",",
"r_value",
"idx",
"+=",
"len",
"(",
"tag",
")",
"state",
"=",
"0",
"# block ends",
"elif",
"state",
"==",
"2",
":",
"tag",
"=",
"tokens",
".",
"pop",
"(",
")",
"if",
"tag",
"not",
"in",
"(",
"'%>'",
",",
"'-%>'",
")",
":",
"yield",
"idx",
",",
"Other",
",",
"tag",
"else",
":",
"yield",
"idx",
",",
"Comment",
".",
"Preproc",
",",
"tag",
"idx",
"+=",
"len",
"(",
"tag",
")",
"state",
"=",
"0",
"except",
"IndexError",
":",
"return"
] |
Since ERB doesn't allow "<%" and other tags inside of ruby
blocks we have to use a split approach here that fails for
that too.
|
[
"Since",
"ERB",
"doesn",
"t",
"allow",
"<%",
"and",
"other",
"tags",
"inside",
"of",
"ruby",
"blocks",
"we",
"have",
"to",
"use",
"a",
"split",
"approach",
"here",
"that",
"fails",
"for",
"that",
"too",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/templates.py#L72-L138
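A hedged usage sketch for the ERB lexer above: get_tokens_unprocessed() yields (offset, token type, value) triples, with the '<%= ... %>' delimiters reported as Comment.Preproc and the embedded Ruby handed to the Ruby sub-lexer. The template string is illustrative.

from pygments.lexers.templates import ErbLexer

for idx, ttype, value in ErbLexer().get_tokens_unprocessed('Hello <%= name %>!\n'):
    print(idx, ttype, repr(value))
# prints the plain text as Other, the '<%=' / '%>' delimiters as Comment.Preproc,
# and the Ruby sub-tokens for ' name ' with their character offsets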
|
8,564
|
wakatime/wakatime
|
wakatime/utils.py
|
format_file_path
|
def format_file_path(filepath):
"""Formats a path as absolute and with the correct platform separator."""
try:
is_windows_network_mount = WINDOWS_NETWORK_MOUNT_PATTERN.match(filepath)
filepath = os.path.realpath(os.path.abspath(filepath))
filepath = re.sub(BACKSLASH_REPLACE_PATTERN, '/', filepath)
is_windows_drive = WINDOWS_DRIVE_PATTERN.match(filepath)
if is_windows_drive:
filepath = filepath.capitalize()
if is_windows_network_mount:
# Add back a / to the front, since the previous modifications
# will have replaced any double slashes with single
filepath = '/' + filepath
except:
pass
return filepath
|
python
|
def format_file_path(filepath):
"""Formats a path as absolute and with the correct platform separator."""
try:
is_windows_network_mount = WINDOWS_NETWORK_MOUNT_PATTERN.match(filepath)
filepath = os.path.realpath(os.path.abspath(filepath))
filepath = re.sub(BACKSLASH_REPLACE_PATTERN, '/', filepath)
is_windows_drive = WINDOWS_DRIVE_PATTERN.match(filepath)
if is_windows_drive:
filepath = filepath.capitalize()
if is_windows_network_mount:
# Add back a / to the front, since the previous modifications
# will have replaced any double slashes with single
filepath = '/' + filepath
except:
pass
return filepath
|
[
"def",
"format_file_path",
"(",
"filepath",
")",
":",
"try",
":",
"is_windows_network_mount",
"=",
"WINDOWS_NETWORK_MOUNT_PATTERN",
".",
"match",
"(",
"filepath",
")",
"filepath",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"filepath",
")",
")",
"filepath",
"=",
"re",
".",
"sub",
"(",
"BACKSLASH_REPLACE_PATTERN",
",",
"'/'",
",",
"filepath",
")",
"is_windows_drive",
"=",
"WINDOWS_DRIVE_PATTERN",
".",
"match",
"(",
"filepath",
")",
"if",
"is_windows_drive",
":",
"filepath",
"=",
"filepath",
".",
"capitalize",
"(",
")",
"if",
"is_windows_network_mount",
":",
"# Add back a / to the front, since the previous modifications",
"# will have replaced any double slashes with single",
"filepath",
"=",
"'/'",
"+",
"filepath",
"except",
":",
"pass",
"return",
"filepath"
] |
Formats a path as absolute and with the correct platform separator.
|
[
"Formats",
"a",
"path",
"as",
"absolute",
"and",
"with",
"the",
"correct",
"platform",
"separator",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/utils.py#L77-L93
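A standalone sketch of the normalization above with simplified stand-ins for the module-level WINDOWS_* and BACKSLASH_REPLACE_PATTERN regexes; the realpath/abspath step is omitted so the output stays machine-independent.

import re

BACKSLASH_REPLACE_PATTERN = re.compile(r'[\\/]+')          # stand-in pattern
WINDOWS_DRIVE_PATTERN = re.compile(r'^[a-z]:/', re.IGNORECASE)
WINDOWS_NETWORK_MOUNT_PATTERN = re.compile(r'^\\\\')

def format_file_path(filepath):
    is_network_mount = WINDOWS_NETWORK_MOUNT_PATTERN.match(filepath)
    filepath = re.sub(BACKSLASH_REPLACE_PATTERN, '/', filepath)
    if WINDOWS_DRIVE_PATTERN.match(filepath):
        filepath = filepath.capitalize()
    if is_network_mount:
        filepath = '/' + filepath          # restore the leading double slash
    return filepath

print(format_file_path(r'c:\Users\Alice\project\main.py'))   # -> C:/users/alice/project/main.py
print(format_file_path(r'\\server\share\main.py'))           # -> //server/share/main.py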
|
8,565
|
wakatime/wakatime
|
wakatime/packages/urllib3/connectionpool.py
|
HTTPConnectionPool.close
|
def close(self):
"""
Close all pooled connections and disable the pool.
"""
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except queue.Empty:
pass
|
python
|
def close(self):
"""
Close all pooled connections and disable the pool.
"""
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except queue.Empty:
pass
|
[
"def",
"close",
"(",
"self",
")",
":",
"# Disable access to the pool",
"old_pool",
",",
"self",
".",
"pool",
"=",
"self",
".",
"pool",
",",
"None",
"try",
":",
"while",
"True",
":",
"conn",
"=",
"old_pool",
".",
"get",
"(",
"block",
"=",
"False",
")",
"if",
"conn",
":",
"conn",
".",
"close",
"(",
")",
"except",
"queue",
".",
"Empty",
":",
"pass"
] |
Close all pooled connections and disable the pool.
|
[
"Close",
"all",
"pooled",
"connections",
"and",
"disable",
"the",
"pool",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/urllib3/connectionpool.py#L410-L424
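A hedged usage example for the connection pool: open a pool, issue a request, then close() it to drain and disable the pool. It needs network access, and httpbin.org is just an illustrative host.

import urllib3

pool = urllib3.HTTPConnectionPool('httpbin.org', maxsize=2)
resp = pool.request('GET', '/get')
print(resp.status)      # e.g. 200
pool.close()            # pooled connections are closed and the pool is disabled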
|
8,566
|
wakatime/wakatime
|
wakatime/packages/urllib3/connectionpool.py
|
HTTPConnectionPool.is_same_host
|
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith('/'):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
host = _ipv6_host(host).lower()
# Use explicit default port for comparison when none is given
if self.port and not port:
port = port_by_scheme.get(scheme)
elif not self.port and port == port_by_scheme.get(scheme):
port = None
return (scheme, host, port) == (self.scheme, self.host, self.port)
|
python
|
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith('/'):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
host = _ipv6_host(host).lower()
# Use explicit default port for comparison when none is given
if self.port and not port:
port = port_by_scheme.get(scheme)
elif not self.port and port == port_by_scheme.get(scheme):
port = None
return (scheme, host, port) == (self.scheme, self.host, self.port)
|
[
"def",
"is_same_host",
"(",
"self",
",",
"url",
")",
":",
"if",
"url",
".",
"startswith",
"(",
"'/'",
")",
":",
"return",
"True",
"# TODO: Add optional support for socket.gethostbyname checking.",
"scheme",
",",
"host",
",",
"port",
"=",
"get_host",
"(",
"url",
")",
"host",
"=",
"_ipv6_host",
"(",
"host",
")",
".",
"lower",
"(",
")",
"# Use explicit default port for comparison when none is given",
"if",
"self",
".",
"port",
"and",
"not",
"port",
":",
"port",
"=",
"port_by_scheme",
".",
"get",
"(",
"scheme",
")",
"elif",
"not",
"self",
".",
"port",
"and",
"port",
"==",
"port_by_scheme",
".",
"get",
"(",
"scheme",
")",
":",
"port",
"=",
"None",
"return",
"(",
"scheme",
",",
"host",
",",
"port",
")",
"==",
"(",
"self",
".",
"scheme",
",",
"self",
".",
"host",
",",
"self",
".",
"port",
")"
] |
Check if the given ``url`` is a member of the same host as this
connection pool.
|
[
"Check",
"if",
"the",
"given",
"url",
"is",
"a",
"member",
"of",
"the",
"same",
"host",
"as",
"this",
"connection",
"pool",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/urllib3/connectionpool.py#L426-L445
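A standalone sketch of the default-port normalization above, using the standard library's urlsplit() in place of urllib3's get_host(); PORT_BY_SCHEME plays the role of port_by_scheme.

from urllib.parse import urlsplit

PORT_BY_SCHEME = {'http': 80, 'https': 443}

def is_same_host(pool_scheme, pool_host, pool_port, url):
    if url.startswith('/'):
        return True                      # relative URLs stay on the pool's host
    parts = urlsplit(url)
    scheme, host, port = parts.scheme, parts.hostname, parts.port
    if pool_port and not port:
        port = PORT_BY_SCHEME.get(scheme)
    elif not pool_port and port == PORT_BY_SCHEME.get(scheme):
        port = None
    return (scheme, host, port) == (pool_scheme, pool_host, pool_port)

print(is_same_host('http', 'example.com', 80, 'http://example.com/path'))    # -> True
print(is_same_host('http', 'example.com', 80, 'https://example.com/path'))   # -> False
print(is_same_host('http', 'example.com', 80, '/just/a/path'))               # -> True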
|
8,567
|
wakatime/wakatime
|
wakatime/packages/configparser/__init__.py
|
ParsingError.filename
|
def filename(self, value):
"""Deprecated, user `source'."""
warnings.warn(
"The 'filename' attribute will be removed in future versions. "
"Use 'source' instead.",
DeprecationWarning, stacklevel=2
)
self.source = value
|
python
|
def filename(self, value):
"""Deprecated, user `source'."""
warnings.warn(
"The 'filename' attribute will be removed in future versions. "
"Use 'source' instead.",
DeprecationWarning, stacklevel=2
)
self.source = value
|
[
"def",
"filename",
"(",
"self",
",",
"value",
")",
":",
"warnings",
".",
"warn",
"(",
"\"The 'filename' attribute will be removed in future versions. \"",
"\"Use 'source' instead.\"",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
")",
"self",
".",
"source",
"=",
"value"
] |
Deprecated, use `source'.
|
[
"Deprecated",
"user",
"source",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L315-L322
|
8,568
|
wakatime/wakatime
|
wakatime/packages/configparser/__init__.py
|
RawConfigParser.options
|
def options(self, section):
"""Return a list of option names for the given section name."""
try:
opts = self._sections[section].copy()
except KeyError:
raise from_none(NoSectionError(section))
opts.update(self._defaults)
return list(opts.keys())
|
python
|
def options(self, section):
"""Return a list of option names for the given section name."""
try:
opts = self._sections[section].copy()
except KeyError:
raise from_none(NoSectionError(section))
opts.update(self._defaults)
return list(opts.keys())
|
[
"def",
"options",
"(",
"self",
",",
"section",
")",
":",
"try",
":",
"opts",
"=",
"self",
".",
"_sections",
"[",
"section",
"]",
".",
"copy",
"(",
")",
"except",
"KeyError",
":",
"raise",
"from_none",
"(",
"NoSectionError",
"(",
"section",
")",
")",
"opts",
".",
"update",
"(",
"self",
".",
"_defaults",
")",
"return",
"list",
"(",
"opts",
".",
"keys",
"(",
")",
")"
] |
Return a list of option names for the given section name.
|
[
"Return",
"a",
"list",
"of",
"option",
"names",
"for",
"the",
"given",
"section",
"name",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L666-L673
|
8,569
|
wakatime/wakatime
|
wakatime/packages/configparser/__init__.py
|
RawConfigParser.read_string
|
def read_string(self, string, source='<string>'):
"""Read configuration from a given string."""
sfile = io.StringIO(string)
self.read_file(sfile, source)
|
python
|
def read_string(self, string, source='<string>'):
"""Read configuration from a given string."""
sfile = io.StringIO(string)
self.read_file(sfile, source)
|
[
"def",
"read_string",
"(",
"self",
",",
"string",
",",
"source",
"=",
"'<string>'",
")",
":",
"sfile",
"=",
"io",
".",
"StringIO",
"(",
"string",
")",
"self",
".",
"read_file",
"(",
"sfile",
",",
"source",
")"
] |
Read configuration from a given string.
|
[
"Read",
"configuration",
"from",
"a",
"given",
"string",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L726-L729
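A hedged usage example for read_string(), shown here with the standard-library configparser, which this backport mirrors; it also exercises options() from the record further above.

import configparser

parser = configparser.ConfigParser()
parser.read_string('[settings]\ndebug = true\napi_key = abc123\n')
print(parser.options('settings'))         # -> ['debug', 'api_key']
print(parser.get('settings', 'debug'))    # -> true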
|
8,570
|
wakatime/wakatime
|
wakatime/packages/configparser/__init__.py
|
RawConfigParser.read_dict
|
def read_dict(self, dictionary, source='<dict>'):
"""Read configuration from a dictionary.
Keys are section names, values are dictionaries with keys and values
that should be present in the section. If the used dictionary type
preserves order, sections and their keys will be added in order.
All types held in the dictionary are converted to strings during
reading, including section names, option names and keys.
Optional second argument is the `source' specifying the name of the
dictionary being read.
"""
elements_added = set()
for section, keys in dictionary.items():
section = str(section)
try:
self.add_section(section)
except (DuplicateSectionError, ValueError):
if self._strict and section in elements_added:
raise
elements_added.add(section)
for key, value in keys.items():
key = self.optionxform(str(key))
if value is not None:
value = str(value)
if self._strict and (section, key) in elements_added:
raise DuplicateOptionError(section, key, source)
elements_added.add((section, key))
self.set(section, key, value)
|
python
|
def read_dict(self, dictionary, source='<dict>'):
"""Read configuration from a dictionary.
Keys are section names, values are dictionaries with keys and values
that should be present in the section. If the used dictionary type
preserves order, sections and their keys will be added in order.
All types held in the dictionary are converted to strings during
reading, including section names, option names and keys.
Optional second argument is the `source' specifying the name of the
dictionary being read.
"""
elements_added = set()
for section, keys in dictionary.items():
section = str(section)
try:
self.add_section(section)
except (DuplicateSectionError, ValueError):
if self._strict and section in elements_added:
raise
elements_added.add(section)
for key, value in keys.items():
key = self.optionxform(str(key))
if value is not None:
value = str(value)
if self._strict and (section, key) in elements_added:
raise DuplicateOptionError(section, key, source)
elements_added.add((section, key))
self.set(section, key, value)
|
[
"def",
"read_dict",
"(",
"self",
",",
"dictionary",
",",
"source",
"=",
"'<dict>'",
")",
":",
"elements_added",
"=",
"set",
"(",
")",
"for",
"section",
",",
"keys",
"in",
"dictionary",
".",
"items",
"(",
")",
":",
"section",
"=",
"str",
"(",
"section",
")",
"try",
":",
"self",
".",
"add_section",
"(",
"section",
")",
"except",
"(",
"DuplicateSectionError",
",",
"ValueError",
")",
":",
"if",
"self",
".",
"_strict",
"and",
"section",
"in",
"elements_added",
":",
"raise",
"elements_added",
".",
"add",
"(",
"section",
")",
"for",
"key",
",",
"value",
"in",
"keys",
".",
"items",
"(",
")",
":",
"key",
"=",
"self",
".",
"optionxform",
"(",
"str",
"(",
"key",
")",
")",
"if",
"value",
"is",
"not",
"None",
":",
"value",
"=",
"str",
"(",
"value",
")",
"if",
"self",
".",
"_strict",
"and",
"(",
"section",
",",
"key",
")",
"in",
"elements_added",
":",
"raise",
"DuplicateOptionError",
"(",
"section",
",",
"key",
",",
"source",
")",
"elements_added",
".",
"add",
"(",
"(",
"section",
",",
"key",
")",
")",
"self",
".",
"set",
"(",
"section",
",",
"key",
",",
"value",
")"
] |
Read configuration from a dictionary.
Keys are section names, values are dictionaries with keys and values
that should be present in the section. If the used dictionary type
preserves order, sections and their keys will be added in order.
All types held in the dictionary are converted to strings during
reading, including section names, option names and keys.
Optional second argument is the `source' specifying the name of the
dictionary being read.
|
[
"Read",
"configuration",
"from",
"a",
"dictionary",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L731-L760
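A minimal usage sketch for read_dict(), using the stdlib configparser; section names, keys and values are converted to strings on the way in:

from configparser import ConfigParser

cp = ConfigParser()
cp.read_dict({'server': {'port': 8080, 'debug': True}}, source='settings-dict')
print(cp.get('server', 'port'))           # '8080' -- the int was str()'d
print(cp.getboolean('server', 'debug'))   # True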
|
8,571
|
wakatime/wakatime
|
wakatime/packages/configparser/__init__.py
|
RawConfigParser.readfp
|
def readfp(self, fp, filename=None):
"""Deprecated, use read_file instead."""
warnings.warn(
"This method will be removed in future versions. "
"Use 'parser.read_file()' instead.",
DeprecationWarning, stacklevel=2
)
self.read_file(fp, source=filename)
|
python
|
def readfp(self, fp, filename=None):
"""Deprecated, use read_file instead."""
warnings.warn(
"This method will be removed in future versions. "
"Use 'parser.read_file()' instead.",
DeprecationWarning, stacklevel=2
)
self.read_file(fp, source=filename)
|
[
"def",
"readfp",
"(",
"self",
",",
"fp",
",",
"filename",
"=",
"None",
")",
":",
"warnings",
".",
"warn",
"(",
"\"This method will be removed in future versions. \"",
"\"Use 'parser.read_file()' instead.\"",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
")",
"self",
".",
"read_file",
"(",
"fp",
",",
"source",
"=",
"filename",
")"
] |
Deprecated, use read_file instead.
|
[
"Deprecated",
"use",
"read_file",
"instead",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L762-L769
|
8,572
|
wakatime/wakatime
|
wakatime/packages/configparser/__init__.py
|
RawConfigParser.has_option
|
def has_option(self, section, option):
"""Check for the existence of a given option in a given section.
If the specified `section' is None or an empty string, DEFAULT is
assumed. If the specified `section' does not exist, returns False."""
if not section or section == self.default_section:
option = self.optionxform(option)
return option in self._defaults
elif section not in self._sections:
return False
else:
option = self.optionxform(option)
return (option in self._sections[section]
or option in self._defaults)
|
python
|
def has_option(self, section, option):
"""Check for the existence of a given option in a given section.
If the specified `section' is None or an empty string, DEFAULT is
assumed. If the specified `section' does not exist, returns False."""
if not section or section == self.default_section:
option = self.optionxform(option)
return option in self._defaults
elif section not in self._sections:
return False
else:
option = self.optionxform(option)
return (option in self._sections[section]
or option in self._defaults)
|
[
"def",
"has_option",
"(",
"self",
",",
"section",
",",
"option",
")",
":",
"if",
"not",
"section",
"or",
"section",
"==",
"self",
".",
"default_section",
":",
"option",
"=",
"self",
".",
"optionxform",
"(",
"option",
")",
"return",
"option",
"in",
"self",
".",
"_defaults",
"elif",
"section",
"not",
"in",
"self",
".",
"_sections",
":",
"return",
"False",
"else",
":",
"option",
"=",
"self",
".",
"optionxform",
"(",
"option",
")",
"return",
"(",
"option",
"in",
"self",
".",
"_sections",
"[",
"section",
"]",
"or",
"option",
"in",
"self",
".",
"_defaults",
")"
] |
Check for the existence of a given option in a given section.
If the specified `section' is None or an empty string, DEFAULT is
assumed. If the specified `section' does not exist, returns False.
|
[
"Check",
"for",
"the",
"existence",
"of",
"a",
"given",
"option",
"in",
"a",
"given",
"section",
".",
"If",
"the",
"specified",
"section",
"is",
"None",
"or",
"an",
"empty",
"string",
"DEFAULT",
"is",
"assumed",
".",
"If",
"the",
"specified",
"section",
"does",
"not",
"exist",
"returns",
"False",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L896-L908
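A small sketch of has_option()'s fallback behaviour, using the stdlib configparser:

from configparser import ConfigParser

cp = ConfigParser(defaults={'timeout': '30'})
cp.read_dict({'net': {'host': 'example.com'}})
print(cp.has_option('net', 'timeout'))    # True  -- falls back to the DEFAULT section
print(cp.has_option('missing', 'host'))   # False -- unknown section, no exception raised
print(cp.has_option('', 'timeout'))       # True  -- ''/None means the DEFAULT section itself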
|
8,573
|
wakatime/wakatime
|
wakatime/packages/configparser/__init__.py
|
RawConfigParser._write_section
|
def _write_section(self, fp, section_name, section_items, delimiter):
"""Write a single section to the specified `fp'."""
fp.write("[{0}]\n".format(section_name))
for key, value in section_items:
value = self._interpolation.before_write(self, section_name, key,
value)
if value is not None or not self._allow_no_value:
value = delimiter + str(value).replace('\n', '\n\t')
else:
value = ""
fp.write("{0}{1}\n".format(key, value))
fp.write("\n")
|
python
|
def _write_section(self, fp, section_name, section_items, delimiter):
"""Write a single section to the specified `fp'."""
fp.write("[{0}]\n".format(section_name))
for key, value in section_items:
value = self._interpolation.before_write(self, section_name, key,
value)
if value is not None or not self._allow_no_value:
value = delimiter + str(value).replace('\n', '\n\t')
else:
value = ""
fp.write("{0}{1}\n".format(key, value))
fp.write("\n")
|
[
"def",
"_write_section",
"(",
"self",
",",
"fp",
",",
"section_name",
",",
"section_items",
",",
"delimiter",
")",
":",
"fp",
".",
"write",
"(",
"\"[{0}]\\n\"",
".",
"format",
"(",
"section_name",
")",
")",
"for",
"key",
",",
"value",
"in",
"section_items",
":",
"value",
"=",
"self",
".",
"_interpolation",
".",
"before_write",
"(",
"self",
",",
"section_name",
",",
"key",
",",
"value",
")",
"if",
"value",
"is",
"not",
"None",
"or",
"not",
"self",
".",
"_allow_no_value",
":",
"value",
"=",
"delimiter",
"+",
"str",
"(",
"value",
")",
".",
"replace",
"(",
"'\\n'",
",",
"'\\n\\t'",
")",
"else",
":",
"value",
"=",
"\"\"",
"fp",
".",
"write",
"(",
"\"{0}{1}\\n\"",
".",
"format",
"(",
"key",
",",
"value",
")",
")",
"fp",
".",
"write",
"(",
"\"\\n\"",
")"
] |
Write a single section to the specified `fp'.
|
[
"Write",
"a",
"single",
"section",
"to",
"the",
"specified",
"fp",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L941-L952
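_write_section() is what write() calls once per section; a sketch of the resulting text, using the stdlib configparser:

import io
from configparser import ConfigParser

cp = ConfigParser()
cp.read_dict({'paths': {'home': '/tmp', 'note': 'line1\nline2'}})
buf = io.StringIO()
cp.write(buf)
print(buf.getvalue())
# [paths]
# home = /tmp
# note = line1
# \tline2    <- embedded newlines become tab-indented continuation lines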
|
8,574
|
wakatime/wakatime
|
wakatime/packages/configparser/__init__.py
|
RawConfigParser._unify_values
|
def _unify_values(self, section, vars):
"""Create a sequence of lookups with 'vars' taking priority over
the 'section' which takes priority over the DEFAULTSECT.
"""
sectiondict = {}
try:
sectiondict = self._sections[section]
except KeyError:
if section != self.default_section:
raise NoSectionError(section)
# Update with the entry specific variables
vardict = {}
if vars:
for key, value in vars.items():
if value is not None:
value = str(value)
vardict[self.optionxform(key)] = value
return _ChainMap(vardict, sectiondict, self._defaults)
|
python
|
def _unify_values(self, section, vars):
"""Create a sequence of lookups with 'vars' taking priority over
the 'section' which takes priority over the DEFAULTSECT.
"""
sectiondict = {}
try:
sectiondict = self._sections[section]
except KeyError:
if section != self.default_section:
raise NoSectionError(section)
# Update with the entry specific variables
vardict = {}
if vars:
for key, value in vars.items():
if value is not None:
value = str(value)
vardict[self.optionxform(key)] = value
return _ChainMap(vardict, sectiondict, self._defaults)
|
[
"def",
"_unify_values",
"(",
"self",
",",
"section",
",",
"vars",
")",
":",
"sectiondict",
"=",
"{",
"}",
"try",
":",
"sectiondict",
"=",
"self",
".",
"_sections",
"[",
"section",
"]",
"except",
"KeyError",
":",
"if",
"section",
"!=",
"self",
".",
"default_section",
":",
"raise",
"NoSectionError",
"(",
"section",
")",
"# Update with the entry specific variables",
"vardict",
"=",
"{",
"}",
"if",
"vars",
":",
"for",
"key",
",",
"value",
"in",
"vars",
".",
"items",
"(",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"value",
"=",
"str",
"(",
"value",
")",
"vardict",
"[",
"self",
".",
"optionxform",
"(",
"key",
")",
"]",
"=",
"value",
"return",
"_ChainMap",
"(",
"vardict",
",",
"sectiondict",
",",
"self",
".",
"_defaults",
")"
] |
Create a sequence of lookups with 'vars' taking priority over
the 'section' which takes priority over the DEFAULTSECT.
|
[
"Create",
"a",
"sequence",
"of",
"lookups",
"with",
"vars",
"taking",
"priority",
"over",
"the",
"section",
"which",
"takes",
"priority",
"over",
"the",
"DEFAULTSECT",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L1152-L1170
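The chain built by _unify_values() is what get() resolves against; a sketch of the precedence (vars over section over DEFAULT), using the stdlib configparser:

from configparser import ConfigParser

cp = ConfigParser(defaults={'colour': 'blue', 'size': 'M'})
cp.read_dict({'shirt': {'colour': 'red'}})
print(cp.get('shirt', 'colour', vars={'colour': 'green'}))  # 'green' -- vars wins
print(cp.get('shirt', 'colour'))                            # 'red'   -- section beats DEFAULT
print(cp.get('shirt', 'size'))                              # 'M'     -- falls back to DEFAULT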
|
8,575
|
wakatime/wakatime
|
wakatime/packages/configparser/__init__.py
|
RawConfigParser._convert_to_boolean
|
def _convert_to_boolean(self, value):
"""Return a boolean value translating from other types if necessary.
"""
if value.lower() not in self.BOOLEAN_STATES:
raise ValueError('Not a boolean: %s' % value)
return self.BOOLEAN_STATES[value.lower()]
|
python
|
def _convert_to_boolean(self, value):
"""Return a boolean value translating from other types if necessary.
"""
if value.lower() not in self.BOOLEAN_STATES:
raise ValueError('Not a boolean: %s' % value)
return self.BOOLEAN_STATES[value.lower()]
|
[
"def",
"_convert_to_boolean",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
".",
"lower",
"(",
")",
"not",
"in",
"self",
".",
"BOOLEAN_STATES",
":",
"raise",
"ValueError",
"(",
"'Not a boolean: %s'",
"%",
"value",
")",
"return",
"self",
".",
"BOOLEAN_STATES",
"[",
"value",
".",
"lower",
"(",
")",
"]"
] |
Return a boolean value translating from other types if necessary.
|
[
"Return",
"a",
"boolean",
"value",
"translating",
"from",
"other",
"types",
"if",
"necessary",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L1172-L1177
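_convert_to_boolean() backs getboolean(); a small sketch with the stdlib configparser:

from configparser import ConfigParser

cp = ConfigParser()
cp.read_string("[flags]\na = yes\nb = 0\nc = maybe\n")
print(cp.getboolean('flags', 'a'))   # True  ('1', 'yes', 'true', 'on' map to True)
print(cp.getboolean('flags', 'b'))   # False ('0', 'no', 'false', 'off' map to False)
try:
    cp.getboolean('flags', 'c')
except ValueError as exc:
    print(exc)                       # Not a boolean: maybe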
|
8,576
|
wakatime/wakatime
|
wakatime/packages/configparser/__init__.py
|
RawConfigParser._validate_value_types
|
def _validate_value_types(self, **kwargs):
"""Raises a TypeError for non-string values.
The only legal non-string value if we allow valueless
options is None, so we need to check if the value is a
string if:
- we do not allow valueless options, or
- we allow valueless options but the value is not None
For compatibility reasons this method is not used in classic set()
for RawConfigParsers. It is invoked in every case for mapping protocol
access and in ConfigParser.set().
"""
# keyword-only arguments
section = kwargs.get('section', "")
option = kwargs.get('option', "")
value = kwargs.get('value', "")
if PY2 and bytes in (type(section), type(option), type(value)):
# we allow for a little unholy magic for Python 2 so that
# people not using unicode_literals can still use the library
# conveniently
warnings.warn(
"You passed a bytestring. Implicitly decoding as UTF-8 string."
" This will not work on Python 3. Please switch to using"
" Unicode strings across the board.",
DeprecationWarning,
stacklevel=2,
)
if isinstance(section, bytes):
section = section.decode('utf8')
if isinstance(option, bytes):
option = option.decode('utf8')
if isinstance(value, bytes):
value = value.decode('utf8')
if not isinstance(section, str):
raise TypeError("section names must be strings")
if not isinstance(option, str):
raise TypeError("option keys must be strings")
if not self._allow_no_value or value:
if not isinstance(value, str):
raise TypeError("option values must be strings")
return section, option, value
|
python
|
def _validate_value_types(self, **kwargs):
"""Raises a TypeError for non-string values.
The only legal non-string value if we allow valueless
options is None, so we need to check if the value is a
string if:
- we do not allow valueless options, or
- we allow valueless options but the value is not None
For compatibility reasons this method is not used in classic set()
for RawConfigParsers. It is invoked in every case for mapping protocol
access and in ConfigParser.set().
"""
# keyword-only arguments
section = kwargs.get('section', "")
option = kwargs.get('option', "")
value = kwargs.get('value', "")
if PY2 and bytes in (type(section), type(option), type(value)):
# we allow for a little unholy magic for Python 2 so that
# people not using unicode_literals can still use the library
# conveniently
warnings.warn(
"You passed a bytestring. Implicitly decoding as UTF-8 string."
" This will not work on Python 3. Please switch to using"
" Unicode strings across the board.",
DeprecationWarning,
stacklevel=2,
)
if isinstance(section, bytes):
section = section.decode('utf8')
if isinstance(option, bytes):
option = option.decode('utf8')
if isinstance(value, bytes):
value = value.decode('utf8')
if not isinstance(section, str):
raise TypeError("section names must be strings")
if not isinstance(option, str):
raise TypeError("option keys must be strings")
if not self._allow_no_value or value:
if not isinstance(value, str):
raise TypeError("option values must be strings")
return section, option, value
|
[
"def",
"_validate_value_types",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# keyword-only arguments",
"section",
"=",
"kwargs",
".",
"get",
"(",
"'section'",
",",
"\"\"",
")",
"option",
"=",
"kwargs",
".",
"get",
"(",
"'option'",
",",
"\"\"",
")",
"value",
"=",
"kwargs",
".",
"get",
"(",
"'value'",
",",
"\"\"",
")",
"if",
"PY2",
"and",
"bytes",
"in",
"(",
"type",
"(",
"section",
")",
",",
"type",
"(",
"option",
")",
",",
"type",
"(",
"value",
")",
")",
":",
"# we allow for a little unholy magic for Python 2 so that",
"# people not using unicode_literals can still use the library",
"# conveniently",
"warnings",
".",
"warn",
"(",
"\"You passed a bytestring. Implicitly decoding as UTF-8 string.\"",
"\" This will not work on Python 3. Please switch to using\"",
"\" Unicode strings across the board.\"",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
",",
")",
"if",
"isinstance",
"(",
"section",
",",
"bytes",
")",
":",
"section",
"=",
"section",
".",
"decode",
"(",
"'utf8'",
")",
"if",
"isinstance",
"(",
"option",
",",
"bytes",
")",
":",
"option",
"=",
"option",
".",
"decode",
"(",
"'utf8'",
")",
"if",
"isinstance",
"(",
"value",
",",
"bytes",
")",
":",
"value",
"=",
"value",
".",
"decode",
"(",
"'utf8'",
")",
"if",
"not",
"isinstance",
"(",
"section",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"section names must be strings\"",
")",
"if",
"not",
"isinstance",
"(",
"option",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"option keys must be strings\"",
")",
"if",
"not",
"self",
".",
"_allow_no_value",
"or",
"value",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"option values must be strings\"",
")",
"return",
"section",
",",
"option",
",",
"value"
] |
Raises a TypeError for non-string values.
The only legal non-string value if we allow valueless
options is None, so we need to check if the value is a
string if:
- we do not allow valueless options, or
- we allow valueless options but the value is not None
For compatibility reasons this method is not used in classic set()
for RawConfigParsers. It is invoked in every case for mapping protocol
access and in ConfigParser.set().
|
[
"Raises",
"a",
"TypeError",
"for",
"non",
"-",
"string",
"values",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L1179-L1223
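A sketch of the validation as seen through the mapping protocol, using the stdlib configparser (the Python 2 bytes-decoding branch is not reproduced here):

from configparser import ConfigParser

cp = ConfigParser()
cp.add_section('numbers')
try:
    cp['numbers']['answer'] = 42     # mapping-protocol writes go through the validator
except TypeError as exc:
    print(exc)                       # option values must be strings
cp['numbers']['answer'] = '42'       # strings are accepted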
|
8,577
|
wakatime/wakatime
|
wakatime/packages/configparser/__init__.py
|
ConfigParser.set
|
def set(self, section, option, value=None):
"""Set an option. Extends RawConfigParser.set by validating type and
interpolation syntax on the value."""
_, option, value = self._validate_value_types(option=option, value=value)
super(ConfigParser, self).set(section, option, value)
|
python
|
def set(self, section, option, value=None):
"""Set an option. Extends RawConfigParser.set by validating type and
interpolation syntax on the value."""
_, option, value = self._validate_value_types(option=option, value=value)
super(ConfigParser, self).set(section, option, value)
|
[
"def",
"set",
"(",
"self",
",",
"section",
",",
"option",
",",
"value",
"=",
"None",
")",
":",
"_",
",",
"option",
",",
"value",
"=",
"self",
".",
"_validate_value_types",
"(",
"option",
"=",
"option",
",",
"value",
"=",
"value",
")",
"super",
"(",
"ConfigParser",
",",
"self",
")",
".",
"set",
"(",
"section",
",",
"option",
",",
"value",
")"
] |
Set an option. Extends RawConfigParser.set by validating type and
interpolation syntax on the value.
|
[
"Set",
"an",
"option",
".",
"Extends",
"RawConfigParser",
".",
"set",
"by",
"validating",
"type",
"and",
"interpolation",
"syntax",
"on",
"the",
"value",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L1235-L1239
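A short sketch of the extra checks ConfigParser.set() layers on top of RawConfigParser.set(), using the stdlib configparser:

from configparser import ConfigParser

cp = ConfigParser()
cp.add_section('greeting')
cp.set('greeting', 'name', 'world')
cp.set('greeting', 'message', 'hello %(name)s')
print(cp.get('greeting', 'message'))          # hello world
try:
    cp.set('greeting', 'broken', '100%')      # a stray '%' fails the interpolation syntax check
except ValueError as exc:
    print(exc)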
|
8,578
|
wakatime/wakatime
|
wakatime/packages/configparser/__init__.py
|
ConfigParser.add_section
|
def add_section(self, section):
"""Create a new section in the configuration. Extends
RawConfigParser.add_section by validating if the section name is
a string."""
section, _, _ = self._validate_value_types(section=section)
super(ConfigParser, self).add_section(section)
|
python
|
def add_section(self, section):
"""Create a new section in the configuration. Extends
RawConfigParser.add_section by validating if the section name is
a string."""
section, _, _ = self._validate_value_types(section=section)
super(ConfigParser, self).add_section(section)
|
[
"def",
"add_section",
"(",
"self",
",",
"section",
")",
":",
"section",
",",
"_",
",",
"_",
"=",
"self",
".",
"_validate_value_types",
"(",
"section",
"=",
"section",
")",
"super",
"(",
"ConfigParser",
",",
"self",
")",
".",
"add_section",
"(",
"section",
")"
] |
Create a new section in the configuration. Extends
RawConfigParser.add_section by validating if the section name is
a string.
|
[
"Create",
"a",
"new",
"section",
"in",
"the",
"configuration",
".",
"Extends",
"RawConfigParser",
".",
"add_section",
"by",
"validating",
"if",
"the",
"section",
"name",
"is",
"a",
"string",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L1241-L1246
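A small sketch of the type check added by ConfigParser.add_section(), using the stdlib configparser:

from configparser import ConfigParser

cp = ConfigParser()
cp.add_section('valid-name')
try:
    cp.add_section(42)               # non-string section names are rejected here
except TypeError as exc:
    print(exc)                       # section names must be strings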
|
8,579
|
wakatime/wakatime
|
wakatime/packages/configparser/__init__.py
|
SectionProxy.get
|
def get(self, option, fallback=None, **kwargs):
"""Get an option value.
Unless `fallback` is provided, `None` will be returned if the option
is not found.
"""
# keyword-only arguments
kwargs.setdefault('raw', False)
kwargs.setdefault('vars', None)
_impl = kwargs.pop('_impl', None)
# If `_impl` is provided, it should be a getter method on the parser
# object that provides the desired type conversion.
if not _impl:
_impl = self._parser.get
return _impl(self._name, option, fallback=fallback, **kwargs)
|
python
|
def get(self, option, fallback=None, **kwargs):
"""Get an option value.
Unless `fallback` is provided, `None` will be returned if the option
is not found.
"""
# keyword-only arguments
kwargs.setdefault('raw', False)
kwargs.setdefault('vars', None)
_impl = kwargs.pop('_impl', None)
# If `_impl` is provided, it should be a getter method on the parser
# object that provides the desired type conversion.
if not _impl:
_impl = self._parser.get
return _impl(self._name, option, fallback=fallback, **kwargs)
|
[
"def",
"get",
"(",
"self",
",",
"option",
",",
"fallback",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# keyword-only arguments",
"kwargs",
".",
"setdefault",
"(",
"'raw'",
",",
"False",
")",
"kwargs",
".",
"setdefault",
"(",
"'vars'",
",",
"None",
")",
"_impl",
"=",
"kwargs",
".",
"pop",
"(",
"'_impl'",
",",
"None",
")",
"# If `_impl` is provided, it should be a getter method on the parser",
"# object that provides the desired type conversion.",
"if",
"not",
"_impl",
":",
"_impl",
"=",
"self",
".",
"_parser",
".",
"get",
"return",
"_impl",
"(",
"self",
".",
"_name",
",",
"option",
",",
"fallback",
"=",
"fallback",
",",
"*",
"*",
"kwargs",
")"
] |
Get an option value.
Unless `fallback` is provided, `None` will be returned if the option
is not found.
|
[
"Get",
"an",
"option",
"value",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L1316-L1331
|
8,580
|
wakatime/wakatime
|
wakatime/packages/pygments/lexers/rebol.py
|
RebolLexer.analyse_text
|
def analyse_text(text):
"""
    Check if code contains REBOL header and so it is probably not R code
"""
if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE):
# The code starts with REBOL header
return 1.0
    elif re.search(r'\s*REBOL\s*\[', text, re.IGNORECASE):
# The code contains REBOL header but also some text before it
return 0.5
|
python
|
def analyse_text(text):
"""
    Check if code contains REBOL header and so it is probably not R code
"""
if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE):
# The code starts with REBOL header
return 1.0
    elif re.search(r'\s*REBOL\s*\[', text, re.IGNORECASE):
# The code contains REBOL header but also some text before it
return 0.5
|
[
"def",
"analyse_text",
"(",
"text",
")",
":",
"if",
"re",
".",
"match",
"(",
"r'^\\s*REBOL\\s*\\['",
",",
"text",
",",
"re",
".",
"IGNORECASE",
")",
":",
"# The code starts with REBOL header",
"return",
"1.0",
"elif",
"re",
".",
"search",
"(",
"r'\\s*REBOL\\s*['",
",",
"text",
",",
"re",
".",
"IGNORECASE",
")",
":",
"# The code contains REBOL header but also some text before it",
"return",
"0.5"
] |
Check if code contains REBOL header and so it is probably not R code
|
[
"Check",
"if",
"code",
"contains",
"REBOL",
"header",
"and",
"so",
"it",
"probably",
"not",
"R",
"code"
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/rebol.py#L235-L244
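A standalone sketch of the leading-header check with plain re (inside Pygments this score feeds lexer guessing); it only reproduces the first branch:

import re

code = 'REBOL [ Title: "demo" ]\nprint "hi"\n'
print(1.0 if re.match(r'^\s*REBOL\s*\[', code, re.IGNORECASE) else 0.0)       # 1.0 -- header at the start
print(1.0 if re.match(r'^\s*REBOL\s*\[', 'x <- 1', re.IGNORECASE) else 0.0)   # 0.0 -- looks more like R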
|
8,581
|
wakatime/wakatime
|
wakatime/packages/ntlm_auth/compute_keys.py
|
get_sign_key
|
def get_sign_key(exported_session_key, magic_constant):
"""
3.4.5.2 SIGNKEY
@param exported_session_key: A 128-bit session key used to derive signing and sealing keys
@param magic_constant: A constant value set in the MS-NLMP documentation (constants.SignSealConstants)
@return sign_key: Key used to sign messages
"""
sign_key = hashlib.md5(exported_session_key + magic_constant).digest()
return sign_key
|
python
|
def get_sign_key(exported_session_key, magic_constant):
"""
3.4.5.2 SIGNKEY
@param exported_session_key: A 128-bit session key used to derive signing and sealing keys
@param magic_constant: A constant value set in the MS-NLMP documentation (constants.SignSealConstants)
@return sign_key: Key used to sign messages
"""
sign_key = hashlib.md5(exported_session_key + magic_constant).digest()
return sign_key
|
[
"def",
"get_sign_key",
"(",
"exported_session_key",
",",
"magic_constant",
")",
":",
"sign_key",
"=",
"hashlib",
".",
"md5",
"(",
"exported_session_key",
"+",
"magic_constant",
")",
".",
"digest",
"(",
")",
"return",
"sign_key"
] |
3.4.5.2 SIGNKEY
@param exported_session_key: A 128-bit session key used to derive signing and sealing keys
@param magic_constant: A constant value set in the MS-NLMP documentation (constants.SignSealConstants)
@return sign_key: Key used to sign messages
|
[
"3",
".",
"4",
".",
"5",
".",
"2",
"SIGNKEY"
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/ntlm_auth/compute_keys.py#L63-L74
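A minimal sketch of the derivation using hashlib only; the session key is random here and the magic constant is written out as an assumption of the MS-NLMP client-to-server signing constant (ntlm_auth normally supplies it via constants.SignSealConstants, as the docstring says):

import hashlib
import os

exported_session_key = os.urandom(16)   # stand-in 128-bit session key
# Assumed value of the client-to-server signing constant from MS-NLMP 3.4.5.2 (verify against the spec):
magic_constant = b"session key to client-to-server signing key magic constant\x00"

sign_key = hashlib.md5(exported_session_key + magic_constant).digest()
print(len(sign_key))                    # 16 -- an MD5 digest, used as the signing key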
|
8,582
|
wakatime/wakatime
|
wakatime/packages/urllib3/util/wait.py
|
_wait_for_io_events
|
def _wait_for_io_events(socks, events, timeout=None):
""" Waits for IO events to be available from a list of sockets
or optionally a single socket if passed in. Returns a list of
sockets that can be interacted with immediately. """
if not HAS_SELECT:
raise ValueError('Platform does not have a selector')
if not isinstance(socks, list):
# Probably just a single socket.
if hasattr(socks, "fileno"):
socks = [socks]
# Otherwise it might be a non-list iterable.
else:
socks = list(socks)
with DefaultSelector() as selector:
for sock in socks:
selector.register(sock, events)
return [key[0].fileobj for key in
selector.select(timeout) if key[1] & events]
|
python
|
def _wait_for_io_events(socks, events, timeout=None):
""" Waits for IO events to be available from a list of sockets
or optionally a single socket if passed in. Returns a list of
sockets that can be interacted with immediately. """
if not HAS_SELECT:
raise ValueError('Platform does not have a selector')
if not isinstance(socks, list):
# Probably just a single socket.
if hasattr(socks, "fileno"):
socks = [socks]
# Otherwise it might be a non-list iterable.
else:
socks = list(socks)
with DefaultSelector() as selector:
for sock in socks:
selector.register(sock, events)
return [key[0].fileobj for key in
selector.select(timeout) if key[1] & events]
|
[
"def",
"_wait_for_io_events",
"(",
"socks",
",",
"events",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"not",
"HAS_SELECT",
":",
"raise",
"ValueError",
"(",
"'Platform does not have a selector'",
")",
"if",
"not",
"isinstance",
"(",
"socks",
",",
"list",
")",
":",
"# Probably just a single socket.",
"if",
"hasattr",
"(",
"socks",
",",
"\"fileno\"",
")",
":",
"socks",
"=",
"[",
"socks",
"]",
"# Otherwise it might be a non-list iterable.",
"else",
":",
"socks",
"=",
"list",
"(",
"socks",
")",
"with",
"DefaultSelector",
"(",
")",
"as",
"selector",
":",
"for",
"sock",
"in",
"socks",
":",
"selector",
".",
"register",
"(",
"sock",
",",
"events",
")",
"return",
"[",
"key",
"[",
"0",
"]",
".",
"fileobj",
"for",
"key",
"in",
"selector",
".",
"select",
"(",
"timeout",
")",
"if",
"key",
"[",
"1",
"]",
"&",
"events",
"]"
] |
Waits for IO events to be available from a list of sockets
or optionally a single socket if passed in. Returns a list of
sockets that can be interacted with immediately.
|
[
"Waits",
"for",
"IO",
"events",
"to",
"be",
"available",
"from",
"a",
"list",
"of",
"sockets",
"or",
"optionally",
"a",
"single",
"socket",
"if",
"passed",
"in",
".",
"Returns",
"a",
"list",
"of",
"sockets",
"that",
"can",
"be",
"interacted",
"with",
"immediately",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/urllib3/util/wait.py#L9-L26
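A minimal sketch of the same ready-socket pattern using the stdlib selectors module rather than urllib3's internal DefaultSelector (wait_readable is a name invented for this sketch):

import selectors
import socket

def wait_readable(socks, timeout=1.0):
    # register each socket for read events and return the ones that are ready
    with selectors.DefaultSelector() as sel:
        for sock in socks:
            sel.register(sock, selectors.EVENT_READ)
        return [key.fileobj for key, events in sel.select(timeout)
                if events & selectors.EVENT_READ]

a, b = socket.socketpair()
b.sendall(b"ping")
print(wait_readable([a]) == [a])   # True -- 'a' has data waiting to be read
a.close()
b.close()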
|
8,583
|
wakatime/wakatime
|
wakatime/packages/pygments/util.py
|
make_analysator
|
def make_analysator(f):
"""Return a static text analyser function that returns float values."""
def text_analyse(text):
try:
rv = f(text)
except Exception:
return 0.0
if not rv:
return 0.0
try:
return min(1.0, max(0.0, float(rv)))
except (ValueError, TypeError):
return 0.0
text_analyse.__doc__ = f.__doc__
return staticmethod(text_analyse)
|
python
|
def make_analysator(f):
"""Return a static text analyser function that returns float values."""
def text_analyse(text):
try:
rv = f(text)
except Exception:
return 0.0
if not rv:
return 0.0
try:
return min(1.0, max(0.0, float(rv)))
except (ValueError, TypeError):
return 0.0
text_analyse.__doc__ = f.__doc__
return staticmethod(text_analyse)
|
[
"def",
"make_analysator",
"(",
"f",
")",
":",
"def",
"text_analyse",
"(",
"text",
")",
":",
"try",
":",
"rv",
"=",
"f",
"(",
"text",
")",
"except",
"Exception",
":",
"return",
"0.0",
"if",
"not",
"rv",
":",
"return",
"0.0",
"try",
":",
"return",
"min",
"(",
"1.0",
",",
"max",
"(",
"0.0",
",",
"float",
"(",
"rv",
")",
")",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"return",
"0.0",
"text_analyse",
".",
"__doc__",
"=",
"f",
".",
"__doc__",
"return",
"staticmethod",
"(",
"text_analyse",
")"
] |
Return a static text analyser function that returns float values.
|
[
"Return",
"a",
"static",
"text",
"analyser",
"function",
"that",
"returns",
"float",
"values",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/util.py#L108-L122
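A small usage sketch, assuming Pygments is installed so make_analysator can be imported from pygments.util as in this vendored copy (ToyLexerLike is an invented class, not a real lexer); raw scores are clamped to [0.0, 1.0] and falsy results or exceptions become 0.0:

from pygments.util import make_analysator

class ToyLexerLike:
    # analyse_text normally lives on a Lexer subclass; the wrapper turns it into a staticmethod
    analyse_text = make_analysator(lambda text: 5 if 'magic' in text else None)

print(ToyLexerLike.analyse_text('magic header'))   # 1.0 -- clamped from 5
print(ToyLexerLike.analyse_text('plain text'))     # 0.0 -- falsy result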
|
8,584
|
wakatime/wakatime
|
wakatime/packages/pygments/util.py
|
shebang_matches
|
def shebang_matches(text, regex):
r"""Check if the given regular expression matches the last part of the
shebang if one exists.
>>> from pygments.util import shebang_matches
>>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/startsomethingwith python',
... r'python(2\.\d)?')
True
It also checks for common windows executable file extensions::
>>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
True
Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does
the same as ``'perl -e'``)
Note that this method automatically searches the whole string (eg:
the regular expression is wrapped in ``'^$'``)
"""
index = text.find('\n')
if index >= 0:
first_line = text[:index].lower()
else:
first_line = text.lower()
if first_line.startswith('#!'):
try:
found = [x for x in split_path_re.split(first_line[2:].strip())
if x and not x.startswith('-')][-1]
except IndexError:
return False
regex = re.compile(r'^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
if regex.search(found) is not None:
return True
return False
|
python
|
def shebang_matches(text, regex):
r"""Check if the given regular expression matches the last part of the
shebang if one exists.
>>> from pygments.util import shebang_matches
>>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/startsomethingwith python',
... r'python(2\.\d)?')
True
It also checks for common windows executable file extensions::
>>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
True
Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does
the same as ``'perl -e'``)
Note that this method automatically searches the whole string (eg:
the regular expression is wrapped in ``'^$'``)
"""
index = text.find('\n')
if index >= 0:
first_line = text[:index].lower()
else:
first_line = text.lower()
if first_line.startswith('#!'):
try:
found = [x for x in split_path_re.split(first_line[2:].strip())
if x and not x.startswith('-')][-1]
except IndexError:
return False
regex = re.compile(r'^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
if regex.search(found) is not None:
return True
return False
|
[
"def",
"shebang_matches",
"(",
"text",
",",
"regex",
")",
":",
"index",
"=",
"text",
".",
"find",
"(",
"'\\n'",
")",
"if",
"index",
">=",
"0",
":",
"first_line",
"=",
"text",
"[",
":",
"index",
"]",
".",
"lower",
"(",
")",
"else",
":",
"first_line",
"=",
"text",
".",
"lower",
"(",
")",
"if",
"first_line",
".",
"startswith",
"(",
"'#!'",
")",
":",
"try",
":",
"found",
"=",
"[",
"x",
"for",
"x",
"in",
"split_path_re",
".",
"split",
"(",
"first_line",
"[",
"2",
":",
"]",
".",
"strip",
"(",
")",
")",
"if",
"x",
"and",
"not",
"x",
".",
"startswith",
"(",
"'-'",
")",
"]",
"[",
"-",
"1",
"]",
"except",
"IndexError",
":",
"return",
"False",
"regex",
"=",
"re",
".",
"compile",
"(",
"r'^%s(\\.(exe|cmd|bat|bin))?$'",
"%",
"regex",
",",
"re",
".",
"IGNORECASE",
")",
"if",
"regex",
".",
"search",
"(",
"found",
")",
"is",
"not",
"None",
":",
"return",
"True",
"return",
"False"
] |
r"""Check if the given regular expression matches the last part of the
shebang if one exists.
>>> from pygments.util import shebang_matches
>>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
True
>>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
False
>>> shebang_matches('#!/usr/bin/startsomethingwith python',
... r'python(2\.\d)?')
True
It also checks for common windows executable file extensions::
>>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
True
Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does
the same as ``'perl -e'``)
Note that this method automatically searches the whole string (eg:
the regular expression is wrapped in ``'^$'``)
|
[
"r",
"Check",
"if",
"the",
"given",
"regular",
"expression",
"matches",
"the",
"last",
"part",
"of",
"the",
"shebang",
"if",
"one",
"exists",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/util.py#L125-L167
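A short usage sketch, assuming Pygments is installed so the helper can be imported from pygments.util as in this vendored copy:

from pygments.util import shebang_matches

script = '#!/usr/bin/env python3\nprint("hi")\n'
print(shebang_matches(script, r'python(\d(\.\d)?)?'))                  # True
print(shebang_matches('#!/bin/sh\necho hi\n', r'python(\d(\.\d)?)?'))  # False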
|
8,585
|
wakatime/wakatime
|
wakatime/packages/pygments/util.py
|
looks_like_xml
|
def looks_like_xml(text):
"""Check if a doctype exists or if we have some tags."""
if xml_decl_re.match(text):
return True
key = hash(text)
try:
return _looks_like_xml_cache[key]
except KeyError:
m = doctype_lookup_re.match(text)
if m is not None:
return True
rv = tag_re.search(text[:1000]) is not None
_looks_like_xml_cache[key] = rv
return rv
|
python
|
def looks_like_xml(text):
"""Check if a doctype exists or if we have some tags."""
if xml_decl_re.match(text):
return True
key = hash(text)
try:
return _looks_like_xml_cache[key]
except KeyError:
m = doctype_lookup_re.match(text)
if m is not None:
return True
rv = tag_re.search(text[:1000]) is not None
_looks_like_xml_cache[key] = rv
return rv
|
[
"def",
"looks_like_xml",
"(",
"text",
")",
":",
"if",
"xml_decl_re",
".",
"match",
"(",
"text",
")",
":",
"return",
"True",
"key",
"=",
"hash",
"(",
"text",
")",
"try",
":",
"return",
"_looks_like_xml_cache",
"[",
"key",
"]",
"except",
"KeyError",
":",
"m",
"=",
"doctype_lookup_re",
".",
"match",
"(",
"text",
")",
"if",
"m",
"is",
"not",
"None",
":",
"return",
"True",
"rv",
"=",
"tag_re",
".",
"search",
"(",
"text",
"[",
":",
"1000",
"]",
")",
"is",
"not",
"None",
"_looks_like_xml_cache",
"[",
"key",
"]",
"=",
"rv",
"return",
"rv"
] |
Check if a doctype exists or if we have some tags.
|
[
"Check",
"if",
"a",
"doctype",
"exists",
"or",
"if",
"we",
"have",
"some",
"tags",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/util.py#L191-L204
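A short usage sketch, assuming Pygments is installed so the helper can be imported from pygments.util as in this vendored copy:

from pygments.util import looks_like_xml

print(looks_like_xml('<?xml version="1.0"?><root/>'))    # True  -- XML declaration
print(looks_like_xml('<html><body>hi</body></html>'))    # True  -- tags within the first 1000 chars
print(looks_like_xml('just plain text'))                 # False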
|
8,586
|
wakatime/wakatime
|
wakatime/packages/pygments/util.py
|
unirange
|
def unirange(a, b):
"""Returns a regular expression string to match the given non-BMP range."""
if b < a:
raise ValueError("Bad character range")
if a < 0x10000 or b < 0x10000:
raise ValueError("unirange is only defined for non-BMP ranges")
if sys.maxunicode > 0xffff:
# wide build
return u'[%s-%s]' % (unichr(a), unichr(b))
else:
# narrow build stores surrogates, and the 're' module handles them
# (incorrectly) as characters. Since there is still ordering among
# these characters, expand the range to one that it understands. Some
# background in http://bugs.python.org/issue3665 and
# http://bugs.python.org/issue12749
#
# Additionally, the lower constants are using unichr rather than
# literals because jython [which uses the wide path] can't load this
# file if they are literals.
ah, al = _surrogatepair(a)
bh, bl = _surrogatepair(b)
if ah == bh:
return u'(?:%s[%s-%s])' % (unichr(ah), unichr(al), unichr(bl))
else:
buf = []
buf.append(u'%s[%s-%s]' %
(unichr(ah), unichr(al),
ah == bh and unichr(bl) or unichr(0xdfff)))
if ah - bh > 1:
                buf.append(u'[%s-%s][%s-%s]' %
                           (unichr(ah+1), unichr(bh-1), unichr(0xdc00), unichr(0xdfff)))
if ah != bh:
buf.append(u'%s[%s-%s]' %
(unichr(bh), unichr(0xdc00), unichr(bl)))
return u'(?:' + u'|'.join(buf) + u')'
|
python
|
def unirange(a, b):
"""Returns a regular expression string to match the given non-BMP range."""
if b < a:
raise ValueError("Bad character range")
if a < 0x10000 or b < 0x10000:
raise ValueError("unirange is only defined for non-BMP ranges")
if sys.maxunicode > 0xffff:
# wide build
return u'[%s-%s]' % (unichr(a), unichr(b))
else:
# narrow build stores surrogates, and the 're' module handles them
# (incorrectly) as characters. Since there is still ordering among
# these characters, expand the range to one that it understands. Some
# background in http://bugs.python.org/issue3665 and
# http://bugs.python.org/issue12749
#
# Additionally, the lower constants are using unichr rather than
# literals because jython [which uses the wide path] can't load this
# file if they are literals.
ah, al = _surrogatepair(a)
bh, bl = _surrogatepair(b)
if ah == bh:
return u'(?:%s[%s-%s])' % (unichr(ah), unichr(al), unichr(bl))
else:
buf = []
buf.append(u'%s[%s-%s]' %
(unichr(ah), unichr(al),
ah == bh and unichr(bl) or unichr(0xdfff)))
if ah - bh > 1:
                buf.append(u'[%s-%s][%s-%s]' %
                           (unichr(ah+1), unichr(bh-1), unichr(0xdc00), unichr(0xdfff)))
if ah != bh:
buf.append(u'%s[%s-%s]' %
(unichr(bh), unichr(0xdc00), unichr(bl)))
return u'(?:' + u'|'.join(buf) + u')'
|
[
"def",
"unirange",
"(",
"a",
",",
"b",
")",
":",
"if",
"b",
"<",
"a",
":",
"raise",
"ValueError",
"(",
"\"Bad character range\"",
")",
"if",
"a",
"<",
"0x10000",
"or",
"b",
"<",
"0x10000",
":",
"raise",
"ValueError",
"(",
"\"unirange is only defined for non-BMP ranges\"",
")",
"if",
"sys",
".",
"maxunicode",
">",
"0xffff",
":",
"# wide build",
"return",
"u'[%s-%s]'",
"%",
"(",
"unichr",
"(",
"a",
")",
",",
"unichr",
"(",
"b",
")",
")",
"else",
":",
"# narrow build stores surrogates, and the 're' module handles them",
"# (incorrectly) as characters. Since there is still ordering among",
"# these characters, expand the range to one that it understands. Some",
"# background in http://bugs.python.org/issue3665 and",
"# http://bugs.python.org/issue12749",
"#",
"# Additionally, the lower constants are using unichr rather than",
"# literals because jython [which uses the wide path] can't load this",
"# file if they are literals.",
"ah",
",",
"al",
"=",
"_surrogatepair",
"(",
"a",
")",
"bh",
",",
"bl",
"=",
"_surrogatepair",
"(",
"b",
")",
"if",
"ah",
"==",
"bh",
":",
"return",
"u'(?:%s[%s-%s])'",
"%",
"(",
"unichr",
"(",
"ah",
")",
",",
"unichr",
"(",
"al",
")",
",",
"unichr",
"(",
"bl",
")",
")",
"else",
":",
"buf",
"=",
"[",
"]",
"buf",
".",
"append",
"(",
"u'%s[%s-%s]'",
"%",
"(",
"unichr",
"(",
"ah",
")",
",",
"unichr",
"(",
"al",
")",
",",
"ah",
"==",
"bh",
"and",
"unichr",
"(",
"bl",
")",
"or",
"unichr",
"(",
"0xdfff",
")",
")",
")",
"if",
"ah",
"-",
"bh",
">",
"1",
":",
"buf",
".",
"append",
"(",
"u'[%s-%s][%s-%s]'",
"%",
"unichr",
"(",
"ah",
"+",
"1",
")",
",",
"unichr",
"(",
"bh",
"-",
"1",
")",
",",
"unichr",
"(",
"0xdc00",
")",
",",
"unichr",
"(",
"0xdfff",
")",
")",
"if",
"ah",
"!=",
"bh",
":",
"buf",
".",
"append",
"(",
"u'%s[%s-%s]'",
"%",
"(",
"unichr",
"(",
"bh",
")",
",",
"unichr",
"(",
"0xdc00",
")",
",",
"unichr",
"(",
"bl",
")",
")",
")",
"return",
"u'(?:'",
"+",
"u'|'",
".",
"join",
"(",
"buf",
")",
"+",
"u')'"
] |
Returns a regular expression string to match the given non-BMP range.
|
[
"Returns",
"a",
"regular",
"expression",
"string",
"to",
"match",
"the",
"given",
"non",
"-",
"BMP",
"range",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/util.py#L218-L254
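A standalone sketch of what the wide-build branch produces (every CPython 3 build is wide, so the surrogate-pair branch is not exercised here):

import re

# unirange(0x1F600, 0x1F64F) on a wide build is simply a two-endpoint character class:
pattern = re.compile(u'[%s-%s]' % (chr(0x1F600), chr(0x1F64F)))
print(bool(pattern.match('\U0001F600')))   # True  -- emoji inside the range
print(bool(pattern.match('A')))            # False -- BMP character outside the range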
|
8,587
|
wakatime/wakatime
|
wakatime/packages/pygments/util.py
|
format_lines
|
def format_lines(var_name, seq, raw=False, indent_level=0):
"""Formats a sequence of strings for output."""
lines = []
base_indent = ' ' * indent_level * 4
inner_indent = ' ' * (indent_level + 1) * 4
lines.append(base_indent + var_name + ' = (')
if raw:
# These should be preformatted reprs of, say, tuples.
for i in seq:
lines.append(inner_indent + i + ',')
else:
for i in seq:
# Force use of single quotes
r = repr(i + '"')
lines.append(inner_indent + r[:-2] + r[-1] + ',')
lines.append(base_indent + ')')
return '\n'.join(lines)
|
python
|
def format_lines(var_name, seq, raw=False, indent_level=0):
"""Formats a sequence of strings for output."""
lines = []
base_indent = ' ' * indent_level * 4
inner_indent = ' ' * (indent_level + 1) * 4
lines.append(base_indent + var_name + ' = (')
if raw:
# These should be preformatted reprs of, say, tuples.
for i in seq:
lines.append(inner_indent + i + ',')
else:
for i in seq:
# Force use of single quotes
r = repr(i + '"')
lines.append(inner_indent + r[:-2] + r[-1] + ',')
lines.append(base_indent + ')')
return '\n'.join(lines)
|
[
"def",
"format_lines",
"(",
"var_name",
",",
"seq",
",",
"raw",
"=",
"False",
",",
"indent_level",
"=",
"0",
")",
":",
"lines",
"=",
"[",
"]",
"base_indent",
"=",
"' '",
"*",
"indent_level",
"*",
"4",
"inner_indent",
"=",
"' '",
"*",
"(",
"indent_level",
"+",
"1",
")",
"*",
"4",
"lines",
".",
"append",
"(",
"base_indent",
"+",
"var_name",
"+",
"' = ('",
")",
"if",
"raw",
":",
"# These should be preformatted reprs of, say, tuples.",
"for",
"i",
"in",
"seq",
":",
"lines",
".",
"append",
"(",
"inner_indent",
"+",
"i",
"+",
"','",
")",
"else",
":",
"for",
"i",
"in",
"seq",
":",
"# Force use of single quotes",
"r",
"=",
"repr",
"(",
"i",
"+",
"'\"'",
")",
"lines",
".",
"append",
"(",
"inner_indent",
"+",
"r",
"[",
":",
"-",
"2",
"]",
"+",
"r",
"[",
"-",
"1",
"]",
"+",
"','",
")",
"lines",
".",
"append",
"(",
"base_indent",
"+",
"')'",
")",
"return",
"'\\n'",
".",
"join",
"(",
"lines",
")"
] |
Formats a sequence of strings for output.
|
[
"Formats",
"a",
"sequence",
"of",
"strings",
"for",
"output",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/util.py#L257-L273
|
8,588
|
wakatime/wakatime
|
wakatime/packages/pygments/util.py
|
duplicates_removed
|
def duplicates_removed(it, already_seen=()):
"""
Returns a list with duplicates removed from the iterable `it`.
Order is preserved.
"""
lst = []
seen = set()
for i in it:
if i in seen or i in already_seen:
continue
lst.append(i)
seen.add(i)
return lst
|
python
|
def duplicates_removed(it, already_seen=()):
"""
Returns a list with duplicates removed from the iterable `it`.
Order is preserved.
"""
lst = []
seen = set()
for i in it:
if i in seen or i in already_seen:
continue
lst.append(i)
seen.add(i)
return lst
|
[
"def",
"duplicates_removed",
"(",
"it",
",",
"already_seen",
"=",
"(",
")",
")",
":",
"lst",
"=",
"[",
"]",
"seen",
"=",
"set",
"(",
")",
"for",
"i",
"in",
"it",
":",
"if",
"i",
"in",
"seen",
"or",
"i",
"in",
"already_seen",
":",
"continue",
"lst",
".",
"append",
"(",
"i",
")",
"seen",
".",
"add",
"(",
"i",
")",
"return",
"lst"
] |
Returns a list with duplicates removed from the iterable `it`.
Order is preserved.
|
[
"Returns",
"a",
"list",
"with",
"duplicates",
"removed",
"from",
"the",
"iterable",
"it",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/util.py#L276-L289
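For the common already_seen=() case, dict.fromkeys() gives the same order-preserving deduplication (a behavioural sketch, not the helper itself):

items = ['a', 'b', 'a', 'c', 'b']
print(list(dict.fromkeys(items)))   # ['a', 'b', 'c'] -- first-occurrence order preserved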
|
8,589
|
wakatime/wakatime
|
wakatime/packages/pygments/lexers/fortran.py
|
FortranFixedLexer._lex_fortran
|
def _lex_fortran(self, match, ctx=None):
"""Lex a line just as free form fortran without line break."""
lexer = FortranLexer()
text = match.group(0) + "\n"
for index, token, value in lexer.get_tokens_unprocessed(text):
value = value.replace('\n', '')
if value != '':
yield index, token, value
|
python
|
def _lex_fortran(self, match, ctx=None):
"""Lex a line just as free form fortran without line break."""
lexer = FortranLexer()
text = match.group(0) + "\n"
for index, token, value in lexer.get_tokens_unprocessed(text):
value = value.replace('\n', '')
if value != '':
yield index, token, value
|
[
"def",
"_lex_fortran",
"(",
"self",
",",
"match",
",",
"ctx",
"=",
"None",
")",
":",
"lexer",
"=",
"FortranLexer",
"(",
")",
"text",
"=",
"match",
".",
"group",
"(",
"0",
")",
"+",
"\"\\n\"",
"for",
"index",
",",
"token",
",",
"value",
"in",
"lexer",
".",
"get_tokens_unprocessed",
"(",
"text",
")",
":",
"value",
"=",
"value",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
"if",
"value",
"!=",
"''",
":",
"yield",
"index",
",",
"token",
",",
"value"
] |
Lex a line just as free form fortran without line break.
|
[
"Lex",
"a",
"line",
"just",
"as",
"free",
"form",
"fortran",
"without",
"line",
"break",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/fortran.py#L177-L184
|
8,590
|
wakatime/wakatime
|
wakatime/project.py
|
get_project_info
|
def get_project_info(configs, heartbeat, data):
"""Find the current project and branch.
First looks for a .wakatime-project file. Second, uses the --project arg.
Third, uses the folder name from a revision control repository. Last, uses
the --alternate-project arg.
Returns a project, branch tuple.
"""
project_name, branch_name = heartbeat.project, heartbeat.branch
if heartbeat.type != 'file':
project_name = project_name or heartbeat.args.project or heartbeat.args.alternate_project
return project_name, branch_name
if project_name is None or branch_name is None:
for plugin_cls in CONFIG_PLUGINS:
plugin_name = plugin_cls.__name__.lower()
plugin_configs = get_configs_for_plugin(plugin_name, configs)
project = plugin_cls(heartbeat.entity, configs=plugin_configs)
if project.process():
project_name = project_name or project.name()
branch_name = project.branch()
break
if project_name is None:
project_name = data.get('project') or heartbeat.args.project
hide_project = heartbeat.should_obfuscate_project()
if hide_project and project_name is not None:
return project_name, None
if project_name is None or branch_name is None:
for plugin_cls in REV_CONTROL_PLUGINS:
plugin_name = plugin_cls.__name__.lower()
plugin_configs = get_configs_for_plugin(plugin_name, configs)
project = plugin_cls(heartbeat.entity, configs=plugin_configs)
if project.process():
project_name = project_name or project.name()
branch_name = branch_name or project.branch()
if hide_project:
branch_name = None
project_name = generate_project_name()
project_file = os.path.join(project.folder(), '.wakatime-project')
try:
with open(project_file, 'w') as fh:
fh.write(project_name)
except IOError:
project_name = None
break
if project_name is None and not hide_project:
project_name = data.get('alternate_project') or heartbeat.args.alternate_project
return project_name, branch_name
|
python
|
def get_project_info(configs, heartbeat, data):
"""Find the current project and branch.
First looks for a .wakatime-project file. Second, uses the --project arg.
Third, uses the folder name from a revision control repository. Last, uses
the --alternate-project arg.
Returns a project, branch tuple.
"""
project_name, branch_name = heartbeat.project, heartbeat.branch
if heartbeat.type != 'file':
project_name = project_name or heartbeat.args.project or heartbeat.args.alternate_project
return project_name, branch_name
if project_name is None or branch_name is None:
for plugin_cls in CONFIG_PLUGINS:
plugin_name = plugin_cls.__name__.lower()
plugin_configs = get_configs_for_plugin(plugin_name, configs)
project = plugin_cls(heartbeat.entity, configs=plugin_configs)
if project.process():
project_name = project_name or project.name()
branch_name = project.branch()
break
if project_name is None:
project_name = data.get('project') or heartbeat.args.project
hide_project = heartbeat.should_obfuscate_project()
if hide_project and project_name is not None:
return project_name, None
if project_name is None or branch_name is None:
for plugin_cls in REV_CONTROL_PLUGINS:
plugin_name = plugin_cls.__name__.lower()
plugin_configs = get_configs_for_plugin(plugin_name, configs)
project = plugin_cls(heartbeat.entity, configs=plugin_configs)
if project.process():
project_name = project_name or project.name()
branch_name = branch_name or project.branch()
if hide_project:
branch_name = None
project_name = generate_project_name()
project_file = os.path.join(project.folder(), '.wakatime-project')
try:
with open(project_file, 'w') as fh:
fh.write(project_name)
except IOError:
project_name = None
break
if project_name is None and not hide_project:
project_name = data.get('alternate_project') or heartbeat.args.alternate_project
return project_name, branch_name
|
[
"def",
"get_project_info",
"(",
"configs",
",",
"heartbeat",
",",
"data",
")",
":",
"project_name",
",",
"branch_name",
"=",
"heartbeat",
".",
"project",
",",
"heartbeat",
".",
"branch",
"if",
"heartbeat",
".",
"type",
"!=",
"'file'",
":",
"project_name",
"=",
"project_name",
"or",
"heartbeat",
".",
"args",
".",
"project",
"or",
"heartbeat",
".",
"args",
".",
"alternate_project",
"return",
"project_name",
",",
"branch_name",
"if",
"project_name",
"is",
"None",
"or",
"branch_name",
"is",
"None",
":",
"for",
"plugin_cls",
"in",
"CONFIG_PLUGINS",
":",
"plugin_name",
"=",
"plugin_cls",
".",
"__name__",
".",
"lower",
"(",
")",
"plugin_configs",
"=",
"get_configs_for_plugin",
"(",
"plugin_name",
",",
"configs",
")",
"project",
"=",
"plugin_cls",
"(",
"heartbeat",
".",
"entity",
",",
"configs",
"=",
"plugin_configs",
")",
"if",
"project",
".",
"process",
"(",
")",
":",
"project_name",
"=",
"project_name",
"or",
"project",
".",
"name",
"(",
")",
"branch_name",
"=",
"project",
".",
"branch",
"(",
")",
"break",
"if",
"project_name",
"is",
"None",
":",
"project_name",
"=",
"data",
".",
"get",
"(",
"'project'",
")",
"or",
"heartbeat",
".",
"args",
".",
"project",
"hide_project",
"=",
"heartbeat",
".",
"should_obfuscate_project",
"(",
")",
"if",
"hide_project",
"and",
"project_name",
"is",
"not",
"None",
":",
"return",
"project_name",
",",
"None",
"if",
"project_name",
"is",
"None",
"or",
"branch_name",
"is",
"None",
":",
"for",
"plugin_cls",
"in",
"REV_CONTROL_PLUGINS",
":",
"plugin_name",
"=",
"plugin_cls",
".",
"__name__",
".",
"lower",
"(",
")",
"plugin_configs",
"=",
"get_configs_for_plugin",
"(",
"plugin_name",
",",
"configs",
")",
"project",
"=",
"plugin_cls",
"(",
"heartbeat",
".",
"entity",
",",
"configs",
"=",
"plugin_configs",
")",
"if",
"project",
".",
"process",
"(",
")",
":",
"project_name",
"=",
"project_name",
"or",
"project",
".",
"name",
"(",
")",
"branch_name",
"=",
"branch_name",
"or",
"project",
".",
"branch",
"(",
")",
"if",
"hide_project",
":",
"branch_name",
"=",
"None",
"project_name",
"=",
"generate_project_name",
"(",
")",
"project_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"project",
".",
"folder",
"(",
")",
",",
"'.wakatime-project'",
")",
"try",
":",
"with",
"open",
"(",
"project_file",
",",
"'w'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"project_name",
")",
"except",
"IOError",
":",
"project_name",
"=",
"None",
"break",
"if",
"project_name",
"is",
"None",
"and",
"not",
"hide_project",
":",
"project_name",
"=",
"data",
".",
"get",
"(",
"'alternate_project'",
")",
"or",
"heartbeat",
".",
"args",
".",
"alternate_project",
"return",
"project_name",
",",
"branch_name"
] |
Find the current project and branch.
First looks for a .wakatime-project file. Second, uses the --project arg.
Third, uses the folder name from a revision control repository. Last, uses
the --alternate-project arg.
Returns a project, branch tuple.
|
[
"Find",
"the",
"current",
"project",
"and",
"branch",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/project.py#L39-L100
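A minimal sketch of just the precedence order described in the docstring, using plain values instead of wakatime's Heartbeat/configs objects (pick_project is a name invented for this sketch):

def pick_project(wakatime_project_file, cli_project, repo_folder, cli_alternate_project):
    # 1) .wakatime-project file  2) --project  3) revision-control folder name  4) --alternate-project
    for candidate in (wakatime_project_file, cli_project, repo_folder, cli_alternate_project):
        if candidate:
            return candidate
    return None

print(pick_project(None, None, 'my-repo', 'fallback'))   # 'my-repo'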
|
8,591
|
wakatime/wakatime
|
wakatime/project.py
|
generate_project_name
|
def generate_project_name():
"""Generates a random project name."""
adjectives = [
'aged', 'ancient', 'autumn', 'billowing', 'bitter', 'black', 'blue', 'bold',
'broad', 'broken', 'calm', 'cold', 'cool', 'crimson', 'curly', 'damp',
'dark', 'dawn', 'delicate', 'divine', 'dry', 'empty', 'falling', 'fancy',
'flat', 'floral', 'fragrant', 'frosty', 'gentle', 'green', 'hidden', 'holy',
'icy', 'jolly', 'late', 'lingering', 'little', 'lively', 'long', 'lucky',
'misty', 'morning', 'muddy', 'mute', 'nameless', 'noisy', 'odd', 'old',
'orange', 'patient', 'plain', 'polished', 'proud', 'purple', 'quiet', 'rapid',
'raspy', 'red', 'restless', 'rough', 'round', 'royal', 'shiny', 'shrill',
'shy', 'silent', 'small', 'snowy', 'soft', 'solitary', 'sparkling', 'spring',
'square', 'steep', 'still', 'summer', 'super', 'sweet', 'throbbing', 'tight',
'tiny', 'twilight', 'wandering', 'weathered', 'white', 'wild', 'winter', 'wispy',
'withered', 'yellow', 'young'
]
nouns = [
'art', 'band', 'bar', 'base', 'bird', 'block', 'boat', 'bonus',
'bread', 'breeze', 'brook', 'bush', 'butterfly', 'cake', 'cell', 'cherry',
'cloud', 'credit', 'darkness', 'dawn', 'dew', 'disk', 'dream', 'dust',
'feather', 'field', 'fire', 'firefly', 'flower', 'fog', 'forest', 'frog',
'frost', 'glade', 'glitter', 'grass', 'hall', 'hat', 'haze', 'heart',
'hill', 'king', 'lab', 'lake', 'leaf', 'limit', 'math', 'meadow',
'mode', 'moon', 'morning', 'mountain', 'mouse', 'mud', 'night', 'paper',
'pine', 'poetry', 'pond', 'queen', 'rain', 'recipe', 'resonance', 'rice',
'river', 'salad', 'scene', 'sea', 'shadow', 'shape', 'silence', 'sky',
'smoke', 'snow', 'snowflake', 'sound', 'star', 'sun', 'sun', 'sunset',
'surf', 'term', 'thunder', 'tooth', 'tree', 'truth', 'union', 'unit',
'violet', 'voice', 'water', 'waterfall', 'wave', 'wildflower', 'wind', 'wood'
]
numbers = [str(x) for x in range(10)]
return ' '.join([
random.choice(adjectives).capitalize(),
random.choice(nouns).capitalize(),
random.choice(numbers) + random.choice(numbers),
])
|
python
|
def generate_project_name():
"""Generates a random project name."""
adjectives = [
'aged', 'ancient', 'autumn', 'billowing', 'bitter', 'black', 'blue', 'bold',
'broad', 'broken', 'calm', 'cold', 'cool', 'crimson', 'curly', 'damp',
'dark', 'dawn', 'delicate', 'divine', 'dry', 'empty', 'falling', 'fancy',
'flat', 'floral', 'fragrant', 'frosty', 'gentle', 'green', 'hidden', 'holy',
'icy', 'jolly', 'late', 'lingering', 'little', 'lively', 'long', 'lucky',
'misty', 'morning', 'muddy', 'mute', 'nameless', 'noisy', 'odd', 'old',
'orange', 'patient', 'plain', 'polished', 'proud', 'purple', 'quiet', 'rapid',
'raspy', 'red', 'restless', 'rough', 'round', 'royal', 'shiny', 'shrill',
'shy', 'silent', 'small', 'snowy', 'soft', 'solitary', 'sparkling', 'spring',
'square', 'steep', 'still', 'summer', 'super', 'sweet', 'throbbing', 'tight',
'tiny', 'twilight', 'wandering', 'weathered', 'white', 'wild', 'winter', 'wispy',
'withered', 'yellow', 'young'
]
nouns = [
'art', 'band', 'bar', 'base', 'bird', 'block', 'boat', 'bonus',
'bread', 'breeze', 'brook', 'bush', 'butterfly', 'cake', 'cell', 'cherry',
'cloud', 'credit', 'darkness', 'dawn', 'dew', 'disk', 'dream', 'dust',
'feather', 'field', 'fire', 'firefly', 'flower', 'fog', 'forest', 'frog',
'frost', 'glade', 'glitter', 'grass', 'hall', 'hat', 'haze', 'heart',
'hill', 'king', 'lab', 'lake', 'leaf', 'limit', 'math', 'meadow',
'mode', 'moon', 'morning', 'mountain', 'mouse', 'mud', 'night', 'paper',
'pine', 'poetry', 'pond', 'queen', 'rain', 'recipe', 'resonance', 'rice',
'river', 'salad', 'scene', 'sea', 'shadow', 'shape', 'silence', 'sky',
'smoke', 'snow', 'snowflake', 'sound', 'star', 'sun', 'sun', 'sunset',
'surf', 'term', 'thunder', 'tooth', 'tree', 'truth', 'union', 'unit',
'violet', 'voice', 'water', 'waterfall', 'wave', 'wildflower', 'wind', 'wood'
]
numbers = [str(x) for x in range(10)]
return ' '.join([
random.choice(adjectives).capitalize(),
random.choice(nouns).capitalize(),
random.choice(numbers) + random.choice(numbers),
])
|
[
"def",
"generate_project_name",
"(",
")",
":",
"adjectives",
"=",
"[",
"'aged'",
",",
"'ancient'",
",",
"'autumn'",
",",
"'billowing'",
",",
"'bitter'",
",",
"'black'",
",",
"'blue'",
",",
"'bold'",
",",
"'broad'",
",",
"'broken'",
",",
"'calm'",
",",
"'cold'",
",",
"'cool'",
",",
"'crimson'",
",",
"'curly'",
",",
"'damp'",
",",
"'dark'",
",",
"'dawn'",
",",
"'delicate'",
",",
"'divine'",
",",
"'dry'",
",",
"'empty'",
",",
"'falling'",
",",
"'fancy'",
",",
"'flat'",
",",
"'floral'",
",",
"'fragrant'",
",",
"'frosty'",
",",
"'gentle'",
",",
"'green'",
",",
"'hidden'",
",",
"'holy'",
",",
"'icy'",
",",
"'jolly'",
",",
"'late'",
",",
"'lingering'",
",",
"'little'",
",",
"'lively'",
",",
"'long'",
",",
"'lucky'",
",",
"'misty'",
",",
"'morning'",
",",
"'muddy'",
",",
"'mute'",
",",
"'nameless'",
",",
"'noisy'",
",",
"'odd'",
",",
"'old'",
",",
"'orange'",
",",
"'patient'",
",",
"'plain'",
",",
"'polished'",
",",
"'proud'",
",",
"'purple'",
",",
"'quiet'",
",",
"'rapid'",
",",
"'raspy'",
",",
"'red'",
",",
"'restless'",
",",
"'rough'",
",",
"'round'",
",",
"'royal'",
",",
"'shiny'",
",",
"'shrill'",
",",
"'shy'",
",",
"'silent'",
",",
"'small'",
",",
"'snowy'",
",",
"'soft'",
",",
"'solitary'",
",",
"'sparkling'",
",",
"'spring'",
",",
"'square'",
",",
"'steep'",
",",
"'still'",
",",
"'summer'",
",",
"'super'",
",",
"'sweet'",
",",
"'throbbing'",
",",
"'tight'",
",",
"'tiny'",
",",
"'twilight'",
",",
"'wandering'",
",",
"'weathered'",
",",
"'white'",
",",
"'wild'",
",",
"'winter'",
",",
"'wispy'",
",",
"'withered'",
",",
"'yellow'",
",",
"'young'",
"]",
"nouns",
"=",
"[",
"'art'",
",",
"'band'",
",",
"'bar'",
",",
"'base'",
",",
"'bird'",
",",
"'block'",
",",
"'boat'",
",",
"'bonus'",
",",
"'bread'",
",",
"'breeze'",
",",
"'brook'",
",",
"'bush'",
",",
"'butterfly'",
",",
"'cake'",
",",
"'cell'",
",",
"'cherry'",
",",
"'cloud'",
",",
"'credit'",
",",
"'darkness'",
",",
"'dawn'",
",",
"'dew'",
",",
"'disk'",
",",
"'dream'",
",",
"'dust'",
",",
"'feather'",
",",
"'field'",
",",
"'fire'",
",",
"'firefly'",
",",
"'flower'",
",",
"'fog'",
",",
"'forest'",
",",
"'frog'",
",",
"'frost'",
",",
"'glade'",
",",
"'glitter'",
",",
"'grass'",
",",
"'hall'",
",",
"'hat'",
",",
"'haze'",
",",
"'heart'",
",",
"'hill'",
",",
"'king'",
",",
"'lab'",
",",
"'lake'",
",",
"'leaf'",
",",
"'limit'",
",",
"'math'",
",",
"'meadow'",
",",
"'mode'",
",",
"'moon'",
",",
"'morning'",
",",
"'mountain'",
",",
"'mouse'",
",",
"'mud'",
",",
"'night'",
",",
"'paper'",
",",
"'pine'",
",",
"'poetry'",
",",
"'pond'",
",",
"'queen'",
",",
"'rain'",
",",
"'recipe'",
",",
"'resonance'",
",",
"'rice'",
",",
"'river'",
",",
"'salad'",
",",
"'scene'",
",",
"'sea'",
",",
"'shadow'",
",",
"'shape'",
",",
"'silence'",
",",
"'sky'",
",",
"'smoke'",
",",
"'snow'",
",",
"'snowflake'",
",",
"'sound'",
",",
"'star'",
",",
"'sun'",
",",
"'sun'",
",",
"'sunset'",
",",
"'surf'",
",",
"'term'",
",",
"'thunder'",
",",
"'tooth'",
",",
"'tree'",
",",
"'truth'",
",",
"'union'",
",",
"'unit'",
",",
"'violet'",
",",
"'voice'",
",",
"'water'",
",",
"'waterfall'",
",",
"'wave'",
",",
"'wildflower'",
",",
"'wind'",
",",
"'wood'",
"]",
"numbers",
"=",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"range",
"(",
"10",
")",
"]",
"return",
"' '",
".",
"join",
"(",
"[",
"random",
".",
"choice",
"(",
"adjectives",
")",
".",
"capitalize",
"(",
")",
",",
"random",
".",
"choice",
"(",
"nouns",
")",
".",
"capitalize",
"(",
")",
",",
"random",
".",
"choice",
"(",
"numbers",
")",
"+",
"random",
".",
"choice",
"(",
"numbers",
")",
",",
"]",
")"
] |
Generates a random project name.
|
[
"Generates",
"a",
"random",
"project",
"name",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/project.py#L109-L145
|
8,592
|
wakatime/wakatime
|
wakatime/session_cache.py
|
SessionCache.save
|
def save(self, session):
"""Saves a requests.Session object for the next heartbeat process.
"""
if not HAS_SQL: # pragma: nocover
return
try:
conn, c = self.connect()
c.execute('DELETE FROM {0}'.format(self.table_name))
values = {
'value': sqlite3.Binary(pickle.dumps(session, protocol=2)),
}
c.execute('INSERT INTO {0} VALUES (:value)'.format(self.table_name), values)
conn.commit()
conn.close()
except: # pragma: nocover
log.traceback(logging.DEBUG)
|
python
|
def save(self, session):
"""Saves a requests.Session object for the next heartbeat process.
"""
if not HAS_SQL: # pragma: nocover
return
try:
conn, c = self.connect()
c.execute('DELETE FROM {0}'.format(self.table_name))
values = {
'value': sqlite3.Binary(pickle.dumps(session, protocol=2)),
}
c.execute('INSERT INTO {0} VALUES (:value)'.format(self.table_name), values)
conn.commit()
conn.close()
except: # pragma: nocover
log.traceback(logging.DEBUG)
|
[
"def",
"save",
"(",
"self",
",",
"session",
")",
":",
"if",
"not",
"HAS_SQL",
":",
"# pragma: nocover",
"return",
"try",
":",
"conn",
",",
"c",
"=",
"self",
".",
"connect",
"(",
")",
"c",
".",
"execute",
"(",
"'DELETE FROM {0}'",
".",
"format",
"(",
"self",
".",
"table_name",
")",
")",
"values",
"=",
"{",
"'value'",
":",
"sqlite3",
".",
"Binary",
"(",
"pickle",
".",
"dumps",
"(",
"session",
",",
"protocol",
"=",
"2",
")",
")",
",",
"}",
"c",
".",
"execute",
"(",
"'INSERT INTO {0} VALUES (:value)'",
".",
"format",
"(",
"self",
".",
"table_name",
")",
",",
"values",
")",
"conn",
".",
"commit",
"(",
")",
"conn",
".",
"close",
"(",
")",
"except",
":",
"# pragma: nocover",
"log",
".",
"traceback",
"(",
"logging",
".",
"DEBUG",
")"
] |
Saves a requests.Session object for the next heartbeat process.
|
[
"Saves",
"a",
"requests",
".",
"Session",
"object",
"for",
"the",
"next",
"heartbeat",
"process",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/session_cache.py#L44-L60
|
8,593
|
wakatime/wakatime
|
wakatime/session_cache.py
|
SessionCache.get
|
def get(self):
"""Returns a requests.Session object.
Gets Session from sqlite3 cache or creates a new Session.
"""
if not HAS_SQL: # pragma: nocover
return requests.session()
try:
conn, c = self.connect()
except:
log.traceback(logging.DEBUG)
return requests.session()
session = None
try:
c.execute('BEGIN IMMEDIATE')
c.execute('SELECT value FROM {0} LIMIT 1'.format(self.table_name))
row = c.fetchone()
if row is not None:
session = pickle.loads(row[0])
except: # pragma: nocover
log.traceback(logging.DEBUG)
try:
conn.close()
except: # pragma: nocover
log.traceback(logging.DEBUG)
return session if session is not None else requests.session()
|
python
|
def get(self):
"""Returns a requests.Session object.
Gets Session from sqlite3 cache or creates a new Session.
"""
if not HAS_SQL: # pragma: nocover
return requests.session()
try:
conn, c = self.connect()
except:
log.traceback(logging.DEBUG)
return requests.session()
session = None
try:
c.execute('BEGIN IMMEDIATE')
c.execute('SELECT value FROM {0} LIMIT 1'.format(self.table_name))
row = c.fetchone()
if row is not None:
session = pickle.loads(row[0])
except: # pragma: nocover
log.traceback(logging.DEBUG)
try:
conn.close()
except: # pragma: nocover
log.traceback(logging.DEBUG)
return session if session is not None else requests.session()
|
[
"def",
"get",
"(",
"self",
")",
":",
"if",
"not",
"HAS_SQL",
":",
"# pragma: nocover",
"return",
"requests",
".",
"session",
"(",
")",
"try",
":",
"conn",
",",
"c",
"=",
"self",
".",
"connect",
"(",
")",
"except",
":",
"log",
".",
"traceback",
"(",
"logging",
".",
"DEBUG",
")",
"return",
"requests",
".",
"session",
"(",
")",
"session",
"=",
"None",
"try",
":",
"c",
".",
"execute",
"(",
"'BEGIN IMMEDIATE'",
")",
"c",
".",
"execute",
"(",
"'SELECT value FROM {0} LIMIT 1'",
".",
"format",
"(",
"self",
".",
"table_name",
")",
")",
"row",
"=",
"c",
".",
"fetchone",
"(",
")",
"if",
"row",
"is",
"not",
"None",
":",
"session",
"=",
"pickle",
".",
"loads",
"(",
"row",
"[",
"0",
"]",
")",
"except",
":",
"# pragma: nocover",
"log",
".",
"traceback",
"(",
"logging",
".",
"DEBUG",
")",
"try",
":",
"conn",
".",
"close",
"(",
")",
"except",
":",
"# pragma: nocover",
"log",
".",
"traceback",
"(",
"logging",
".",
"DEBUG",
")",
"return",
"session",
"if",
"session",
"is",
"not",
"None",
"else",
"requests",
".",
"session",
"(",
")"
] |
Returns a requests.Session object.
Gets Session from sqlite3 cache or creates a new Session.
|
[
"Returns",
"a",
"requests",
".",
"Session",
"object",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/session_cache.py#L62-L92
|
8,594
|
wakatime/wakatime
|
wakatime/session_cache.py
|
SessionCache.delete
|
def delete(self):
"""Clears all cached Session objects.
"""
if not HAS_SQL: # pragma: nocover
return
try:
conn, c = self.connect()
c.execute('DELETE FROM {0}'.format(self.table_name))
conn.commit()
conn.close()
except:
log.traceback(logging.DEBUG)
|
python
|
def delete(self):
"""Clears all cached Session objects.
"""
if not HAS_SQL: # pragma: nocover
return
try:
conn, c = self.connect()
c.execute('DELETE FROM {0}'.format(self.table_name))
conn.commit()
conn.close()
except:
log.traceback(logging.DEBUG)
|
[
"def",
"delete",
"(",
"self",
")",
":",
"if",
"not",
"HAS_SQL",
":",
"# pragma: nocover",
"return",
"try",
":",
"conn",
",",
"c",
"=",
"self",
".",
"connect",
"(",
")",
"c",
".",
"execute",
"(",
"'DELETE FROM {0}'",
".",
"format",
"(",
"self",
".",
"table_name",
")",
")",
"conn",
".",
"commit",
"(",
")",
"conn",
".",
"close",
"(",
")",
"except",
":",
"log",
".",
"traceback",
"(",
"logging",
".",
"DEBUG",
")"
] |
Clears all cached Session objects.
|
[
"Clears",
"all",
"cached",
"Session",
"objects",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/session_cache.py#L94-L106
|
8,595
|
wakatime/wakatime
|
wakatime/packages/requests/sessions.py
|
SessionRedirectMixin.resolve_redirects
|
def resolve_redirects(self, resp, req, stream=False, timeout=None,
verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs):
"""Receives a Response. Returns a generator of Responses or Requests."""
hist = [] # keep track of history
url = self.get_redirect_target(resp)
while url:
prepared_request = req.copy()
# Update history and keep track of redirects.
# resp.history must ignore the original request in this loop
hist.append(resp)
resp.history = hist[1:]
try:
resp.content # Consume socket so it can be released
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if len(resp.history) >= self.max_redirects:
raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp)
# Release the connection back into the pool.
resp.close()
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
parsed_rurl = urlparse(resp.url)
url = '%s:%s' % (to_native_string(parsed_rurl.scheme), url)
# The scheme should be lower case...
parsed = urlparse(url)
url = parsed.geturl()
# Facilitate relative 'location' headers, as allowed by RFC 7231.
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
# Compliant with RFC3986, we percent encode the url.
if not parsed.netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
self.rebuild_method(prepared_request, resp)
# https://github.com/requests/requests/issues/1084
if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
# https://github.com/requests/requests/issues/3490
purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding')
for header in purged_headers:
prepared_request.headers.pop(header, None)
prepared_request.body = None
headers = prepared_request.headers
try:
del headers['Cookie']
except KeyError:
pass
# Extract any cookies sent on the response to the cookiejar
# in the new request. Because we've mutated our copied prepared
# request, use the old one that we haven't yet touched.
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
merge_cookies(prepared_request._cookies, self.cookies)
prepared_request.prepare_cookies(prepared_request._cookies)
# Rebuild auth and proxy information.
proxies = self.rebuild_proxies(prepared_request, proxies)
self.rebuild_auth(prepared_request, resp)
# A failed tell() sets `_body_position` to `object()`. This non-None
# value ensures `rewindable` will be True, allowing us to raise an
# UnrewindableBodyError, instead of hanging the connection.
rewindable = (
prepared_request._body_position is not None and
('Content-Length' in headers or 'Transfer-Encoding' in headers)
)
# Attempt to rewind consumed file-like object.
if rewindable:
rewind_body(prepared_request)
# Override the original request.
req = prepared_request
if yield_requests:
yield req
else:
resp = self.send(
req,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
allow_redirects=False,
**adapter_kwargs
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
# extract redirect url, if any, for the next loop
url = self.get_redirect_target(resp)
yield resp
|
python
|
def resolve_redirects(self, resp, req, stream=False, timeout=None,
verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs):
"""Receives a Response. Returns a generator of Responses or Requests."""
hist = [] # keep track of history
url = self.get_redirect_target(resp)
while url:
prepared_request = req.copy()
# Update history and keep track of redirects.
# resp.history must ignore the original request in this loop
hist.append(resp)
resp.history = hist[1:]
try:
resp.content # Consume socket so it can be released
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if len(resp.history) >= self.max_redirects:
raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp)
# Release the connection back into the pool.
resp.close()
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
parsed_rurl = urlparse(resp.url)
url = '%s:%s' % (to_native_string(parsed_rurl.scheme), url)
# The scheme should be lower case...
parsed = urlparse(url)
url = parsed.geturl()
# Facilitate relative 'location' headers, as allowed by RFC 7231.
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
# Compliant with RFC3986, we percent encode the url.
if not parsed.netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
self.rebuild_method(prepared_request, resp)
# https://github.com/requests/requests/issues/1084
if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
# https://github.com/requests/requests/issues/3490
purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding')
for header in purged_headers:
prepared_request.headers.pop(header, None)
prepared_request.body = None
headers = prepared_request.headers
try:
del headers['Cookie']
except KeyError:
pass
# Extract any cookies sent on the response to the cookiejar
# in the new request. Because we've mutated our copied prepared
# request, use the old one that we haven't yet touched.
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
merge_cookies(prepared_request._cookies, self.cookies)
prepared_request.prepare_cookies(prepared_request._cookies)
# Rebuild auth and proxy information.
proxies = self.rebuild_proxies(prepared_request, proxies)
self.rebuild_auth(prepared_request, resp)
# A failed tell() sets `_body_position` to `object()`. This non-None
# value ensures `rewindable` will be True, allowing us to raise an
# UnrewindableBodyError, instead of hanging the connection.
rewindable = (
prepared_request._body_position is not None and
('Content-Length' in headers or 'Transfer-Encoding' in headers)
)
# Attempt to rewind consumed file-like object.
if rewindable:
rewind_body(prepared_request)
# Override the original request.
req = prepared_request
if yield_requests:
yield req
else:
resp = self.send(
req,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
allow_redirects=False,
**adapter_kwargs
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
# extract redirect url, if any, for the next loop
url = self.get_redirect_target(resp)
yield resp
|
[
"def",
"resolve_redirects",
"(",
"self",
",",
"resp",
",",
"req",
",",
"stream",
"=",
"False",
",",
"timeout",
"=",
"None",
",",
"verify",
"=",
"True",
",",
"cert",
"=",
"None",
",",
"proxies",
"=",
"None",
",",
"yield_requests",
"=",
"False",
",",
"*",
"*",
"adapter_kwargs",
")",
":",
"hist",
"=",
"[",
"]",
"# keep track of history",
"url",
"=",
"self",
".",
"get_redirect_target",
"(",
"resp",
")",
"while",
"url",
":",
"prepared_request",
"=",
"req",
".",
"copy",
"(",
")",
"# Update history and keep track of redirects.",
"# resp.history must ignore the original request in this loop",
"hist",
".",
"append",
"(",
"resp",
")",
"resp",
".",
"history",
"=",
"hist",
"[",
"1",
":",
"]",
"try",
":",
"resp",
".",
"content",
"# Consume socket so it can be released",
"except",
"(",
"ChunkedEncodingError",
",",
"ContentDecodingError",
",",
"RuntimeError",
")",
":",
"resp",
".",
"raw",
".",
"read",
"(",
"decode_content",
"=",
"False",
")",
"if",
"len",
"(",
"resp",
".",
"history",
")",
">=",
"self",
".",
"max_redirects",
":",
"raise",
"TooManyRedirects",
"(",
"'Exceeded %s redirects.'",
"%",
"self",
".",
"max_redirects",
",",
"response",
"=",
"resp",
")",
"# Release the connection back into the pool.",
"resp",
".",
"close",
"(",
")",
"# Handle redirection without scheme (see: RFC 1808 Section 4)",
"if",
"url",
".",
"startswith",
"(",
"'//'",
")",
":",
"parsed_rurl",
"=",
"urlparse",
"(",
"resp",
".",
"url",
")",
"url",
"=",
"'%s:%s'",
"%",
"(",
"to_native_string",
"(",
"parsed_rurl",
".",
"scheme",
")",
",",
"url",
")",
"# The scheme should be lower case...",
"parsed",
"=",
"urlparse",
"(",
"url",
")",
"url",
"=",
"parsed",
".",
"geturl",
"(",
")",
"# Facilitate relative 'location' headers, as allowed by RFC 7231.",
"# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')",
"# Compliant with RFC3986, we percent encode the url.",
"if",
"not",
"parsed",
".",
"netloc",
":",
"url",
"=",
"urljoin",
"(",
"resp",
".",
"url",
",",
"requote_uri",
"(",
"url",
")",
")",
"else",
":",
"url",
"=",
"requote_uri",
"(",
"url",
")",
"prepared_request",
".",
"url",
"=",
"to_native_string",
"(",
"url",
")",
"self",
".",
"rebuild_method",
"(",
"prepared_request",
",",
"resp",
")",
"# https://github.com/requests/requests/issues/1084",
"if",
"resp",
".",
"status_code",
"not",
"in",
"(",
"codes",
".",
"temporary_redirect",
",",
"codes",
".",
"permanent_redirect",
")",
":",
"# https://github.com/requests/requests/issues/3490",
"purged_headers",
"=",
"(",
"'Content-Length'",
",",
"'Content-Type'",
",",
"'Transfer-Encoding'",
")",
"for",
"header",
"in",
"purged_headers",
":",
"prepared_request",
".",
"headers",
".",
"pop",
"(",
"header",
",",
"None",
")",
"prepared_request",
".",
"body",
"=",
"None",
"headers",
"=",
"prepared_request",
".",
"headers",
"try",
":",
"del",
"headers",
"[",
"'Cookie'",
"]",
"except",
"KeyError",
":",
"pass",
"# Extract any cookies sent on the response to the cookiejar",
"# in the new request. Because we've mutated our copied prepared",
"# request, use the old one that we haven't yet touched.",
"extract_cookies_to_jar",
"(",
"prepared_request",
".",
"_cookies",
",",
"req",
",",
"resp",
".",
"raw",
")",
"merge_cookies",
"(",
"prepared_request",
".",
"_cookies",
",",
"self",
".",
"cookies",
")",
"prepared_request",
".",
"prepare_cookies",
"(",
"prepared_request",
".",
"_cookies",
")",
"# Rebuild auth and proxy information.",
"proxies",
"=",
"self",
".",
"rebuild_proxies",
"(",
"prepared_request",
",",
"proxies",
")",
"self",
".",
"rebuild_auth",
"(",
"prepared_request",
",",
"resp",
")",
"# A failed tell() sets `_body_position` to `object()`. This non-None",
"# value ensures `rewindable` will be True, allowing us to raise an",
"# UnrewindableBodyError, instead of hanging the connection.",
"rewindable",
"=",
"(",
"prepared_request",
".",
"_body_position",
"is",
"not",
"None",
"and",
"(",
"'Content-Length'",
"in",
"headers",
"or",
"'Transfer-Encoding'",
"in",
"headers",
")",
")",
"# Attempt to rewind consumed file-like object.",
"if",
"rewindable",
":",
"rewind_body",
"(",
"prepared_request",
")",
"# Override the original request.",
"req",
"=",
"prepared_request",
"if",
"yield_requests",
":",
"yield",
"req",
"else",
":",
"resp",
"=",
"self",
".",
"send",
"(",
"req",
",",
"stream",
"=",
"stream",
",",
"timeout",
"=",
"timeout",
",",
"verify",
"=",
"verify",
",",
"cert",
"=",
"cert",
",",
"proxies",
"=",
"proxies",
",",
"allow_redirects",
"=",
"False",
",",
"*",
"*",
"adapter_kwargs",
")",
"extract_cookies_to_jar",
"(",
"self",
".",
"cookies",
",",
"prepared_request",
",",
"resp",
".",
"raw",
")",
"# extract redirect url, if any, for the next loop",
"url",
"=",
"self",
".",
"get_redirect_target",
"(",
"resp",
")",
"yield",
"resp"
] |
Receives a Response. Returns a generator of Responses or Requests.
|
[
"Receives",
"a",
"Response",
".",
"Returns",
"a",
"generator",
"of",
"Responses",
"or",
"Requests",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/requests/sessions.py#L119-L225
|
8,596
|
wakatime/wakatime
|
wakatime/packages/requests/sessions.py
|
SessionRedirectMixin.rebuild_method
|
def rebuild_method(self, prepared_request, response):
"""When being redirected we may want to change the method of the request
based on certain specs or browser behavior.
"""
method = prepared_request.method
# http://tools.ietf.org/html/rfc7231#section-6.4.4
if response.status_code == codes.see_other and method != 'HEAD':
method = 'GET'
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
if response.status_code == codes.found and method != 'HEAD':
method = 'GET'
# Second, if a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in Issue 1704.
if response.status_code == codes.moved and method == 'POST':
method = 'GET'
prepared_request.method = method
|
python
|
def rebuild_method(self, prepared_request, response):
"""When being redirected we may want to change the method of the request
based on certain specs or browser behavior.
"""
method = prepared_request.method
# http://tools.ietf.org/html/rfc7231#section-6.4.4
if response.status_code == codes.see_other and method != 'HEAD':
method = 'GET'
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
if response.status_code == codes.found and method != 'HEAD':
method = 'GET'
# Second, if a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in Issue 1704.
if response.status_code == codes.moved and method == 'POST':
method = 'GET'
prepared_request.method = method
|
[
"def",
"rebuild_method",
"(",
"self",
",",
"prepared_request",
",",
"response",
")",
":",
"method",
"=",
"prepared_request",
".",
"method",
"# http://tools.ietf.org/html/rfc7231#section-6.4.4",
"if",
"response",
".",
"status_code",
"==",
"codes",
".",
"see_other",
"and",
"method",
"!=",
"'HEAD'",
":",
"method",
"=",
"'GET'",
"# Do what the browsers do, despite standards...",
"# First, turn 302s into GETs.",
"if",
"response",
".",
"status_code",
"==",
"codes",
".",
"found",
"and",
"method",
"!=",
"'HEAD'",
":",
"method",
"=",
"'GET'",
"# Second, if a POST is responded to with a 301, turn it into a GET.",
"# This bizarre behaviour is explained in Issue 1704.",
"if",
"response",
".",
"status_code",
"==",
"codes",
".",
"moved",
"and",
"method",
"==",
"'POST'",
":",
"method",
"=",
"'GET'",
"prepared_request",
".",
"method",
"=",
"method"
] |
When being redirected we may want to change the method of the request
based on certain specs or browser behavior.
|
[
"When",
"being",
"redirected",
"we",
"may",
"want",
"to",
"change",
"the",
"method",
"of",
"the",
"request",
"based",
"on",
"certain",
"specs",
"or",
"browser",
"behavior",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/requests/sessions.py#L292-L312
|
8,597
|
wakatime/wakatime
|
wakatime/packages/pygments/filter.py
|
apply_filters
|
def apply_filters(stream, filters, lexer=None):
"""
Use this method to apply an iterable of filters to
a stream. If lexer is given it's forwarded to the
filter, otherwise the filter receives `None`.
"""
def _apply(filter_, stream):
for token in filter_.filter(lexer, stream):
yield token
for filter_ in filters:
stream = _apply(filter_, stream)
return stream
|
python
|
def apply_filters(stream, filters, lexer=None):
"""
Use this method to apply an iterable of filters to
a stream. If lexer is given it's forwarded to the
filter, otherwise the filter receives `None`.
"""
def _apply(filter_, stream):
for token in filter_.filter(lexer, stream):
yield token
for filter_ in filters:
stream = _apply(filter_, stream)
return stream
|
[
"def",
"apply_filters",
"(",
"stream",
",",
"filters",
",",
"lexer",
"=",
"None",
")",
":",
"def",
"_apply",
"(",
"filter_",
",",
"stream",
")",
":",
"for",
"token",
"in",
"filter_",
".",
"filter",
"(",
"lexer",
",",
"stream",
")",
":",
"yield",
"token",
"for",
"filter_",
"in",
"filters",
":",
"stream",
"=",
"_apply",
"(",
"filter_",
",",
"stream",
")",
"return",
"stream"
] |
Use this method to apply an iterable of filters to
a stream. If lexer is given it's forwarded to the
filter, otherwise the filter receives `None`.
|
[
"Use",
"this",
"method",
"to",
"apply",
"an",
"iterable",
"of",
"filters",
"to",
"a",
"stream",
".",
"If",
"lexer",
"is",
"given",
"it",
"s",
"forwarded",
"to",
"the",
"filter",
"otherwise",
"the",
"filter",
"receives",
"None",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/filter.py#L13-L24
|
8,598
|
wakatime/wakatime
|
wakatime/packages/pygments/lexers/data.py
|
YamlLexer.reset_indent
|
def reset_indent(token_class):
"""Reset the indentation levels."""
def callback(lexer, match, context):
text = match.group()
context.indent_stack = []
context.indent = -1
context.next_indent = 0
context.block_scalar_indent = None
yield match.start(), token_class, text
context.pos = match.end()
return callback
|
python
|
def reset_indent(token_class):
"""Reset the indentation levels."""
def callback(lexer, match, context):
text = match.group()
context.indent_stack = []
context.indent = -1
context.next_indent = 0
context.block_scalar_indent = None
yield match.start(), token_class, text
context.pos = match.end()
return callback
|
[
"def",
"reset_indent",
"(",
"token_class",
")",
":",
"def",
"callback",
"(",
"lexer",
",",
"match",
",",
"context",
")",
":",
"text",
"=",
"match",
".",
"group",
"(",
")",
"context",
".",
"indent_stack",
"=",
"[",
"]",
"context",
".",
"indent",
"=",
"-",
"1",
"context",
".",
"next_indent",
"=",
"0",
"context",
".",
"block_scalar_indent",
"=",
"None",
"yield",
"match",
".",
"start",
"(",
")",
",",
"token_class",
",",
"text",
"context",
".",
"pos",
"=",
"match",
".",
"end",
"(",
")",
"return",
"callback"
] |
Reset the indentation levels.
|
[
"Reset",
"the",
"indentation",
"levels",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/data.py#L56-L66
|
8,599
|
wakatime/wakatime
|
wakatime/packages/pygments/lexers/data.py
|
YamlLexer.save_indent
|
def save_indent(token_class, start=False):
"""Save a possible indentation level."""
def callback(lexer, match, context):
text = match.group()
extra = ''
if start:
context.next_indent = len(text)
if context.next_indent < context.indent:
while context.next_indent < context.indent:
context.indent = context.indent_stack.pop()
if context.next_indent > context.indent:
extra = text[context.indent:]
text = text[:context.indent]
else:
context.next_indent += len(text)
if text:
yield match.start(), token_class, text
if extra:
yield match.start()+len(text), token_class.Error, extra
context.pos = match.end()
return callback
|
python
|
def save_indent(token_class, start=False):
"""Save a possible indentation level."""
def callback(lexer, match, context):
text = match.group()
extra = ''
if start:
context.next_indent = len(text)
if context.next_indent < context.indent:
while context.next_indent < context.indent:
context.indent = context.indent_stack.pop()
if context.next_indent > context.indent:
extra = text[context.indent:]
text = text[:context.indent]
else:
context.next_indent += len(text)
if text:
yield match.start(), token_class, text
if extra:
yield match.start()+len(text), token_class.Error, extra
context.pos = match.end()
return callback
|
[
"def",
"save_indent",
"(",
"token_class",
",",
"start",
"=",
"False",
")",
":",
"def",
"callback",
"(",
"lexer",
",",
"match",
",",
"context",
")",
":",
"text",
"=",
"match",
".",
"group",
"(",
")",
"extra",
"=",
"''",
"if",
"start",
":",
"context",
".",
"next_indent",
"=",
"len",
"(",
"text",
")",
"if",
"context",
".",
"next_indent",
"<",
"context",
".",
"indent",
":",
"while",
"context",
".",
"next_indent",
"<",
"context",
".",
"indent",
":",
"context",
".",
"indent",
"=",
"context",
".",
"indent_stack",
".",
"pop",
"(",
")",
"if",
"context",
".",
"next_indent",
">",
"context",
".",
"indent",
":",
"extra",
"=",
"text",
"[",
"context",
".",
"indent",
":",
"]",
"text",
"=",
"text",
"[",
":",
"context",
".",
"indent",
"]",
"else",
":",
"context",
".",
"next_indent",
"+=",
"len",
"(",
"text",
")",
"if",
"text",
":",
"yield",
"match",
".",
"start",
"(",
")",
",",
"token_class",
",",
"text",
"if",
"extra",
":",
"yield",
"match",
".",
"start",
"(",
")",
"+",
"len",
"(",
"text",
")",
",",
"token_class",
".",
"Error",
",",
"extra",
"context",
".",
"pos",
"=",
"match",
".",
"end",
"(",
")",
"return",
"callback"
] |
Save a possible indentation level.
|
[
"Save",
"a",
"possible",
"indentation",
"level",
"."
] |
74519ace04e8472f3a3993269963732b9946a01d
|
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/lexers/data.py#L68-L88
|