| id (int32, 0-252k) | repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (1 class) | code (string, 75-19.8k chars) | code_tokens (list) | docstring (string, 3-17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87-242 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
16,800
|
eventbrite/pysoa
|
pysoa/common/logging.py
|
SyslogHandler.emit
|
def emit(self, record):
"""
Emits a record. The record is sent carefully, according to the following rules, to ensure that data is not
lost by exceeding the MTU of the connection.
- If the byte-encoded record length plus prefix length plus suffix length plus priority length is less than the
maximum allowed length, then a single packet is sent, containing the priority, prefix, full record, and
suffix, in that order.
- If it's greater than or equal to the maximum allowed length and the overflow behavior is set to "truncate,"
the record is cleanly truncated (being careful not to split in the middle of a multi-byte character), and
then a single packet is sent, containing the priority, prefix, truncated record, and suffix, in that order.
- If it's greater than or equal to the maximum allowed length and the overflow behavior is set to "fragment,"
the record preamble (things like file name, logger name, correlation ID, etc.) is extracted from the start
of the record to calculate a new chunk length. The remainder of the record (which should just be the true
message and any exception info) is then chunked (being careful not to split in the middle of a multi-byte
character) into lengths less than or equal to the chunk length, and then the record is sent as multiple
packets, each packet containing the priority, prefix, record preamble, message chunk, and suffix, in that
order.
"""
# noinspection PyBroadException
try:
formatted_message = self.format(record)
encoded_message = formatted_message.encode('utf-8')
prefix = suffix = b''
if getattr(self, 'ident', False):
prefix = self.ident.encode('utf-8') if isinstance(self.ident, six.text_type) else self.ident
if getattr(self, 'append_nul', True):
suffix = '\000'.encode('utf-8')
priority = '<{:d}>'.format(
self.encodePriority(self.facility, self.mapPriority(record.levelname))
).encode('utf-8')
message_length = len(encoded_message)
message_length_limit = self.maximum_length - len(prefix) - len(suffix) - len(priority)
if message_length < message_length_limit:
parts = [priority + prefix + encoded_message + suffix]
elif self.overflow == self.OVERFLOW_BEHAVIOR_TRUNCATE:
truncated_message, _ = self._cleanly_slice_encoded_string(encoded_message, message_length_limit)
parts = [priority + prefix + truncated_message + suffix]
else:
# This can't work perfectly, but it's pretty unusual for a message to go before machine-parseable parts
# in the formatted record. So we split the record on the message part. Everything before the split
# becomes the preamble and gets repeated every packet. Everything after the split gets chunked. There's
# no reason to match on more than the first 40 characters of the message--the chances of that matching
# the wrong part of the record are astronomical.
try:
index = formatted_message.index(record.getMessage()[:40])
start_of_message, to_chunk = formatted_message[:index], formatted_message[index:]
except (TypeError, ValueError):
# We can't locate the message in the formatted record? That's unfortunate. Let's make something up.
start_of_message, to_chunk = '{} '.format(formatted_message[:30]), formatted_message[30:]
start_of_message = start_of_message.encode('utf-8')
to_chunk = to_chunk.encode('utf-8')
# 12 is the length of "... (cont'd)" in bytes
chunk_length_limit = message_length_limit - len(start_of_message) - 12
i = 1
parts = []
remaining_message = to_chunk
while remaining_message:
message_id = b''
subtractor = 0
if i > 1:
# If this is not the first message, we determine message # so that we can subtract that length
message_id = '{}'.format(i).encode('utf-8')
# 14 is the length of "(cont'd #) ..." in bytes
subtractor = 14 + len(message_id)
chunk, remaining_message = self._cleanly_slice_encoded_string(
remaining_message,
chunk_length_limit - subtractor,
)
if i > 1:
# If this is not the first message, we prepend the chunk to indicate continuation
chunk = b"(cont'd #" + message_id + b') ...' + chunk
i += 1
if remaining_message:
# If this is not the last message, we append the chunk to indicate continuation
chunk = chunk + b"... (cont'd)"
parts.append(priority + prefix + start_of_message + chunk + suffix)
self._send(parts)
except Exception:
self.handleError(record)
|
python
|
def emit(self, record):
"""
Emits a record. The record is sent carefully, according to the following rules, to ensure that data is not
lost by exceeding the MTU of the connection.
- If the byte-encoded record length plus prefix length plus suffix length plus priority length is less than the
maximum allowed length, then a single packet is sent, containing the priority, prefix, full record, and
suffix, in that order.
- If it's greater than or equal to the maximum allowed length and the overflow behavior is set to "truncate,"
the record is cleanly truncated (being careful not to split in the middle of a multi-byte character), and
then a single packet is sent, containing the priority, prefix, truncated record, and suffix, in that order.
- If it's greater than or equal to the maximum allowed length and the overflow behavior is set to "fragment,"
the record preamble (things like file name, logger name, correlation ID, etc.) is extracted from the start
of the record to calculate a new chunk length. The remainder of the record (which should just be the true
message and any exception info) is then chunked (being careful not to split in the middle of a multi-byte
character) into lengths less than or equal to the chunk length, and then the record is sent as multiple
packets, each packet containing the priority, prefix, record preamble, message chunk, and suffix, in that
order.
"""
# noinspection PyBroadException
try:
formatted_message = self.format(record)
encoded_message = formatted_message.encode('utf-8')
prefix = suffix = b''
if getattr(self, 'ident', False):
prefix = self.ident.encode('utf-8') if isinstance(self.ident, six.text_type) else self.ident
if getattr(self, 'append_nul', True):
suffix = '\000'.encode('utf-8')
priority = '<{:d}>'.format(
self.encodePriority(self.facility, self.mapPriority(record.levelname))
).encode('utf-8')
message_length = len(encoded_message)
message_length_limit = self.maximum_length - len(prefix) - len(suffix) - len(priority)
if message_length < message_length_limit:
parts = [priority + prefix + encoded_message + suffix]
elif self.overflow == self.OVERFLOW_BEHAVIOR_TRUNCATE:
truncated_message, _ = self._cleanly_slice_encoded_string(encoded_message, message_length_limit)
parts = [priority + prefix + truncated_message + suffix]
else:
# This can't work perfectly, but it's pretty unusual for a message to go before machine-parseable parts
# in the formatted record. So we split the record on the message part. Everything before the split
# becomes the preamble and gets repeated every packet. Everything after the split gets chunked. There's
# no reason to match on more than the first 40 characters of the message--the chances of that matching
# the wrong part of the record are astronomical.
try:
index = formatted_message.index(record.getMessage()[:40])
start_of_message, to_chunk = formatted_message[:index], formatted_message[index:]
except (TypeError, ValueError):
# We can't locate the message in the formatted record? That's unfortunate. Let's make something up.
start_of_message, to_chunk = '{} '.format(formatted_message[:30]), formatted_message[30:]
start_of_message = start_of_message.encode('utf-8')
to_chunk = to_chunk.encode('utf-8')
# 12 is the length of "... (cont'd)" in bytes
chunk_length_limit = message_length_limit - len(start_of_message) - 12
i = 1
parts = []
remaining_message = to_chunk
while remaining_message:
message_id = b''
subtractor = 0
if i > 1:
# If this is not the first message, we determine message # so that we can subtract that length
message_id = '{}'.format(i).encode('utf-8')
# 14 is the length of "(cont'd #) ..." in bytes
subtractor = 14 + len(message_id)
chunk, remaining_message = self._cleanly_slice_encoded_string(
remaining_message,
chunk_length_limit - subtractor,
)
if i > 1:
# If this is not the first message, we prepend the chunk to indicate continuation
chunk = b"(cont'd #" + message_id + b') ...' + chunk
i += 1
if remaining_message:
# If this is not the last message, we append the chunk to indicate continuation
chunk = chunk + b"... (cont'd)"
parts.append(priority + prefix + start_of_message + chunk + suffix)
self._send(parts)
except Exception:
self.handleError(record)
|
[
"def",
"emit",
"(",
"self",
",",
"record",
")",
":",
"# noinspection PyBroadException",
"try",
":",
"formatted_message",
"=",
"self",
".",
"format",
"(",
"record",
")",
"encoded_message",
"=",
"formatted_message",
".",
"encode",
"(",
"'utf-8'",
")",
"prefix",
"=",
"suffix",
"=",
"b''",
"if",
"getattr",
"(",
"self",
",",
"'ident'",
",",
"False",
")",
":",
"prefix",
"=",
"self",
".",
"ident",
".",
"encode",
"(",
"'utf-8'",
")",
"if",
"isinstance",
"(",
"self",
".",
"ident",
",",
"six",
".",
"text_type",
")",
"else",
"self",
".",
"ident",
"if",
"getattr",
"(",
"self",
",",
"'append_nul'",
",",
"True",
")",
":",
"suffix",
"=",
"'\\000'",
".",
"encode",
"(",
"'utf-8'",
")",
"priority",
"=",
"'<{:d}>'",
".",
"format",
"(",
"self",
".",
"encodePriority",
"(",
"self",
".",
"facility",
",",
"self",
".",
"mapPriority",
"(",
"record",
".",
"levelname",
")",
")",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"message_length",
"=",
"len",
"(",
"encoded_message",
")",
"message_length_limit",
"=",
"self",
".",
"maximum_length",
"-",
"len",
"(",
"prefix",
")",
"-",
"len",
"(",
"suffix",
")",
"-",
"len",
"(",
"priority",
")",
"if",
"message_length",
"<",
"message_length_limit",
":",
"parts",
"=",
"[",
"priority",
"+",
"prefix",
"+",
"encoded_message",
"+",
"suffix",
"]",
"elif",
"self",
".",
"overflow",
"==",
"self",
".",
"OVERFLOW_BEHAVIOR_TRUNCATE",
":",
"truncated_message",
",",
"_",
"=",
"self",
".",
"_cleanly_slice_encoded_string",
"(",
"encoded_message",
",",
"message_length_limit",
")",
"parts",
"=",
"[",
"priority",
"+",
"prefix",
"+",
"truncated_message",
"+",
"suffix",
"]",
"else",
":",
"# This can't work perfectly, but it's pretty unusual for a message to go before machine-parseable parts",
"# in the formatted record. So we split the record on the message part. Everything before the split",
"# becomes the preamble and gets repeated every packet. Everything after the split gets chunked. There's",
"# no reason to match on more than the first 40 characters of the message--the chances of that matching",
"# the wrong part of the record are astronomical.",
"try",
":",
"index",
"=",
"formatted_message",
".",
"index",
"(",
"record",
".",
"getMessage",
"(",
")",
"[",
":",
"40",
"]",
")",
"start_of_message",
",",
"to_chunk",
"=",
"formatted_message",
"[",
":",
"index",
"]",
",",
"formatted_message",
"[",
"index",
":",
"]",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"# We can't locate the message in the formatted record? That's unfortunate. Let's make something up.",
"start_of_message",
",",
"to_chunk",
"=",
"'{} '",
".",
"format",
"(",
"formatted_message",
"[",
":",
"30",
"]",
")",
",",
"formatted_message",
"[",
"30",
":",
"]",
"start_of_message",
"=",
"start_of_message",
".",
"encode",
"(",
"'utf-8'",
")",
"to_chunk",
"=",
"to_chunk",
".",
"encode",
"(",
"'utf-8'",
")",
"# 12 is the length of \"... (cont'd)\" in bytes",
"chunk_length_limit",
"=",
"message_length_limit",
"-",
"len",
"(",
"start_of_message",
")",
"-",
"12",
"i",
"=",
"1",
"parts",
"=",
"[",
"]",
"remaining_message",
"=",
"to_chunk",
"while",
"remaining_message",
":",
"message_id",
"=",
"b''",
"subtractor",
"=",
"0",
"if",
"i",
">",
"1",
":",
"# If this is not the first message, we determine message # so that we can subtract that length",
"message_id",
"=",
"'{}'",
".",
"format",
"(",
"i",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"# 14 is the length of \"(cont'd #) ...\" in bytes",
"subtractor",
"=",
"14",
"+",
"len",
"(",
"message_id",
")",
"chunk",
",",
"remaining_message",
"=",
"self",
".",
"_cleanly_slice_encoded_string",
"(",
"remaining_message",
",",
"chunk_length_limit",
"-",
"subtractor",
",",
")",
"if",
"i",
">",
"1",
":",
"# If this is not the first message, we prepend the chunk to indicate continuation",
"chunk",
"=",
"b\"(cont'd #\"",
"+",
"message_id",
"+",
"b') ...'",
"+",
"chunk",
"i",
"+=",
"1",
"if",
"remaining_message",
":",
"# If this is not the last message, we append the chunk to indicate continuation",
"chunk",
"=",
"chunk",
"+",
"b\"... (cont'd)\"",
"parts",
".",
"append",
"(",
"priority",
"+",
"prefix",
"+",
"start_of_message",
"+",
"chunk",
"+",
"suffix",
")",
"self",
".",
"_send",
"(",
"parts",
")",
"except",
"Exception",
":",
"self",
".",
"handleError",
"(",
"record",
")"
] |
Emits a record. The record is sent carefully, according to the following rules, to ensure that data is not
lost by exceeding the MTU of the connection.
- If the byte-encoded record length plus prefix length plus suffix length plus priority length is less than the
maximum allowed length, then a single packet is sent, containing the priority, prefix, full record, and
suffix, in that order.
- If it's greater than or equal to the maximum allowed length and the overflow behavior is set to "truncate,"
the record is cleanly truncated (being careful not to split in the middle of a multi-byte character), and
then a single packet is sent, containing the priority, prefix, truncated record, and suffix, in that order.
- If it's greater than or equal to the maximum allowed length and the overflow behavior is set to "fragment,"
the record preamble (things like file name, logger name, correlation ID, etc.) is extracted from the start
of the record to calculate a new chunk length. The remainder of the record (which should just be the true
message and any exception info) is then chunked (being careful not to split in the middle of a multi-byte
character) into lengths less than or equal to the chunk length, and then the record is sent as multiple
packets, each packet containing the priority, prefix, record preamble, message chunk, and suffix, in that
order.
|
[
"Emits",
"a",
"record",
".",
"The",
"record",
"is",
"sent",
"carefully",
"according",
"to",
"the",
"following",
"rules",
"to",
"ensure",
"that",
"data",
"is",
"not",
"lost",
"by",
"exceeding",
"the",
"MTU",
"of",
"the",
"connection",
"."
] |
9c052cae2397d13de3df8ae2c790846a70b53f18
|
https://github.com/eventbrite/pysoa/blob/9c052cae2397d13de3df8ae2c790846a70b53f18/pysoa/common/logging.py#L270-L359
|
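The record above relies on a helper, _cleanly_slice_encoded_string, whose body is not part of this row. Below is a minimal sketch of a UTF-8-boundary-safe slice; it is an illustration only, not the pysoa implementation, and the function name and two-value return shape are assumed from the call sites above.

# Sketch only: slice an encoded byte string at or below `limit` bytes without
# splitting a multi-byte UTF-8 character; returns (head, remainder).
def cleanly_slice_encoded_string(encoded, limit):
    head = encoded[:limit]
    while head:
        try:
            head.decode('utf-8')      # succeeds only on a character boundary
            break
        except UnicodeDecodeError:
            head = head[:-1]          # back up one byte and retry
    return head, encoded[len(head):]

# A two-byte character straddling the limit is pushed whole into the remainder.
chunk, rest = cleanly_slice_encoded_string(u'abé'.encode('utf-8'), 3)
assert chunk == b'ab' and rest == u'é'.encode('utf-8')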
16,801
|
eventbrite/pysoa
|
pysoa/client/expander.py
|
TypeNode.add_expansion
|
def add_expansion(self, expansion_node):
"""
Add a child expansion node to the type node's expansions.
If an expansion node with the same name is already present in the type node's expansions, the new and existing
expansion node's children are merged.
:param expansion_node: The expansion node to add
:type expansion_node: ExpansionNode
"""
# Check for existing expansion node with the same name
existing_expansion_node = self.get_expansion(expansion_node.name)
if existing_expansion_node:
# Expansion node exists with the same name, merge child expansions.
for child_expansion in expansion_node.expansions:
existing_expansion_node.add_expansion(child_expansion)
else:
# Add the expansion node.
self._expansions[expansion_node.name] = expansion_node
|
python
|
def add_expansion(self, expansion_node):
"""
Add a child expansion node to the type node's expansions.
If an expansion node with the same name is already present in the type node's expansions, the new and existing
expansion node's children are merged.
:param expansion_node: The expansion node to add
:type expansion_node: ExpansionNode
"""
# Check for existing expansion node with the same name
existing_expansion_node = self.get_expansion(expansion_node.name)
if existing_expansion_node:
# Expansion node exists with the same name, merge child expansions.
for child_expansion in expansion_node.expansions:
existing_expansion_node.add_expansion(child_expansion)
else:
# Add the expansion node.
self._expansions[expansion_node.name] = expansion_node
|
[
"def",
"add_expansion",
"(",
"self",
",",
"expansion_node",
")",
":",
"# Check for existing expansion node with the same name",
"existing_expansion_node",
"=",
"self",
".",
"get_expansion",
"(",
"expansion_node",
".",
"name",
")",
"if",
"existing_expansion_node",
":",
"# Expansion node exists with the same name, merge child expansions.",
"for",
"child_expansion",
"in",
"expansion_node",
".",
"expansions",
":",
"existing_expansion_node",
".",
"add_expansion",
"(",
"child_expansion",
")",
"else",
":",
"# Add the expansion node.",
"self",
".",
"_expansions",
"[",
"expansion_node",
".",
"name",
"]",
"=",
"expansion_node"
] |
Add a child expansion node to the type node's expansions.
If an expansion node with the same name is already present in the type node's expansions, the new and existing
expansion node's children are merged.
:param expansion_node: The expansion node to add
:type expansion_node: ExpansionNode
|
[
"Add",
"a",
"child",
"expansion",
"node",
"to",
"the",
"type",
"node",
"s",
"expansions",
"."
] |
9c052cae2397d13de3df8ae2c790846a70b53f18
|
https://github.com/eventbrite/pysoa/blob/9c052cae2397d13de3df8ae2c790846a70b53f18/pysoa/client/expander.py#L114-L132
|
16,802
|
eventbrite/pysoa
|
pysoa/client/expander.py
|
TypeNode.find_objects
|
def find_objects(self, obj):
"""
Find all objects in obj that match the type of the type node.
:param obj: A dictionary or list of dictionaries to search, recursively
:type obj: union[dict, list[dict]]
:return: a list of dictionary objects that have a "_type" key value that matches the type of this node.
:rtype: list[dict]
"""
objects = []
if isinstance(obj, dict):
# obj is a dictionary, so it is a potential match...
object_type = obj.get('_type')
if object_type == self.type:
# Found a match!
objects.append(obj)
else:
# Not a match. Check each value of the dictionary for matches.
for sub_object in six.itervalues(obj):
objects.extend(self.find_objects(sub_object))
elif isinstance(obj, list):
# obj is a list. Check each element of the list for matches.
for sub_object in obj:
objects.extend(self.find_objects(sub_object))
return objects
|
python
|
def find_objects(self, obj):
"""
Find all objects in obj that match the type of the type node.
:param obj: A dictionary or list of dictionaries to search, recursively
:type obj: union[dict, list[dict]]
:return: a list of dictionary objects that have a "_type" key value that matches the type of this node.
:rtype: list[dict]
"""
objects = []
if isinstance(obj, dict):
# obj is a dictionary, so it is a potential match...
object_type = obj.get('_type')
if object_type == self.type:
# Found a match!
objects.append(obj)
else:
# Not a match. Check each value of the dictionary for matches.
for sub_object in six.itervalues(obj):
objects.extend(self.find_objects(sub_object))
elif isinstance(obj, list):
# obj is a list. Check each element of the list for matches.
for sub_object in obj:
objects.extend(self.find_objects(sub_object))
return objects
|
[
"def",
"find_objects",
"(",
"self",
",",
"obj",
")",
":",
"objects",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"# obj is a dictionary, so it is a potential match...",
"object_type",
"=",
"obj",
".",
"get",
"(",
"'_type'",
")",
"if",
"object_type",
"==",
"self",
".",
"type",
":",
"# Found a match!",
"objects",
".",
"append",
"(",
"obj",
")",
"else",
":",
"# Not a match. Check each value of the dictionary for matches.",
"for",
"sub_object",
"in",
"six",
".",
"itervalues",
"(",
"obj",
")",
":",
"objects",
".",
"extend",
"(",
"self",
".",
"find_objects",
"(",
"sub_object",
")",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"list",
")",
":",
"# obj is a list. Check each element of the list for matches.",
"for",
"sub_object",
"in",
"obj",
":",
"objects",
".",
"extend",
"(",
"self",
".",
"find_objects",
"(",
"sub_object",
")",
")",
"return",
"objects"
] |
Find all objects in obj that match the type of the type node.
:param obj: A dictionary or list of dictionaries to search, recursively
:type obj: union[dict, list[dict]]
:return: a list of dictionary objects that have a "_type" key value that matches the type of this node.
:rtype: list[dict]
|
[
"Find",
"all",
"objects",
"in",
"obj",
"that",
"match",
"the",
"type",
"of",
"the",
"type",
"node",
"."
] |
9c052cae2397d13de3df8ae2c790846a70b53f18
|
https://github.com/eventbrite/pysoa/blob/9c052cae2397d13de3df8ae2c790846a70b53f18/pysoa/client/expander.py#L147-L174
|
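A hypothetical illustration (not from the pysoa docs) of the kind of payload find_objects walks: for a type node whose type is 'user', both tagged dictionaries below are collected, including the one nested inside a list.

payload = {
    'order': {
        '_type': 'order',
        'buyer': {'_type': 'user', 'id': 1},         # matched directly
        'attendees': [{'_type': 'user', 'id': 2}],   # matched inside a list
    },
}
# Conceptual result of find_objects(payload) on a 'user' type node:
#   [{'_type': 'user', 'id': 1}, {'_type': 'user', 'id': 2}]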
16,803
|
eventbrite/pysoa
|
pysoa/client/expander.py
|
TypeNode.to_dict
|
def to_dict(self):
"""
Convert the tree node to its dictionary representation.
:return: an expansion dictionary that represents the type and expansions of this tree node.
:rtype dict[list[union[str, unicode]]]
"""
expansion_strings = []
for expansion in self.expansions:
expansion_strings.extend(expansion.to_strings())
return {
self.type: expansion_strings,
}
|
python
|
def to_dict(self):
"""
Convert the tree node to its dictionary representation.
:return: an expansion dictionary that represents the type and expansions of this tree node.
:rtype dict[list[union[str, unicode]]]
"""
expansion_strings = []
for expansion in self.expansions:
expansion_strings.extend(expansion.to_strings())
return {
self.type: expansion_strings,
}
|
[
"def",
"to_dict",
"(",
"self",
")",
":",
"expansion_strings",
"=",
"[",
"]",
"for",
"expansion",
"in",
"self",
".",
"expansions",
":",
"expansion_strings",
".",
"extend",
"(",
"expansion",
".",
"to_strings",
"(",
")",
")",
"return",
"{",
"self",
".",
"type",
":",
"expansion_strings",
",",
"}"
] |
Convert the tree node to its dictionary representation.
:return: an expansion dictionary that represents the type and expansions of this tree node.
:rtype dict[list[union[str, unicode]]]
|
[
"Convert",
"the",
"tree",
"node",
"to",
"its",
"dictionary",
"representation",
"."
] |
9c052cae2397d13de3df8ae2c790846a70b53f18
|
https://github.com/eventbrite/pysoa/blob/9c052cae2397d13de3df8ae2c790846a70b53f18/pysoa/client/expander.py#L183-L197
|
16,804
|
eventbrite/pysoa
|
pysoa/client/expander.py
|
ExpansionNode.to_strings
|
def to_strings(self):
"""
Convert the expansion node to a list of expansion strings.
:return: a list of expansion strings that represent the leaf nodes of the expansion tree.
:rtype: list[union[str, unicode]]
"""
result = []
if not self.expansions:
result.append(self.name)
else:
for expansion in self.expansions:
result.extend('{}.{}'.format(self.name, es) for es in expansion.to_strings())
return result
|
python
|
def to_strings(self):
"""
Convert the expansion node to a list of expansion strings.
:return: a list of expansion strings that represent the leaf nodes of the expansion tree.
:rtype: list[union[str, unicode]]
"""
result = []
if not self.expansions:
result.append(self.name)
else:
for expansion in self.expansions:
result.extend('{}.{}'.format(self.name, es) for es in expansion.to_strings())
return result
|
[
"def",
"to_strings",
"(",
"self",
")",
":",
"result",
"=",
"[",
"]",
"if",
"not",
"self",
".",
"expansions",
":",
"result",
".",
"append",
"(",
"self",
".",
"name",
")",
"else",
":",
"for",
"expansion",
"in",
"self",
".",
"expansions",
":",
"result",
".",
"extend",
"(",
"'{}.{}'",
".",
"format",
"(",
"self",
".",
"name",
",",
"es",
")",
"for",
"es",
"in",
"expansion",
".",
"to_strings",
"(",
")",
")",
"return",
"result"
] |
Convert the expansion node to a list of expansion strings.
:return: a list of expansion strings that represent the leaf nodes of the expansion tree.
:rtype: list[union[str, unicode]]
|
[
"Convert",
"the",
"expansion",
"node",
"to",
"a",
"list",
"of",
"expansion",
"strings",
"."
] |
9c052cae2397d13de3df8ae2c790846a70b53f18
|
https://github.com/eventbrite/pysoa/blob/9c052cae2397d13de3df8ae2c790846a70b53f18/pysoa/client/expander.py#L252-L267
|
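A hypothetical illustration of how to_strings and the to_dict record above collapse a small tree into dotted expansion strings; the type and expansion names are invented for the example.

# A type node 'event' with child expansions
#     venue -> address
#     organizer
# flattens leaf by leaf:
#   venue.to_strings()     == ['venue.address']
#   organizer.to_strings() == ['organizer']
# so the enclosing type node's to_dict() returns:
expected = {'event': ['venue.address', 'organizer']}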
16,805
|
eventbrite/pysoa
|
pysoa/client/expander.py
|
ExpansionConverter.dict_to_trees
|
def dict_to_trees(self, expansion_dict):
"""
Convert an expansion dictionary to a list of expansion trees.
:param expansion_dict: An expansion dictionary (see below)
:type expansion_dict: dict
:return: a list of expansion trees (`TreeNode` instances).
:rtype: list[TreeNode]
Expansion Dictionary Format:
{
"<type>": ["<expansion string>", ...],
...
}
<type> is the type of object to expand.
<expansion string> is a string with the following format:
<expansion string> => <expansion name>[.<expansion string>]
"""
trees = []
for node_type, expansion_list in six.iteritems(expansion_dict):
type_node = TypeNode(node_type=node_type)
for expansion_string in expansion_list:
expansion_node = type_node
for expansion_name in expansion_string.split('.'):
child_expansion_node = expansion_node.get_expansion(expansion_name)
if not child_expansion_node:
type_expansion = self.type_expansions[expansion_node.type][expansion_name]
type_route = self.type_routes[type_expansion['route']]
if type_expansion['destination_field'] == type_expansion['source_field']:
raise ValueError(
'Expansion configuration destination_field error: '
'destination_field can not have the same name as the source_field: '
'{}'.format(type_expansion['source_field'])
)
child_expansion_node = ExpansionNode(
node_type=type_expansion['type'],
name=expansion_name,
source_field=type_expansion['source_field'],
destination_field=type_expansion['destination_field'],
service=type_route['service'],
action=type_route['action'],
request_field=type_route['request_field'],
response_field=type_route['response_field'],
raise_action_errors=type_expansion.get('raise_action_errors', False),
)
expansion_node.add_expansion(child_expansion_node)
expansion_node = child_expansion_node
trees.append(type_node)
return trees
|
python
|
def dict_to_trees(self, expansion_dict):
"""
Convert an expansion dictionary to a list of expansion trees.
:param expansion_dict: An expansion dictionary (see below)
:type expansion_dict: dict
:return: a list of expansion trees (`TreeNode` instances).
:rtype: list[TreeNode]
Expansion Dictionary Format:
{
"<type>": ["<expansion string>", ...],
...
}
<type> is the type of object to expand.
<expansion string> is a string with the following format:
<expansion string> => <expansion name>[.<expansion string>]
"""
trees = []
for node_type, expansion_list in six.iteritems(expansion_dict):
type_node = TypeNode(node_type=node_type)
for expansion_string in expansion_list:
expansion_node = type_node
for expansion_name in expansion_string.split('.'):
child_expansion_node = expansion_node.get_expansion(expansion_name)
if not child_expansion_node:
type_expansion = self.type_expansions[expansion_node.type][expansion_name]
type_route = self.type_routes[type_expansion['route']]
if type_expansion['destination_field'] == type_expansion['source_field']:
raise ValueError(
'Expansion configuration destination_field error: '
'destination_field can not have the same name as the source_field: '
'{}'.format(type_expansion['source_field'])
)
child_expansion_node = ExpansionNode(
node_type=type_expansion['type'],
name=expansion_name,
source_field=type_expansion['source_field'],
destination_field=type_expansion['destination_field'],
service=type_route['service'],
action=type_route['action'],
request_field=type_route['request_field'],
response_field=type_route['response_field'],
raise_action_errors=type_expansion.get('raise_action_errors', False),
)
expansion_node.add_expansion(child_expansion_node)
expansion_node = child_expansion_node
trees.append(type_node)
return trees
|
[
"def",
"dict_to_trees",
"(",
"self",
",",
"expansion_dict",
")",
":",
"trees",
"=",
"[",
"]",
"for",
"node_type",
",",
"expansion_list",
"in",
"six",
".",
"iteritems",
"(",
"expansion_dict",
")",
":",
"type_node",
"=",
"TypeNode",
"(",
"node_type",
"=",
"node_type",
")",
"for",
"expansion_string",
"in",
"expansion_list",
":",
"expansion_node",
"=",
"type_node",
"for",
"expansion_name",
"in",
"expansion_string",
".",
"split",
"(",
"'.'",
")",
":",
"child_expansion_node",
"=",
"expansion_node",
".",
"get_expansion",
"(",
"expansion_name",
")",
"if",
"not",
"child_expansion_node",
":",
"type_expansion",
"=",
"self",
".",
"type_expansions",
"[",
"expansion_node",
".",
"type",
"]",
"[",
"expansion_name",
"]",
"type_route",
"=",
"self",
".",
"type_routes",
"[",
"type_expansion",
"[",
"'route'",
"]",
"]",
"if",
"type_expansion",
"[",
"'destination_field'",
"]",
"==",
"type_expansion",
"[",
"'source_field'",
"]",
":",
"raise",
"ValueError",
"(",
"'Expansion configuration destination_field error: '",
"'destination_field can not have the same name as the source_field: '",
"'{}'",
".",
"format",
"(",
"type_expansion",
"[",
"'source_field'",
"]",
")",
")",
"child_expansion_node",
"=",
"ExpansionNode",
"(",
"node_type",
"=",
"type_expansion",
"[",
"'type'",
"]",
",",
"name",
"=",
"expansion_name",
",",
"source_field",
"=",
"type_expansion",
"[",
"'source_field'",
"]",
",",
"destination_field",
"=",
"type_expansion",
"[",
"'destination_field'",
"]",
",",
"service",
"=",
"type_route",
"[",
"'service'",
"]",
",",
"action",
"=",
"type_route",
"[",
"'action'",
"]",
",",
"request_field",
"=",
"type_route",
"[",
"'request_field'",
"]",
",",
"response_field",
"=",
"type_route",
"[",
"'response_field'",
"]",
",",
"raise_action_errors",
"=",
"type_expansion",
".",
"get",
"(",
"'raise_action_errors'",
",",
"False",
")",
",",
")",
"expansion_node",
".",
"add_expansion",
"(",
"child_expansion_node",
")",
"expansion_node",
"=",
"child_expansion_node",
"trees",
".",
"append",
"(",
"type_node",
")",
"return",
"trees"
] |
Convert an expansion dictionary to a list of expansion trees.
:param expansion_dict: An expansion dictionary (see below)
:type expansion_dict: dict
:return: a list of expansion trees (`TreeNode` instances).
:rtype: list[TreeNode]
Expansion Dictionary Format:
{
"<type>": ["<expansion string>", ...],
...
}
<type> is the type of object to expand.
<expansion string> is a string with the following format:
<expansion string> => <expansion name>[.<expansion string>]
|
[
"Convert",
"an",
"expansion",
"dictionary",
"to",
"a",
"list",
"of",
"expansion",
"trees",
"."
] |
9c052cae2397d13de3df8ae2c790846a70b53f18
|
https://github.com/eventbrite/pysoa/blob/9c052cae2397d13de3df8ae2c790846a70b53f18/pysoa/client/expander.py#L344-L401
|
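A hypothetical configuration shape for type_expansions and type_routes, inferred only from the keys this method reads (not taken from pysoa documentation), together with an expansion dictionary it could convert.

type_expansions = {
    'order': {
        'buyer': {
            'type': 'user',                # type of the expanded object
            'route': 'user_route',         # key into type_routes below
            'source_field': 'buyer_id',    # must differ from destination_field
            'destination_field': 'buyer',
            'raise_action_errors': True,
        },
    },
}
type_routes = {
    'user_route': {
        'service': 'user',
        'action': 'get_users',
        'request_field': 'ids',
        'response_field': 'users',
    },
}
expansion_dict = {'order': ['buyer']}      # "expand the buyer on every order"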
16,806
|
eventbrite/pysoa
|
pysoa/client/expander.py
|
ExpansionConverter.trees_to_dict
|
def trees_to_dict(trees_list):
"""
Convert a list of `TreeNode`s to an expansion dictionary.
:param trees_list: A list of `TreeNode` instances
:type trees_list: list[TreeNode]
:return: An expansion dictionary that represents the expansions detailed in the provided expansions tree nodes
:rtype: dict[union[str, unicode]]
"""
result = {}
for tree in trees_list:
result.update(tree.to_dict())
return result
|
python
|
def trees_to_dict(trees_list):
"""
Convert a list of `TreeNode`s to an expansion dictionary.
:param trees_list: A list of `TreeNode` instances
:type trees_list: list[TreeNode]
:return: An expansion dictionary that represents the expansions detailed in the provided expansions tree nodes
:rtype: dict[union[str, unicode]]
"""
result = {}
for tree in trees_list:
result.update(tree.to_dict())
return result
|
[
"def",
"trees_to_dict",
"(",
"trees_list",
")",
":",
"result",
"=",
"{",
"}",
"for",
"tree",
"in",
"trees_list",
":",
"result",
".",
"update",
"(",
"tree",
".",
"to_dict",
"(",
")",
")",
"return",
"result"
] |
Convert a list of `TreeNode`s to an expansion dictionary.
:param trees_list: A list of `TreeNode` instances
:type trees_list: list[TreeNode]
:return: An expansion dictionary that represents the expansions detailed in the provided expansions tree nodes
:rtype: dict[union[str, unicode]]
|
[
"Convert",
"a",
"list",
"of",
"TreeNode",
"s",
"to",
"an",
"expansion",
"dictionary",
"."
] |
9c052cae2397d13de3df8ae2c790846a70b53f18
|
https://github.com/eventbrite/pysoa/blob/9c052cae2397d13de3df8ae2c790846a70b53f18/pysoa/client/expander.py#L404-L419
|
16,807
|
eventbrite/pysoa
|
pysoa/common/transport/redis_gateway/backend/sentinel.py
|
SentinelRedisClient._get_service_names
|
def _get_service_names(self):
"""
Get a list of service names from Sentinel. Tries Sentinel hosts until one succeeds; if none succeed,
raises a ConnectionError.
:return: the list of service names from Sentinel.
"""
master_info = None
connection_errors = []
for sentinel in self._sentinel.sentinels:
# Unfortunately, redis.sentinel.Sentinel does not support sentinel_masters, so we have to step
# through all of its connections manually
try:
master_info = sentinel.sentinel_masters()
break
except (redis.ConnectionError, redis.TimeoutError) as e:
connection_errors.append('Failed to connect to {} due to error: "{}".'.format(sentinel, e))
continue
if master_info is None:
raise redis.ConnectionError(
'Could not get master info from Sentinel\n{}:'.format('\n'.join(connection_errors))
)
return list(master_info.keys())
|
python
|
def _get_service_names(self):
"""
Get a list of service names from Sentinel. Tries Sentinel hosts until one succeeds; if none succeed,
raises a ConnectionError.
:return: the list of service names from Sentinel.
"""
master_info = None
connection_errors = []
for sentinel in self._sentinel.sentinels:
# Unfortunately, redis.sentinel.Sentinel does not support sentinel_masters, so we have to step
# through all of its connections manually
try:
master_info = sentinel.sentinel_masters()
break
except (redis.ConnectionError, redis.TimeoutError) as e:
connection_errors.append('Failed to connect to {} due to error: "{}".'.format(sentinel, e))
continue
if master_info is None:
raise redis.ConnectionError(
'Could not get master info from Sentinel\n{}:'.format('\n'.join(connection_errors))
)
return list(master_info.keys())
|
[
"def",
"_get_service_names",
"(",
"self",
")",
":",
"master_info",
"=",
"None",
"connection_errors",
"=",
"[",
"]",
"for",
"sentinel",
"in",
"self",
".",
"_sentinel",
".",
"sentinels",
":",
"# Unfortunately, redis.sentinel.Sentinel does not support sentinel_masters, so we have to step",
"# through all of its connections manually",
"try",
":",
"master_info",
"=",
"sentinel",
".",
"sentinel_masters",
"(",
")",
"break",
"except",
"(",
"redis",
".",
"ConnectionError",
",",
"redis",
".",
"TimeoutError",
")",
"as",
"e",
":",
"connection_errors",
".",
"append",
"(",
"'Failed to connect to {} due to error: \"{}\".'",
".",
"format",
"(",
"sentinel",
",",
"e",
")",
")",
"continue",
"if",
"master_info",
"is",
"None",
":",
"raise",
"redis",
".",
"ConnectionError",
"(",
"'Could not get master info from Sentinel\\n{}:'",
".",
"format",
"(",
"'\\n'",
".",
"join",
"(",
"connection_errors",
")",
")",
")",
"return",
"list",
"(",
"master_info",
".",
"keys",
"(",
")",
")"
] |
Get a list of service names from Sentinel. Tries Sentinel hosts until one succeeds; if none succeed,
raises a ConnectionError.
:return: the list of service names from Sentinel.
|
[
"Get",
"a",
"list",
"of",
"service",
"names",
"from",
"Sentinel",
".",
"Tries",
"Sentinel",
"hosts",
"until",
"one",
"succeeds",
";",
"if",
"none",
"succeed",
"raises",
"a",
"ConnectionError",
"."
] |
9c052cae2397d13de3df8ae2c790846a70b53f18
|
https://github.com/eventbrite/pysoa/blob/9c052cae2397d13de3df8ae2c790846a70b53f18/pysoa/common/transport/redis_gateway/backend/sentinel.py#L92-L114
|
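A hypothetical standalone sketch of the same Sentinel call, assuming redis-py is installed and a Sentinel is listening on the default local port; sentinel.sentinels is the list of per-host clients the loop above iterates.

from redis.sentinel import Sentinel

sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.5)
for conn in sentinel.sentinels:          # one redis.Redis client per Sentinel host
    masters = conn.sentinel_masters()    # {master_name: {...master info...}, ...}
    print(sorted(masters))               # the "service names" returned above
    break                                # the first host that answers is enough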
16,808
|
Yelp/venv-update
|
venv_update.py
|
timid_relpath
|
def timid_relpath(arg):
"""convert an argument to a relative path, carefully"""
# TODO-TEST: unit tests
from os.path import isabs, relpath, sep
if isabs(arg):
result = relpath(arg)
if result.count(sep) + 1 < arg.count(sep):
return result
return arg
|
python
|
def timid_relpath(arg):
"""convert an argument to a relative path, carefully"""
# TODO-TEST: unit tests
from os.path import isabs, relpath, sep
if isabs(arg):
result = relpath(arg)
if result.count(sep) + 1 < arg.count(sep):
return result
return arg
|
[
"def",
"timid_relpath",
"(",
"arg",
")",
":",
"# TODO-TEST: unit tests",
"from",
"os",
".",
"path",
"import",
"isabs",
",",
"relpath",
",",
"sep",
"if",
"isabs",
"(",
"arg",
")",
":",
"result",
"=",
"relpath",
"(",
"arg",
")",
"if",
"result",
".",
"count",
"(",
"sep",
")",
"+",
"1",
"<",
"arg",
".",
"count",
"(",
"sep",
")",
":",
"return",
"result",
"return",
"arg"
] |
convert an argument to a relative path, carefully
|
[
"convert",
"an",
"argument",
"to",
"a",
"relative",
"path",
"carefully"
] |
6feae7ab09ee870c582b97443cfa8f0dc8626ba7
|
https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/venv_update.py#L104-L113
|
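An illustration of the separator-count heuristic, using a made-up path; the outcome depends on the current working directory.

import os.path

absolute = '/home/user/project/src/app.py'
relative = os.path.relpath(absolute)    # 'src/app.py' when run from the project root
# Keep the relative form only if it uses strictly fewer separators, as above.
keep_relative = relative.count(os.path.sep) + 1 < absolute.count(os.path.sep)
print(relative if keep_relative else absolute)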
16,809
|
Yelp/venv-update
|
venv_update.py
|
ensure_virtualenv
|
def ensure_virtualenv(args, return_values):
"""Ensure we have a valid virtualenv."""
def adjust_options(options, args):
# TODO-TEST: proper error message with no arguments
venv_path = return_values.venv_path = args[0]
if venv_path == DEFAULT_VIRTUALENV_PATH or options.prompt == '<dirname>':
from os.path import abspath, basename, dirname
options.prompt = '(%s)' % basename(dirname(abspath(venv_path)))
# end of option munging.
# there are two python interpreters involved here:
# 1) the interpreter we're instructing virtualenv to copy
if options.python is None:
source_python = None
else:
source_python = virtualenv.resolve_interpreter(options.python)
# 2) the interpreter virtualenv will create
destination_python = venv_python(venv_path)
if exists(destination_python):
reason = invalid_virtualenv_reason(venv_path, source_python, destination_python, options)
if reason:
info('Removing invalidated virtualenv. (%s)' % reason)
run(('rm', '-rf', venv_path))
else:
info('Keeping valid virtualenv from previous run.')
raise SystemExit(0) # looks good! we're done here.
# this is actually a documented extension point:
# http://virtualenv.readthedocs.org/en/latest/reference.html#adjust_options
import virtualenv
virtualenv.adjust_options = adjust_options
from sys import argv
argv[:] = ('virtualenv',) + args
info(colorize(argv))
raise_on_failure(virtualenv.main)
# There might not be a venv_path if doing something like "venv= --version"
# and not actually asking virtualenv to make a venv.
if return_values.venv_path is not None:
run(('rm', '-rf', join(return_values.venv_path, 'local')))
|
python
|
def ensure_virtualenv(args, return_values):
"""Ensure we have a valid virtualenv."""
def adjust_options(options, args):
# TODO-TEST: proper error message with no arguments
venv_path = return_values.venv_path = args[0]
if venv_path == DEFAULT_VIRTUALENV_PATH or options.prompt == '<dirname>':
from os.path import abspath, basename, dirname
options.prompt = '(%s)' % basename(dirname(abspath(venv_path)))
# end of option munging.
# there are two python interpreters involved here:
# 1) the interpreter we're instructing virtualenv to copy
if options.python is None:
source_python = None
else:
source_python = virtualenv.resolve_interpreter(options.python)
# 2) the interpreter virtualenv will create
destination_python = venv_python(venv_path)
if exists(destination_python):
reason = invalid_virtualenv_reason(venv_path, source_python, destination_python, options)
if reason:
info('Removing invalidated virtualenv. (%s)' % reason)
run(('rm', '-rf', venv_path))
else:
info('Keeping valid virtualenv from previous run.')
raise SystemExit(0) # looks good! we're done here.
# this is actually a documented extension point:
# http://virtualenv.readthedocs.org/en/latest/reference.html#adjust_options
import virtualenv
virtualenv.adjust_options = adjust_options
from sys import argv
argv[:] = ('virtualenv',) + args
info(colorize(argv))
raise_on_failure(virtualenv.main)
# There might not be a venv_path if doing something like "venv= --version"
# and not actually asking virtualenv to make a venv.
if return_values.venv_path is not None:
run(('rm', '-rf', join(return_values.venv_path, 'local')))
|
[
"def",
"ensure_virtualenv",
"(",
"args",
",",
"return_values",
")",
":",
"def",
"adjust_options",
"(",
"options",
",",
"args",
")",
":",
"# TODO-TEST: proper error message with no arguments",
"venv_path",
"=",
"return_values",
".",
"venv_path",
"=",
"args",
"[",
"0",
"]",
"if",
"venv_path",
"==",
"DEFAULT_VIRTUALENV_PATH",
"or",
"options",
".",
"prompt",
"==",
"'<dirname>'",
":",
"from",
"os",
".",
"path",
"import",
"abspath",
",",
"basename",
",",
"dirname",
"options",
".",
"prompt",
"=",
"'(%s)'",
"%",
"basename",
"(",
"dirname",
"(",
"abspath",
"(",
"venv_path",
")",
")",
")",
"# end of option munging.",
"# there are two python interpreters involved here:",
"# 1) the interpreter we're instructing virtualenv to copy",
"if",
"options",
".",
"python",
"is",
"None",
":",
"source_python",
"=",
"None",
"else",
":",
"source_python",
"=",
"virtualenv",
".",
"resolve_interpreter",
"(",
"options",
".",
"python",
")",
"# 2) the interpreter virtualenv will create",
"destination_python",
"=",
"venv_python",
"(",
"venv_path",
")",
"if",
"exists",
"(",
"destination_python",
")",
":",
"reason",
"=",
"invalid_virtualenv_reason",
"(",
"venv_path",
",",
"source_python",
",",
"destination_python",
",",
"options",
")",
"if",
"reason",
":",
"info",
"(",
"'Removing invalidated virtualenv. (%s)'",
"%",
"reason",
")",
"run",
"(",
"(",
"'rm'",
",",
"'-rf'",
",",
"venv_path",
")",
")",
"else",
":",
"info",
"(",
"'Keeping valid virtualenv from previous run.'",
")",
"raise",
"SystemExit",
"(",
"0",
")",
"# looks good! we're done here.",
"# this is actually a documented extension point:",
"# http://virtualenv.readthedocs.org/en/latest/reference.html#adjust_options",
"import",
"virtualenv",
"virtualenv",
".",
"adjust_options",
"=",
"adjust_options",
"from",
"sys",
"import",
"argv",
"argv",
"[",
":",
"]",
"=",
"(",
"'virtualenv'",
",",
")",
"+",
"args",
"info",
"(",
"colorize",
"(",
"argv",
")",
")",
"raise_on_failure",
"(",
"virtualenv",
".",
"main",
")",
"# There might not be a venv_path if doing something like \"venv= --version\"",
"# and not actually asking virtualenv to make a venv.",
"if",
"return_values",
".",
"venv_path",
"is",
"not",
"None",
":",
"run",
"(",
"(",
"'rm'",
",",
"'-rf'",
",",
"join",
"(",
"return_values",
".",
"venv_path",
",",
"'local'",
")",
")",
")"
] |
Ensure we have a valid virtualenv.
|
[
"Ensure",
"we",
"have",
"a",
"valid",
"virtualenv",
"."
] |
6feae7ab09ee870c582b97443cfa8f0dc8626ba7
|
https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/venv_update.py#L272-L313
|
16,810
|
Yelp/venv-update
|
venv_update.py
|
touch
|
def touch(filename, timestamp):
"""set the mtime of a file"""
if timestamp is not None:
timestamp = (timestamp, timestamp) # atime, mtime
from os import utime
utime(filename, timestamp)
|
python
|
def touch(filename, timestamp):
"""set the mtime of a file"""
if timestamp is not None:
timestamp = (timestamp, timestamp) # atime, mtime
from os import utime
utime(filename, timestamp)
|
[
"def",
"touch",
"(",
"filename",
",",
"timestamp",
")",
":",
"if",
"timestamp",
"is",
"not",
"None",
":",
"timestamp",
"=",
"(",
"timestamp",
",",
"timestamp",
")",
"# atime, mtime",
"from",
"os",
"import",
"utime",
"utime",
"(",
"filename",
",",
"timestamp",
")"
] |
set the mtime of a file
|
[
"set",
"the",
"mtime",
"of",
"a",
"file"
] |
6feae7ab09ee870c582b97443cfa8f0dc8626ba7
|
https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/venv_update.py#L328-L334
|
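The equivalent standalone calls, as a small usage sketch; the file name is made up.

import os
import time

open('example.txt', 'w').close()                      # ensure the demo file exists
an_hour_ago = time.time() - 3600
os.utime('example.txt', (an_hour_ago, an_hour_ago))   # (atime, mtime), the tuple touch() builds
os.utime('example.txt', None)                         # the timestamp=None case: set both to "now"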
16,811
|
Yelp/venv-update
|
venv_update.py
|
pip_faster
|
def pip_faster(venv_path, pip_command, install, bootstrap_deps):
"""install and run pip-faster"""
# activate the virtualenv
execfile_(venv_executable(venv_path, 'activate_this.py'))
# disable a useless warning
# FIXME: ensure a "true SSLContext" is available
from os import environ
environ['PIP_DISABLE_PIP_VERSION_CHECK'] = '1'
# we always have to run the bootstrap, because the presence of an
# executable doesn't imply the right version. pip is able to validate the
# version in the fastpath case quickly anyway.
run(('pip', 'install') + bootstrap_deps)
run(pip_command + install)
|
python
|
def pip_faster(venv_path, pip_command, install, bootstrap_deps):
"""install and run pip-faster"""
# activate the virtualenv
execfile_(venv_executable(venv_path, 'activate_this.py'))
# disable a useless warning
# FIXME: ensure a "true SSLContext" is available
from os import environ
environ['PIP_DISABLE_PIP_VERSION_CHECK'] = '1'
# we always have to run the bootstrap, because the presence of an
# executable doesn't imply the right version. pip is able to validate the
# version in the fastpath case quickly anyway.
run(('pip', 'install') + bootstrap_deps)
run(pip_command + install)
|
[
"def",
"pip_faster",
"(",
"venv_path",
",",
"pip_command",
",",
"install",
",",
"bootstrap_deps",
")",
":",
"# activate the virtualenv",
"execfile_",
"(",
"venv_executable",
"(",
"venv_path",
",",
"'activate_this.py'",
")",
")",
"# disable a useless warning",
"# FIXME: ensure a \"true SSLContext\" is available",
"from",
"os",
"import",
"environ",
"environ",
"[",
"'PIP_DISABLE_PIP_VERSION_CHECK'",
"]",
"=",
"'1'",
"# we always have to run the bootstrap, because the presense of an",
"# executable doesn't imply the right version. pip is able to validate the",
"# version in the fastpath case quickly anyway.",
"run",
"(",
"(",
"'pip'",
",",
"'install'",
")",
"+",
"bootstrap_deps",
")",
"run",
"(",
"pip_command",
"+",
"install",
")"
] |
install and run pip-faster
|
[
"install",
"and",
"run",
"pip",
"-",
"faster"
] |
6feae7ab09ee870c582b97443cfa8f0dc8626ba7
|
https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/venv_update.py#L408-L423
|
16,812
|
Yelp/venv-update
|
venv_update.py
|
raise_on_failure
|
def raise_on_failure(mainfunc):
"""raise if and only if mainfunc fails"""
try:
errors = mainfunc()
if errors:
exit(errors)
except CalledProcessError as error:
exit(error.returncode)
except SystemExit as error:
if error.code:
raise
except KeyboardInterrupt: # I don't plan to test-cover this. :pragma:nocover:
exit(1)
|
python
|
def raise_on_failure(mainfunc):
"""raise if and only if mainfunc fails"""
try:
errors = mainfunc()
if errors:
exit(errors)
except CalledProcessError as error:
exit(error.returncode)
except SystemExit as error:
if error.code:
raise
except KeyboardInterrupt: # I don't plan to test-cover this. :pragma:nocover:
exit(1)
|
[
"def",
"raise_on_failure",
"(",
"mainfunc",
")",
":",
"try",
":",
"errors",
"=",
"mainfunc",
"(",
")",
"if",
"errors",
":",
"exit",
"(",
"errors",
")",
"except",
"CalledProcessError",
"as",
"error",
":",
"exit",
"(",
"error",
".",
"returncode",
")",
"except",
"SystemExit",
"as",
"error",
":",
"if",
"error",
".",
"code",
":",
"raise",
"except",
"KeyboardInterrupt",
":",
"# I don't plan to test-cover this. :pragma:nocover:",
"exit",
"(",
"1",
")"
] |
raise if and only if mainfunc fails
|
[
"raise",
"if",
"and",
"only",
"if",
"mainfunc",
"fails"
] |
6feae7ab09ee870c582b97443cfa8f0dc8626ba7
|
https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/venv_update.py#L426-L438
|
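A hypothetical usage sketch, assuming raise_on_failure can be imported from venv_update.py; the wrapped main follows the common convention of returning a falsy value on success and an error message or nonzero status on failure.

from venv_update import raise_on_failure   # assumes venv_update.py is importable

def my_main():
    # return 'something went wrong' or a nonzero int to make the wrapper exit
    return 0

raise_on_failure(my_main)                   # exits only if my_main() signals failure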
16,813
|
Yelp/venv-update
|
pip_faster.py
|
cache_installed_wheels
|
def cache_installed_wheels(index_url, installed_packages):
"""After installation, pip tells us what it installed and from where.
We build a structure that looks like
.cache/pip-faster/wheelhouse/$index_url/$wheel
"""
for installed_package in installed_packages:
if not _can_be_cached(installed_package):
continue
_store_wheel_in_cache(installed_package.link.path, index_url)
|
python
|
def cache_installed_wheels(index_url, installed_packages):
"""After installation, pip tells us what it installed and from where.
We build a structure that looks like
.cache/pip-faster/wheelhouse/$index_url/$wheel
"""
for installed_package in installed_packages:
if not _can_be_cached(installed_package):
continue
_store_wheel_in_cache(installed_package.link.path, index_url)
|
[
"def",
"cache_installed_wheels",
"(",
"index_url",
",",
"installed_packages",
")",
":",
"for",
"installed_package",
"in",
"installed_packages",
":",
"if",
"not",
"_can_be_cached",
"(",
"installed_package",
")",
":",
"continue",
"_store_wheel_in_cache",
"(",
"installed_package",
".",
"link",
".",
"path",
",",
"index_url",
")"
] |
After installation, pip tells us what it installed and from where.
We build a structure that looks like
.cache/pip-faster/wheelhouse/$index_url/$wheel
|
[
"After",
"installation",
"pip",
"tells",
"us",
"what",
"it",
"installed",
"and",
"from",
"where",
"."
] |
6feae7ab09ee870c582b97443cfa8f0dc8626ba7
|
https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L171-L181
|
16,814
|
Yelp/venv-update
|
pip_faster.py
|
pip
|
def pip(args):
"""Run pip, in-process."""
from sys import stdout
stdout.write(colorize(('pip',) + args))
stdout.write('\n')
stdout.flush()
return pipmodule._internal.main(list(args))
|
python
|
def pip(args):
"""Run pip, in-process."""
from sys import stdout
stdout.write(colorize(('pip',) + args))
stdout.write('\n')
stdout.flush()
return pipmodule._internal.main(list(args))
|
[
"def",
"pip",
"(",
"args",
")",
":",
"from",
"sys",
"import",
"stdout",
"stdout",
".",
"write",
"(",
"colorize",
"(",
"(",
"'pip'",
",",
")",
"+",
"args",
")",
")",
"stdout",
".",
"write",
"(",
"'\\n'",
")",
"stdout",
".",
"flush",
"(",
")",
"return",
"pipmodule",
".",
"_internal",
".",
"main",
"(",
"list",
"(",
"args",
")",
")"
] |
Run pip, in-process.
|
[
"Run",
"pip",
"in",
"-",
"process",
"."
] |
6feae7ab09ee870c582b97443cfa8f0dc8626ba7
|
https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L204-L211
|
16,815
|
Yelp/venv-update
|
pip_faster.py
|
dist_to_req
|
def dist_to_req(dist):
"""Make a pip.FrozenRequirement from a pkg_resources distribution object"""
try: # :pragma:nocover: (pip>=10)
from pip._internal.operations.freeze import FrozenRequirement
except ImportError: # :pragma:nocover: (pip<10)
from pip import FrozenRequirement
# normalize the casing, dashes in the req name
orig_name, dist.project_name = dist.project_name, dist.key
result = FrozenRequirement.from_dist(dist, [])
# put things back the way we found it.
dist.project_name = orig_name
return result
|
python
|
def dist_to_req(dist):
"""Make a pip.FrozenRequirement from a pkg_resources distribution object"""
try: # :pragma:nocover: (pip>=10)
from pip._internal.operations.freeze import FrozenRequirement
except ImportError: # :pragma:nocover: (pip<10)
from pip import FrozenRequirement
# normalize the casing, dashes in the req name
orig_name, dist.project_name = dist.project_name, dist.key
result = FrozenRequirement.from_dist(dist, [])
# put things back the way we found it.
dist.project_name = orig_name
return result
|
[
"def",
"dist_to_req",
"(",
"dist",
")",
":",
"try",
":",
"# :pragma:nocover: (pip>=10)",
"from",
"pip",
".",
"_internal",
".",
"operations",
".",
"freeze",
"import",
"FrozenRequirement",
"except",
"ImportError",
":",
"# :pragma:nocover: (pip<10)",
"from",
"pip",
"import",
"FrozenRequirement",
"# normalize the casing, dashes in the req name",
"orig_name",
",",
"dist",
".",
"project_name",
"=",
"dist",
".",
"project_name",
",",
"dist",
".",
"key",
"result",
"=",
"FrozenRequirement",
".",
"from_dist",
"(",
"dist",
",",
"[",
"]",
")",
"# put things back the way we found it.",
"dist",
".",
"project_name",
"=",
"orig_name",
"return",
"result"
] |
Make a pip.FrozenRequirement from a pkg_resources distribution object
|
[
"Make",
"a",
"pip",
".",
"FrozenRequirement",
"from",
"a",
"pkg_resources",
"distribution",
"object"
] |
6feae7ab09ee870c582b97443cfa8f0dc8626ba7
|
https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L214-L227
|
16,816
|
Yelp/venv-update
|
pip_faster.py
|
req_cycle
|
def req_cycle(req):
"""is this requirement cyclic?"""
cls = req.__class__
seen = {req.name}
while isinstance(req.comes_from, cls):
req = req.comes_from
if req.name in seen:
return True
else:
seen.add(req.name)
return False
|
python
|
def req_cycle(req):
"""is this requirement cyclic?"""
cls = req.__class__
seen = {req.name}
while isinstance(req.comes_from, cls):
req = req.comes_from
if req.name in seen:
return True
else:
seen.add(req.name)
return False
|
[
"def",
"req_cycle",
"(",
"req",
")",
":",
"cls",
"=",
"req",
".",
"__class__",
"seen",
"=",
"{",
"req",
".",
"name",
"}",
"while",
"isinstance",
"(",
"req",
".",
"comes_from",
",",
"cls",
")",
":",
"req",
"=",
"req",
".",
"comes_from",
"if",
"req",
".",
"name",
"in",
"seen",
":",
"return",
"True",
"else",
":",
"seen",
".",
"add",
"(",
"req",
".",
"name",
")",
"return",
"False"
] |
is this requirement cyclic?
|
[
"is",
"this",
"requirement",
"cyclic?"
] |
6feae7ab09ee870c582b97443cfa8f0dc8626ba7
|
https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L283-L293
|
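A hypothetical illustration, assuming pip_faster.req_cycle is importable and using a minimal stand-in for pip's requirement objects (just a name and a comes_from link).

from pip_faster import req_cycle

class FakeReq(object):
    def __init__(self, name, comes_from=None):
        self.name = name
        self.comes_from = comes_from

inner = FakeReq('a')                      # bottom of the comes_from chain
middle = FakeReq('b', comes_from=inner)
outer = FakeReq('a', comes_from=middle)   # 'a' appears twice along the chain

assert req_cycle(outer) is True           # a <- b <- a is cyclic
assert req_cycle(FakeReq('c', comes_from=FakeReq('d'))) is False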
16,817
|
Yelp/venv-update
|
pip_faster.py
|
pretty_req
|
def pretty_req(req):
"""
return a copy of a pip requirement that is a bit more readable,
at the expense of removing some of its data
"""
from copy import copy
req = copy(req)
req.link = None
req.satisfied_by = None
return req
|
python
|
def pretty_req(req):
"""
return a copy of a pip requirement that is a bit more readable,
at the expense of removing some of its data
"""
from copy import copy
req = copy(req)
req.link = None
req.satisfied_by = None
return req
|
[
"def",
"pretty_req",
"(",
"req",
")",
":",
"from",
"copy",
"import",
"copy",
"req",
"=",
"copy",
"(",
"req",
")",
"req",
".",
"link",
"=",
"None",
"req",
".",
"satisfied_by",
"=",
"None",
"return",
"req"
] |
return a copy of a pip requirement that is a bit more readable,
at the expense of removing some of its data
|
[
"return",
"a",
"copy",
"of",
"a",
"pip",
"requirement",
"that",
"is",
"a",
"bit",
"more",
"readable",
"at",
"the",
"expense",
"of",
"removing",
"some",
"of",
"its",
"data"
] |
6feae7ab09ee870c582b97443cfa8f0dc8626ba7
|
https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L296-L305
|
16,818
|
Yelp/venv-update
|
pip_faster.py
|
trace_requirements
|
def trace_requirements(requirements):
"""given an iterable of pip InstallRequirements,
return the set of required packages, given their transitive requirements.
"""
requirements = tuple(pretty_req(r) for r in requirements)
working_set = fresh_working_set()
# breadth-first traversal:
from collections import deque
queue = deque(requirements)
queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
errors = []
result = []
while queue:
req = queue.popleft()
logger.debug('tracing: %s', req)
try:
dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))
except pkg_resources.VersionConflict as conflict:
dist = conflict.args[0]
errors.append('Error: version conflict: {} ({}) <-> {}'.format(
dist, timid_relpath(dist.location), req
))
assert dist is not None, 'Should be unreachable in pip8+'
result.append(dist_to_req(dist))
# TODO: pip does no validation of extras. should we?
extras = [extra for extra in req.extras if extra in dist.extras]
for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):
sub_req = InstallRequirement(sub_req, req)
if req_cycle(sub_req):
logger.warning('Circular dependency! %s', sub_req)
continue
elif sub_req.req in queued:
logger.debug('already queued: %s', sub_req)
continue
else:
logger.debug('adding sub-requirement %s', sub_req)
queue.append(sub_req)
queued.add(sub_req.req)
if errors:
raise InstallationError('\n'.join(errors))
return result
|
python
|
def trace_requirements(requirements):
"""given an iterable of pip InstallRequirements,
return the set of required packages, given their transitive requirements.
"""
requirements = tuple(pretty_req(r) for r in requirements)
working_set = fresh_working_set()
# breadth-first traversal:
from collections import deque
queue = deque(requirements)
queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
errors = []
result = []
while queue:
req = queue.popleft()
logger.debug('tracing: %s', req)
try:
dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))
except pkg_resources.VersionConflict as conflict:
dist = conflict.args[0]
errors.append('Error: version conflict: {} ({}) <-> {}'.format(
dist, timid_relpath(dist.location), req
))
assert dist is not None, 'Should be unreachable in pip8+'
result.append(dist_to_req(dist))
# TODO: pip does no validation of extras. should we?
extras = [extra for extra in req.extras if extra in dist.extras]
for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):
sub_req = InstallRequirement(sub_req, req)
if req_cycle(sub_req):
logger.warning('Circular dependency! %s', sub_req)
continue
elif sub_req.req in queued:
logger.debug('already queued: %s', sub_req)
continue
else:
logger.debug('adding sub-requirement %s', sub_req)
queue.append(sub_req)
queued.add(sub_req.req)
if errors:
raise InstallationError('\n'.join(errors))
return result
|
[
"def",
"trace_requirements",
"(",
"requirements",
")",
":",
"requirements",
"=",
"tuple",
"(",
"pretty_req",
"(",
"r",
")",
"for",
"r",
"in",
"requirements",
")",
"working_set",
"=",
"fresh_working_set",
"(",
")",
"# breadth-first traversal:",
"from",
"collections",
"import",
"deque",
"queue",
"=",
"deque",
"(",
"requirements",
")",
"queued",
"=",
"{",
"_package_req_to_pkg_resources_req",
"(",
"req",
".",
"req",
")",
"for",
"req",
"in",
"queue",
"}",
"errors",
"=",
"[",
"]",
"result",
"=",
"[",
"]",
"while",
"queue",
":",
"req",
"=",
"queue",
".",
"popleft",
"(",
")",
"logger",
".",
"debug",
"(",
"'tracing: %s'",
",",
"req",
")",
"try",
":",
"dist",
"=",
"working_set",
".",
"find_normalized",
"(",
"_package_req_to_pkg_resources_req",
"(",
"req",
".",
"req",
")",
")",
"except",
"pkg_resources",
".",
"VersionConflict",
"as",
"conflict",
":",
"dist",
"=",
"conflict",
".",
"args",
"[",
"0",
"]",
"errors",
".",
"append",
"(",
"'Error: version conflict: {} ({}) <-> {}'",
".",
"format",
"(",
"dist",
",",
"timid_relpath",
"(",
"dist",
".",
"location",
")",
",",
"req",
")",
")",
"assert",
"dist",
"is",
"not",
"None",
",",
"'Should be unreachable in pip8+'",
"result",
".",
"append",
"(",
"dist_to_req",
"(",
"dist",
")",
")",
"# TODO: pip does no validation of extras. should we?",
"extras",
"=",
"[",
"extra",
"for",
"extra",
"in",
"req",
".",
"extras",
"if",
"extra",
"in",
"dist",
".",
"extras",
"]",
"for",
"sub_req",
"in",
"sorted",
"(",
"dist",
".",
"requires",
"(",
"extras",
"=",
"extras",
")",
",",
"key",
"=",
"lambda",
"req",
":",
"req",
".",
"key",
")",
":",
"sub_req",
"=",
"InstallRequirement",
"(",
"sub_req",
",",
"req",
")",
"if",
"req_cycle",
"(",
"sub_req",
")",
":",
"logger",
".",
"warning",
"(",
"'Circular dependency! %s'",
",",
"sub_req",
")",
"continue",
"elif",
"sub_req",
".",
"req",
"in",
"queued",
":",
"logger",
".",
"debug",
"(",
"'already queued: %s'",
",",
"sub_req",
")",
"continue",
"else",
":",
"logger",
".",
"debug",
"(",
"'adding sub-requirement %s'",
",",
"sub_req",
")",
"queue",
".",
"append",
"(",
"sub_req",
")",
"queued",
".",
"add",
"(",
"sub_req",
".",
"req",
")",
"if",
"errors",
":",
"raise",
"InstallationError",
"(",
"'\\n'",
".",
"join",
"(",
"errors",
")",
")",
"return",
"result"
] |
given an iterable of pip InstallRequirements,
return the set of required packages, given their transitive requirements.
|
[
"given",
"an",
"iterable",
"of",
"pip",
"InstallRequirements",
"return",
"the",
"set",
"of",
"required",
"packages",
"given",
"their",
"transitive",
"requirements",
"."
] |
6feae7ab09ee870c582b97443cfa8f0dc8626ba7
|
https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L312-L359
|
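The record above shows a breadth-first walk over pip requirement objects, driven by a deque plus a `queued` set so each package is scheduled once. The stripped-down sketch below illustrates the same traversal pattern on a made-up dependency dict; the package names and the `DEPS` table are purely illustrative and do not come from the repository.

from collections import deque

# Toy dependency graph standing in for what dist.requires() would return.
DEPS = {'app': ['web', 'db'], 'web': ['core'], 'db': ['core'], 'core': []}


def trace(roots):
    """Return every package reachable from roots, visiting each name once."""
    queue = deque(roots)
    queued = set(roots)              # mirrors the `queued` set in trace_requirements
    result = []
    while queue:
        name = queue.popleft()
        result.append(name)
        for dep in sorted(DEPS[name]):   # sorted, like the original's sub-requirement loop
            if dep in queued:            # already scheduled: skip it
                continue
            queue.append(dep)
            queued.add(dep)
    return result


print(trace(['app']))                # ['app', 'db', 'web', 'core']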
16,819
|
Yelp/venv-update
|
pip_faster.py
|
patch
|
def patch(attrs, updates):
"""Perform a set of updates to a attribute dictionary, return the original values."""
orig = {}
for attr, value in updates:
orig[attr] = attrs[attr]
attrs[attr] = value
return orig
|
python
|
def patch(attrs, updates):
"""Perform a set of updates to a attribute dictionary, return the original values."""
orig = {}
for attr, value in updates:
orig[attr] = attrs[attr]
attrs[attr] = value
return orig
|
[
"def",
"patch",
"(",
"attrs",
",",
"updates",
")",
":",
"orig",
"=",
"{",
"}",
"for",
"attr",
",",
"value",
"in",
"updates",
":",
"orig",
"[",
"attr",
"]",
"=",
"attrs",
"[",
"attr",
"]",
"attrs",
"[",
"attr",
"]",
"=",
"value",
"return",
"orig"
] |
Perform a set of updates to a attribute dictionary, return the original values.
|
[
"Perform",
"a",
"set",
"of",
"updates",
"to",
"a",
"attribute",
"dictionary",
"return",
"the",
"original",
"values",
"."
] |
6feae7ab09ee870c582b97443cfa8f0dc8626ba7
|
https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L430-L436
|
16,820
|
Yelp/venv-update
|
pip_faster.py
|
patched
|
def patched(attrs, updates):
"""A context in which some attributes temporarily have a modified value."""
orig = patch(attrs, updates.items())
try:
yield orig
finally:
patch(attrs, orig.items())
|
python
|
def patched(attrs, updates):
"""A context in which some attributes temporarily have a modified value."""
orig = patch(attrs, updates.items())
try:
yield orig
finally:
patch(attrs, orig.items())
|
[
"def",
"patched",
"(",
"attrs",
",",
"updates",
")",
":",
"orig",
"=",
"patch",
"(",
"attrs",
",",
"updates",
".",
"items",
"(",
")",
")",
"try",
":",
"yield",
"orig",
"finally",
":",
"patch",
"(",
"attrs",
",",
"orig",
".",
"items",
"(",
")",
")"
] |
A context in which some attributes temporarily have a modified value.
|
[
"A",
"context",
"in",
"which",
"some",
"attributes",
"temporarily",
"have",
"a",
"modified",
"value",
"."
] |
6feae7ab09ee870c582b97443cfa8f0dc8626ba7
|
https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L440-L446
|
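Taken together, the two records above form a tiny monkeypatching helper: `patch` swaps values in a namespace dict and returns the originals, and `patched` (presumably wrapped with `contextlib.contextmanager` in the source file) restores them on exit. The self-contained sketch below restates both helpers and shows one way they could be used; the `json.dumps` override is an invented example, not something the repository does.

from contextlib import contextmanager


def patch(attrs, updates):
    """Apply (attr, value) updates to a dict, returning the original values."""
    orig = {}
    for attr, value in updates:
        orig[attr] = attrs[attr]
        attrs[attr] = value
    return orig


@contextmanager
def patched(attrs, updates):
    """Temporarily apply updates to a dict, restoring the originals on exit."""
    orig = patch(attrs, updates.items())
    try:
        yield orig
    finally:
        patch(attrs, orig.items())


import json

with patched(vars(json), {'dumps': repr}):
    print(json.dumps({'a': 1}))      # {'a': 1}  -- repr(), not real JSON
print(json.dumps({'a': 1}))          # {"a": 1}  -- original restored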
16,821
|
Yelp/venv-update
|
pip_faster.py
|
pipfaster_packagefinder
|
def pipfaster_packagefinder():
"""Provide a short-circuited search when the requirement is pinned and appears on disk.
Suggested upstream at: https://github.com/pypa/pip/pull/2114
"""
# A poor man's dependency injection: monkeypatch :(
try: # :pragma:nocover: pip>=18.1
from pip._internal.cli import base_command
except ImportError: # :pragma:nocover: pip<18.1
from pip._internal import basecommand as base_command
return patched(vars(base_command), {'PackageFinder': FasterPackageFinder})
|
python
|
def pipfaster_packagefinder():
"""Provide a short-circuited search when the requirement is pinned and appears on disk.
Suggested upstream at: https://github.com/pypa/pip/pull/2114
"""
# A poor man's dependency injection: monkeypatch :(
try: # :pragma:nocover: pip>=18.1
from pip._internal.cli import base_command
except ImportError: # :pragma:nocover: pip<18.1
from pip._internal import basecommand as base_command
return patched(vars(base_command), {'PackageFinder': FasterPackageFinder})
|
[
"def",
"pipfaster_packagefinder",
"(",
")",
":",
"# A poor man's dependency injection: monkeypatch :(",
"try",
":",
"# :pragma:nocover: pip>=18.1",
"from",
"pip",
".",
"_internal",
".",
"cli",
"import",
"base_command",
"except",
"ImportError",
":",
"# :pragma:nocover: pip<18.1",
"from",
"pip",
".",
"_internal",
"import",
"basecommand",
"as",
"base_command",
"return",
"patched",
"(",
"vars",
"(",
"base_command",
")",
",",
"{",
"'PackageFinder'",
":",
"FasterPackageFinder",
"}",
")"
] |
Provide a short-circuited search when the requirement is pinned and appears on disk.
Suggested upstream at: https://github.com/pypa/pip/pull/2114
|
[
"Provide",
"a",
"short",
"-",
"circuited",
"search",
"when",
"the",
"requirement",
"is",
"pinned",
"and",
"appears",
"on",
"disk",
"."
] |
6feae7ab09ee870c582b97443cfa8f0dc8626ba7
|
https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L454-L464
|
16,822
|
Yelp/venv-update
|
pip_faster.py
|
pipfaster_download_cacher
|
def pipfaster_download_cacher(index_urls):
"""vanilla pip stores a cache of the http session in its cache and not the
wheel files. We intercept the download and save those files into our
cache
"""
from pip._internal import download
orig = download._download_http_url
patched_fn = get_patched_download_http_url(orig, index_urls)
return patched(vars(download), {'_download_http_url': patched_fn})
|
python
|
def pipfaster_download_cacher(index_urls):
"""vanilla pip stores a cache of the http session in its cache and not the
wheel files. We intercept the download and save those files into our
cache
"""
from pip._internal import download
orig = download._download_http_url
patched_fn = get_patched_download_http_url(orig, index_urls)
return patched(vars(download), {'_download_http_url': patched_fn})
|
[
"def",
"pipfaster_download_cacher",
"(",
"index_urls",
")",
":",
"from",
"pip",
".",
"_internal",
"import",
"download",
"orig",
"=",
"download",
".",
"_download_http_url",
"patched_fn",
"=",
"get_patched_download_http_url",
"(",
"orig",
",",
"index_urls",
")",
"return",
"patched",
"(",
"vars",
"(",
"download",
")",
",",
"{",
"'_download_http_url'",
":",
"patched_fn",
"}",
")"
] |
vanilla pip stores a cache of the http session in its cache and not the
wheel files. We intercept the download and save those files into our
cache
|
[
"vanilla",
"pip",
"stores",
"a",
"cache",
"of",
"the",
"http",
"session",
"in",
"its",
"cache",
"and",
"not",
"the",
"wheel",
"files",
".",
"We",
"intercept",
"the",
"download",
"and",
"save",
"those",
"files",
"into",
"our",
"cache"
] |
6feae7ab09ee870c582b97443cfa8f0dc8626ba7
|
https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L467-L475
|
16,823
|
Yelp/venv-update
|
pip_faster.py
|
FasterInstallCommand.run
|
def run(self, options, args):
"""update install options with caching values"""
if options.prune:
previously_installed = pip_get_installed()
index_urls = [options.index_url] + options.extra_index_urls
with pipfaster_download_cacher(index_urls):
requirement_set = super(FasterInstallCommand, self).run(
options, args,
)
required = requirement_set.requirements.values()
# With extra_index_urls we don't know where the wheel is from
if not options.extra_index_urls:
cache_installed_wheels(options.index_url, requirement_set.successfully_downloaded)
if not options.ignore_dependencies:
# transitive requirements, previously installed, are also required
# this has a side-effect of finding any missing / conflicting requirements
required = trace_requirements(required)
if not options.prune:
return requirement_set
extraneous = (
reqnames(previously_installed) -
reqnames(required) -
# the stage1 bootstrap packages
reqnames(trace_requirements([install_req_from_line('venv-update')])) -
# See #186
frozenset(('pkg-resources',))
)
if extraneous:
extraneous = sorted(extraneous)
pip(('uninstall', '--yes') + tuple(extraneous))
|
python
|
def run(self, options, args):
"""update install options with caching values"""
if options.prune:
previously_installed = pip_get_installed()
index_urls = [options.index_url] + options.extra_index_urls
with pipfaster_download_cacher(index_urls):
requirement_set = super(FasterInstallCommand, self).run(
options, args,
)
required = requirement_set.requirements.values()
# With extra_index_urls we don't know where the wheel is from
if not options.extra_index_urls:
cache_installed_wheels(options.index_url, requirement_set.successfully_downloaded)
if not options.ignore_dependencies:
# transitive requirements, previously installed, are also required
# this has a side-effect of finding any missing / conflicting requirements
required = trace_requirements(required)
if not options.prune:
return requirement_set
extraneous = (
reqnames(previously_installed) -
reqnames(required) -
# the stage1 bootstrap packages
reqnames(trace_requirements([install_req_from_line('venv-update')])) -
# See #186
frozenset(('pkg-resources',))
)
if extraneous:
extraneous = sorted(extraneous)
pip(('uninstall', '--yes') + tuple(extraneous))
|
[
"def",
"run",
"(",
"self",
",",
"options",
",",
"args",
")",
":",
"if",
"options",
".",
"prune",
":",
"previously_installed",
"=",
"pip_get_installed",
"(",
")",
"index_urls",
"=",
"[",
"options",
".",
"index_url",
"]",
"+",
"options",
".",
"extra_index_urls",
"with",
"pipfaster_download_cacher",
"(",
"index_urls",
")",
":",
"requirement_set",
"=",
"super",
"(",
"FasterInstallCommand",
",",
"self",
")",
".",
"run",
"(",
"options",
",",
"args",
",",
")",
"required",
"=",
"requirement_set",
".",
"requirements",
".",
"values",
"(",
")",
"# With extra_index_urls we don't know where the wheel is from",
"if",
"not",
"options",
".",
"extra_index_urls",
":",
"cache_installed_wheels",
"(",
"options",
".",
"index_url",
",",
"requirement_set",
".",
"successfully_downloaded",
")",
"if",
"not",
"options",
".",
"ignore_dependencies",
":",
"# transitive requirements, previously installed, are also required",
"# this has a side-effect of finding any missing / conflicting requirements",
"required",
"=",
"trace_requirements",
"(",
"required",
")",
"if",
"not",
"options",
".",
"prune",
":",
"return",
"requirement_set",
"extraneous",
"=",
"(",
"reqnames",
"(",
"previously_installed",
")",
"-",
"reqnames",
"(",
"required",
")",
"-",
"# the stage1 bootstrap packages",
"reqnames",
"(",
"trace_requirements",
"(",
"[",
"install_req_from_line",
"(",
"'venv-update'",
")",
"]",
")",
")",
"-",
"# See #186",
"frozenset",
"(",
"(",
"'pkg-resources'",
",",
")",
")",
")",
"if",
"extraneous",
":",
"extraneous",
"=",
"sorted",
"(",
"extraneous",
")",
"pip",
"(",
"(",
"'uninstall'",
",",
"'--yes'",
")",
"+",
"tuple",
"(",
"extraneous",
")",
")"
] |
update install options with caching values
|
[
"update",
"install",
"options",
"with",
"caching",
"values"
] |
6feae7ab09ee870c582b97443cfa8f0dc8626ba7
|
https://github.com/Yelp/venv-update/blob/6feae7ab09ee870c582b97443cfa8f0dc8626ba7/pip_faster.py#L387-L423
|
16,824
|
RedisJSON/rejson-py
|
rejson/client.py
|
Client.setEncoder
|
def setEncoder(self, encoder):
"""
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
"""
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode
|
python
|
def setEncoder(self, encoder):
"""
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
"""
if not encoder:
self._encoder = json.JSONEncoder()
else:
self._encoder = encoder
self._encode = self._encoder.encode
|
[
"def",
"setEncoder",
"(",
"self",
",",
"encoder",
")",
":",
"if",
"not",
"encoder",
":",
"self",
".",
"_encoder",
"=",
"json",
".",
"JSONEncoder",
"(",
")",
"else",
":",
"self",
".",
"_encoder",
"=",
"encoder",
"self",
".",
"_encode",
"=",
"self",
".",
"_encoder",
".",
"encode"
] |
Sets the client's encoder
``encoder`` should be an instance of a ``json.JSONEncoder`` class
|
[
"Sets",
"the",
"client",
"s",
"encoder",
"encoder",
"should",
"be",
"an",
"instance",
"of",
"a",
"json",
".",
"JSONEncoder",
"class"
] |
55f0adf3adc40f5a769e28e541dbbf5377b90ec6
|
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L78-L87
|
16,825
|
RedisJSON/rejson-py
|
rejson/client.py
|
Client.setDecoder
|
def setDecoder(self, decoder):
"""
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode
|
python
|
def setDecoder(self, decoder):
"""
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
"""
if not decoder:
self._decoder = json.JSONDecoder()
else:
self._decoder = decoder
self._decode = self._decoder.decode
|
[
"def",
"setDecoder",
"(",
"self",
",",
"decoder",
")",
":",
"if",
"not",
"decoder",
":",
"self",
".",
"_decoder",
"=",
"json",
".",
"JSONDecoder",
"(",
")",
"else",
":",
"self",
".",
"_decoder",
"=",
"decoder",
"self",
".",
"_decode",
"=",
"self",
".",
"_decoder",
".",
"decode"
] |
Sets the client's decoder
``decoder`` should be an instance of a ``json.JSONDecoder`` class
|
[
"Sets",
"the",
"client",
"s",
"decoder",
"decoder",
"should",
"be",
"an",
"instance",
"of",
"a",
"json",
".",
"JSONDecoder",
"class"
] |
55f0adf3adc40f5a769e28e541dbbf5377b90ec6
|
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L89-L98
|
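The setEncoder and setDecoder records above let callers swap the JSON codec the client uses for every json* command. One plausible use, sketched below, is serializing types the stock encoder rejects; the `DecimalEncoder` class and the connection parameters are assumptions for illustration, and a Redis server with the RedisJSON module loaded is assumed to be reachable.

import json
from decimal import Decimal

from rejson import Client


class DecimalEncoder(json.JSONEncoder):
    """Serialize Decimal values as plain floats."""

    def default(self, o):
        if isinstance(o, Decimal):
            return float(o)
        return super(DecimalEncoder, self).default(o)


rj = Client(host='localhost', port=6379, decode_responses=True)
rj.setEncoder(DecimalEncoder())      # used by jsonset and the other write commands
rj.setDecoder(json.JSONDecoder())    # passing None instead would also restore the default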
16,826
|
RedisJSON/rejson-py
|
rejson/client.py
|
Client.jsondel
|
def jsondel(self, name, path=Path.rootPath()):
"""
Deletes the JSON value stored at key ``name`` under ``path``
"""
return self.execute_command('JSON.DEL', name, str_path(path))
|
python
|
def jsondel(self, name, path=Path.rootPath()):
"""
Deletes the JSON value stored at key ``name`` under ``path``
"""
return self.execute_command('JSON.DEL', name, str_path(path))
|
[
"def",
"jsondel",
"(",
"self",
",",
"name",
",",
"path",
"=",
"Path",
".",
"rootPath",
"(",
")",
")",
":",
"return",
"self",
".",
"execute_command",
"(",
"'JSON.DEL'",
",",
"name",
",",
"str_path",
"(",
"path",
")",
")"
] |
Deletes the JSON value stored at key ``name`` under ``path``
|
[
"Deletes",
"the",
"JSON",
"value",
"stored",
"at",
"key",
"name",
"under",
"path"
] |
55f0adf3adc40f5a769e28e541dbbf5377b90ec6
|
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L100-L104
|
16,827
|
RedisJSON/rejson-py
|
rejson/client.py
|
Client.jsonget
|
def jsonget(self, name, *args):
"""
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
"""
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None
|
python
|
def jsonget(self, name, *args):
"""
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
"""
pieces = [name]
if len(args) == 0:
pieces.append(Path.rootPath())
else:
for p in args:
pieces.append(str_path(p))
# Handle case where key doesn't exist. The JSONDecoder would raise a
# TypeError exception since it can't decode None
try:
return self.execute_command('JSON.GET', *pieces)
except TypeError:
return None
|
[
"def",
"jsonget",
"(",
"self",
",",
"name",
",",
"*",
"args",
")",
":",
"pieces",
"=",
"[",
"name",
"]",
"if",
"len",
"(",
"args",
")",
"==",
"0",
":",
"pieces",
".",
"append",
"(",
"Path",
".",
"rootPath",
"(",
")",
")",
"else",
":",
"for",
"p",
"in",
"args",
":",
"pieces",
".",
"append",
"(",
"str_path",
"(",
"p",
")",
")",
"# Handle case where key doesn't exist. The JSONDecoder would raise a",
"# TypeError exception since it can't decode None",
"try",
":",
"return",
"self",
".",
"execute_command",
"(",
"'JSON.GET'",
",",
"*",
"pieces",
")",
"except",
"TypeError",
":",
"return",
"None"
] |
Get the object stored as a JSON value at key ``name``
``args`` is zero or more paths, and defaults to root path
|
[
"Get",
"the",
"object",
"stored",
"as",
"a",
"JSON",
"value",
"at",
"key",
"name",
"args",
"is",
"zero",
"or",
"more",
"paths",
"and",
"defaults",
"to",
"root",
"path"
] |
55f0adf3adc40f5a769e28e541dbbf5377b90ec6
|
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L106-L123
|
16,828
|
RedisJSON/rejson-py
|
rejson/client.py
|
Client.jsonmget
|
def jsonmget(self, path, *args):
"""
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
"""
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces)
|
python
|
def jsonmget(self, path, *args):
"""
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
"""
pieces = []
pieces.extend(args)
pieces.append(str_path(path))
return self.execute_command('JSON.MGET', *pieces)
|
[
"def",
"jsonmget",
"(",
"self",
",",
"path",
",",
"*",
"args",
")",
":",
"pieces",
"=",
"[",
"]",
"pieces",
".",
"extend",
"(",
"args",
")",
"pieces",
".",
"append",
"(",
"str_path",
"(",
"path",
")",
")",
"return",
"self",
".",
"execute_command",
"(",
"'JSON.MGET'",
",",
"*",
"pieces",
")"
] |
Gets the objects stored as a JSON values under ``path`` from
keys ``args``
|
[
"Gets",
"the",
"objects",
"stored",
"as",
"a",
"JSON",
"values",
"under",
"path",
"from",
"keys",
"args"
] |
55f0adf3adc40f5a769e28e541dbbf5377b90ec6
|
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L125-L133
|
16,829
|
RedisJSON/rejson-py
|
rejson/client.py
|
Client.jsonset
|
def jsonset(self, name, path, obj, nx=False, xx=False):
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
"""
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces)
|
python
|
def jsonset(self, name, path, obj, nx=False, xx=False):
"""
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
"""
pieces = [name, str_path(path), self._encode(obj)]
# Handle existential modifiers
if nx and xx:
raise Exception('nx and xx are mutually exclusive: use one, the '
'other or neither - but not both')
elif nx:
pieces.append('NX')
elif xx:
pieces.append('XX')
return self.execute_command('JSON.SET', *pieces)
|
[
"def",
"jsonset",
"(",
"self",
",",
"name",
",",
"path",
",",
"obj",
",",
"nx",
"=",
"False",
",",
"xx",
"=",
"False",
")",
":",
"pieces",
"=",
"[",
"name",
",",
"str_path",
"(",
"path",
")",
",",
"self",
".",
"_encode",
"(",
"obj",
")",
"]",
"# Handle existential modifiers",
"if",
"nx",
"and",
"xx",
":",
"raise",
"Exception",
"(",
"'nx and xx are mutually exclusive: use one, the '",
"'other or neither - but not both'",
")",
"elif",
"nx",
":",
"pieces",
".",
"append",
"(",
"'NX'",
")",
"elif",
"xx",
":",
"pieces",
".",
"append",
"(",
"'XX'",
")",
"return",
"self",
".",
"execute_command",
"(",
"'JSON.SET'",
",",
"*",
"pieces",
")"
] |
Set the JSON value at key ``name`` under the ``path`` to ``obj``
``nx`` if set to True, set ``value`` only if it does not exist
``xx`` if set to True, set ``value`` only if it exists
|
[
"Set",
"the",
"JSON",
"value",
"at",
"key",
"name",
"under",
"the",
"path",
"to",
"obj",
"nx",
"if",
"set",
"to",
"True",
"set",
"value",
"only",
"if",
"it",
"does",
"not",
"exist",
"xx",
"if",
"set",
"to",
"True",
"set",
"value",
"only",
"if",
"it",
"exists"
] |
55f0adf3adc40f5a769e28e541dbbf5377b90ec6
|
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L135-L151
|
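The records for jsondel, jsonget, jsonmget, and jsonset above cover the basic read/write cycle. The sketch below strings them together; it assumes a Redis server with the RedisJSON module loaded on localhost:6379, and the key names are made up for illustration.

from rejson import Client, Path

rj = Client(host='localhost', port=6379, decode_responses=True)

# Write a document at the root path, then read a sub-path back.
rj.jsonset('user:1', Path.rootPath(), {'name': 'Ada', 'langs': ['py']})
print(rj.jsonget('user:1', Path('.name')))             # 'Ada'

# nx/xx mirror SET's existence flags: this call is a no-op because the key exists.
rj.jsonset('user:1', Path.rootPath(), {}, nx=True)

# Fetch the same path from several keys at once, then delete one of them.
rj.jsonset('user:2', Path.rootPath(), {'name': 'Grace'})
print(rj.jsonmget(Path('.name'), 'user:1', 'user:2'))  # ['Ada', 'Grace']
rj.jsondel('user:1')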
16,830
|
RedisJSON/rejson-py
|
rejson/client.py
|
Client.jsontype
|
def jsontype(self, name, path=Path.rootPath()):
"""
Gets the type of the JSON value under ``path`` from key ``name``
"""
return self.execute_command('JSON.TYPE', name, str_path(path))
|
python
|
def jsontype(self, name, path=Path.rootPath()):
"""
Gets the type of the JSON value under ``path`` from key ``name``
"""
return self.execute_command('JSON.TYPE', name, str_path(path))
|
[
"def",
"jsontype",
"(",
"self",
",",
"name",
",",
"path",
"=",
"Path",
".",
"rootPath",
"(",
")",
")",
":",
"return",
"self",
".",
"execute_command",
"(",
"'JSON.TYPE'",
",",
"name",
",",
"str_path",
"(",
"path",
")",
")"
] |
Gets the type of the JSON value under ``path`` from key ``name``
|
[
"Gets",
"the",
"type",
"of",
"the",
"JSON",
"value",
"under",
"path",
"from",
"key",
"name"
] |
55f0adf3adc40f5a769e28e541dbbf5377b90ec6
|
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L153-L157
|
16,831
|
RedisJSON/rejson-py
|
rejson/client.py
|
Client.jsonstrappend
|
def jsonstrappend(self, name, string, path=Path.rootPath()):
"""
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
"""
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string))
|
python
|
def jsonstrappend(self, name, string, path=Path.rootPath()):
"""
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
"""
return self.execute_command('JSON.STRAPPEND', name, str_path(path), self._encode(string))
|
[
"def",
"jsonstrappend",
"(",
"self",
",",
"name",
",",
"string",
",",
"path",
"=",
"Path",
".",
"rootPath",
"(",
")",
")",
":",
"return",
"self",
".",
"execute_command",
"(",
"'JSON.STRAPPEND'",
",",
"name",
",",
"str_path",
"(",
"path",
")",
",",
"self",
".",
"_encode",
"(",
"string",
")",
")"
] |
Appends to the string JSON value under ``path`` at key ``name`` the
provided ``string``
|
[
"Appends",
"to",
"the",
"string",
"JSON",
"value",
"under",
"path",
"at",
"key",
"name",
"the",
"provided",
"string"
] |
55f0adf3adc40f5a769e28e541dbbf5377b90ec6
|
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L173-L178
|
16,832
|
RedisJSON/rejson-py
|
rejson/client.py
|
Client.jsonstrlen
|
def jsonstrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the string JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.STRLEN', name, str_path(path))
|
python
|
def jsonstrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the string JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.STRLEN', name, str_path(path))
|
[
"def",
"jsonstrlen",
"(",
"self",
",",
"name",
",",
"path",
"=",
"Path",
".",
"rootPath",
"(",
")",
")",
":",
"return",
"self",
".",
"execute_command",
"(",
"'JSON.STRLEN'",
",",
"name",
",",
"str_path",
"(",
"path",
")",
")"
] |
Returns the length of the string JSON value under ``path`` at key
``name``
|
[
"Returns",
"the",
"length",
"of",
"the",
"string",
"JSON",
"value",
"under",
"path",
"at",
"key",
"name"
] |
55f0adf3adc40f5a769e28e541dbbf5377b90ec6
|
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L180-L185
|
16,833
|
RedisJSON/rejson-py
|
rejson/client.py
|
Client.jsonarrappend
|
def jsonarrappend(self, name, path=Path.rootPath(), *args):
"""
Appends the objects ``args`` to the array under the ``path` in key
``name``
"""
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces)
|
python
|
def jsonarrappend(self, name, path=Path.rootPath(), *args):
"""
Appends the objects ``args`` to the array under the ``path` in key
``name``
"""
pieces = [name, str_path(path)]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRAPPEND', *pieces)
|
[
"def",
"jsonarrappend",
"(",
"self",
",",
"name",
",",
"path",
"=",
"Path",
".",
"rootPath",
"(",
")",
",",
"*",
"args",
")",
":",
"pieces",
"=",
"[",
"name",
",",
"str_path",
"(",
"path",
")",
"]",
"for",
"o",
"in",
"args",
":",
"pieces",
".",
"append",
"(",
"self",
".",
"_encode",
"(",
"o",
")",
")",
"return",
"self",
".",
"execute_command",
"(",
"'JSON.ARRAPPEND'",
",",
"*",
"pieces",
")"
] |
Appends the objects ``args`` to the array under the ``path` in key
``name``
|
[
"Appends",
"the",
"objects",
"args",
"to",
"the",
"array",
"under",
"the",
"path",
"in",
"key",
"name"
] |
55f0adf3adc40f5a769e28e541dbbf5377b90ec6
|
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L187-L195
|
16,834
|
RedisJSON/rejson-py
|
rejson/client.py
|
Client.jsonarrindex
|
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
"""
Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices.
"""
return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop)
|
python
|
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
"""
Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices.
"""
return self.execute_command('JSON.ARRINDEX', name, str_path(path), self._encode(scalar), start, stop)
|
[
"def",
"jsonarrindex",
"(",
"self",
",",
"name",
",",
"path",
",",
"scalar",
",",
"start",
"=",
"0",
",",
"stop",
"=",
"-",
"1",
")",
":",
"return",
"self",
".",
"execute_command",
"(",
"'JSON.ARRINDEX'",
",",
"name",
",",
"str_path",
"(",
"path",
")",
",",
"self",
".",
"_encode",
"(",
"scalar",
")",
",",
"start",
",",
"stop",
")"
] |
Returns the index of ``scalar`` in the JSON array under ``path`` at key
``name``. The search can be limited using the optional inclusive
``start`` and exclusive ``stop`` indices.
|
[
"Returns",
"the",
"index",
"of",
"scalar",
"in",
"the",
"JSON",
"array",
"under",
"path",
"at",
"key",
"name",
".",
"The",
"search",
"can",
"be",
"limited",
"using",
"the",
"optional",
"inclusive",
"start",
"and",
"exclusive",
"stop",
"indices",
"."
] |
55f0adf3adc40f5a769e28e541dbbf5377b90ec6
|
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L197-L203
|
16,835
|
RedisJSON/rejson-py
|
rejson/client.py
|
Client.jsonarrinsert
|
def jsonarrinsert(self, name, path, index, *args):
"""
Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name``
"""
pieces = [name, str_path(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRINSERT', *pieces)
|
python
|
def jsonarrinsert(self, name, path, index, *args):
"""
Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name``
"""
pieces = [name, str_path(path), index]
for o in args:
pieces.append(self._encode(o))
return self.execute_command('JSON.ARRINSERT', *pieces)
|
[
"def",
"jsonarrinsert",
"(",
"self",
",",
"name",
",",
"path",
",",
"index",
",",
"*",
"args",
")",
":",
"pieces",
"=",
"[",
"name",
",",
"str_path",
"(",
"path",
")",
",",
"index",
"]",
"for",
"o",
"in",
"args",
":",
"pieces",
".",
"append",
"(",
"self",
".",
"_encode",
"(",
"o",
")",
")",
"return",
"self",
".",
"execute_command",
"(",
"'JSON.ARRINSERT'",
",",
"*",
"pieces",
")"
] |
Inserts the objects ``args`` to the array at index ``index`` under the
``path` in key ``name``
|
[
"Inserts",
"the",
"objects",
"args",
"to",
"the",
"array",
"at",
"index",
"index",
"under",
"the",
"path",
"in",
"key",
"name"
] |
55f0adf3adc40f5a769e28e541dbbf5377b90ec6
|
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L205-L213
|
16,836
|
RedisJSON/rejson-py
|
rejson/client.py
|
Client.jsonarrlen
|
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path))
|
python
|
def jsonarrlen(self, name, path=Path.rootPath()):
"""
Returns the length of the array JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.ARRLEN', name, str_path(path))
|
[
"def",
"jsonarrlen",
"(",
"self",
",",
"name",
",",
"path",
"=",
"Path",
".",
"rootPath",
"(",
")",
")",
":",
"return",
"self",
".",
"execute_command",
"(",
"'JSON.ARRLEN'",
",",
"name",
",",
"str_path",
"(",
"path",
")",
")"
] |
Returns the length of the array JSON value under ``path`` at key
``name``
|
[
"Returns",
"the",
"length",
"of",
"the",
"array",
"JSON",
"value",
"under",
"path",
"at",
"key",
"name"
] |
55f0adf3adc40f5a769e28e541dbbf5377b90ec6
|
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L215-L220
|
16,837
|
RedisJSON/rejson-py
|
rejson/client.py
|
Client.jsonarrpop
|
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
"""
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
"""
return self.execute_command('JSON.ARRPOP', name, str_path(path), index)
|
python
|
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
"""
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
"""
return self.execute_command('JSON.ARRPOP', name, str_path(path), index)
|
[
"def",
"jsonarrpop",
"(",
"self",
",",
"name",
",",
"path",
"=",
"Path",
".",
"rootPath",
"(",
")",
",",
"index",
"=",
"-",
"1",
")",
":",
"return",
"self",
".",
"execute_command",
"(",
"'JSON.ARRPOP'",
",",
"name",
",",
"str_path",
"(",
"path",
")",
",",
"index",
")"
] |
Pops the element at ``index`` in the array JSON value under ``path`` at
key ``name``
|
[
"Pops",
"the",
"element",
"at",
"index",
"in",
"the",
"array",
"JSON",
"value",
"under",
"path",
"at",
"key",
"name"
] |
55f0adf3adc40f5a769e28e541dbbf5377b90ec6
|
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L222-L227
|
16,838
|
RedisJSON/rejson-py
|
rejson/client.py
|
Client.jsonarrtrim
|
def jsonarrtrim(self, name, path, start, stop):
"""
Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``
"""
return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
|
python
|
def jsonarrtrim(self, name, path, start, stop):
"""
Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``
"""
return self.execute_command('JSON.ARRTRIM', name, str_path(path), start, stop)
|
[
"def",
"jsonarrtrim",
"(",
"self",
",",
"name",
",",
"path",
",",
"start",
",",
"stop",
")",
":",
"return",
"self",
".",
"execute_command",
"(",
"'JSON.ARRTRIM'",
",",
"name",
",",
"str_path",
"(",
"path",
")",
",",
"start",
",",
"stop",
")"
] |
Trim the array JSON value under ``path`` at key ``name`` to the
inclusive range given by ``start`` and ``stop``
|
[
"Trim",
"the",
"array",
"JSON",
"value",
"under",
"path",
"at",
"key",
"name",
"to",
"the",
"inclusive",
"range",
"given",
"by",
"start",
"and",
"stop"
] |
55f0adf3adc40f5a769e28e541dbbf5377b90ec6
|
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L229-L234
|
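The run of records above (jsonarrappend through jsonarrtrim) covers RedisJSON's array operations. A short walk-through follows, again assuming a RedisJSON-enabled server on localhost and illustrative key names; the expected values in the comments are what the commands should report for this data.

from rejson import Client, Path

rj = Client(host='localhost', port=6379, decode_responses=True)

rj.jsonset('scores', Path.rootPath(), [1, 2, 3])
rj.jsonarrappend('scores', Path.rootPath(), 4, 5)        # now [1, 2, 3, 4, 5]
print(rj.jsonarrlen('scores'))                           # 5
print(rj.jsonarrindex('scores', Path.rootPath(), 3))     # 2
rj.jsonarrtrim('scores', Path.rootPath(), 1, 3)          # keep indices 1..3 -> [2, 3, 4]
print(rj.jsonarrpop('scores'))                           # 4 (pops the last element by default)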
16,839
|
RedisJSON/rejson-py
|
rejson/client.py
|
Client.jsonobjkeys
|
def jsonobjkeys(self, name, path=Path.rootPath()):
"""
Returns the key names in the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJKEYS', name, str_path(path))
|
python
|
def jsonobjkeys(self, name, path=Path.rootPath()):
"""
Returns the key names in the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJKEYS', name, str_path(path))
|
[
"def",
"jsonobjkeys",
"(",
"self",
",",
"name",
",",
"path",
"=",
"Path",
".",
"rootPath",
"(",
")",
")",
":",
"return",
"self",
".",
"execute_command",
"(",
"'JSON.OBJKEYS'",
",",
"name",
",",
"str_path",
"(",
"path",
")",
")"
] |
Returns the key names in the dictionary JSON value under ``path`` at key
``name``
|
[
"Returns",
"the",
"key",
"names",
"in",
"the",
"dictionary",
"JSON",
"value",
"under",
"path",
"at",
"key",
"name"
] |
55f0adf3adc40f5a769e28e541dbbf5377b90ec6
|
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L236-L241
|
16,840
|
RedisJSON/rejson-py
|
rejson/client.py
|
Client.jsonobjlen
|
def jsonobjlen(self, name, path=Path.rootPath()):
"""
Returns the length of the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJLEN', name, str_path(path))
|
python
|
def jsonobjlen(self, name, path=Path.rootPath()):
"""
Returns the length of the dictionary JSON value under ``path`` at key
``name``
"""
return self.execute_command('JSON.OBJLEN', name, str_path(path))
|
[
"def",
"jsonobjlen",
"(",
"self",
",",
"name",
",",
"path",
"=",
"Path",
".",
"rootPath",
"(",
")",
")",
":",
"return",
"self",
".",
"execute_command",
"(",
"'JSON.OBJLEN'",
",",
"name",
",",
"str_path",
"(",
"path",
")",
")"
] |
Returns the length of the dictionary JSON value under ``path`` at key
``name``
|
[
"Returns",
"the",
"length",
"of",
"the",
"dictionary",
"JSON",
"value",
"under",
"path",
"at",
"key",
"name"
] |
55f0adf3adc40f5a769e28e541dbbf5377b90ec6
|
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L243-L248
|
16,841
|
mitodl/django-server-status
|
server_status/views.py
|
get_pg_info
|
def get_pg_info():
"""Check PostgreSQL connection."""
from psycopg2 import connect, OperationalError
log.debug("entered get_pg_info")
try:
conf = settings.DATABASES['default']
database = conf["NAME"]
user = conf["USER"]
host = conf["HOST"]
port = conf["PORT"]
password = conf["PASSWORD"]
except (AttributeError, KeyError):
log.error("No PostgreSQL connection info found in settings.")
return {"status": NO_CONFIG}
except TypeError:
return {"status": DOWN}
log.debug("got past getting conf")
try:
start = datetime.now()
connection = connect(
database=database, user=user, host=host,
port=port, password=password, connect_timeout=TIMEOUT_SECONDS,
)
log.debug("at end of context manager")
micro = (datetime.now() - start).microseconds
connection.close()
except (OperationalError, KeyError) as ex:
log.error("No PostgreSQL connection info found in settings. %s Error: %s",
conf, ex)
return {"status": DOWN}
log.debug("got to end of postgres check successfully")
return {"status": UP, "response_microseconds": micro}
|
python
|
def get_pg_info():
"""Check PostgreSQL connection."""
from psycopg2 import connect, OperationalError
log.debug("entered get_pg_info")
try:
conf = settings.DATABASES['default']
database = conf["NAME"]
user = conf["USER"]
host = conf["HOST"]
port = conf["PORT"]
password = conf["PASSWORD"]
except (AttributeError, KeyError):
log.error("No PostgreSQL connection info found in settings.")
return {"status": NO_CONFIG}
except TypeError:
return {"status": DOWN}
log.debug("got past getting conf")
try:
start = datetime.now()
connection = connect(
database=database, user=user, host=host,
port=port, password=password, connect_timeout=TIMEOUT_SECONDS,
)
log.debug("at end of context manager")
micro = (datetime.now() - start).microseconds
connection.close()
except (OperationalError, KeyError) as ex:
log.error("No PostgreSQL connection info found in settings. %s Error: %s",
conf, ex)
return {"status": DOWN}
log.debug("got to end of postgres check successfully")
return {"status": UP, "response_microseconds": micro}
|
[
"def",
"get_pg_info",
"(",
")",
":",
"from",
"psycopg2",
"import",
"connect",
",",
"OperationalError",
"log",
".",
"debug",
"(",
"\"entered get_pg_info\"",
")",
"try",
":",
"conf",
"=",
"settings",
".",
"DATABASES",
"[",
"'default'",
"]",
"database",
"=",
"conf",
"[",
"\"NAME\"",
"]",
"user",
"=",
"conf",
"[",
"\"USER\"",
"]",
"host",
"=",
"conf",
"[",
"\"HOST\"",
"]",
"port",
"=",
"conf",
"[",
"\"PORT\"",
"]",
"password",
"=",
"conf",
"[",
"\"PASSWORD\"",
"]",
"except",
"(",
"AttributeError",
",",
"KeyError",
")",
":",
"log",
".",
"error",
"(",
"\"No PostgreSQL connection info found in settings.\"",
")",
"return",
"{",
"\"status\"",
":",
"NO_CONFIG",
"}",
"except",
"TypeError",
":",
"return",
"{",
"\"status\"",
":",
"DOWN",
"}",
"log",
".",
"debug",
"(",
"\"got past getting conf\"",
")",
"try",
":",
"start",
"=",
"datetime",
".",
"now",
"(",
")",
"connection",
"=",
"connect",
"(",
"database",
"=",
"database",
",",
"user",
"=",
"user",
",",
"host",
"=",
"host",
",",
"port",
"=",
"port",
",",
"password",
"=",
"password",
",",
"connect_timeout",
"=",
"TIMEOUT_SECONDS",
",",
")",
"log",
".",
"debug",
"(",
"\"at end of context manager\"",
")",
"micro",
"=",
"(",
"datetime",
".",
"now",
"(",
")",
"-",
"start",
")",
".",
"microseconds",
"connection",
".",
"close",
"(",
")",
"except",
"(",
"OperationalError",
",",
"KeyError",
")",
"as",
"ex",
":",
"log",
".",
"error",
"(",
"\"No PostgreSQL connection info found in settings. %s Error: %s\"",
",",
"conf",
",",
"ex",
")",
"return",
"{",
"\"status\"",
":",
"DOWN",
"}",
"log",
".",
"debug",
"(",
"\"got to end of postgres check successfully\"",
")",
"return",
"{",
"\"status\"",
":",
"UP",
",",
"\"response_microseconds\"",
":",
"micro",
"}"
] |
Check PostgreSQL connection.
|
[
"Check",
"PostgreSQL",
"connection",
"."
] |
99bd29343138f94a08718fdbd9285e551751777b
|
https://github.com/mitodl/django-server-status/blob/99bd29343138f94a08718fdbd9285e551751777b/server_status/views.py#L30-L61
|
16,842
|
mitodl/django-server-status
|
server_status/views.py
|
get_redis_info
|
def get_redis_info():
"""Check Redis connection."""
from kombu.utils.url import _parse_url as parse_redis_url
from redis import (
StrictRedis,
ConnectionError as RedisConnectionError,
ResponseError as RedisResponseError,
)
for conf_name in ('REDIS_URL', 'BROKER_URL', 'CELERY_BROKER_URL'):
if hasattr(settings, conf_name):
url = getattr(settings, conf_name)
if url.startswith('redis://'):
break
else:
log.error("No redis connection info found in settings.")
return {"status": NO_CONFIG}
_, host, port, _, password, database, _ = parse_redis_url(url)
start = datetime.now()
try:
rdb = StrictRedis(
host=host, port=port, db=database,
password=password, socket_timeout=TIMEOUT_SECONDS,
)
info = rdb.info()
except (RedisConnectionError, TypeError) as ex:
log.error("Error making Redis connection: %s", ex.args)
return {"status": DOWN}
except RedisResponseError as ex:
log.error("Bad Redis response: %s", ex.args)
return {"status": DOWN, "message": "auth error"}
micro = (datetime.now() - start).microseconds
del rdb # the redis package does not support Redis's QUIT.
ret = {
"status": UP, "response_microseconds": micro,
}
fields = ("uptime_in_seconds", "used_memory", "used_memory_peak")
ret.update({x: info[x] for x in fields})
return ret
|
python
|
def get_redis_info():
"""Check Redis connection."""
from kombu.utils.url import _parse_url as parse_redis_url
from redis import (
StrictRedis,
ConnectionError as RedisConnectionError,
ResponseError as RedisResponseError,
)
for conf_name in ('REDIS_URL', 'BROKER_URL', 'CELERY_BROKER_URL'):
if hasattr(settings, conf_name):
url = getattr(settings, conf_name)
if url.startswith('redis://'):
break
else:
log.error("No redis connection info found in settings.")
return {"status": NO_CONFIG}
_, host, port, _, password, database, _ = parse_redis_url(url)
start = datetime.now()
try:
rdb = StrictRedis(
host=host, port=port, db=database,
password=password, socket_timeout=TIMEOUT_SECONDS,
)
info = rdb.info()
except (RedisConnectionError, TypeError) as ex:
log.error("Error making Redis connection: %s", ex.args)
return {"status": DOWN}
except RedisResponseError as ex:
log.error("Bad Redis response: %s", ex.args)
return {"status": DOWN, "message": "auth error"}
micro = (datetime.now() - start).microseconds
del rdb # the redis package does not support Redis's QUIT.
ret = {
"status": UP, "response_microseconds": micro,
}
fields = ("uptime_in_seconds", "used_memory", "used_memory_peak")
ret.update({x: info[x] for x in fields})
return ret
|
[
"def",
"get_redis_info",
"(",
")",
":",
"from",
"kombu",
".",
"utils",
".",
"url",
"import",
"_parse_url",
"as",
"parse_redis_url",
"from",
"redis",
"import",
"(",
"StrictRedis",
",",
"ConnectionError",
"as",
"RedisConnectionError",
",",
"ResponseError",
"as",
"RedisResponseError",
",",
")",
"for",
"conf_name",
"in",
"(",
"'REDIS_URL'",
",",
"'BROKER_URL'",
",",
"'CELERY_BROKER_URL'",
")",
":",
"if",
"hasattr",
"(",
"settings",
",",
"conf_name",
")",
":",
"url",
"=",
"getattr",
"(",
"settings",
",",
"conf_name",
")",
"if",
"url",
".",
"startswith",
"(",
"'redis://'",
")",
":",
"break",
"else",
":",
"log",
".",
"error",
"(",
"\"No redis connection info found in settings.\"",
")",
"return",
"{",
"\"status\"",
":",
"NO_CONFIG",
"}",
"_",
",",
"host",
",",
"port",
",",
"_",
",",
"password",
",",
"database",
",",
"_",
"=",
"parse_redis_url",
"(",
"url",
")",
"start",
"=",
"datetime",
".",
"now",
"(",
")",
"try",
":",
"rdb",
"=",
"StrictRedis",
"(",
"host",
"=",
"host",
",",
"port",
"=",
"port",
",",
"db",
"=",
"database",
",",
"password",
"=",
"password",
",",
"socket_timeout",
"=",
"TIMEOUT_SECONDS",
",",
")",
"info",
"=",
"rdb",
".",
"info",
"(",
")",
"except",
"(",
"RedisConnectionError",
",",
"TypeError",
")",
"as",
"ex",
":",
"log",
".",
"error",
"(",
"\"Error making Redis connection: %s\"",
",",
"ex",
".",
"args",
")",
"return",
"{",
"\"status\"",
":",
"DOWN",
"}",
"except",
"RedisResponseError",
"as",
"ex",
":",
"log",
".",
"error",
"(",
"\"Bad Redis response: %s\"",
",",
"ex",
".",
"args",
")",
"return",
"{",
"\"status\"",
":",
"DOWN",
",",
"\"message\"",
":",
"\"auth error\"",
"}",
"micro",
"=",
"(",
"datetime",
".",
"now",
"(",
")",
"-",
"start",
")",
".",
"microseconds",
"del",
"rdb",
"# the redis package does not support Redis's QUIT.",
"ret",
"=",
"{",
"\"status\"",
":",
"UP",
",",
"\"response_microseconds\"",
":",
"micro",
",",
"}",
"fields",
"=",
"(",
"\"uptime_in_seconds\"",
",",
"\"used_memory\"",
",",
"\"used_memory_peak\"",
")",
"ret",
".",
"update",
"(",
"{",
"x",
":",
"info",
"[",
"x",
"]",
"for",
"x",
"in",
"fields",
"}",
")",
"return",
"ret"
] |
Check Redis connection.
|
[
"Check",
"Redis",
"connection",
"."
] |
99bd29343138f94a08718fdbd9285e551751777b
|
https://github.com/mitodl/django-server-status/blob/99bd29343138f94a08718fdbd9285e551751777b/server_status/views.py#L64-L102
|
16,843
|
mitodl/django-server-status
|
server_status/views.py
|
get_elasticsearch_info
|
def get_elasticsearch_info():
"""Check Elasticsearch connection."""
from elasticsearch import (
Elasticsearch,
ConnectionError as ESConnectionError
)
if hasattr(settings, 'ELASTICSEARCH_URL'):
url = settings.ELASTICSEARCH_URL
else:
return {"status": NO_CONFIG}
start = datetime.now()
try:
search = Elasticsearch(url, request_timeout=TIMEOUT_SECONDS)
search.info()
except ESConnectionError:
return {"status": DOWN}
del search # The elasticsearch library has no "close" or "disconnect."
micro = (datetime.now() - start).microseconds
return {
"status": UP, "response_microseconds": micro,
}
|
python
|
def get_elasticsearch_info():
"""Check Elasticsearch connection."""
from elasticsearch import (
Elasticsearch,
ConnectionError as ESConnectionError
)
if hasattr(settings, 'ELASTICSEARCH_URL'):
url = settings.ELASTICSEARCH_URL
else:
return {"status": NO_CONFIG}
start = datetime.now()
try:
search = Elasticsearch(url, request_timeout=TIMEOUT_SECONDS)
search.info()
except ESConnectionError:
return {"status": DOWN}
del search # The elasticsearch library has no "close" or "disconnect."
micro = (datetime.now() - start).microseconds
return {
"status": UP, "response_microseconds": micro,
}
|
[
"def",
"get_elasticsearch_info",
"(",
")",
":",
"from",
"elasticsearch",
"import",
"(",
"Elasticsearch",
",",
"ConnectionError",
"as",
"ESConnectionError",
")",
"if",
"hasattr",
"(",
"settings",
",",
"'ELASTICSEARCH_URL'",
")",
":",
"url",
"=",
"settings",
".",
"ELASTICSEARCH_URL",
"else",
":",
"return",
"{",
"\"status\"",
":",
"NO_CONFIG",
"}",
"start",
"=",
"datetime",
".",
"now",
"(",
")",
"try",
":",
"search",
"=",
"Elasticsearch",
"(",
"url",
",",
"request_timeout",
"=",
"TIMEOUT_SECONDS",
")",
"search",
".",
"info",
"(",
")",
"except",
"ESConnectionError",
":",
"return",
"{",
"\"status\"",
":",
"DOWN",
"}",
"del",
"search",
"# The elasticsearch library has no \"close\" or \"disconnect.\"",
"micro",
"=",
"(",
"datetime",
".",
"now",
"(",
")",
"-",
"start",
")",
".",
"microseconds",
"return",
"{",
"\"status\"",
":",
"UP",
",",
"\"response_microseconds\"",
":",
"micro",
",",
"}"
] |
Check Elasticsearch connection.
|
[
"Check",
"Elasticsearch",
"connection",
"."
] |
99bd29343138f94a08718fdbd9285e551751777b
|
https://github.com/mitodl/django-server-status/blob/99bd29343138f94a08718fdbd9285e551751777b/server_status/views.py#L105-L125
|
16,844
|
mitodl/django-server-status
|
server_status/views.py
|
get_celery_info
|
def get_celery_info():
"""
Check celery availability
"""
import celery
if not getattr(settings, 'USE_CELERY', False):
log.error("No celery config found. Set USE_CELERY in settings to enable.")
return {"status": NO_CONFIG}
start = datetime.now()
try:
# pylint: disable=no-member
app = celery.Celery('tasks')
app.config_from_object('django.conf:settings', namespace='CELERY')
# Make sure celery is connected with max_retries=1
# and not the default of max_retries=None if the connection
# is made lazily
app.connection().ensure_connection(max_retries=1)
celery_stats = celery.task.control.inspect().stats()
if not celery_stats:
log.error("No running Celery workers were found.")
return {"status": DOWN, "message": "No running Celery workers"}
except Exception as exp: # pylint: disable=broad-except
log.error("Error connecting to the backend: %s", exp)
return {"status": DOWN, "message": "Error connecting to the backend"}
return {"status": UP, "response_microseconds": (datetime.now() - start).microseconds}
|
python
|
def get_celery_info():
"""
Check celery availability
"""
import celery
if not getattr(settings, 'USE_CELERY', False):
log.error("No celery config found. Set USE_CELERY in settings to enable.")
return {"status": NO_CONFIG}
start = datetime.now()
try:
# pylint: disable=no-member
app = celery.Celery('tasks')
app.config_from_object('django.conf:settings', namespace='CELERY')
# Make sure celery is connected with max_retries=1
# and not the default of max_retries=None if the connection
# is made lazily
app.connection().ensure_connection(max_retries=1)
celery_stats = celery.task.control.inspect().stats()
if not celery_stats:
log.error("No running Celery workers were found.")
return {"status": DOWN, "message": "No running Celery workers"}
except Exception as exp: # pylint: disable=broad-except
log.error("Error connecting to the backend: %s", exp)
return {"status": DOWN, "message": "Error connecting to the backend"}
return {"status": UP, "response_microseconds": (datetime.now() - start).microseconds}
|
[
"def",
"get_celery_info",
"(",
")",
":",
"import",
"celery",
"if",
"not",
"getattr",
"(",
"settings",
",",
"'USE_CELERY'",
",",
"False",
")",
":",
"log",
".",
"error",
"(",
"\"No celery config found. Set USE_CELERY in settings to enable.\"",
")",
"return",
"{",
"\"status\"",
":",
"NO_CONFIG",
"}",
"start",
"=",
"datetime",
".",
"now",
"(",
")",
"try",
":",
"# pylint: disable=no-member",
"app",
"=",
"celery",
".",
"Celery",
"(",
"'tasks'",
")",
"app",
".",
"config_from_object",
"(",
"'django.conf:settings'",
",",
"namespace",
"=",
"'CELERY'",
")",
"# Make sure celery is connected with max_retries=1",
"# and not the default of max_retries=None if the connection",
"# is made lazily",
"app",
".",
"connection",
"(",
")",
".",
"ensure_connection",
"(",
"max_retries",
"=",
"1",
")",
"celery_stats",
"=",
"celery",
".",
"task",
".",
"control",
".",
"inspect",
"(",
")",
".",
"stats",
"(",
")",
"if",
"not",
"celery_stats",
":",
"log",
".",
"error",
"(",
"\"No running Celery workers were found.\"",
")",
"return",
"{",
"\"status\"",
":",
"DOWN",
",",
"\"message\"",
":",
"\"No running Celery workers\"",
"}",
"except",
"Exception",
"as",
"exp",
":",
"# pylint: disable=broad-except",
"log",
".",
"error",
"(",
"\"Error connecting to the backend: %s\"",
",",
"exp",
")",
"return",
"{",
"\"status\"",
":",
"DOWN",
",",
"\"message\"",
":",
"\"Error connecting to the backend\"",
"}",
"return",
"{",
"\"status\"",
":",
"UP",
",",
"\"response_microseconds\"",
":",
"(",
"datetime",
".",
"now",
"(",
")",
"-",
"start",
")",
".",
"microseconds",
"}"
] |
Check celery availability
|
[
"Check",
"celery",
"availability"
] |
99bd29343138f94a08718fdbd9285e551751777b
|
https://github.com/mitodl/django-server-status/blob/99bd29343138f94a08718fdbd9285e551751777b/server_status/views.py#L128-L153
|
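The four connection checks above (PostgreSQL, Redis, Elasticsearch, Celery) all follow the same shape: bail out with NO_CONFIG when settings are missing, time one round trip to the dependency, and map any failure to DOWN. The generic sketch below isolates that pattern; the status constants are placeholders here (the real values live elsewhere in server_status), and the lambda stands in for an actual dependency call.

from datetime import datetime

UP, DOWN, NO_CONFIG = 'up', 'down', 'no config'   # placeholder values for the sketch


def timed_check(make_call, configured=True):
    """Run make_call() once and report a status dict plus elapsed microseconds."""
    if not configured:
        return {'status': NO_CONFIG}
    start = datetime.now()
    try:
        make_call()
    except Exception:                 # deliberately broad, like the originals
        return {'status': DOWN}
    return {
        'status': UP,
        'response_microseconds': (datetime.now() - start).microseconds,
    }


print(timed_check(lambda: sum(range(10))))   # {'status': 'up', 'response_microseconds': ...}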
16,845
|
mitodl/django-server-status
|
server_status/views.py
|
get_certificate_info
|
def get_certificate_info():
"""
checks app certificate expiry status
"""
if hasattr(settings, 'MIT_WS_CERTIFICATE') and settings.MIT_WS_CERTIFICATE:
mit_ws_certificate = settings.MIT_WS_CERTIFICATE
else:
return {"status": NO_CONFIG}
app_cert = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM, (
mit_ws_certificate if not isinstance(mit_ws_certificate, str)
else mit_ws_certificate.encode().decode('unicode_escape').encode()
)
)
app_cert_expiration = datetime.strptime(
app_cert.get_notAfter().decode('ascii'),
'%Y%m%d%H%M%SZ'
)
date_delta = app_cert_expiration - datetime.now()
# if more then 30 days left in expiry of certificate then app is safe
return {
'app_cert_expires': app_cert_expiration.strftime('%Y-%m-%dT%H:%M:%S'),
'status': UP if date_delta.days > 30 else DOWN
}
|
python
|
def get_certificate_info():
"""
checks app certificate expiry status
"""
if hasattr(settings, 'MIT_WS_CERTIFICATE') and settings.MIT_WS_CERTIFICATE:
mit_ws_certificate = settings.MIT_WS_CERTIFICATE
else:
return {"status": NO_CONFIG}
app_cert = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM, (
mit_ws_certificate if not isinstance(mit_ws_certificate, str)
else mit_ws_certificate.encode().decode('unicode_escape').encode()
)
)
app_cert_expiration = datetime.strptime(
app_cert.get_notAfter().decode('ascii'),
'%Y%m%d%H%M%SZ'
)
date_delta = app_cert_expiration - datetime.now()
# if more then 30 days left in expiry of certificate then app is safe
return {
'app_cert_expires': app_cert_expiration.strftime('%Y-%m-%dT%H:%M:%S'),
'status': UP if date_delta.days > 30 else DOWN
}
|
[
"def",
"get_certificate_info",
"(",
")",
":",
"if",
"hasattr",
"(",
"settings",
",",
"'MIT_WS_CERTIFICATE'",
")",
"and",
"settings",
".",
"MIT_WS_CERTIFICATE",
":",
"mit_ws_certificate",
"=",
"settings",
".",
"MIT_WS_CERTIFICATE",
"else",
":",
"return",
"{",
"\"status\"",
":",
"NO_CONFIG",
"}",
"app_cert",
"=",
"OpenSSL",
".",
"crypto",
".",
"load_certificate",
"(",
"OpenSSL",
".",
"crypto",
".",
"FILETYPE_PEM",
",",
"(",
"mit_ws_certificate",
"if",
"not",
"isinstance",
"(",
"mit_ws_certificate",
",",
"str",
")",
"else",
"mit_ws_certificate",
".",
"encode",
"(",
")",
".",
"decode",
"(",
"'unicode_escape'",
")",
".",
"encode",
"(",
")",
")",
")",
"app_cert_expiration",
"=",
"datetime",
".",
"strptime",
"(",
"app_cert",
".",
"get_notAfter",
"(",
")",
".",
"decode",
"(",
"'ascii'",
")",
",",
"'%Y%m%d%H%M%SZ'",
")",
"date_delta",
"=",
"app_cert_expiration",
"-",
"datetime",
".",
"now",
"(",
")",
"# if more then 30 days left in expiry of certificate then app is safe",
"return",
"{",
"'app_cert_expires'",
":",
"app_cert_expiration",
".",
"strftime",
"(",
"'%Y-%m-%dT%H:%M:%S'",
")",
",",
"'status'",
":",
"UP",
"if",
"date_delta",
".",
"days",
">",
"30",
"else",
"DOWN",
"}"
] |
checks app certificate expiry status
|
[
"checks",
"app",
"certificate",
"expiry",
"status"
] |
99bd29343138f94a08718fdbd9285e551751777b
|
https://github.com/mitodl/django-server-status/blob/99bd29343138f94a08718fdbd9285e551751777b/server_status/views.py#L156-L182
|
16,846
|
bitcaster-io/bitcaster
|
src/telebot/__init__.py
|
TeleBot._start
|
def _start(self):
'''Requests bot information based on current api_key, and sets
self.whoami to dictionary with username, first_name, and id of the
configured bot.
'''
if self.whoami is None:
me = self.get_me()
if me.get('ok', False):
self.whoami = me['result']
else:
raise ValueError('Bot Cannot request information, check '
'api_key')
|
python
|
def _start(self):
'''Requests bot information based on current api_key, and sets
self.whoami to dictionary with username, first_name, and id of the
configured bot.
'''
if self.whoami is None:
me = self.get_me()
if me.get('ok', False):
self.whoami = me['result']
else:
raise ValueError('Bot Cannot request information, check '
'api_key')
|
[
"def",
"_start",
"(",
"self",
")",
":",
"if",
"self",
".",
"whoami",
"is",
"None",
":",
"me",
"=",
"self",
".",
"get_me",
"(",
")",
"if",
"me",
".",
"get",
"(",
"'ok'",
",",
"False",
")",
":",
"self",
".",
"whoami",
"=",
"me",
"[",
"'result'",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'Bot Cannot request information, check '",
"'api_key'",
")"
] |
Requests bot information based on current api_key, and sets
self.whoami to dictionary with username, first_name, and id of the
configured bot.
|
[
"Requests",
"bot",
"information",
"based",
"on",
"current",
"api_key",
"and",
"sets",
"self",
".",
"whoami",
"to",
"dictionary",
"with",
"username",
"first_name",
"and",
"id",
"of",
"the",
"configured",
"bot",
"."
] |
04625a4b67c1ad01e5d38faa3093828b360d4a98
|
https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/telebot/__init__.py#L74-L86
|
16,847
|
bitcaster-io/bitcaster
|
src/telebot/__init__.py
|
TeleBot.poll
|
def poll(self, offset=None, poll_timeout=600, cooldown=60, debug=False):
'''These should also be in the config section, but some here for
overrides
'''
if self.config['api_key'] is None:
raise ValueError('config api_key is undefined')
if offset or self.config.get('offset', None):
self.offset = offset or self.config.get('offset', None)
self._start()
while True:
try:
response = self.get_updates(poll_timeout, self.offset)
if response.get('ok', False) is False:
raise ValueError(response['error'])
else:
self.process_updates(response)
except Exception as e:
print('Error: Unknown Exception')
print(e)
if debug:
raise e
else:
time.sleep(cooldown)
|
python
|
def poll(self, offset=None, poll_timeout=600, cooldown=60, debug=False):
'''These should also be in the config section, but some here for
overrides
'''
if self.config['api_key'] is None:
raise ValueError('config api_key is undefined')
if offset or self.config.get('offset', None):
self.offset = offset or self.config.get('offset', None)
self._start()
while True:
try:
response = self.get_updates(poll_timeout, self.offset)
if response.get('ok', False) is False:
raise ValueError(response['error'])
else:
self.process_updates(response)
except Exception as e:
print('Error: Unknown Exception')
print(e)
if debug:
raise e
else:
time.sleep(cooldown)
|
[
"def",
"poll",
"(",
"self",
",",
"offset",
"=",
"None",
",",
"poll_timeout",
"=",
"600",
",",
"cooldown",
"=",
"60",
",",
"debug",
"=",
"False",
")",
":",
"if",
"self",
".",
"config",
"[",
"'api_key'",
"]",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'config api_key is undefined'",
")",
"if",
"offset",
"or",
"self",
".",
"config",
".",
"get",
"(",
"'offset'",
",",
"None",
")",
":",
"self",
".",
"offset",
"=",
"offset",
"or",
"self",
".",
"config",
".",
"get",
"(",
"'offset'",
",",
"None",
")",
"self",
".",
"_start",
"(",
")",
"while",
"True",
":",
"try",
":",
"response",
"=",
"self",
".",
"get_updates",
"(",
"poll_timeout",
",",
"self",
".",
"offset",
")",
"if",
"response",
".",
"get",
"(",
"'ok'",
",",
"False",
")",
"is",
"False",
":",
"raise",
"ValueError",
"(",
"response",
"[",
"'error'",
"]",
")",
"else",
":",
"self",
".",
"process_updates",
"(",
"response",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"'Error: Unknown Exception'",
")",
"print",
"(",
"e",
")",
"if",
"debug",
":",
"raise",
"e",
"else",
":",
"time",
".",
"sleep",
"(",
"cooldown",
")"
] |
These should also be in the config section, but some here for
overrides
|
[
"These",
"should",
"also",
"be",
"in",
"the",
"config",
"section",
"but",
"some",
"here",
"for",
"overrides"
] |
04625a4b67c1ad01e5d38faa3093828b360d4a98
|
https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/telebot/__init__.py#L88-L114
|
16,848
|
bitcaster-io/bitcaster
|
src/bitcaster/utils/language.py
|
get_attr
|
def get_attr(obj, attr, default=None):
"""Recursive get object's attribute. May use dot notation.
>>> class C(object): pass
>>> a = C()
>>> a.b = C()
>>> a.b.c = 4
>>> get_attr(a, 'b.c')
4
>>> get_attr(a, 'b.c.y', None)
>>> get_attr(a, 'b.c.y', 1)
1
"""
if '.' not in attr:
return getattr(obj, attr, default)
else:
L = attr.split('.')
return get_attr(getattr(obj, L[0], default), '.'.join(L[1:]), default)
|
python
|
def get_attr(obj, attr, default=None):
"""Recursive get object's attribute. May use dot notation.
>>> class C(object): pass
>>> a = C()
>>> a.b = C()
>>> a.b.c = 4
>>> get_attr(a, 'b.c')
4
>>> get_attr(a, 'b.c.y', None)
>>> get_attr(a, 'b.c.y', 1)
1
"""
if '.' not in attr:
return getattr(obj, attr, default)
else:
L = attr.split('.')
return get_attr(getattr(obj, L[0], default), '.'.join(L[1:]), default)
|
[
"def",
"get_attr",
"(",
"obj",
",",
"attr",
",",
"default",
"=",
"None",
")",
":",
"if",
"'.'",
"not",
"in",
"attr",
":",
"return",
"getattr",
"(",
"obj",
",",
"attr",
",",
"default",
")",
"else",
":",
"L",
"=",
"attr",
".",
"split",
"(",
"'.'",
")",
"return",
"get_attr",
"(",
"getattr",
"(",
"obj",
",",
"L",
"[",
"0",
"]",
",",
"default",
")",
",",
"'.'",
".",
"join",
"(",
"L",
"[",
"1",
":",
"]",
")",
",",
"default",
")"
] |
Recursive get object's attribute. May use dot notation.
>>> class C(object): pass
>>> a = C()
>>> a.b = C()
>>> a.b.c = 4
>>> get_attr(a, 'b.c')
4
>>> get_attr(a, 'b.c.y', None)
>>> get_attr(a, 'b.c.y', 1)
1
|
[
"Recursive",
"get",
"object",
"s",
"attribute",
".",
"May",
"use",
"dot",
"notation",
"."
] |
04625a4b67c1ad01e5d38faa3093828b360d4a98
|
https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/bitcaster/utils/language.py#L32-L51
|
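Note: get_attr in the row above resolves dotted attribute paths recursively. A small usage sketch, repeating the function body from the row so the snippet runs on its own; the class and values below are only a demo.

def get_attr(obj, attr, default=None):
    # Recursively resolve dotted attribute paths such as 'b.c'.
    if '.' not in attr:
        return getattr(obj, attr, default)
    else:
        L = attr.split('.')
        return get_attr(getattr(obj, L[0], default), '.'.join(L[1:]), default)

class C(object):
    pass

a = C()
a.b = C()
a.b.c = 4
print(get_attr(a, 'b.c'))       # 4
print(get_attr(a, 'b.c.y', 1))  # missing leaf falls back to the default: 1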
16,849
|
bitcaster-io/bitcaster
|
src/bitcaster/web/templatetags/bc_assets.py
|
asset
|
def asset(path):
"""
Join the given path with the STATIC_URL setting.
Usage::
{% static path [as varname] %}
Examples::
{% static "myapp/css/base.css" %}
{% static variable_with_path %}
{% static "myapp/css/base.css" as admin_base_css %}
{% static variable_with_path as varname %}
"""
commit = bitcaster.get_full_version()
return mark_safe('{0}?{1}'.format(_static(path), commit))
|
python
|
def asset(path):
"""
Join the given path with the STATIC_URL setting.
Usage::
{% static path [as varname] %}
Examples::
{% static "myapp/css/base.css" %}
{% static variable_with_path %}
{% static "myapp/css/base.css" as admin_base_css %}
{% static variable_with_path as varname %}
"""
commit = bitcaster.get_full_version()
return mark_safe('{0}?{1}'.format(_static(path), commit))
|
[
"def",
"asset",
"(",
"path",
")",
":",
"commit",
"=",
"bitcaster",
".",
"get_full_version",
"(",
")",
"return",
"mark_safe",
"(",
"'{0}?{1}'",
".",
"format",
"(",
"_static",
"(",
"path",
")",
",",
"commit",
")",
")"
] |
Join the given path with the STATIC_URL setting.
Usage::
{% static path [as varname] %}
Examples::
{% static "myapp/css/base.css" %}
{% static variable_with_path %}
{% static "myapp/css/base.css" as admin_base_css %}
{% static variable_with_path as varname %}
|
[
"Join",
"the",
"given",
"path",
"with",
"the",
"STATIC_URL",
"setting",
"."
] |
04625a4b67c1ad01e5d38faa3093828b360d4a98
|
https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/bitcaster/web/templatetags/bc_assets.py#L19-L35
|
16,850
|
bitcaster-io/bitcaster
|
src/bitcaster/utils/wsgi.py
|
get_client_ip
|
def get_client_ip(request):
"""
Naively yank the first IP address in an X-Forwarded-For header
and assume this is correct.
Note: Don't use this in security sensitive situations since this
value may be forged from a client.
"""
try:
return request.META['HTTP_X_FORWARDED_FOR'].split(',')[0].strip()
except (KeyError, IndexError):
return request.META.get('REMOTE_ADDR')
|
python
|
def get_client_ip(request):
"""
Naively yank the first IP address in an X-Forwarded-For header
and assume this is correct.
Note: Don't use this in security sensitive situations since this
value may be forged from a client.
"""
try:
return request.META['HTTP_X_FORWARDED_FOR'].split(',')[0].strip()
except (KeyError, IndexError):
return request.META.get('REMOTE_ADDR')
|
[
"def",
"get_client_ip",
"(",
"request",
")",
":",
"try",
":",
"return",
"request",
".",
"META",
"[",
"'HTTP_X_FORWARDED_FOR'",
"]",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"except",
"(",
"KeyError",
",",
"IndexError",
")",
":",
"return",
"request",
".",
"META",
".",
"get",
"(",
"'REMOTE_ADDR'",
")"
] |
Naively yank the first IP address in an X-Forwarded-For header
and assume this is correct.
Note: Don't use this in security sensitive situations since this
value may be forged from a client.
|
[
"Naively",
"yank",
"the",
"first",
"IP",
"address",
"in",
"an",
"X",
"-",
"Forwarded",
"-",
"For",
"header",
"and",
"assume",
"this",
"is",
"correct",
"."
] |
04625a4b67c1ad01e5d38faa3093828b360d4a98
|
https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/bitcaster/utils/wsgi.py#L6-L17
|
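Note: get_client_ip in the row above trusts the first entry of X-Forwarded-For and falls back to REMOTE_ADDR. A sketch with a stand-in request object; SimpleNamespace replaces a Django HttpRequest purely for illustration, and the addresses are documentation examples.

from types import SimpleNamespace

def get_client_ip(request):
    # First address in X-Forwarded-For, else the direct peer address.
    try:
        return request.META['HTTP_X_FORWARDED_FOR'].split(',')[0].strip()
    except (KeyError, IndexError):
        return request.META.get('REMOTE_ADDR')

proxied = SimpleNamespace(META={'HTTP_X_FORWARDED_FOR': '203.0.113.7, 10.0.0.1',
                                'REMOTE_ADDR': '10.0.0.1'})
direct = SimpleNamespace(META={'REMOTE_ADDR': '192.0.2.5'})
print(get_client_ip(proxied))  # 203.0.113.7
print(get_client_ip(direct))   # 192.0.2.5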
16,851
|
bitcaster-io/bitcaster
|
src/tweepy/api.py
|
API._pack_image
|
def _pack_image(filename, max_size, form_field='image', f=None):
"""Pack image from file into multipart-formdata post body"""
# image must be less than 700kb in size
if f is None:
try:
if os.path.getsize(filename) > (max_size * 1024):
raise TweepError('File is too big, must be less than %skb.' % max_size)
except os.error as e:
raise TweepError('Unable to access file: %s' % e.strerror)
# build the mulitpart-formdata body
fp = open(filename, 'rb')
else:
f.seek(0, 2) # Seek to end of file
if f.tell() > (max_size * 1024):
raise TweepError('File is too big, must be less than %skb.' % max_size)
f.seek(0) # Reset to beginning of file
fp = f
# image must be gif, jpeg, or png
file_type = mimetypes.guess_type(filename)
if file_type is None:
raise TweepError('Could not determine file type')
file_type = file_type[0]
if file_type not in ['image/gif', 'image/jpeg', 'image/png']:
raise TweepError('Invalid file type for image: %s' % file_type)
if isinstance(filename, six.text_type):
filename = filename.encode('utf-8')
BOUNDARY = b'Tw3ePy'
body = []
body.append(b'--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="{0}";'
' filename="{1}"'.format(form_field, filename)
.encode('utf-8'))
body.append('Content-Type: {0}'.format(file_type).encode('utf-8'))
body.append(b'')
body.append(fp.read())
body.append(b'--' + BOUNDARY + b'--')
body.append(b'')
fp.close()
body = b'\r\n'.join(body)
# build headers
headers = {
'Content-Type': 'multipart/form-data; boundary=Tw3ePy',
'Content-Length': str(len(body))
}
return headers, body
|
python
|
def _pack_image(filename, max_size, form_field='image', f=None):
"""Pack image from file into multipart-formdata post body"""
# image must be less than 700kb in size
if f is None:
try:
if os.path.getsize(filename) > (max_size * 1024):
raise TweepError('File is too big, must be less than %skb.' % max_size)
except os.error as e:
raise TweepError('Unable to access file: %s' % e.strerror)
# build the mulitpart-formdata body
fp = open(filename, 'rb')
else:
f.seek(0, 2) # Seek to end of file
if f.tell() > (max_size * 1024):
raise TweepError('File is too big, must be less than %skb.' % max_size)
f.seek(0) # Reset to beginning of file
fp = f
# image must be gif, jpeg, or png
file_type = mimetypes.guess_type(filename)
if file_type is None:
raise TweepError('Could not determine file type')
file_type = file_type[0]
if file_type not in ['image/gif', 'image/jpeg', 'image/png']:
raise TweepError('Invalid file type for image: %s' % file_type)
if isinstance(filename, six.text_type):
filename = filename.encode('utf-8')
BOUNDARY = b'Tw3ePy'
body = []
body.append(b'--' + BOUNDARY)
body.append('Content-Disposition: form-data; name="{0}";'
' filename="{1}"'.format(form_field, filename)
.encode('utf-8'))
body.append('Content-Type: {0}'.format(file_type).encode('utf-8'))
body.append(b'')
body.append(fp.read())
body.append(b'--' + BOUNDARY + b'--')
body.append(b'')
fp.close()
body = b'\r\n'.join(body)
# build headers
headers = {
'Content-Type': 'multipart/form-data; boundary=Tw3ePy',
'Content-Length': str(len(body))
}
return headers, body
|
[
"def",
"_pack_image",
"(",
"filename",
",",
"max_size",
",",
"form_field",
"=",
"'image'",
",",
"f",
"=",
"None",
")",
":",
"# image must be less than 700kb in size",
"if",
"f",
"is",
"None",
":",
"try",
":",
"if",
"os",
".",
"path",
".",
"getsize",
"(",
"filename",
")",
">",
"(",
"max_size",
"*",
"1024",
")",
":",
"raise",
"TweepError",
"(",
"'File is too big, must be less than %skb.'",
"%",
"max_size",
")",
"except",
"os",
".",
"error",
"as",
"e",
":",
"raise",
"TweepError",
"(",
"'Unable to access file: %s'",
"%",
"e",
".",
"strerror",
")",
"# build the mulitpart-formdata body",
"fp",
"=",
"open",
"(",
"filename",
",",
"'rb'",
")",
"else",
":",
"f",
".",
"seek",
"(",
"0",
",",
"2",
")",
"# Seek to end of file",
"if",
"f",
".",
"tell",
"(",
")",
">",
"(",
"max_size",
"*",
"1024",
")",
":",
"raise",
"TweepError",
"(",
"'File is too big, must be less than %skb.'",
"%",
"max_size",
")",
"f",
".",
"seek",
"(",
"0",
")",
"# Reset to beginning of file",
"fp",
"=",
"f",
"# image must be gif, jpeg, or png",
"file_type",
"=",
"mimetypes",
".",
"guess_type",
"(",
"filename",
")",
"if",
"file_type",
"is",
"None",
":",
"raise",
"TweepError",
"(",
"'Could not determine file type'",
")",
"file_type",
"=",
"file_type",
"[",
"0",
"]",
"if",
"file_type",
"not",
"in",
"[",
"'image/gif'",
",",
"'image/jpeg'",
",",
"'image/png'",
"]",
":",
"raise",
"TweepError",
"(",
"'Invalid file type for image: %s'",
"%",
"file_type",
")",
"if",
"isinstance",
"(",
"filename",
",",
"six",
".",
"text_type",
")",
":",
"filename",
"=",
"filename",
".",
"encode",
"(",
"'utf-8'",
")",
"BOUNDARY",
"=",
"b'Tw3ePy'",
"body",
"=",
"[",
"]",
"body",
".",
"append",
"(",
"b'--'",
"+",
"BOUNDARY",
")",
"body",
".",
"append",
"(",
"'Content-Disposition: form-data; name=\"{0}\";'",
"' filename=\"{1}\"'",
".",
"format",
"(",
"form_field",
",",
"filename",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"body",
".",
"append",
"(",
"'Content-Type: {0}'",
".",
"format",
"(",
"file_type",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"body",
".",
"append",
"(",
"b''",
")",
"body",
".",
"append",
"(",
"fp",
".",
"read",
"(",
")",
")",
"body",
".",
"append",
"(",
"b'--'",
"+",
"BOUNDARY",
"+",
"b'--'",
")",
"body",
".",
"append",
"(",
"b''",
")",
"fp",
".",
"close",
"(",
")",
"body",
"=",
"b'\\r\\n'",
".",
"join",
"(",
"body",
")",
"# build headers",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'multipart/form-data; boundary=Tw3ePy'",
",",
"'Content-Length'",
":",
"str",
"(",
"len",
"(",
"body",
")",
")",
"}",
"return",
"headers",
",",
"body"
] |
Pack image from file into multipart-formdata post body
|
[
"Pack",
"image",
"from",
"file",
"into",
"multipart",
"-",
"formdata",
"post",
"body"
] |
04625a4b67c1ad01e5d38faa3093828b360d4a98
|
https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/tweepy/api.py#L1344-L1394
|
16,852
|
bitcaster-io/bitcaster
|
src/bitcaster/web/templatetags/bitcaster.py
|
channel_submit_row
|
def channel_submit_row(context):
"""
Display the row of buttons for delete and save.
"""
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
show_save = context.get('show_save', True)
show_save_and_continue = context.get('show_save_and_continue', True)
can_delete = context['has_delete_permission']
can_add = context['has_add_permission']
can_change = context['has_change_permission']
ctx = Context(context)
ctx.update({
'show_delete_link': (not is_popup and
can_delete and
change and
context.get('show_delete', True)
),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': (can_add and
not is_popup and
(not save_as or context['add'])
),
'show_save_and_continue': (not is_popup and can_change and show_save_and_continue),
'show_save': show_save,
})
return ctx
|
python
|
def channel_submit_row(context):
"""
Display the row of buttons for delete and save.
"""
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
show_save = context.get('show_save', True)
show_save_and_continue = context.get('show_save_and_continue', True)
can_delete = context['has_delete_permission']
can_add = context['has_add_permission']
can_change = context['has_change_permission']
ctx = Context(context)
ctx.update({
'show_delete_link': (not is_popup and
can_delete and
change and
context.get('show_delete', True)
),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': (can_add and
not is_popup and
(not save_as or context['add'])
),
'show_save_and_continue': (not is_popup and can_change and show_save_and_continue),
'show_save': show_save,
})
return ctx
|
[
"def",
"channel_submit_row",
"(",
"context",
")",
":",
"change",
"=",
"context",
"[",
"'change'",
"]",
"is_popup",
"=",
"context",
"[",
"'is_popup'",
"]",
"save_as",
"=",
"context",
"[",
"'save_as'",
"]",
"show_save",
"=",
"context",
".",
"get",
"(",
"'show_save'",
",",
"True",
")",
"show_save_and_continue",
"=",
"context",
".",
"get",
"(",
"'show_save_and_continue'",
",",
"True",
")",
"can_delete",
"=",
"context",
"[",
"'has_delete_permission'",
"]",
"can_add",
"=",
"context",
"[",
"'has_add_permission'",
"]",
"can_change",
"=",
"context",
"[",
"'has_change_permission'",
"]",
"ctx",
"=",
"Context",
"(",
"context",
")",
"ctx",
".",
"update",
"(",
"{",
"'show_delete_link'",
":",
"(",
"not",
"is_popup",
"and",
"can_delete",
"and",
"change",
"and",
"context",
".",
"get",
"(",
"'show_delete'",
",",
"True",
")",
")",
",",
"'show_save_as_new'",
":",
"not",
"is_popup",
"and",
"change",
"and",
"save_as",
",",
"'show_save_and_add_another'",
":",
"(",
"can_add",
"and",
"not",
"is_popup",
"and",
"(",
"not",
"save_as",
"or",
"context",
"[",
"'add'",
"]",
")",
")",
",",
"'show_save_and_continue'",
":",
"(",
"not",
"is_popup",
"and",
"can_change",
"and",
"show_save_and_continue",
")",
",",
"'show_save'",
":",
"show_save",
",",
"}",
")",
"return",
"ctx"
] |
Display the row of buttons for delete and save.
|
[
"Display",
"the",
"row",
"of",
"buttons",
"for",
"delete",
"and",
"save",
"."
] |
04625a4b67c1ad01e5d38faa3093828b360d4a98
|
https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/bitcaster/web/templatetags/bitcaster.py#L77-L106
|
16,853
|
bitcaster-io/bitcaster
|
src/bitcaster/social_auth.py
|
BitcasterStrategy.get_setting
|
def get_setting(self, name):
notfound = object()
"get configuration from 'constance.config' first "
value = getattr(config, name, notfound)
if name.endswith('_WHITELISTED_DOMAINS'):
if value:
return value.split(',')
else:
return []
if value is notfound:
value = getattr(settings, name)
# Force text on URL named settings that are instance of Promise
if name.endswith('_URL'):
if isinstance(value, Promise):
value = force_text(value)
value = resolve_url(value)
return value
|
python
|
def get_setting(self, name):
notfound = object()
"get configuration from 'constance.config' first "
value = getattr(config, name, notfound)
if name.endswith('_WHITELISTED_DOMAINS'):
if value:
return value.split(',')
else:
return []
if value is notfound:
value = getattr(settings, name)
# Force text on URL named settings that are instance of Promise
if name.endswith('_URL'):
if isinstance(value, Promise):
value = force_text(value)
value = resolve_url(value)
return value
|
[
"def",
"get_setting",
"(",
"self",
",",
"name",
")",
":",
"notfound",
"=",
"object",
"(",
")",
"value",
"=",
"getattr",
"(",
"config",
",",
"name",
",",
"notfound",
")",
"if",
"name",
".",
"endswith",
"(",
"'_WHITELISTED_DOMAINS'",
")",
":",
"if",
"value",
":",
"return",
"value",
".",
"split",
"(",
"','",
")",
"else",
":",
"return",
"[",
"]",
"if",
"value",
"is",
"notfound",
":",
"value",
"=",
"getattr",
"(",
"settings",
",",
"name",
")",
"# Force text on URL named settings that are instance of Promise",
"if",
"name",
".",
"endswith",
"(",
"'_URL'",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"Promise",
")",
":",
"value",
"=",
"force_text",
"(",
"value",
")",
"value",
"=",
"resolve_url",
"(",
"value",
")",
"return",
"value"
] |
get configuration from 'constance.config' first
|
[
"get",
"configuration",
"from",
"constance",
".",
"config",
"first"
] |
04625a4b67c1ad01e5d38faa3093828b360d4a98
|
https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/bitcaster/social_auth.py#L78-L95
|
16,854
|
bitcaster-io/bitcaster
|
src/bitcaster/messages.py
|
Wrapper.debug
|
def debug(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``DEBUG`` level."""
add(self.target_name, request, constants.DEBUG, message, extra_tags=extra_tags,
fail_silently=fail_silently)
|
python
|
def debug(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``DEBUG`` level."""
add(self.target_name, request, constants.DEBUG, message, extra_tags=extra_tags,
fail_silently=fail_silently)
|
[
"def",
"debug",
"(",
"self",
",",
"request",
",",
"message",
",",
"extra_tags",
"=",
"''",
",",
"fail_silently",
"=",
"False",
")",
":",
"add",
"(",
"self",
".",
"target_name",
",",
"request",
",",
"constants",
".",
"DEBUG",
",",
"message",
",",
"extra_tags",
"=",
"extra_tags",
",",
"fail_silently",
"=",
"fail_silently",
")"
] |
Add a message with the ``DEBUG`` level.
|
[
"Add",
"a",
"message",
"with",
"the",
"DEBUG",
"level",
"."
] |
04625a4b67c1ad01e5d38faa3093828b360d4a98
|
https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/bitcaster/messages.py#L54-L57
|
16,855
|
bitcaster-io/bitcaster
|
src/bitcaster/messages.py
|
Wrapper.info
|
def info(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``INFO`` level."""
add(self.target_name,
request, constants.INFO, message, extra_tags=extra_tags,
fail_silently=fail_silently)
|
python
|
def info(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``INFO`` level."""
add(self.target_name,
request, constants.INFO, message, extra_tags=extra_tags,
fail_silently=fail_silently)
|
[
"def",
"info",
"(",
"self",
",",
"request",
",",
"message",
",",
"extra_tags",
"=",
"''",
",",
"fail_silently",
"=",
"False",
")",
":",
"add",
"(",
"self",
".",
"target_name",
",",
"request",
",",
"constants",
".",
"INFO",
",",
"message",
",",
"extra_tags",
"=",
"extra_tags",
",",
"fail_silently",
"=",
"fail_silently",
")"
] |
Add a message with the ``INFO`` level.
|
[
"Add",
"a",
"message",
"with",
"the",
"INFO",
"level",
"."
] |
04625a4b67c1ad01e5d38faa3093828b360d4a98
|
https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/bitcaster/messages.py#L59-L63
|
16,856
|
bitcaster-io/bitcaster
|
src/bitcaster/messages.py
|
Wrapper.success
|
def success(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``SUCCESS`` level."""
add(self.target_name, request, constants.SUCCESS, message, extra_tags=extra_tags,
fail_silently=fail_silently)
|
python
|
def success(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``SUCCESS`` level."""
add(self.target_name, request, constants.SUCCESS, message, extra_tags=extra_tags,
fail_silently=fail_silently)
|
[
"def",
"success",
"(",
"self",
",",
"request",
",",
"message",
",",
"extra_tags",
"=",
"''",
",",
"fail_silently",
"=",
"False",
")",
":",
"add",
"(",
"self",
".",
"target_name",
",",
"request",
",",
"constants",
".",
"SUCCESS",
",",
"message",
",",
"extra_tags",
"=",
"extra_tags",
",",
"fail_silently",
"=",
"fail_silently",
")"
] |
Add a message with the ``SUCCESS`` level.
|
[
"Add",
"a",
"message",
"with",
"the",
"SUCCESS",
"level",
"."
] |
04625a4b67c1ad01e5d38faa3093828b360d4a98
|
https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/bitcaster/messages.py#L65-L68
|
16,857
|
bitcaster-io/bitcaster
|
src/bitcaster/messages.py
|
Wrapper.warning
|
def warning(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``WARNING`` level."""
add(self.target_name, request, constants.WARNING, message, extra_tags=extra_tags,
fail_silently=fail_silently)
|
python
|
def warning(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``WARNING`` level."""
add(self.target_name, request, constants.WARNING, message, extra_tags=extra_tags,
fail_silently=fail_silently)
|
[
"def",
"warning",
"(",
"self",
",",
"request",
",",
"message",
",",
"extra_tags",
"=",
"''",
",",
"fail_silently",
"=",
"False",
")",
":",
"add",
"(",
"self",
".",
"target_name",
",",
"request",
",",
"constants",
".",
"WARNING",
",",
"message",
",",
"extra_tags",
"=",
"extra_tags",
",",
"fail_silently",
"=",
"fail_silently",
")"
] |
Add a message with the ``WARNING`` level.
|
[
"Add",
"a",
"message",
"with",
"the",
"WARNING",
"level",
"."
] |
04625a4b67c1ad01e5d38faa3093828b360d4a98
|
https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/bitcaster/messages.py#L70-L73
|
16,858
|
bitcaster-io/bitcaster
|
src/bitcaster/messages.py
|
Wrapper.error
|
def error(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``ERROR`` level."""
add(self.target_name, request, constants.ERROR, message, extra_tags=extra_tags,
fail_silently=fail_silently)
|
python
|
def error(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``ERROR`` level."""
add(self.target_name, request, constants.ERROR, message, extra_tags=extra_tags,
fail_silently=fail_silently)
|
[
"def",
"error",
"(",
"self",
",",
"request",
",",
"message",
",",
"extra_tags",
"=",
"''",
",",
"fail_silently",
"=",
"False",
")",
":",
"add",
"(",
"self",
".",
"target_name",
",",
"request",
",",
"constants",
".",
"ERROR",
",",
"message",
",",
"extra_tags",
"=",
"extra_tags",
",",
"fail_silently",
"=",
"fail_silently",
")"
] |
Add a message with the ``ERROR`` level.
|
[
"Add",
"a",
"message",
"with",
"the",
"ERROR",
"level",
"."
] |
04625a4b67c1ad01e5d38faa3093828b360d4a98
|
https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/bitcaster/messages.py#L75-L78
|
16,859
|
bread-and-pepper/django-userena
|
userena/views.py
|
signup
|
def signup(request, signup_form=SignupForm,
template_name='userena/signup_form.html', success_url=None,
extra_context=None):
"""
Signup of an account.
Signup requiring a username, email and password. After signup a user gets
an email with an activation link used to activate their account. After
successful signup redirects to ``success_url``.
:param signup_form:
Form that will be used to sign a user. Defaults to userena's
:class:`SignupForm`.
:param template_name:
String containing the template name that will be used to display the
signup form. Defaults to ``userena/signup_form.html``.
:param success_url:
String containing the URI which should be redirected to after a
successful signup. If not supplied will redirect to
``userena_signup_complete`` view.
:param extra_context:
Dictionary containing variables which are added to the template
context. Defaults to a dictionary with a ``form`` key containing the
``signup_form``.
**Context**
``form``
Form supplied by ``signup_form``.
"""
# If signup is disabled, return 403
if userena_settings.USERENA_DISABLE_SIGNUP:
raise PermissionDenied
# If no usernames are wanted and the default form is used, fallback to the
# default form that doesn't display to enter the username.
if userena_settings.USERENA_WITHOUT_USERNAMES and (signup_form == SignupForm):
signup_form = SignupFormOnlyEmail
form = signup_form()
if request.method == 'POST':
form = signup_form(request.POST, request.FILES)
if form.is_valid():
user = form.save()
# Send the signup complete signal
userena_signals.signup_complete.send(sender=None,
user=user)
if success_url: redirect_to = success_url
else: redirect_to = reverse('userena_signup_complete',
kwargs={'username': user.username})
# A new signed user should logout the old one.
if request.user.is_authenticated():
logout(request)
if (userena_settings.USERENA_SIGNIN_AFTER_SIGNUP and
not userena_settings.USERENA_ACTIVATION_REQUIRED):
user = authenticate(identification=user.email, check_password=False)
login(request, user)
return redirect(redirect_to)
if not extra_context: extra_context = dict()
extra_context['form'] = form
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
|
python
|
def signup(request, signup_form=SignupForm,
template_name='userena/signup_form.html', success_url=None,
extra_context=None):
"""
Signup of an account.
Signup requiring a username, email and password. After signup a user gets
an email with an activation link used to activate their account. After
successful signup redirects to ``success_url``.
:param signup_form:
Form that will be used to sign a user. Defaults to userena's
:class:`SignupForm`.
:param template_name:
String containing the template name that will be used to display the
signup form. Defaults to ``userena/signup_form.html``.
:param success_url:
String containing the URI which should be redirected to after a
successful signup. If not supplied will redirect to
``userena_signup_complete`` view.
:param extra_context:
Dictionary containing variables which are added to the template
context. Defaults to a dictionary with a ``form`` key containing the
``signup_form``.
**Context**
``form``
Form supplied by ``signup_form``.
"""
# If signup is disabled, return 403
if userena_settings.USERENA_DISABLE_SIGNUP:
raise PermissionDenied
# If no usernames are wanted and the default form is used, fallback to the
# default form that doesn't display to enter the username.
if userena_settings.USERENA_WITHOUT_USERNAMES and (signup_form == SignupForm):
signup_form = SignupFormOnlyEmail
form = signup_form()
if request.method == 'POST':
form = signup_form(request.POST, request.FILES)
if form.is_valid():
user = form.save()
# Send the signup complete signal
userena_signals.signup_complete.send(sender=None,
user=user)
if success_url: redirect_to = success_url
else: redirect_to = reverse('userena_signup_complete',
kwargs={'username': user.username})
# A new signed user should logout the old one.
if request.user.is_authenticated():
logout(request)
if (userena_settings.USERENA_SIGNIN_AFTER_SIGNUP and
not userena_settings.USERENA_ACTIVATION_REQUIRED):
user = authenticate(identification=user.email, check_password=False)
login(request, user)
return redirect(redirect_to)
if not extra_context: extra_context = dict()
extra_context['form'] = form
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
|
[
"def",
"signup",
"(",
"request",
",",
"signup_form",
"=",
"SignupForm",
",",
"template_name",
"=",
"'userena/signup_form.html'",
",",
"success_url",
"=",
"None",
",",
"extra_context",
"=",
"None",
")",
":",
"# If signup is disabled, return 403",
"if",
"userena_settings",
".",
"USERENA_DISABLE_SIGNUP",
":",
"raise",
"PermissionDenied",
"# If no usernames are wanted and the default form is used, fallback to the",
"# default form that doesn't display to enter the username.",
"if",
"userena_settings",
".",
"USERENA_WITHOUT_USERNAMES",
"and",
"(",
"signup_form",
"==",
"SignupForm",
")",
":",
"signup_form",
"=",
"SignupFormOnlyEmail",
"form",
"=",
"signup_form",
"(",
")",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"form",
"=",
"signup_form",
"(",
"request",
".",
"POST",
",",
"request",
".",
"FILES",
")",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"user",
"=",
"form",
".",
"save",
"(",
")",
"# Send the signup complete signal",
"userena_signals",
".",
"signup_complete",
".",
"send",
"(",
"sender",
"=",
"None",
",",
"user",
"=",
"user",
")",
"if",
"success_url",
":",
"redirect_to",
"=",
"success_url",
"else",
":",
"redirect_to",
"=",
"reverse",
"(",
"'userena_signup_complete'",
",",
"kwargs",
"=",
"{",
"'username'",
":",
"user",
".",
"username",
"}",
")",
"# A new signed user should logout the old one.",
"if",
"request",
".",
"user",
".",
"is_authenticated",
"(",
")",
":",
"logout",
"(",
"request",
")",
"if",
"(",
"userena_settings",
".",
"USERENA_SIGNIN_AFTER_SIGNUP",
"and",
"not",
"userena_settings",
".",
"USERENA_ACTIVATION_REQUIRED",
")",
":",
"user",
"=",
"authenticate",
"(",
"identification",
"=",
"user",
".",
"email",
",",
"check_password",
"=",
"False",
")",
"login",
"(",
"request",
",",
"user",
")",
"return",
"redirect",
"(",
"redirect_to",
")",
"if",
"not",
"extra_context",
":",
"extra_context",
"=",
"dict",
"(",
")",
"extra_context",
"[",
"'form'",
"]",
"=",
"form",
"return",
"ExtraContextTemplateView",
".",
"as_view",
"(",
"template_name",
"=",
"template_name",
",",
"extra_context",
"=",
"extra_context",
")",
"(",
"request",
")"
] |
Signup of an account.
Signup requiring a username, email and password. After signup a user gets
an email with an activation link used to activate their account. After
successful signup redirects to ``success_url``.
:param signup_form:
Form that will be used to sign a user. Defaults to userena's
:class:`SignupForm`.
:param template_name:
String containing the template name that will be used to display the
signup form. Defaults to ``userena/signup_form.html``.
:param success_url:
String containing the URI which should be redirected to after a
successful signup. If not supplied will redirect to
``userena_signup_complete`` view.
:param extra_context:
Dictionary containing variables which are added to the template
context. Defaults to a dictionary with a ``form`` key containing the
``signup_form``.
**Context**
``form``
Form supplied by ``signup_form``.
|
[
"Signup",
"of",
"an",
"account",
"."
] |
7dfb3d5d148127e32f217a62096d507266a3a83c
|
https://github.com/bread-and-pepper/django-userena/blob/7dfb3d5d148127e32f217a62096d507266a3a83c/userena/views.py#L73-L146
|
16,860
|
openvax/mhcflurry
|
mhcflurry/hyperparameters.py
|
HyperparameterDefaults.extend
|
def extend(self, other):
"""
Return a new HyperparameterDefaults instance containing the
hyperparameters from the current instance combined with
those from other.
It is an error if self and other have any hyperparameters in
common.
"""
overlap = [key for key in other.defaults if key in self.defaults]
if overlap:
raise ValueError(
"Duplicate hyperparameter(s): %s" % " ".join(overlap))
new = dict(self.defaults)
new.update(other.defaults)
return HyperparameterDefaults(**new)
|
python
|
def extend(self, other):
"""
Return a new HyperparameterDefaults instance containing the
hyperparameters from the current instance combined with
those from other.
It is an error if self and other have any hyperparameters in
common.
"""
overlap = [key for key in other.defaults if key in self.defaults]
if overlap:
raise ValueError(
"Duplicate hyperparameter(s): %s" % " ".join(overlap))
new = dict(self.defaults)
new.update(other.defaults)
return HyperparameterDefaults(**new)
|
[
"def",
"extend",
"(",
"self",
",",
"other",
")",
":",
"overlap",
"=",
"[",
"key",
"for",
"key",
"in",
"other",
".",
"defaults",
"if",
"key",
"in",
"self",
".",
"defaults",
"]",
"if",
"overlap",
":",
"raise",
"ValueError",
"(",
"\"Duplicate hyperparameter(s): %s\"",
"%",
"\" \"",
".",
"join",
"(",
"overlap",
")",
")",
"new",
"=",
"dict",
"(",
"self",
".",
"defaults",
")",
"new",
".",
"update",
"(",
"other",
".",
"defaults",
")",
"return",
"HyperparameterDefaults",
"(",
"*",
"*",
"new",
")"
] |
Return a new HyperparameterDefaults instance containing the
hyperparameters from the current instance combined with
those from other.
It is an error if self and other have any hyperparameters in
common.
|
[
"Return",
"a",
"new",
"HyperparameterDefaults",
"instance",
"containing",
"the",
"hyperparameters",
"from",
"the",
"current",
"instance",
"combined",
"with",
"those",
"from",
"other",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/hyperparameters.py#L22-L37
|
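Note: HyperparameterDefaults.extend in the row above merges two sets of defaults and rejects overlapping keys. A standalone sketch of that merge check using plain dicts; the parameter names below are illustrative, and the class itself is not reproduced.

def extend_defaults(defaults, other):
    # Refuse to merge if any hyperparameter is defined in both dicts.
    overlap = [key for key in other if key in defaults]
    if overlap:
        raise ValueError("Duplicate hyperparameter(s): %s" % " ".join(overlap))
    merged = dict(defaults)
    merged.update(other)
    return merged

base = {"layer_sizes": [16], "activation": "tanh"}
extra = {"dropout_probability": 0.0}
print(extend_defaults(base, extra))
# extend_defaults(base, {"activation": "relu"}) would raise ValueError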
16,861
|
openvax/mhcflurry
|
mhcflurry/hyperparameters.py
|
HyperparameterDefaults.with_defaults
|
def with_defaults(self, obj):
"""
Given a dict of hyperparameter settings, return a dict containing
those settings augmented by the defaults for any keys missing from
the dict.
"""
self.check_valid_keys(obj)
obj = dict(obj)
for (key, value) in self.defaults.items():
if key not in obj:
obj[key] = value
return obj
|
python
|
def with_defaults(self, obj):
"""
Given a dict of hyperparameter settings, return a dict containing
those settings augmented by the defaults for any keys missing from
the dict.
"""
self.check_valid_keys(obj)
obj = dict(obj)
for (key, value) in self.defaults.items():
if key not in obj:
obj[key] = value
return obj
|
[
"def",
"with_defaults",
"(",
"self",
",",
"obj",
")",
":",
"self",
".",
"check_valid_keys",
"(",
"obj",
")",
"obj",
"=",
"dict",
"(",
"obj",
")",
"for",
"(",
"key",
",",
"value",
")",
"in",
"self",
".",
"defaults",
".",
"items",
"(",
")",
":",
"if",
"key",
"not",
"in",
"obj",
":",
"obj",
"[",
"key",
"]",
"=",
"value",
"return",
"obj"
] |
Given a dict of hyperparameter settings, return a dict containing
those settings augmented by the defaults for any keys missing from
the dict.
|
[
"Given",
"a",
"dict",
"of",
"hyperparameter",
"settings",
"return",
"a",
"dict",
"containing",
"those",
"settings",
"augmented",
"by",
"the",
"defaults",
"for",
"any",
"keys",
"missing",
"from",
"the",
"dict",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/hyperparameters.py#L39-L50
|
16,862
|
openvax/mhcflurry
|
mhcflurry/hyperparameters.py
|
HyperparameterDefaults.subselect
|
def subselect(self, obj):
"""
Filter a dict of hyperparameter settings to only those keys defined
in this HyperparameterDefaults .
"""
return dict(
(key, value) for (key, value)
in obj.items()
if key in self.defaults)
|
python
|
def subselect(self, obj):
"""
Filter a dict of hyperparameter settings to only those keys defined
in this HyperparameterDefaults .
"""
return dict(
(key, value) for (key, value)
in obj.items()
if key in self.defaults)
|
[
"def",
"subselect",
"(",
"self",
",",
"obj",
")",
":",
"return",
"dict",
"(",
"(",
"key",
",",
"value",
")",
"for",
"(",
"key",
",",
"value",
")",
"in",
"obj",
".",
"items",
"(",
")",
"if",
"key",
"in",
"self",
".",
"defaults",
")"
] |
Filter a dict of hyperparameter settings to only those keys defined
in this HyperparameterDefaults .
|
[
"Filter",
"a",
"dict",
"of",
"hyperparameter",
"settings",
"to",
"only",
"those",
"keys",
"defined",
"in",
"this",
"HyperparameterDefaults",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/hyperparameters.py#L52-L60
|
16,863
|
openvax/mhcflurry
|
mhcflurry/hyperparameters.py
|
HyperparameterDefaults.check_valid_keys
|
def check_valid_keys(self, obj):
"""
Given a dict of hyperparameter settings, throw an exception if any
keys are not defined in this HyperparameterDefaults instance.
"""
invalid_keys = [
x for x in obj if x not in self.defaults
]
if invalid_keys:
raise ValueError(
"No such model parameters: %s. Valid parameters are: %s"
% (" ".join(invalid_keys),
" ".join(self.defaults)))
|
python
|
def check_valid_keys(self, obj):
"""
Given a dict of hyperparameter settings, throw an exception if any
keys are not defined in this HyperparameterDefaults instance.
"""
invalid_keys = [
x for x in obj if x not in self.defaults
]
if invalid_keys:
raise ValueError(
"No such model parameters: %s. Valid parameters are: %s"
% (" ".join(invalid_keys),
" ".join(self.defaults)))
|
[
"def",
"check_valid_keys",
"(",
"self",
",",
"obj",
")",
":",
"invalid_keys",
"=",
"[",
"x",
"for",
"x",
"in",
"obj",
"if",
"x",
"not",
"in",
"self",
".",
"defaults",
"]",
"if",
"invalid_keys",
":",
"raise",
"ValueError",
"(",
"\"No such model parameters: %s. Valid parameters are: %s\"",
"%",
"(",
"\" \"",
".",
"join",
"(",
"invalid_keys",
")",
",",
"\" \"",
".",
"join",
"(",
"self",
".",
"defaults",
")",
")",
")"
] |
Given a dict of hyperparameter settings, throw an exception if any
keys are not defined in this HyperparameterDefaults instance.
|
[
"Given",
"a",
"dict",
"of",
"hyperparameter",
"settings",
"throw",
"an",
"exception",
"if",
"any",
"keys",
"are",
"not",
"defined",
"in",
"this",
"HyperparameterDefaults",
"instance",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/hyperparameters.py#L62-L74
|
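Note: the with_defaults, subselect, and check_valid_keys rows above all operate on a dict of declared defaults. A minimal standalone sketch of that validate-then-fill pattern; the DEFAULTS dict is an illustrative assumption, not the real mhcflurry defaults.

DEFAULTS = {"layer_sizes": [16], "activation": "tanh", "dropout_probability": 0.0}

def check_valid_keys(obj, defaults=DEFAULTS):
    # Reject any setting that has no declared default.
    invalid_keys = [x for x in obj if x not in defaults]
    if invalid_keys:
        raise ValueError(
            "No such model parameters: %s. Valid parameters are: %s"
            % (" ".join(invalid_keys), " ".join(defaults)))

def with_defaults(obj, defaults=DEFAULTS):
    # Fill in every missing key from the declared defaults.
    check_valid_keys(obj, defaults)
    merged = dict(obj)
    for key, value in defaults.items():
        merged.setdefault(key, value)
    return merged

print(with_defaults({"activation": "relu"}))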
16,864
|
openvax/mhcflurry
|
mhcflurry/hyperparameters.py
|
HyperparameterDefaults.models_grid
|
def models_grid(self, **kwargs):
'''
Make a grid of models by taking the cartesian product of all specified
model parameter lists.
Parameters
-----------
The valid kwarg parameters are the entries of this
HyperparameterDefaults instance. Each parameter must be a list
giving the values to search across.
Returns
-----------
list of dict giving the parameters for each model. The length of the
list is the product of the lengths of the input lists.
'''
# Check parameters
self.check_valid_keys(kwargs)
for (key, value) in kwargs.items():
if not isinstance(value, list):
raise ValueError(
"All parameters must be lists, but %s is %s"
% (key, str(type(value))))
# Make models, using defaults.
parameters = dict(
(key, [value]) for (key, value) in self.defaults.items())
parameters.update(kwargs)
parameter_names = list(parameters)
parameter_values = [parameters[name] for name in parameter_names]
models = [
dict(zip(parameter_names, model_values))
for model_values in itertools.product(*parameter_values)
]
return models
|
python
|
def models_grid(self, **kwargs):
'''
Make a grid of models by taking the cartesian product of all specified
model parameter lists.
Parameters
-----------
The valid kwarg parameters are the entries of this
HyperparameterDefaults instance. Each parameter must be a list
giving the values to search across.
Returns
-----------
list of dict giving the parameters for each model. The length of the
list is the product of the lengths of the input lists.
'''
# Check parameters
self.check_valid_keys(kwargs)
for (key, value) in kwargs.items():
if not isinstance(value, list):
raise ValueError(
"All parameters must be lists, but %s is %s"
% (key, str(type(value))))
# Make models, using defaults.
parameters = dict(
(key, [value]) for (key, value) in self.defaults.items())
parameters.update(kwargs)
parameter_names = list(parameters)
parameter_values = [parameters[name] for name in parameter_names]
models = [
dict(zip(parameter_names, model_values))
for model_values in itertools.product(*parameter_values)
]
return models
|
[
"def",
"models_grid",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"# Check parameters",
"self",
".",
"check_valid_keys",
"(",
"kwargs",
")",
"for",
"(",
"key",
",",
"value",
")",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"\"All parameters must be lists, but %s is %s\"",
"%",
"(",
"key",
",",
"str",
"(",
"type",
"(",
"value",
")",
")",
")",
")",
"# Make models, using defaults.",
"parameters",
"=",
"dict",
"(",
"(",
"key",
",",
"[",
"value",
"]",
")",
"for",
"(",
"key",
",",
"value",
")",
"in",
"self",
".",
"defaults",
".",
"items",
"(",
")",
")",
"parameters",
".",
"update",
"(",
"kwargs",
")",
"parameter_names",
"=",
"list",
"(",
"parameters",
")",
"parameter_values",
"=",
"[",
"parameters",
"[",
"name",
"]",
"for",
"name",
"in",
"parameter_names",
"]",
"models",
"=",
"[",
"dict",
"(",
"zip",
"(",
"parameter_names",
",",
"model_values",
")",
")",
"for",
"model_values",
"in",
"itertools",
".",
"product",
"(",
"*",
"parameter_values",
")",
"]",
"return",
"models"
] |
Make a grid of models by taking the cartesian product of all specified
model parameter lists.
Parameters
-----------
The valid kwarg parameters are the entries of this
HyperparameterDefaults instance. Each parameter must be a list
giving the values to search across.
Returns
-----------
list of dict giving the parameters for each model. The length of the
list is the product of the lengths of the input lists.
|
[
"Make",
"a",
"grid",
"of",
"models",
"by",
"taking",
"the",
"cartesian",
"product",
"of",
"all",
"specified",
"model",
"parameter",
"lists",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/hyperparameters.py#L76-L112
|
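Note: models_grid in the row above builds every combination of searched parameter lists with itertools.product. A standalone sketch of that cartesian-product construction over plain dicts; the DEFAULTS dict and searched values are illustrative assumptions.

import itertools

DEFAULTS = {"layer_sizes": [16], "activation": "tanh", "dropout_probability": 0.0}

def models_grid(defaults=DEFAULTS, **kwargs):
    # Every searched parameter must be given as a list of candidate values.
    for key, value in kwargs.items():
        if not isinstance(value, list):
            raise ValueError("All parameters must be lists, but %s is %s"
                             % (key, type(value)))
    # Defaults become single-element lists; searched keys override them.
    parameters = {key: [value] for key, value in defaults.items()}
    parameters.update(kwargs)
    names = list(parameters)
    values = [parameters[name] for name in names]
    # Cartesian product of all candidate lists -> one dict per model.
    return [dict(zip(names, combo)) for combo in itertools.product(*values)]

grid = models_grid(activation=["tanh", "relu"], dropout_probability=[0.0, 0.5])
print(len(grid))  # 2 * 2 = 4 candidate models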
16,865
|
openvax/mhcflurry
|
mhcflurry/allele_encoding.py
|
AlleleEncoding.fixed_length_vector_encoded_sequences
|
def fixed_length_vector_encoded_sequences(self, vector_encoding_name):
"""
Encode alleles.
Parameters
----------
vector_encoding_name : string
How to represent amino acids.
One of "BLOSUM62", "one-hot", etc. Full list of supported vector
encodings is given by available_vector_encodings() in amino_acid.
Returns
-------
numpy.array with shape (num sequences, sequence length, m) where m is
vector_encoding_length(vector_encoding_name)
"""
cache_key = (
"fixed_length_vector_encoding",
vector_encoding_name)
if cache_key not in self.encoding_cache:
index_encoded_matrix = amino_acid.index_encoding(
self.fixed_length_sequences.values,
amino_acid.AMINO_ACID_INDEX)
vector_encoded = amino_acid.fixed_vectors_encoding(
index_encoded_matrix,
amino_acid.ENCODING_DATA_FRAMES[vector_encoding_name])
result = vector_encoded[self.indices]
self.encoding_cache[cache_key] = result
return self.encoding_cache[cache_key]
|
python
|
def fixed_length_vector_encoded_sequences(self, vector_encoding_name):
"""
Encode alleles.
Parameters
----------
vector_encoding_name : string
How to represent amino acids.
One of "BLOSUM62", "one-hot", etc. Full list of supported vector
encodings is given by available_vector_encodings() in amino_acid.
Returns
-------
numpy.array with shape (num sequences, sequence length, m) where m is
vector_encoding_length(vector_encoding_name)
"""
cache_key = (
"fixed_length_vector_encoding",
vector_encoding_name)
if cache_key not in self.encoding_cache:
index_encoded_matrix = amino_acid.index_encoding(
self.fixed_length_sequences.values,
amino_acid.AMINO_ACID_INDEX)
vector_encoded = amino_acid.fixed_vectors_encoding(
index_encoded_matrix,
amino_acid.ENCODING_DATA_FRAMES[vector_encoding_name])
result = vector_encoded[self.indices]
self.encoding_cache[cache_key] = result
return self.encoding_cache[cache_key]
|
[
"def",
"fixed_length_vector_encoded_sequences",
"(",
"self",
",",
"vector_encoding_name",
")",
":",
"cache_key",
"=",
"(",
"\"fixed_length_vector_encoding\"",
",",
"vector_encoding_name",
")",
"if",
"cache_key",
"not",
"in",
"self",
".",
"encoding_cache",
":",
"index_encoded_matrix",
"=",
"amino_acid",
".",
"index_encoding",
"(",
"self",
".",
"fixed_length_sequences",
".",
"values",
",",
"amino_acid",
".",
"AMINO_ACID_INDEX",
")",
"vector_encoded",
"=",
"amino_acid",
".",
"fixed_vectors_encoding",
"(",
"index_encoded_matrix",
",",
"amino_acid",
".",
"ENCODING_DATA_FRAMES",
"[",
"vector_encoding_name",
"]",
")",
"result",
"=",
"vector_encoded",
"[",
"self",
".",
"indices",
"]",
"self",
".",
"encoding_cache",
"[",
"cache_key",
"]",
"=",
"result",
"return",
"self",
".",
"encoding_cache",
"[",
"cache_key",
"]"
] |
Encode alleles.
Parameters
----------
vector_encoding_name : string
How to represent amino acids.
One of "BLOSUM62", "one-hot", etc. Full list of supported vector
encodings is given by available_vector_encodings() in amino_acid.
Returns
-------
numpy.array with shape (num sequences, sequence length, m) where m is
vector_encoding_length(vector_encoding_name)
|
[
"Encode",
"alleles",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/allele_encoding.py#L40-L68
|
16,866
|
openvax/mhcflurry
|
mhcflurry/amino_acid.py
|
index_encoding
|
def index_encoding(sequences, letter_to_index_dict):
"""
Encode a sequence of same-length strings to a matrix of integers of the
same shape. The map from characters to integers is given by
`letter_to_index_dict`.
Given a sequence of `n` strings all of length `k`, return a `k * n` array where
the (`i`, `j`)th element is `letter_to_index_dict[sequence[i][j]]`.
Parameters
----------
sequences : list of length n of strings of length k
letter_to_index_dict : dict : string -> int
Returns
-------
numpy.array of integers with shape (`k`, `n`)
"""
df = pandas.DataFrame(iter(s) for s in sequences)
result = df.replace(letter_to_index_dict)
return result.values
|
python
|
def index_encoding(sequences, letter_to_index_dict):
"""
Encode a sequence of same-length strings to a matrix of integers of the
same shape. The map from characters to integers is given by
`letter_to_index_dict`.
Given a sequence of `n` strings all of length `k`, return a `k * n` array where
the (`i`, `j`)th element is `letter_to_index_dict[sequence[i][j]]`.
Parameters
----------
sequences : list of length n of strings of length k
letter_to_index_dict : dict : string -> int
Returns
-------
numpy.array of integers with shape (`k`, `n`)
"""
df = pandas.DataFrame(iter(s) for s in sequences)
result = df.replace(letter_to_index_dict)
return result.values
|
[
"def",
"index_encoding",
"(",
"sequences",
",",
"letter_to_index_dict",
")",
":",
"df",
"=",
"pandas",
".",
"DataFrame",
"(",
"iter",
"(",
"s",
")",
"for",
"s",
"in",
"sequences",
")",
"result",
"=",
"df",
".",
"replace",
"(",
"letter_to_index_dict",
")",
"return",
"result",
".",
"values"
] |
Encode a sequence of same-length strings to a matrix of integers of the
same shape. The map from characters to integers is given by
`letter_to_index_dict`.
Given a sequence of `n` strings all of length `k`, return a `k * n` array where
the (`i`, `j`)th element is `letter_to_index_dict[sequence[i][j]]`.
Parameters
----------
sequences : list of length n of strings of length k
letter_to_index_dict : dict : string -> int
Returns
-------
numpy.array of integers with shape (`k`, `n`)
|
[
"Encode",
"a",
"sequence",
"of",
"same",
"-",
"length",
"strings",
"to",
"a",
"matrix",
"of",
"integers",
"of",
"the",
"same",
"shape",
".",
"The",
"map",
"from",
"characters",
"to",
"integers",
"is",
"given",
"by",
"letter_to_index_dict",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/amino_acid.py#L110-L130
|
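Note: index_encoding in the row above maps each character of equal-length strings to an integer via a pandas DataFrame replace. A small runnable sketch; the four-letter alphabet is an illustrative stand-in for the full AMINO_ACID_INDEX used by mhcflurry.

import pandas

LETTER_INDEX = {"A": 0, "C": 1, "G": 2, "T": 3}  # illustrative alphabet

def index_encoding(sequences, letter_to_index_dict):
    # One row per sequence, one column per position; letters become ints.
    df = pandas.DataFrame(iter(s) for s in sequences)
    return df.replace(letter_to_index_dict).values

print(index_encoding(["ACGT", "TTAA"], LETTER_INDEX))
# [[0 1 2 3]
#  [3 3 0 0]]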
16,867
|
openvax/mhcflurry
|
mhcflurry/class1_neural_network.py
|
Class1NeuralNetwork.apply_hyperparameter_renames
|
def apply_hyperparameter_renames(cls, hyperparameters):
"""
Handle hyperparameter renames.
Parameters
----------
hyperparameters : dict
Returns
-------
dict : updated hyperparameters
"""
for (from_name, to_name) in cls.hyperparameter_renames.items():
if from_name in hyperparameters:
value = hyperparameters.pop(from_name)
if to_name:
hyperparameters[to_name] = value
return hyperparameters
|
python
|
def apply_hyperparameter_renames(cls, hyperparameters):
"""
Handle hyperparameter renames.
Parameters
----------
hyperparameters : dict
Returns
-------
dict : updated hyperparameters
"""
for (from_name, to_name) in cls.hyperparameter_renames.items():
if from_name in hyperparameters:
value = hyperparameters.pop(from_name)
if to_name:
hyperparameters[to_name] = value
return hyperparameters
|
[
"def",
"apply_hyperparameter_renames",
"(",
"cls",
",",
"hyperparameters",
")",
":",
"for",
"(",
"from_name",
",",
"to_name",
")",
"in",
"cls",
".",
"hyperparameter_renames",
".",
"items",
"(",
")",
":",
"if",
"from_name",
"in",
"hyperparameters",
":",
"value",
"=",
"hyperparameters",
".",
"pop",
"(",
"from_name",
")",
"if",
"to_name",
":",
"hyperparameters",
"[",
"to_name",
"]",
"=",
"value",
"return",
"hyperparameters"
] |
Handle hyperparameter renames.
Parameters
----------
hyperparameters : dict
Returns
-------
dict : updated hyperparameters
|
[
"Handle",
"hyperparameter",
"renames",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_neural_network.py#L136-L154
|
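Note: apply_hyperparameter_renames in the row above moves deprecated keys to their new names and silently drops keys whose target name is falsy. A standalone sketch of that rename map; the rename dict below is hypothetical and does not reflect the actual Class1NeuralNetwork.hyperparameter_renames contents.

RENAMES = {"old_learning_rate": "learning_rate",  # hypothetical rename
           "deprecated_flag": None}               # hypothetical drop

def apply_hyperparameter_renames(hyperparameters, renames=RENAMES):
    # Move each old key to its new name, or drop it when the new name is None.
    for from_name, to_name in renames.items():
        if from_name in hyperparameters:
            value = hyperparameters.pop(from_name)
            if to_name:
                hyperparameters[to_name] = value
    return hyperparameters

print(apply_hyperparameter_renames({"old_learning_rate": 0.001,
                                    "deprecated_flag": True,
                                    "activation": "tanh"}))
# {'activation': 'tanh', 'learning_rate': 0.001}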
16,868
|
openvax/mhcflurry
|
mhcflurry/class1_neural_network.py
|
Class1NeuralNetwork.borrow_cached_network
|
def borrow_cached_network(klass, network_json, network_weights):
"""
Return a keras Model with the specified architecture and weights.
As an optimization, when possible this will reuse architectures from a
process-wide cache.
The returned object is "borrowed" in the sense that its weights can
change later after subsequent calls to this method from other objects.
If you're using this from a parallel implementation you'll need to
hold a lock while using the returned object.
Parameters
----------
network_json : string of JSON
network_weights : list of numpy.array
Returns
-------
keras.models.Model
"""
assert network_weights is not None
key = klass.keras_network_cache_key(network_json)
if key not in klass.KERAS_MODELS_CACHE:
# Cache miss.
import keras.models
network = keras.models.model_from_json(network_json)
existing_weights = None
else:
# Cache hit.
(network, existing_weights) = klass.KERAS_MODELS_CACHE[key]
if existing_weights is not network_weights:
network.set_weights(network_weights)
klass.KERAS_MODELS_CACHE[key] = (network, network_weights)
# As an added safety check we overwrite the fit method on the returned
# model to throw an error if it is called.
def throw(*args, **kwargs):
raise NotImplementedError("Do not call fit on cached model.")
network.fit = throw
return network
|
python
|
def borrow_cached_network(klass, network_json, network_weights):
"""
Return a keras Model with the specified architecture and weights.
As an optimization, when possible this will reuse architectures from a
process-wide cache.
The returned object is "borrowed" in the sense that its weights can
change later after subsequent calls to this method from other objects.
If you're using this from a parallel implementation you'll need to
hold a lock while using the returned object.
Parameters
----------
network_json : string of JSON
network_weights : list of numpy.array
Returns
-------
keras.models.Model
"""
assert network_weights is not None
key = klass.keras_network_cache_key(network_json)
if key not in klass.KERAS_MODELS_CACHE:
# Cache miss.
import keras.models
network = keras.models.model_from_json(network_json)
existing_weights = None
else:
# Cache hit.
(network, existing_weights) = klass.KERAS_MODELS_CACHE[key]
if existing_weights is not network_weights:
network.set_weights(network_weights)
klass.KERAS_MODELS_CACHE[key] = (network, network_weights)
# As an added safety check we overwrite the fit method on the returned
# model to throw an error if it is called.
def throw(*args, **kwargs):
raise NotImplementedError("Do not call fit on cached model.")
network.fit = throw
return network
|
[
"def",
"borrow_cached_network",
"(",
"klass",
",",
"network_json",
",",
"network_weights",
")",
":",
"assert",
"network_weights",
"is",
"not",
"None",
"key",
"=",
"klass",
".",
"keras_network_cache_key",
"(",
"network_json",
")",
"if",
"key",
"not",
"in",
"klass",
".",
"KERAS_MODELS_CACHE",
":",
"# Cache miss.",
"import",
"keras",
".",
"models",
"network",
"=",
"keras",
".",
"models",
".",
"model_from_json",
"(",
"network_json",
")",
"existing_weights",
"=",
"None",
"else",
":",
"# Cache hit.",
"(",
"network",
",",
"existing_weights",
")",
"=",
"klass",
".",
"KERAS_MODELS_CACHE",
"[",
"key",
"]",
"if",
"existing_weights",
"is",
"not",
"network_weights",
":",
"network",
".",
"set_weights",
"(",
"network_weights",
")",
"klass",
".",
"KERAS_MODELS_CACHE",
"[",
"key",
"]",
"=",
"(",
"network",
",",
"network_weights",
")",
"# As an added safety check we overwrite the fit method on the returned",
"# model to throw an error if it is called.",
"def",
"throw",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"Do not call fit on cached model.\"",
")",
"network",
".",
"fit",
"=",
"throw",
"return",
"network"
] |
Return a keras Model with the specified architecture and weights.
As an optimization, when possible this will reuse architectures from a
process-wide cache.
The returned object is "borrowed" in the sense that its weights can
change later after subsequent calls to this method from other objects.
If you're using this from a parallel implementation you'll need to
hold a lock while using the returned object.
Parameters
----------
network_json : string of JSON
network_weights : list of numpy.array
Returns
-------
keras.models.Model
|
[
"Return",
"a",
"keras",
"Model",
"with",
"the",
"specified",
"architecture",
"and",
"weights",
".",
"As",
"an",
"optimization",
"when",
"possible",
"this",
"will",
"reuse",
"architectures",
"from",
"a",
"process",
"-",
"wide",
"cache",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_neural_network.py#L183-L224
|
16,869
|
openvax/mhcflurry
|
mhcflurry/class1_neural_network.py
|
Class1NeuralNetwork.network
|
def network(self, borrow=False):
"""
Return the keras model associated with this predictor.
Parameters
----------
borrow : bool
Whether to return a cached model if possible. See
borrow_cached_network for details
Returns
-------
keras.models.Model
"""
if self._network is None and self.network_json is not None:
self.load_weights()
if borrow:
return self.borrow_cached_network(
self.network_json,
self.network_weights)
else:
import keras.models
self._network = keras.models.model_from_json(self.network_json)
if self.network_weights is not None:
self._network.set_weights(self.network_weights)
self.network_json = None
self.network_weights = None
return self._network
|
python
|
def network(self, borrow=False):
"""
Return the keras model associated with this predictor.
Parameters
----------
borrow : bool
Whether to return a cached model if possible. See
borrow_cached_network for details
Returns
-------
keras.models.Model
"""
if self._network is None and self.network_json is not None:
self.load_weights()
if borrow:
return self.borrow_cached_network(
self.network_json,
self.network_weights)
else:
import keras.models
self._network = keras.models.model_from_json(self.network_json)
if self.network_weights is not None:
self._network.set_weights(self.network_weights)
self.network_json = None
self.network_weights = None
return self._network
|
[
"def",
"network",
"(",
"self",
",",
"borrow",
"=",
"False",
")",
":",
"if",
"self",
".",
"_network",
"is",
"None",
"and",
"self",
".",
"network_json",
"is",
"not",
"None",
":",
"self",
".",
"load_weights",
"(",
")",
"if",
"borrow",
":",
"return",
"self",
".",
"borrow_cached_network",
"(",
"self",
".",
"network_json",
",",
"self",
".",
"network_weights",
")",
"else",
":",
"import",
"keras",
".",
"models",
"self",
".",
"_network",
"=",
"keras",
".",
"models",
".",
"model_from_json",
"(",
"self",
".",
"network_json",
")",
"if",
"self",
".",
"network_weights",
"is",
"not",
"None",
":",
"self",
".",
"_network",
".",
"set_weights",
"(",
"self",
".",
"network_weights",
")",
"self",
".",
"network_json",
"=",
"None",
"self",
".",
"network_weights",
"=",
"None",
"return",
"self",
".",
"_network"
] |
Return the keras model associated with this predictor.
Parameters
----------
borrow : bool
Whether to return a cached model if possible. See
borrow_cached_network for details
Returns
-------
keras.models.Model
|
[
"Return",
"the",
"keras",
"model",
"associated",
"with",
"this",
"predictor",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_neural_network.py#L226-L253
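A brief sketch of the borrow flag, assuming `nn` is a Class1NeuralNetwork with a serialized architecture and weights available.

shared = nn.network(borrow=True)   # cached, process-wide keras.models.Model; read-only use
owned = nn.network()               # instance-owned keras.models.Model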
|
16,870
|
openvax/mhcflurry
|
mhcflurry/class1_neural_network.py
|
Class1NeuralNetwork.load_weights
|
def load_weights(self):
"""
Load weights by evaluating self.network_weights_loader, if needed.
After calling this, self.network_weights_loader will be None and
self.network_weights will be the weights list, if available.
"""
if self.network_weights_loader:
self.network_weights = self.network_weights_loader()
self.network_weights_loader = None
|
python
|
def load_weights(self):
"""
Load weights by evaluating self.network_weights_loader, if needed.
After calling this, self.network_weights_loader will be None and
self.network_weights will be the weights list, if available.
"""
if self.network_weights_loader:
self.network_weights = self.network_weights_loader()
self.network_weights_loader = None
|
[
"def",
"load_weights",
"(",
"self",
")",
":",
"if",
"self",
".",
"network_weights_loader",
":",
"self",
".",
"network_weights",
"=",
"self",
".",
"network_weights_loader",
"(",
")",
"self",
".",
"network_weights_loader",
"=",
"None"
] |
Load weights by evaluating self.network_weights_loader, if needed.
After calling this, self.network_weights_loader will be None and
self.network_weights will be the weights list, if available.
|
[
"Load",
"weights",
"by",
"evaluating",
"self",
".",
"network_weights_loader",
"if",
"needed",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_neural_network.py#L315-L324
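A tiny sketch, assuming `nn` is a Class1NeuralNetwork constructed with a lazy network_weights_loader.

nn.load_weights()
assert nn.network_weights_loader is None   # the loader is consumed
weights = nn.network_weights               # list of numpy arrays, if any were available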
|
16,871
|
openvax/mhcflurry
|
mhcflurry/class1_neural_network.py
|
Class1NeuralNetwork.predict
|
def predict(self, peptides, allele_encoding=None, batch_size=4096):
"""
Predict affinities.
If peptides are specified as EncodableSequences, then the predictions
will be cached for this predictor as long as the EncodableSequences object
remains in memory. The cache is keyed on the object identity of the

EncodableSequences, not the sequences themselves.
Parameters
----------
peptides : EncodableSequences or list of string
allele_encoding : AlleleEncoding, optional
Only required when this model is a pan-allele model
batch_size : int
batch_size passed to Keras
Returns
-------
numpy.array of nM affinity predictions
"""
assert self.prediction_cache is not None
use_cache = (
allele_encoding is None and
isinstance(peptides, EncodableSequences))
if use_cache and peptides in self.prediction_cache:
return self.prediction_cache[peptides].copy()
x_dict = {
'peptide': self.peptides_to_network_input(peptides)
}
if allele_encoding is not None:
allele_input = self.allele_encoding_to_network_input(allele_encoding)
x_dict['allele'] = allele_input
network = self.network(borrow=True)
raw_predictions = network.predict(x_dict, batch_size=batch_size)
predictions = numpy.array(raw_predictions, dtype = "float64")[:,0]
result = to_ic50(predictions)
if use_cache:
self.prediction_cache[peptides] = result
return result
|
python
|
def predict(self, peptides, allele_encoding=None, batch_size=4096):
"""
Predict affinities.
If peptides are specified as EncodableSequences, then the predictions
will be cached for this predictor as long as the EncodableSequences object
remains in memory. The cache is keyed on the object identity of the
EncodableSequences, not the sequences themselves.
Parameters
----------
peptides : EncodableSequences or list of string
allele_encoding : AlleleEncoding, optional
Only required when this model is a pan-allele model
batch_size : int
batch_size passed to Keras
Returns
-------
numpy.array of nM affinity predictions
"""
assert self.prediction_cache is not None
use_cache = (
allele_encoding is None and
isinstance(peptides, EncodableSequences))
if use_cache and peptides in self.prediction_cache:
return self.prediction_cache[peptides].copy()
x_dict = {
'peptide': self.peptides_to_network_input(peptides)
}
if allele_encoding is not None:
allele_input = self.allele_encoding_to_network_input(allele_encoding)
x_dict['allele'] = allele_input
network = self.network(borrow=True)
raw_predictions = network.predict(x_dict, batch_size=batch_size)
predictions = numpy.array(raw_predictions, dtype = "float64")[:,0]
result = to_ic50(predictions)
if use_cache:
self.prediction_cache[peptides] = result
return result
|
[
"def",
"predict",
"(",
"self",
",",
"peptides",
",",
"allele_encoding",
"=",
"None",
",",
"batch_size",
"=",
"4096",
")",
":",
"assert",
"self",
".",
"prediction_cache",
"is",
"not",
"None",
"use_cache",
"=",
"(",
"allele_encoding",
"is",
"None",
"and",
"isinstance",
"(",
"peptides",
",",
"EncodableSequences",
")",
")",
"if",
"use_cache",
"and",
"peptides",
"in",
"self",
".",
"prediction_cache",
":",
"return",
"self",
".",
"prediction_cache",
"[",
"peptides",
"]",
".",
"copy",
"(",
")",
"x_dict",
"=",
"{",
"'peptide'",
":",
"self",
".",
"peptides_to_network_input",
"(",
"peptides",
")",
"}",
"if",
"allele_encoding",
"is",
"not",
"None",
":",
"allele_input",
"=",
"self",
".",
"allele_encoding_to_network_input",
"(",
"allele_encoding",
")",
"x_dict",
"[",
"'allele'",
"]",
"=",
"allele_input",
"network",
"=",
"self",
".",
"network",
"(",
"borrow",
"=",
"True",
")",
"raw_predictions",
"=",
"network",
".",
"predict",
"(",
"x_dict",
",",
"batch_size",
"=",
"batch_size",
")",
"predictions",
"=",
"numpy",
".",
"array",
"(",
"raw_predictions",
",",
"dtype",
"=",
"\"float64\"",
")",
"[",
":",
",",
"0",
"]",
"result",
"=",
"to_ic50",
"(",
"predictions",
")",
"if",
"use_cache",
":",
"self",
".",
"prediction_cache",
"[",
"peptides",
"]",
"=",
"result",
"return",
"result"
] |
Predict affinities.
If peptides are specified as EncodableSequences, then the predictions
will be cached for this predictor as long as the EncodableSequences object
remains in memory. The cache is keyed on the object identity of the
EncodableSequences, not the sequences themselves.
Parameters
----------
peptides : EncodableSequences or list of string
allele_encoding : AlleleEncoding, optional
Only required when this model is a pan-allele model
batch_size : int
batch_size passed to Keras
Returns
-------
numpy.array of nM affinity predictions
|
[
"Predict",
"affinities",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_neural_network.py#L739-L782
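A minimal prediction sketch, assuming `nn` is a trained allele-specific Class1NeuralNetwork; EncodableSequences.create appears in the encodable_sequences records further down, and the import path is inferred from the file path shown there.

from mhcflurry.encodable_sequences import EncodableSequences  # path assumed from mhcflurry/encodable_sequences.py

peptides = EncodableSequences.create(["SIINFEKL", "SIINFEKV"])
ic50s = nn.predict(peptides)   # numpy array of nM affinities, cached per EncodableSequences object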
|
16,872
|
openvax/mhcflurry
|
mhcflurry/scoring.py
|
make_scores
|
def make_scores(
ic50_y,
ic50_y_pred,
sample_weight=None,
threshold_nm=500,
max_ic50=50000):
"""
Calculate AUC, F1, and Kendall Tau scores.
Parameters
-----------
ic50_y : float list
true IC50s (i.e. affinities)
ic50_y_pred : float list
predicted IC50s
sample_weight : float list [optional]
threshold_nm : float [optional]
max_ic50 : float [optional]
Returns
-----------
dict with entries "auc", "f1", "tau"
"""
y_pred = from_ic50(ic50_y_pred, max_ic50)
try:
auc = sklearn.metrics.roc_auc_score(
ic50_y <= threshold_nm,
y_pred,
sample_weight=sample_weight)
except ValueError as e:
logging.warning(e)
auc = numpy.nan
try:
f1 = sklearn.metrics.f1_score(
ic50_y <= threshold_nm,
ic50_y_pred <= threshold_nm,
sample_weight=sample_weight)
except ValueError as e:
logging.warning(e)
f1 = numpy.nan
try:
tau = scipy.stats.kendalltau(ic50_y_pred, ic50_y)[0]
except ValueError as e:
logging.warning(e)
tau = numpy.nan
return dict(
auc=auc,
f1=f1,
tau=tau)
|
python
|
def make_scores(
ic50_y,
ic50_y_pred,
sample_weight=None,
threshold_nm=500,
max_ic50=50000):
"""
Calculate AUC, F1, and Kendall Tau scores.
Parameters
-----------
ic50_y : float list
true IC50s (i.e. affinities)
ic50_y_pred : float list
predicted IC50s
sample_weight : float list [optional]
threshold_nm : float [optional]
max_ic50 : float [optional]
Returns
-----------
dict with entries "auc", "f1", "tau"
"""
y_pred = from_ic50(ic50_y_pred, max_ic50)
try:
auc = sklearn.metrics.roc_auc_score(
ic50_y <= threshold_nm,
y_pred,
sample_weight=sample_weight)
except ValueError as e:
logging.warning(e)
auc = numpy.nan
try:
f1 = sklearn.metrics.f1_score(
ic50_y <= threshold_nm,
ic50_y_pred <= threshold_nm,
sample_weight=sample_weight)
except ValueError as e:
logging.warning(e)
f1 = numpy.nan
try:
tau = scipy.stats.kendalltau(ic50_y_pred, ic50_y)[0]
except ValueError as e:
logging.warning(e)
tau = numpy.nan
return dict(
auc=auc,
f1=f1,
tau=tau)
|
[
"def",
"make_scores",
"(",
"ic50_y",
",",
"ic50_y_pred",
",",
"sample_weight",
"=",
"None",
",",
"threshold_nm",
"=",
"500",
",",
"max_ic50",
"=",
"50000",
")",
":",
"y_pred",
"=",
"from_ic50",
"(",
"ic50_y_pred",
",",
"max_ic50",
")",
"try",
":",
"auc",
"=",
"sklearn",
".",
"metrics",
".",
"roc_auc_score",
"(",
"ic50_y",
"<=",
"threshold_nm",
",",
"y_pred",
",",
"sample_weight",
"=",
"sample_weight",
")",
"except",
"ValueError",
"as",
"e",
":",
"logging",
".",
"warning",
"(",
"e",
")",
"auc",
"=",
"numpy",
".",
"nan",
"try",
":",
"f1",
"=",
"sklearn",
".",
"metrics",
".",
"f1_score",
"(",
"ic50_y",
"<=",
"threshold_nm",
",",
"ic50_y_pred",
"<=",
"threshold_nm",
",",
"sample_weight",
"=",
"sample_weight",
")",
"except",
"ValueError",
"as",
"e",
":",
"logging",
".",
"warning",
"(",
"e",
")",
"f1",
"=",
"numpy",
".",
"nan",
"try",
":",
"tau",
"=",
"scipy",
".",
"stats",
".",
"kendalltau",
"(",
"ic50_y_pred",
",",
"ic50_y",
")",
"[",
"0",
"]",
"except",
"ValueError",
"as",
"e",
":",
"logging",
".",
"warning",
"(",
"e",
")",
"tau",
"=",
"numpy",
".",
"nan",
"return",
"dict",
"(",
"auc",
"=",
"auc",
",",
"f1",
"=",
"f1",
",",
"tau",
"=",
"tau",
")"
] |
Calculate AUC, F1, and Kendall Tau scores.
Parameters
-----------
ic50_y : float list
true IC50s (i.e. affinities)
ic50_y_pred : float list
predicted IC50s
sample_weight : float list [optional]
threshold_nm : float [optional]
max_ic50 : float [optional]
Returns
-----------
dict with entries "auc", "f1", "tau"
|
[
"Calculate",
"AUC",
"F1",
"and",
"Kendall",
"Tau",
"scores",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/scoring.py#L14-L68
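A self-contained sketch, with the import path inferred from mhcflurry/scoring.py above and made-up affinity values.

import numpy
from mhcflurry.scoring import make_scores  # path assumed from the record above

measured = numpy.array([50.0, 400.0, 5000.0, 30000.0])    # true IC50s (nM)
predicted = numpy.array([80.0, 300.0, 8000.0, 20000.0])   # predicted IC50s (nM)
scores = make_scores(measured, predicted, threshold_nm=500)
print(scores["auc"], scores["f1"], scores["tau"])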
|
16,873
|
openvax/mhcflurry
|
mhcflurry/encodable_sequences.py
|
EncodableSequences.variable_length_to_fixed_length_vector_encoding
|
def variable_length_to_fixed_length_vector_encoding(
self, vector_encoding_name, left_edge=4, right_edge=4, max_length=15):
"""
Encode variable-length sequences using a fixed-length encoding designed
for preserving the anchor positions of class I peptides.
The sequences must be of length at least left_edge + right_edge, and at
most max_length.
Parameters
----------
vector_encoding_name : string
How to represent amino acids.
One of "BLOSUM62", "one-hot", etc. Full list of supported vector
encodings is given by available_vector_encodings().
left_edge : int, size of fixed-position left side
right_edge : int, size of the fixed-position right side
max_length : sequence length of the resulting encoding
Returns
-------
numpy.array with shape (num sequences, max_length, m) where m is
vector_encoding_length(vector_encoding_name)
"""
cache_key = (
"fixed_length_vector_encoding",
vector_encoding_name,
left_edge,
right_edge,
max_length)
if cache_key not in self.encoding_cache:
fixed_length_sequences = (
self.sequences_to_fixed_length_index_encoded_array(
self.sequences,
left_edge=left_edge,
right_edge=right_edge,
max_length=max_length))
result = amino_acid.fixed_vectors_encoding(
fixed_length_sequences,
amino_acid.ENCODING_DATA_FRAMES[vector_encoding_name])
assert result.shape[0] == len(self.sequences)
self.encoding_cache[cache_key] = result
return self.encoding_cache[cache_key]
|
python
|
def variable_length_to_fixed_length_vector_encoding(
self, vector_encoding_name, left_edge=4, right_edge=4, max_length=15):
"""
Encode variable-length sequences using a fixed-length encoding designed
for preserving the anchor positions of class I peptides.
The sequences must be of length at least left_edge + right_edge, and at
most max_length.
Parameters
----------
vector_encoding_name : string
How to represent amino acids.
One of "BLOSUM62", "one-hot", etc. Full list of supported vector
encodings is given by available_vector_encodings().
left_edge : int, size of fixed-position left side
right_edge : int, size of the fixed-position right side
max_length : sequence length of the resulting encoding
Returns
-------
numpy.array with shape (num sequences, max_length, m) where m is
vector_encoding_length(vector_encoding_name)
"""
cache_key = (
"fixed_length_vector_encoding",
vector_encoding_name,
left_edge,
right_edge,
max_length)
if cache_key not in self.encoding_cache:
fixed_length_sequences = (
self.sequences_to_fixed_length_index_encoded_array(
self.sequences,
left_edge=left_edge,
right_edge=right_edge,
max_length=max_length))
result = amino_acid.fixed_vectors_encoding(
fixed_length_sequences,
amino_acid.ENCODING_DATA_FRAMES[vector_encoding_name])
assert result.shape[0] == len(self.sequences)
self.encoding_cache[cache_key] = result
return self.encoding_cache[cache_key]
|
[
"def",
"variable_length_to_fixed_length_vector_encoding",
"(",
"self",
",",
"vector_encoding_name",
",",
"left_edge",
"=",
"4",
",",
"right_edge",
"=",
"4",
",",
"max_length",
"=",
"15",
")",
":",
"cache_key",
"=",
"(",
"\"fixed_length_vector_encoding\"",
",",
"vector_encoding_name",
",",
"left_edge",
",",
"right_edge",
",",
"max_length",
")",
"if",
"cache_key",
"not",
"in",
"self",
".",
"encoding_cache",
":",
"fixed_length_sequences",
"=",
"(",
"self",
".",
"sequences_to_fixed_length_index_encoded_array",
"(",
"self",
".",
"sequences",
",",
"left_edge",
"=",
"left_edge",
",",
"right_edge",
"=",
"right_edge",
",",
"max_length",
"=",
"max_length",
")",
")",
"result",
"=",
"amino_acid",
".",
"fixed_vectors_encoding",
"(",
"fixed_length_sequences",
",",
"amino_acid",
".",
"ENCODING_DATA_FRAMES",
"[",
"vector_encoding_name",
"]",
")",
"assert",
"result",
".",
"shape",
"[",
"0",
"]",
"==",
"len",
"(",
"self",
".",
"sequences",
")",
"self",
".",
"encoding_cache",
"[",
"cache_key",
"]",
"=",
"result",
"return",
"self",
".",
"encoding_cache",
"[",
"cache_key",
"]"
] |
Encode variable-length sequences using a fixed-length encoding designed
for preserving the anchor positions of class I peptides.
The sequences must be of length at least left_edge + right_edge, and at
most max_length.
Parameters
----------
vector_encoding_name : string
How to represent amino acids.
One of "BLOSUM62", "one-hot", etc. Full list of supported vector
encodings is given by available_vector_encodings().
left_edge : int, size of fixed-position left side
right_edge : int, size of the fixed-position right side
max_length : sequence length of the resulting encoding
Returns
-------
numpy.array with shape (num sequences, max_length, m) where m is
vector_encoding_length(vector_encoding_name)
|
[
"Encode",
"variable",
"-",
"length",
"sequences",
"using",
"a",
"fixed",
"-",
"length",
"encoding",
"designed",
"for",
"preserving",
"the",
"anchor",
"positions",
"of",
"class",
"I",
"peptides",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/encodable_sequences.py#L89-L131
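A short sketch, assuming the import path from the file path above; the trailing dimension depends on the chosen vector encoding and is not stated in this record.

from mhcflurry.encodable_sequences import EncodableSequences  # path assumed

seqs = EncodableSequences.create(["SIINFEKL", "SIINFEKLQY"])
encoded = seqs.variable_length_to_fixed_length_vector_encoding("BLOSUM62")
print(encoded.shape)   # (2, max_length=15, per-residue vector length)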
|
16,874
|
openvax/mhcflurry
|
mhcflurry/encodable_sequences.py
|
EncodableSequences.sequences_to_fixed_length_index_encoded_array
|
def sequences_to_fixed_length_index_encoded_array(
klass, sequences, left_edge=4, right_edge=4, max_length=15):
"""
Transform a sequence of strings, where each string is of length at least
left_edge + right_edge and at most max_length into strings of length
max_length using a scheme designed to preserve the anchor positions of
class I peptides.
The first left_edge characters in the input always map to the first
left_edge characters in the output. Similarly for the last right_edge
characters. The middle characters are filled in based on the length,
with the X character filling in the blanks.
For example, using defaults:
AAAACDDDD -> AAAAXXXCXXXDDDD
The strings are also converted to int categorical amino acid indices.
Parameters
----------
sequences : list of string
left_edge : int
right_edge : int
max_length : int
Returns
-------
numpy array of shape (len(sequences), max_length) and dtype int
"""
# Result array is int32, filled with X (null amino acid) value.
result = numpy.full(
fill_value=amino_acid.AMINO_ACID_INDEX['X'],
shape=(len(sequences), max_length),
dtype="int32")
df = pandas.DataFrame({"peptide": sequences})
df["length"] = df.peptide.str.len()
middle_length = max_length - left_edge - right_edge
# For efficiency we handle each supported peptide length using bulk
# array operations.
for (length, sub_df) in df.groupby("length"):
if length < left_edge + right_edge:
raise ValueError(
"Sequence '%s' (length %d) unsupported: length must be at "
"least %d. There are %d total peptides with this length." % (
sub_df.iloc[0].peptide, length, left_edge + right_edge,
len(sub_df)))
if length > max_length:
raise ValueError(
"Sequence '%s' (length %d) unsupported: length must be at "
"most %d. There are %d total peptides with this length." % (
sub_df.iloc[0].peptide, length, max_length,
len(sub_df)))
# Array of shape (num peptides, length) giving fixed-length amino
# acid encoding each peptide of the current length.
fixed_length_sequences = numpy.stack(
sub_df.peptide.map(
lambda s: numpy.array([
amino_acid.AMINO_ACID_INDEX[char] for char in s
])).values)
num_null = max_length - length
num_null_left = int(math.ceil(num_null / 2))
num_middle_filled = middle_length - num_null
middle_start = left_edge + num_null_left
# Set left edge
result[sub_df.index, :left_edge] = fixed_length_sequences[
:, :left_edge
]
# Set middle.
result[
sub_df.index,
middle_start : middle_start + num_middle_filled
] = fixed_length_sequences[
:, left_edge : left_edge + num_middle_filled
]
# Set right edge.
result[
sub_df.index,
-right_edge:
] = fixed_length_sequences[:, -right_edge:]
return result
|
python
|
def sequences_to_fixed_length_index_encoded_array(
klass, sequences, left_edge=4, right_edge=4, max_length=15):
"""
Transform a sequence of strings, where each string is of length at least
left_edge + right_edge and at most max_length into strings of length
max_length using a scheme designed to preserve the anchor positions of
class I peptides.
The first left_edge characters in the input always map to the first
left_edge characters in the output. Similarly for the last right_edge
characters. The middle characters are filled in based on the length,
with the X character filling in the blanks.
For example, using defaults:
AAAACDDDD -> AAAAXXXCXXXDDDD
The strings are also converted to int categorical amino acid indices.
Parameters
----------
sequences : list of string
left_edge : int
right_edge : int
max_length : int
Returns
-------
numpy array of shape (len(sequences), max_length) and dtype int
"""
# Result array is int32, filled with X (null amino acid) value.
result = numpy.full(
fill_value=amino_acid.AMINO_ACID_INDEX['X'],
shape=(len(sequences), max_length),
dtype="int32")
df = pandas.DataFrame({"peptide": sequences})
df["length"] = df.peptide.str.len()
middle_length = max_length - left_edge - right_edge
# For efficiency we handle each supported peptide length using bulk
# array operations.
for (length, sub_df) in df.groupby("length"):
if length < left_edge + right_edge:
raise ValueError(
"Sequence '%s' (length %d) unsupported: length must be at "
"least %d. There are %d total peptides with this length." % (
sub_df.iloc[0].peptide, length, left_edge + right_edge,
len(sub_df)))
if length > max_length:
raise ValueError(
"Sequence '%s' (length %d) unsupported: length must be at "
"most %d. There are %d total peptides with this length." % (
sub_df.iloc[0].peptide, length, max_length,
len(sub_df)))
# Array of shape (num peptides, length) giving fixed-length amino
# acid encoding each peptide of the current length.
fixed_length_sequences = numpy.stack(
sub_df.peptide.map(
lambda s: numpy.array([
amino_acid.AMINO_ACID_INDEX[char] for char in s
])).values)
num_null = max_length - length
num_null_left = int(math.ceil(num_null / 2))
num_middle_filled = middle_length - num_null
middle_start = left_edge + num_null_left
# Set left edge
result[sub_df.index, :left_edge] = fixed_length_sequences[
:, :left_edge
]
# Set middle.
result[
sub_df.index,
middle_start : middle_start + num_middle_filled
] = fixed_length_sequences[
:, left_edge : left_edge + num_middle_filled
]
# Set right edge.
result[
sub_df.index,
-right_edge:
] = fixed_length_sequences[:, -right_edge:]
return result
|
[
"def",
"sequences_to_fixed_length_index_encoded_array",
"(",
"klass",
",",
"sequences",
",",
"left_edge",
"=",
"4",
",",
"right_edge",
"=",
"4",
",",
"max_length",
"=",
"15",
")",
":",
"# Result array is int32, filled with X (null amino acid) value.",
"result",
"=",
"numpy",
".",
"full",
"(",
"fill_value",
"=",
"amino_acid",
".",
"AMINO_ACID_INDEX",
"[",
"'X'",
"]",
",",
"shape",
"=",
"(",
"len",
"(",
"sequences",
")",
",",
"max_length",
")",
",",
"dtype",
"=",
"\"int32\"",
")",
"df",
"=",
"pandas",
".",
"DataFrame",
"(",
"{",
"\"peptide\"",
":",
"sequences",
"}",
")",
"df",
"[",
"\"length\"",
"]",
"=",
"df",
".",
"peptide",
".",
"str",
".",
"len",
"(",
")",
"middle_length",
"=",
"max_length",
"-",
"left_edge",
"-",
"right_edge",
"# For efficiency we handle each supported peptide length using bulk",
"# array operations.",
"for",
"(",
"length",
",",
"sub_df",
")",
"in",
"df",
".",
"groupby",
"(",
"\"length\"",
")",
":",
"if",
"length",
"<",
"left_edge",
"+",
"right_edge",
":",
"raise",
"ValueError",
"(",
"\"Sequence '%s' (length %d) unsupported: length must be at \"",
"\"least %d. There are %d total peptides with this length.\"",
"%",
"(",
"sub_df",
".",
"iloc",
"[",
"0",
"]",
".",
"peptide",
",",
"length",
",",
"left_edge",
"+",
"right_edge",
",",
"len",
"(",
"sub_df",
")",
")",
")",
"if",
"length",
">",
"max_length",
":",
"raise",
"ValueError",
"(",
"\"Sequence '%s' (length %d) unsupported: length must be at \"",
"\"most %d. There are %d total peptides with this length.\"",
"%",
"(",
"sub_df",
".",
"iloc",
"[",
"0",
"]",
".",
"peptide",
",",
"length",
",",
"max_length",
",",
"len",
"(",
"sub_df",
")",
")",
")",
"# Array of shape (num peptides, length) giving fixed-length amino",
"# acid encoding each peptide of the current length.",
"fixed_length_sequences",
"=",
"numpy",
".",
"stack",
"(",
"sub_df",
".",
"peptide",
".",
"map",
"(",
"lambda",
"s",
":",
"numpy",
".",
"array",
"(",
"[",
"amino_acid",
".",
"AMINO_ACID_INDEX",
"[",
"char",
"]",
"for",
"char",
"in",
"s",
"]",
")",
")",
".",
"values",
")",
"num_null",
"=",
"max_length",
"-",
"length",
"num_null_left",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"num_null",
"/",
"2",
")",
")",
"num_middle_filled",
"=",
"middle_length",
"-",
"num_null",
"middle_start",
"=",
"left_edge",
"+",
"num_null_left",
"# Set left edge",
"result",
"[",
"sub_df",
".",
"index",
",",
":",
"left_edge",
"]",
"=",
"fixed_length_sequences",
"[",
":",
",",
":",
"left_edge",
"]",
"# Set middle.",
"result",
"[",
"sub_df",
".",
"index",
",",
"middle_start",
":",
"middle_start",
"+",
"num_middle_filled",
"]",
"=",
"fixed_length_sequences",
"[",
":",
",",
"left_edge",
":",
"left_edge",
"+",
"num_middle_filled",
"]",
"# Set right edge.",
"result",
"[",
"sub_df",
".",
"index",
",",
"-",
"right_edge",
":",
"]",
"=",
"fixed_length_sequences",
"[",
":",
",",
"-",
"right_edge",
":",
"]",
"return",
"result"
] |
Transform a sequence of strings, where each string is of length at least
left_edge + right_edge and at most max_length into strings of length
max_length using a scheme designed to preserve the anchor positions of
class I peptides.
The first left_edge characters in the input always map to the first
left_edge characters in the output. Similarly for the last right_edge
characters. The middle characters are filled in based on the length,
with the X character filling in the blanks.
For example, using defaults:
AAAACDDDD -> AAAAXXXCXXXDDDD
The strings are also converted to int categorical amino acid indices.
Parameters
----------
sequences : list of string
left_edge : int
right_edge : int
max_length : int
Returns
-------
numpy array of shape (len(sequences), max_length) and dtype int
|
[
"Transform",
"a",
"sequence",
"of",
"strings",
"where",
"each",
"string",
"is",
"of",
"length",
"at",
"least",
"left_edge",
"+",
"right_edge",
"and",
"at",
"most",
"max_length",
"into",
"strings",
"of",
"length",
"max_length",
"using",
"a",
"scheme",
"designed",
"to",
"preserve",
"the",
"anchor",
"positions",
"of",
"class",
"I",
"peptides",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/encodable_sequences.py#L134-L223
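A short sketch, assuming the method is exposed as a classmethod (its first parameter is named klass); with the defaults shown above, peptides must be between left_edge + right_edge = 8 and max_length = 15 residues.

from mhcflurry.encodable_sequences import EncodableSequences  # path assumed

indices = EncodableSequences.sequences_to_fixed_length_index_encoded_array(["AAAACDDDD"])
print(indices.shape)   # (1, 15); unused middle positions hold the 'X' index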
|
16,875
|
openvax/mhcflurry
|
mhcflurry/ensemble_centrality.py
|
robust_mean
|
def robust_mean(log_values):
"""
Mean of values falling within the 25-75 percentiles.
Parameters
----------
log_values : 2-d numpy.array
Center is computed along the second axis (i.e. per row).
Returns
-------
center : numpy.array of length log_values.shape[0]
"""
if log_values.shape[1] <= 3:
# Too few values to use robust mean.
return numpy.nanmean(log_values, axis=1)
without_nans = numpy.nan_to_num(log_values) # replace nan with 0
mask = (
(~numpy.isnan(log_values)) &
(without_nans <= numpy.nanpercentile(log_values, 75, axis=1).reshape((-1, 1))) &
(without_nans >= numpy.nanpercentile(log_values, 25, axis=1).reshape((-1, 1))))
return (without_nans * mask.astype(float)).sum(1) / mask.sum(1)
|
python
|
def robust_mean(log_values):
"""
Mean of values falling within the 25-75 percentiles.
Parameters
----------
log_values : 2-d numpy.array
Center is computed along the second axis (i.e. per row).
Returns
-------
center : numpy.array of length log_values.shape[0]
"""
if log_values.shape[1] <= 3:
# Too few values to use robust mean.
return numpy.nanmean(log_values, axis=1)
without_nans = numpy.nan_to_num(log_values) # replace nan with 0
mask = (
(~numpy.isnan(log_values)) &
(without_nans <= numpy.nanpercentile(log_values, 75, axis=1).reshape((-1, 1))) &
(without_nans >= numpy.nanpercentile(log_values, 25, axis=1).reshape((-1, 1))))
return (without_nans * mask.astype(float)).sum(1) / mask.sum(1)
|
[
"def",
"robust_mean",
"(",
"log_values",
")",
":",
"if",
"log_values",
".",
"shape",
"[",
"1",
"]",
"<=",
"3",
":",
"# Too few values to use robust mean.",
"return",
"numpy",
".",
"nanmean",
"(",
"log_values",
",",
"axis",
"=",
"1",
")",
"without_nans",
"=",
"numpy",
".",
"nan_to_num",
"(",
"log_values",
")",
"# replace nan with 0",
"mask",
"=",
"(",
"(",
"~",
"numpy",
".",
"isnan",
"(",
"log_values",
")",
")",
"&",
"(",
"without_nans",
"<=",
"numpy",
".",
"nanpercentile",
"(",
"log_values",
",",
"75",
",",
"axis",
"=",
"1",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
")",
"&",
"(",
"without_nans",
">=",
"numpy",
".",
"nanpercentile",
"(",
"log_values",
",",
"25",
",",
"axis",
"=",
"1",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
")",
")",
"return",
"(",
"without_nans",
"*",
"mask",
".",
"astype",
"(",
"float",
")",
")",
".",
"sum",
"(",
"1",
")",
"/",
"mask",
".",
"sum",
"(",
"1",
")"
] |
Mean of values falling within the 25-75 percentiles.
Parameters
----------
log_values : 2-d numpy.array
Center is computed along the second axis (i.e. per row).
Returns
-------
center : numpy.array of length log_values.shape[0]
|
[
"Mean",
"of",
"values",
"falling",
"within",
"the",
"25",
"-",
"75",
"percentiles",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/ensemble_centrality.py#L11-L33
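A self-contained sketch, with the import path inferred from mhcflurry/ensemble_centrality.py above.

import numpy
from mhcflurry.ensemble_centrality import robust_mean  # path assumed from the record above

log_values = numpy.log([[100., 120., 130., 150., 10000.]])  # one row with one outlier
print(robust_mean(log_values))  # values outside the 25-75 percentile band, including the outlier, are ignored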
|
16,876
|
openvax/mhcflurry
|
mhcflurry/class1_affinity_predictor.py
|
Class1AffinityPredictor.neural_networks
|
def neural_networks(self):
"""
List of the neural networks in the ensemble.
Returns
-------
list of `Class1NeuralNetwork`
"""
result = []
for models in self.allele_to_allele_specific_models.values():
result.extend(models)
result.extend(self.class1_pan_allele_models)
return result
|
python
|
def neural_networks(self):
"""
List of the neural networks in the ensemble.
Returns
-------
list of `Class1NeuralNetwork`
"""
result = []
for models in self.allele_to_allele_specific_models.values():
result.extend(models)
result.extend(self.class1_pan_allele_models)
return result
|
[
"def",
"neural_networks",
"(",
"self",
")",
":",
"result",
"=",
"[",
"]",
"for",
"models",
"in",
"self",
".",
"allele_to_allele_specific_models",
".",
"values",
"(",
")",
":",
"result",
".",
"extend",
"(",
"models",
")",
"result",
".",
"extend",
"(",
"self",
".",
"class1_pan_allele_models",
")",
"return",
"result"
] |
List of the neural networks in the ensemble.
Returns
-------
list of `Class1NeuralNetwork`
|
[
"List",
"of",
"the",
"neural",
"networks",
"in",
"the",
"ensemble",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_affinity_predictor.py#L140-L152
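A tiny sketch, assuming `predictor` is an existing Class1AffinityPredictor and that neural_networks is exposed as a property (the record shows the bare def without a decorator, so it may instead need to be called as a method).

networks = predictor.neural_networks   # list of Class1NeuralNetwork; call as a method if not a property
print(len(networks))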
|
16,877
|
openvax/mhcflurry
|
mhcflurry/class1_affinity_predictor.py
|
Class1AffinityPredictor.merge
|
def merge(cls, predictors):
"""
Merge the ensembles of two or more `Class1AffinityPredictor` instances.
Note: the resulting merged predictor will NOT have calibrated percentile
ranks. Call `calibrate_percentile_ranks` on it if these are needed.
Parameters
----------
predictors : sequence of `Class1AffinityPredictor`
Returns
-------
`Class1AffinityPredictor` instance
"""
assert len(predictors) > 0
if len(predictors) == 1:
return predictors[0]
allele_to_allele_specific_models = collections.defaultdict(list)
class1_pan_allele_models = []
allele_to_fixed_length_sequence = predictors[0].allele_to_fixed_length_sequence
for predictor in predictors:
for (allele, networks) in (
predictor.allele_to_allele_specific_models.items()):
allele_to_allele_specific_models[allele].extend(networks)
class1_pan_allele_models.extend(
predictor.class1_pan_allele_models)
return Class1AffinityPredictor(
allele_to_allele_specific_models=allele_to_allele_specific_models,
class1_pan_allele_models=class1_pan_allele_models,
allele_to_fixed_length_sequence=allele_to_fixed_length_sequence
)
|
python
|
def merge(cls, predictors):
"""
Merge the ensembles of two or more `Class1AffinityPredictor` instances.
Note: the resulting merged predictor will NOT have calibrated percentile
ranks. Call `calibrate_percentile_ranks` on it if these are needed.
Parameters
----------
predictors : sequence of `Class1AffinityPredictor`
Returns
-------
`Class1AffinityPredictor` instance
"""
assert len(predictors) > 0
if len(predictors) == 1:
return predictors[0]
allele_to_allele_specific_models = collections.defaultdict(list)
class1_pan_allele_models = []
allele_to_fixed_length_sequence = predictors[0].allele_to_fixed_length_sequence
for predictor in predictors:
for (allele, networks) in (
predictor.allele_to_allele_specific_models.items()):
allele_to_allele_specific_models[allele].extend(networks)
class1_pan_allele_models.extend(
predictor.class1_pan_allele_models)
return Class1AffinityPredictor(
allele_to_allele_specific_models=allele_to_allele_specific_models,
class1_pan_allele_models=class1_pan_allele_models,
allele_to_fixed_length_sequence=allele_to_fixed_length_sequence
)
|
[
"def",
"merge",
"(",
"cls",
",",
"predictors",
")",
":",
"assert",
"len",
"(",
"predictors",
")",
">",
"0",
"if",
"len",
"(",
"predictors",
")",
"==",
"1",
":",
"return",
"predictors",
"[",
"0",
"]",
"allele_to_allele_specific_models",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"class1_pan_allele_models",
"=",
"[",
"]",
"allele_to_fixed_length_sequence",
"=",
"predictors",
"[",
"0",
"]",
".",
"allele_to_fixed_length_sequence",
"for",
"predictor",
"in",
"predictors",
":",
"for",
"(",
"allele",
",",
"networks",
")",
"in",
"(",
"predictor",
".",
"allele_to_allele_specific_models",
".",
"items",
"(",
")",
")",
":",
"allele_to_allele_specific_models",
"[",
"allele",
"]",
".",
"extend",
"(",
"networks",
")",
"class1_pan_allele_models",
".",
"extend",
"(",
"predictor",
".",
"class1_pan_allele_models",
")",
"return",
"Class1AffinityPredictor",
"(",
"allele_to_allele_specific_models",
"=",
"allele_to_allele_specific_models",
",",
"class1_pan_allele_models",
"=",
"class1_pan_allele_models",
",",
"allele_to_fixed_length_sequence",
"=",
"allele_to_fixed_length_sequence",
")"
] |
Merge the ensembles of two or more `Class1AffinityPredictor` instances.
Note: the resulting merged predictor will NOT have calibrated percentile
ranks. Call `calibrate_percentile_ranks` on it if these are needed.
Parameters
----------
predictors : sequence of `Class1AffinityPredictor`
Returns
-------
`Class1AffinityPredictor` instance
|
[
"Merge",
"the",
"ensembles",
"of",
"two",
"or",
"more",
"Class1AffinityPredictor",
"instances",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_affinity_predictor.py#L155-L190
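A brief sketch, assuming predictor_a and predictor_b are existing Class1AffinityPredictor instances.

merged = Class1AffinityPredictor.merge([predictor_a, predictor_b])
merged.calibrate_percentile_ranks()   # merged predictors lack calibrated percentile ranks, per the note above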
|
16,878
|
openvax/mhcflurry
|
mhcflurry/class1_affinity_predictor.py
|
Class1AffinityPredictor.merge_in_place
|
def merge_in_place(self, others):
"""
Add the models present in other predictors into the current predictor.
Parameters
----------
others : list of Class1AffinityPredictor
Other predictors to merge into the current predictor.
Returns
-------
list of string : names of newly added models
"""
new_model_names = []
for predictor in others:
for model in predictor.class1_pan_allele_models:
model_name = self.model_name(
"pan-class1",
len(self.class1_pan_allele_models))
self.class1_pan_allele_models.append(model)
row = pandas.Series(collections.OrderedDict([
("model_name", model_name),
("allele", "pan-class1"),
("config_json", json.dumps(model.get_config())),
("model", model),
])).to_frame().T
self._manifest_df = pandas.concat(
[self.manifest_df, row], ignore_index=True)
new_model_names.append(model_name)
for allele in predictor.allele_to_allele_specific_models:
if allele not in self.allele_to_allele_specific_models:
self.allele_to_allele_specific_models[allele] = []
current_models = self.allele_to_allele_specific_models[allele]
for model in predictor.allele_to_allele_specific_models[allele]:
model_name = self.model_name(allele, len(current_models))
row = pandas.Series(collections.OrderedDict([
("model_name", model_name),
("allele", allele),
("config_json", json.dumps(model.get_config())),
("model", model),
])).to_frame().T
self._manifest_df = pandas.concat(
[self.manifest_df, row], ignore_index=True)
current_models.append(model)
new_model_names.append(model_name)
self.clear_cache()
return new_model_names
|
python
|
def merge_in_place(self, others):
"""
Add the models present in other predictors into the current predictor.
Parameters
----------
others : list of Class1AffinityPredictor
Other predictors to merge into the current predictor.
Returns
-------
list of string : names of newly added models
"""
new_model_names = []
for predictor in others:
for model in predictor.class1_pan_allele_models:
model_name = self.model_name(
"pan-class1",
len(self.class1_pan_allele_models))
self.class1_pan_allele_models.append(model)
row = pandas.Series(collections.OrderedDict([
("model_name", model_name),
("allele", "pan-class1"),
("config_json", json.dumps(model.get_config())),
("model", model),
])).to_frame().T
self._manifest_df = pandas.concat(
[self.manifest_df, row], ignore_index=True)
new_model_names.append(model_name)
for allele in predictor.allele_to_allele_specific_models:
if allele not in self.allele_to_allele_specific_models:
self.allele_to_allele_specific_models[allele] = []
current_models = self.allele_to_allele_specific_models[allele]
for model in predictor.allele_to_allele_specific_models[allele]:
model_name = self.model_name(allele, len(current_models))
row = pandas.Series(collections.OrderedDict([
("model_name", model_name),
("allele", allele),
("config_json", json.dumps(model.get_config())),
("model", model),
])).to_frame().T
self._manifest_df = pandas.concat(
[self.manifest_df, row], ignore_index=True)
current_models.append(model)
new_model_names.append(model_name)
self.clear_cache()
return new_model_names
|
[
"def",
"merge_in_place",
"(",
"self",
",",
"others",
")",
":",
"new_model_names",
"=",
"[",
"]",
"for",
"predictor",
"in",
"others",
":",
"for",
"model",
"in",
"predictor",
".",
"class1_pan_allele_models",
":",
"model_name",
"=",
"self",
".",
"model_name",
"(",
"\"pan-class1\"",
",",
"len",
"(",
"self",
".",
"class1_pan_allele_models",
")",
")",
"self",
".",
"class1_pan_allele_models",
".",
"append",
"(",
"model",
")",
"row",
"=",
"pandas",
".",
"Series",
"(",
"collections",
".",
"OrderedDict",
"(",
"[",
"(",
"\"model_name\"",
",",
"model_name",
")",
",",
"(",
"\"allele\"",
",",
"\"pan-class1\"",
")",
",",
"(",
"\"config_json\"",
",",
"json",
".",
"dumps",
"(",
"model",
".",
"get_config",
"(",
")",
")",
")",
",",
"(",
"\"model\"",
",",
"model",
")",
",",
"]",
")",
")",
".",
"to_frame",
"(",
")",
".",
"T",
"self",
".",
"_manifest_df",
"=",
"pandas",
".",
"concat",
"(",
"[",
"self",
".",
"manifest_df",
",",
"row",
"]",
",",
"ignore_index",
"=",
"True",
")",
"new_model_names",
".",
"append",
"(",
"model_name",
")",
"for",
"allele",
"in",
"predictor",
".",
"allele_to_allele_specific_models",
":",
"if",
"allele",
"not",
"in",
"self",
".",
"allele_to_allele_specific_models",
":",
"self",
".",
"allele_to_allele_specific_models",
"[",
"allele",
"]",
"=",
"[",
"]",
"current_models",
"=",
"self",
".",
"allele_to_allele_specific_models",
"[",
"allele",
"]",
"for",
"model",
"in",
"predictor",
".",
"allele_to_allele_specific_models",
"[",
"allele",
"]",
":",
"model_name",
"=",
"self",
".",
"model_name",
"(",
"allele",
",",
"len",
"(",
"current_models",
")",
")",
"row",
"=",
"pandas",
".",
"Series",
"(",
"collections",
".",
"OrderedDict",
"(",
"[",
"(",
"\"model_name\"",
",",
"model_name",
")",
",",
"(",
"\"allele\"",
",",
"allele",
")",
",",
"(",
"\"config_json\"",
",",
"json",
".",
"dumps",
"(",
"model",
".",
"get_config",
"(",
")",
")",
")",
",",
"(",
"\"model\"",
",",
"model",
")",
",",
"]",
")",
")",
".",
"to_frame",
"(",
")",
".",
"T",
"self",
".",
"_manifest_df",
"=",
"pandas",
".",
"concat",
"(",
"[",
"self",
".",
"manifest_df",
",",
"row",
"]",
",",
"ignore_index",
"=",
"True",
")",
"current_models",
".",
"append",
"(",
"model",
")",
"new_model_names",
".",
"append",
"(",
"model_name",
")",
"self",
".",
"clear_cache",
"(",
")",
"return",
"new_model_names"
] |
Add the models present in other predictors into the current predictor.
Parameters
----------
others : list of Class1AffinityPredictor
Other predictors to merge into the current predictor.
Returns
-------
list of string : names of newly added models
|
[
"Add",
"the",
"models",
"present",
"other",
"predictors",
"into",
"the",
"current",
"predictor",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_affinity_predictor.py#L192-L241
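A brief sketch, assuming base_predictor and other_predictor are existing Class1AffinityPredictor instances.

added_names = base_predictor.merge_in_place([other_predictor])
print(len(added_names), "models added")   # names of the newly added models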
|
16,879
|
openvax/mhcflurry
|
mhcflurry/class1_affinity_predictor.py
|
Class1AffinityPredictor.percentile_ranks
|
def percentile_ranks(self, affinities, allele=None, alleles=None, throw=True):
"""
Return percentile ranks for the given ic50 affinities and alleles.
The 'allele' and 'alleles' arguments are as in the `predict` method.
Specify one of these.
Parameters
----------
affinities : sequence of float
nM affinities
allele : string
alleles : sequence of string
throw : boolean
If True, a ValueError will be raised in the case of unsupported
alleles. If False, a warning will be logged and NaN will be returned
for those percentile ranks.
Returns
-------
numpy.array of float
"""
if allele is not None:
try:
transform = self.allele_to_percent_rank_transform[allele]
return transform.transform(affinities)
except KeyError:
msg = "Allele %s has no percentile rank information" % allele
if throw:
raise ValueError(msg)
else:
warnings.warn(msg)
# Return NaNs
return numpy.ones(len(affinities)) * numpy.nan
if alleles is None:
raise ValueError("Specify allele or alleles")
df = pandas.DataFrame({"affinity": affinities})
df["allele"] = alleles
df["result"] = numpy.nan
for (allele, sub_df) in df.groupby("allele"):
df.loc[sub_df.index, "result"] = self.percentile_ranks(
sub_df.affinity, allele=allele, throw=throw)
return df.result.values
|
python
|
def percentile_ranks(self, affinities, allele=None, alleles=None, throw=True):
"""
Return percentile ranks for the given ic50 affinities and alleles.
The 'allele' and 'alleles' arguments are as in the `predict` method.
Specify one of these.
Parameters
----------
affinities : sequence of float
nM affinities
allele : string
alleles : sequence of string
throw : boolean
If True, a ValueError will be raised in the case of unsupported
alleles. If False, a warning will be logged and NaN will be returned
for those percentile ranks.
Returns
-------
numpy.array of float
"""
if allele is not None:
try:
transform = self.allele_to_percent_rank_transform[allele]
return transform.transform(affinities)
except KeyError:
msg = "Allele %s has no percentile rank information" % allele
if throw:
raise ValueError(msg)
else:
warnings.warn(msg)
# Return NaNs
return numpy.ones(len(affinities)) * numpy.nan
if alleles is None:
raise ValueError("Specify allele or alleles")
df = pandas.DataFrame({"affinity": affinities})
df["allele"] = alleles
df["result"] = numpy.nan
for (allele, sub_df) in df.groupby("allele"):
df.loc[sub_df.index, "result"] = self.percentile_ranks(
sub_df.affinity, allele=allele, throw=throw)
return df.result.values
|
[
"def",
"percentile_ranks",
"(",
"self",
",",
"affinities",
",",
"allele",
"=",
"None",
",",
"alleles",
"=",
"None",
",",
"throw",
"=",
"True",
")",
":",
"if",
"allele",
"is",
"not",
"None",
":",
"try",
":",
"transform",
"=",
"self",
".",
"allele_to_percent_rank_transform",
"[",
"allele",
"]",
"return",
"transform",
".",
"transform",
"(",
"affinities",
")",
"except",
"KeyError",
":",
"msg",
"=",
"\"Allele %s has no percentile rank information\"",
"%",
"allele",
"if",
"throw",
":",
"raise",
"ValueError",
"(",
"msg",
")",
"else",
":",
"warnings",
".",
"warn",
"(",
"msg",
")",
"# Return NaNs",
"return",
"numpy",
".",
"ones",
"(",
"len",
"(",
"affinities",
")",
")",
"*",
"numpy",
".",
"nan",
"if",
"alleles",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Specify allele or alleles\"",
")",
"df",
"=",
"pandas",
".",
"DataFrame",
"(",
"{",
"\"affinity\"",
":",
"affinities",
"}",
")",
"df",
"[",
"\"allele\"",
"]",
"=",
"alleles",
"df",
"[",
"\"result\"",
"]",
"=",
"numpy",
".",
"nan",
"for",
"(",
"allele",
",",
"sub_df",
")",
"in",
"df",
".",
"groupby",
"(",
"\"allele\"",
")",
":",
"df",
".",
"loc",
"[",
"sub_df",
".",
"index",
",",
"\"result\"",
"]",
"=",
"self",
".",
"percentile_ranks",
"(",
"sub_df",
".",
"affinity",
",",
"allele",
"=",
"allele",
",",
"throw",
"=",
"throw",
")",
"return",
"df",
".",
"result",
".",
"values"
] |
Return percentile ranks for the given ic50 affinities and alleles.
The 'allele' and 'alleles' arguments are as in the `predict` method.
Specify one of these.
Parameters
----------
affinities : sequence of float
nM affinities
allele : string
alleles : sequence of string
throw : boolean
If True, a ValueError will be raised in the case of unsupported
alleles. If False, a warning will be logged and NaN will be returned
for those percentile ranks.
Returns
-------
numpy.array of float
|
[
"Return",
"percentile",
"ranks",
"for",
"the",
"given",
"ic50",
"affinities",
"and",
"alleles",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_affinity_predictor.py#L722-L766
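A brief sketch, assuming `predictor` has calibrated percentile ranks for the (hypothetical) allele name used here.

import numpy

ranks = predictor.percentile_ranks(
    numpy.array([50.0, 5000.0]),
    alleles=["HLA-A*02:01", "HLA-A*02:01"],
    throw=False)   # uncalibrated alleles yield NaN plus a warning instead of a ValueError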
|
16,880
|
openvax/mhcflurry
|
mhcflurry/class1_affinity_predictor.py
|
Class1AffinityPredictor.calibrate_percentile_ranks
|
def calibrate_percentile_ranks(
self,
peptides=None,
num_peptides_per_length=int(1e5),
alleles=None,
bins=None):
"""
Compute the cumulative distribution of ic50 values for a set of alleles
over a large universe of random peptides, to enable computing quantiles in
this distribution later.
Parameters
----------
peptides : sequence of string or EncodableSequences, optional
Peptides to use
num_peptides_per_length : int, optional
If peptides argument is not specified, then num_peptides_per_length
peptides are randomly sampled from a uniform distribution for each
supported length
alleles : sequence of string, optional
Alleles to perform calibration for. If not specified all supported
alleles will be calibrated.
bins : object
Anything that can be passed to numpy.histogram's "bins" argument
can be used here, i.e. either an integer or a sequence giving bin
edges. This is in ic50 space.
Returns
----------
EncodableSequences : peptides used for calibration
"""
if bins is None:
bins = to_ic50(numpy.linspace(1, 0, 1000))
if alleles is None:
alleles = self.supported_alleles
if peptides is None:
peptides = []
lengths = range(
self.supported_peptide_lengths[0],
self.supported_peptide_lengths[1] + 1)
for length in lengths:
peptides.extend(
random_peptides(num_peptides_per_length, length))
encoded_peptides = EncodableSequences.create(peptides)
for (i, allele) in enumerate(alleles):
predictions = self.predict(encoded_peptides, allele=allele)
transform = PercentRankTransform()
transform.fit(predictions, bins=bins)
self.allele_to_percent_rank_transform[allele] = transform
return encoded_peptides
|
python
|
def calibrate_percentile_ranks(
self,
peptides=None,
num_peptides_per_length=int(1e5),
alleles=None,
bins=None):
"""
Compute the cumulative distribution of ic50 values for a set of alleles
over a large universe of random peptides, to enable computing quantiles in
this distribution later.
Parameters
----------
peptides : sequence of string or EncodableSequences, optional
Peptides to use
num_peptides_per_length : int, optional
If peptides argument is not specified, then num_peptides_per_length
peptides are randomly sampled from a uniform distribution for each
supported length
alleles : sequence of string, optional
Alleles to perform calibration for. If not specified all supported
alleles will be calibrated.
bins : object
Anything that can be passed to numpy.histogram's "bins" argument
can be used here, i.e. either an integer or a sequence giving bin
edges. This is in ic50 space.
Returns
----------
EncodableSequences : peptides used for calibration
"""
if bins is None:
bins = to_ic50(numpy.linspace(1, 0, 1000))
if alleles is None:
alleles = self.supported_alleles
if peptides is None:
peptides = []
lengths = range(
self.supported_peptide_lengths[0],
self.supported_peptide_lengths[1] + 1)
for length in lengths:
peptides.extend(
random_peptides(num_peptides_per_length, length))
encoded_peptides = EncodableSequences.create(peptides)
for (i, allele) in enumerate(alleles):
predictions = self.predict(encoded_peptides, allele=allele)
transform = PercentRankTransform()
transform.fit(predictions, bins=bins)
self.allele_to_percent_rank_transform[allele] = transform
return encoded_peptides
|
[
"def",
"calibrate_percentile_ranks",
"(",
"self",
",",
"peptides",
"=",
"None",
",",
"num_peptides_per_length",
"=",
"int",
"(",
"1e5",
")",
",",
"alleles",
"=",
"None",
",",
"bins",
"=",
"None",
")",
":",
"if",
"bins",
"is",
"None",
":",
"bins",
"=",
"to_ic50",
"(",
"numpy",
".",
"linspace",
"(",
"1",
",",
"0",
",",
"1000",
")",
")",
"if",
"alleles",
"is",
"None",
":",
"alleles",
"=",
"self",
".",
"supported_alleles",
"if",
"peptides",
"is",
"None",
":",
"peptides",
"=",
"[",
"]",
"lengths",
"=",
"range",
"(",
"self",
".",
"supported_peptide_lengths",
"[",
"0",
"]",
",",
"self",
".",
"supported_peptide_lengths",
"[",
"1",
"]",
"+",
"1",
")",
"for",
"length",
"in",
"lengths",
":",
"peptides",
".",
"extend",
"(",
"random_peptides",
"(",
"num_peptides_per_length",
",",
"length",
")",
")",
"encoded_peptides",
"=",
"EncodableSequences",
".",
"create",
"(",
"peptides",
")",
"for",
"(",
"i",
",",
"allele",
")",
"in",
"enumerate",
"(",
"alleles",
")",
":",
"predictions",
"=",
"self",
".",
"predict",
"(",
"encoded_peptides",
",",
"allele",
"=",
"allele",
")",
"transform",
"=",
"PercentRankTransform",
"(",
")",
"transform",
".",
"fit",
"(",
"predictions",
",",
"bins",
"=",
"bins",
")",
"self",
".",
"allele_to_percent_rank_transform",
"[",
"allele",
"]",
"=",
"transform",
"return",
"encoded_peptides"
] |
Compute the cumulative distribution of ic50 values for a set of alleles
over a large universe of random peptides, to enable computing quantiles in
this distribution later.
Parameters
----------
peptides : sequence of string or EncodableSequences, optional
Peptides to use
num_peptides_per_length : int, optional
If peptides argument is not specified, then num_peptides_per_length
peptides are randomly sampled from a uniform distribution for each
supported length
alleles : sequence of string, optional
Alleles to perform calibration for. If not specified all supported
alleles will be calibrated.
bins : object
Anything that can be passed to numpy.histogram's "bins" argument
can be used here, i.e. either an integer or a sequence giving bin
edges. This is in ic50 space.
Returns
----------
EncodableSequences : peptides used for calibration
|
[
"Compute",
"the",
"cumulative",
"distribution",
"of",
"ic50",
"values",
"for",
"a",
"set",
"of",
"alleles",
"over",
"a",
"large",
"universe",
"of",
"random",
"peptides",
"to",
"enable",
"computing",
"quantiles",
"in",
"this",
"distribution",
"later",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_affinity_predictor.py#L1074-L1128
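A brief sketch, assuming `predictor` is a trained Class1AffinityPredictor; the allele name and reduced peptide count are illustrative only.

calibration_peptides = predictor.calibrate_percentile_ranks(
    num_peptides_per_length=int(1e4),
    alleles=["HLA-A*02:01"])   # returns the EncodableSequences used for calibration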
|
16,881
|
openvax/mhcflurry
|
mhcflurry/class1_affinity_predictor.py
|
Class1AffinityPredictor.filter_networks
|
def filter_networks(self, predicate):
"""
Return a new Class1AffinityPredictor containing a subset of this
predictor's neural networks.
Parameters
----------
predicate : Class1NeuralNetwork -> boolean
Function specifying which neural networks to include
Returns
-------
Class1AffinityPredictor
"""
allele_to_allele_specific_models = {}
for (allele, models) in self.allele_to_allele_specific_models.items():
allele_to_allele_specific_models[allele] = [
m for m in models if predicate(m)
]
class1_pan_allele_models = [
m for m in self.class1_pan_allele_models if predicate(m)
]
return Class1AffinityPredictor(
allele_to_allele_specific_models=allele_to_allele_specific_models,
class1_pan_allele_models=class1_pan_allele_models,
allele_to_fixed_length_sequence=self.allele_to_fixed_length_sequence,
)
|
python
|
def filter_networks(self, predicate):
"""
Return a new Class1AffinityPredictor containing a subset of this
predictor's neural networks.
Parameters
----------
predicate : Class1NeuralNetwork -> boolean
Function specifying which neural networks to include
Returns
-------
Class1AffinityPredictor
"""
allele_to_allele_specific_models = {}
for (allele, models) in self.allele_to_allele_specific_models.items():
allele_to_allele_specific_models[allele] = [
m for m in models if predicate(m)
]
class1_pan_allele_models = [
m for m in self.class1_pan_allele_models if predicate(m)
]
return Class1AffinityPredictor(
allele_to_allele_specific_models=allele_to_allele_specific_models,
class1_pan_allele_models=class1_pan_allele_models,
allele_to_fixed_length_sequence=self.allele_to_fixed_length_sequence,
)
|
[
"def",
"filter_networks",
"(",
"self",
",",
"predicate",
")",
":",
"allele_to_allele_specific_models",
"=",
"{",
"}",
"for",
"(",
"allele",
",",
"models",
")",
"in",
"self",
".",
"allele_to_allele_specific_models",
".",
"items",
"(",
")",
":",
"allele_to_allele_specific_models",
"[",
"allele",
"]",
"=",
"[",
"m",
"for",
"m",
"in",
"models",
"if",
"predicate",
"(",
"m",
")",
"]",
"class1_pan_allele_models",
"=",
"[",
"m",
"for",
"m",
"in",
"self",
".",
"class1_pan_allele_models",
"if",
"predicate",
"(",
"m",
")",
"]",
"return",
"Class1AffinityPredictor",
"(",
"allele_to_allele_specific_models",
"=",
"allele_to_allele_specific_models",
",",
"class1_pan_allele_models",
"=",
"class1_pan_allele_models",
",",
"allele_to_fixed_length_sequence",
"=",
"self",
".",
"allele_to_fixed_length_sequence",
",",
")"
] |
Return a new Class1AffinityPredictor containing a subset of this
predictor's neural networks.
Parameters
----------
predicate : Class1NeuralNetwork -> boolean
Function specifying which neural networks to include
Returns
-------
Class1AffinityPredictor
|
[
"Return",
"a",
"new",
"Class1AffinityPredictor",
"containing",
"a",
"subset",
"of",
"this",
"predictor",
"s",
"neural",
"networks",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_affinity_predictor.py#L1130-L1157
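A brief sketch, assuming `predictor` is an existing Class1AffinityPredictor; the predicate keeps an arbitrary subset purely for illustration.

keep = predictor.class1_pan_allele_models[:4]
smaller = predictor.filter_networks(lambda nn: nn in keep)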
|
16,882
|
openvax/mhcflurry
|
mhcflurry/class1_affinity_predictor.py
|
Class1AffinityPredictor.model_select
|
def model_select(
self,
score_function,
alleles=None,
min_models=1,
max_models=10000):
"""
Perform model selection using a user-specified scoring function.
Model selection is done using a "step up" variable selection procedure,
in which models are repeatedly added to an ensemble until the score
stops improving.
Parameters
----------
score_function : Class1AffinityPredictor -> float function
Scoring function
alleles : list of string, optional
If not specified, model selection is performed for all alleles.
min_models : int, optional
Min models to select per allele
max_models : int, optional
Max models to select per allele
Returns
-------
Class1AffinityPredictor : predictor containing the selected models
"""
if alleles is None:
alleles = self.supported_alleles
dfs = []
allele_to_allele_specific_models = {}
for allele in alleles:
df = pandas.DataFrame({
'model': self.allele_to_allele_specific_models[allele]
})
df["model_num"] = df.index
df["allele"] = allele
df["selected"] = False
round_num = 1
while not df.selected.all() and sum(df.selected) < max_models:
score_col = "score_%2d" % round_num
prev_score_col = "score_%2d" % (round_num - 1)
existing_selected = list(df[df.selected].model)
df[score_col] = [
numpy.nan if row.selected else
score_function(
Class1AffinityPredictor(
allele_to_allele_specific_models={
allele: [row.model] + existing_selected
}
)
)
for (_, row) in df.iterrows()
]
if round_num > min_models and (
df[score_col].max() < df[prev_score_col].max()):
break
# In case of a tie, pick a model at random.
(best_model_index,) = df.loc[
(df[score_col] == df[score_col].max())
].sample(1).index
df.loc[best_model_index, "selected"] = True
round_num += 1
dfs.append(df)
allele_to_allele_specific_models[allele] = list(
df.loc[df.selected].model)
df = pandas.concat(dfs, ignore_index=True)
new_predictor = Class1AffinityPredictor(
allele_to_allele_specific_models,
metadata_dataframes={
"model_selection": df,
})
return new_predictor
|
python
|
def model_select(
self,
score_function,
alleles=None,
min_models=1,
max_models=10000):
"""
Perform model selection using a user-specified scoring function.
Model selection is done using a "step up" variable selection procedure,
in which models are repeatedly added to an ensemble until the score
stops improving.
Parameters
----------
score_function : Class1AffinityPredictor -> float function
Scoring function
alleles : list of string, optional
If not specified, model selection is performed for all alleles.
min_models : int, optional
Min models to select per allele
max_models : int, optional
Max models to select per allele
Returns
-------
Class1AffinityPredictor : predictor containing the selected models
"""
if alleles is None:
alleles = self.supported_alleles
dfs = []
allele_to_allele_specific_models = {}
for allele in alleles:
df = pandas.DataFrame({
'model': self.allele_to_allele_specific_models[allele]
})
df["model_num"] = df.index
df["allele"] = allele
df["selected"] = False
round_num = 1
while not df.selected.all() and sum(df.selected) < max_models:
score_col = "score_%2d" % round_num
prev_score_col = "score_%2d" % (round_num - 1)
existing_selected = list(df[df.selected].model)
df[score_col] = [
numpy.nan if row.selected else
score_function(
Class1AffinityPredictor(
allele_to_allele_specific_models={
allele: [row.model] + existing_selected
}
)
)
for (_, row) in df.iterrows()
]
if round_num > min_models and (
df[score_col].max() < df[prev_score_col].max()):
break
# In case of a tie, pick a model at random.
(best_model_index,) = df.loc[
(df[score_col] == df[score_col].max())
].sample(1).index
df.loc[best_model_index, "selected"] = True
round_num += 1
dfs.append(df)
allele_to_allele_specific_models[allele] = list(
df.loc[df.selected].model)
df = pandas.concat(dfs, ignore_index=True)
new_predictor = Class1AffinityPredictor(
allele_to_allele_specific_models,
metadata_dataframes={
"model_selection": df,
})
return new_predictor
|
[
"def",
"model_select",
"(",
"self",
",",
"score_function",
",",
"alleles",
"=",
"None",
",",
"min_models",
"=",
"1",
",",
"max_models",
"=",
"10000",
")",
":",
"if",
"alleles",
"is",
"None",
":",
"alleles",
"=",
"self",
".",
"supported_alleles",
"dfs",
"=",
"[",
"]",
"allele_to_allele_specific_models",
"=",
"{",
"}",
"for",
"allele",
"in",
"alleles",
":",
"df",
"=",
"pandas",
".",
"DataFrame",
"(",
"{",
"'model'",
":",
"self",
".",
"allele_to_allele_specific_models",
"[",
"allele",
"]",
"}",
")",
"df",
"[",
"\"model_num\"",
"]",
"=",
"df",
".",
"index",
"df",
"[",
"\"allele\"",
"]",
"=",
"allele",
"df",
"[",
"\"selected\"",
"]",
"=",
"False",
"round_num",
"=",
"1",
"while",
"not",
"df",
".",
"selected",
".",
"all",
"(",
")",
"and",
"sum",
"(",
"df",
".",
"selected",
")",
"<",
"max_models",
":",
"score_col",
"=",
"\"score_%2d\"",
"%",
"round_num",
"prev_score_col",
"=",
"\"score_%2d\"",
"%",
"(",
"round_num",
"-",
"1",
")",
"existing_selected",
"=",
"list",
"(",
"df",
"[",
"df",
".",
"selected",
"]",
".",
"model",
")",
"df",
"[",
"score_col",
"]",
"=",
"[",
"numpy",
".",
"nan",
"if",
"row",
".",
"selected",
"else",
"score_function",
"(",
"Class1AffinityPredictor",
"(",
"allele_to_allele_specific_models",
"=",
"{",
"allele",
":",
"[",
"row",
".",
"model",
"]",
"+",
"existing_selected",
"}",
")",
")",
"for",
"(",
"_",
",",
"row",
")",
"in",
"df",
".",
"iterrows",
"(",
")",
"]",
"if",
"round_num",
">",
"min_models",
"and",
"(",
"df",
"[",
"score_col",
"]",
".",
"max",
"(",
")",
"<",
"df",
"[",
"prev_score_col",
"]",
".",
"max",
"(",
")",
")",
":",
"break",
"# In case of a tie, pick a model at random.",
"(",
"best_model_index",
",",
")",
"=",
"df",
".",
"loc",
"[",
"(",
"df",
"[",
"score_col",
"]",
"==",
"df",
"[",
"score_col",
"]",
".",
"max",
"(",
")",
")",
"]",
".",
"sample",
"(",
"1",
")",
".",
"index",
"df",
".",
"loc",
"[",
"best_model_index",
",",
"\"selected\"",
"]",
"=",
"True",
"round_num",
"+=",
"1",
"dfs",
".",
"append",
"(",
"df",
")",
"allele_to_allele_specific_models",
"[",
"allele",
"]",
"=",
"list",
"(",
"df",
".",
"loc",
"[",
"df",
".",
"selected",
"]",
".",
"model",
")",
"df",
"=",
"pandas",
".",
"concat",
"(",
"dfs",
",",
"ignore_index",
"=",
"True",
")",
"new_predictor",
"=",
"Class1AffinityPredictor",
"(",
"allele_to_allele_specific_models",
",",
"metadata_dataframes",
"=",
"{",
"\"model_selection\"",
":",
"df",
",",
"}",
")",
"return",
"new_predictor"
] |
Perform model selection using a user-specified scoring function.
Model selection is done using a "step up" variable selection procedure,
in which models are repeatedly added to an ensemble until the score
stops improving.
Parameters
----------
score_function : Class1AffinityPredictor -> float function
Scoring function
alleles : list of string, optional
If not specified, model selection is performed for all alleles.
min_models : int, optional
Min models to select per allele
max_models : int, optional
Max models to select per allele
Returns
-------
Class1AffinityPredictor : predictor containing the selected models
|
[
"Perform",
"model",
"selection",
"using",
"a",
"user",
"-",
"specified",
"scoring",
"function",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/class1_affinity_predictor.py#L1159-L1245
|
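For illustration, a minimal sketch of driving the step-up selection above with a mean-squared-error score on a held-out affinity table. The `held_out` DataFrame, its column names, the `full_predictor` variable, and the predict call signature are assumptions for the example, not anything taken from this file.

import numpy

def make_mse_score_function(held_out):
    # held_out is assumed to have columns: allele, peptide, measurement (nM affinities)
    def score(predictor):
        predicted = predictor.predict(
            peptides=held_out.peptide.values,
            alleles=held_out.allele.values)
        # Negate the squared error on log affinities so higher scores are better,
        # which is the convention model_select expects.
        return -numpy.mean(
            (numpy.log(predicted) - numpy.log(held_out.measurement.values)) ** 2)
    return score

selected_predictor = full_predictor.model_select(
    score_function=make_mse_score_function(held_out),
    alleles=["HLA-A*02:01"],
    min_models=1,
    max_models=8)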
16,883
|
openvax/mhcflurry
|
mhcflurry/percent_rank_transform.py
|
PercentRankTransform.to_series
|
def to_series(self):
"""
Serialize the fit to a pandas.Series.
        The index on the series gives the bin edges and the values give the CDF.
Returns
-------
pandas.Series
"""
return pandas.Series(
self.cdf, index=[numpy.nan] + list(self.bin_edges) + [numpy.nan])
|
python
|
def to_series(self):
"""
Serialize the fit to a pandas.Series.
        The index on the series gives the bin edges and the values give the CDF.
Returns
-------
pandas.Series
"""
return pandas.Series(
self.cdf, index=[numpy.nan] + list(self.bin_edges) + [numpy.nan])
|
[
"def",
"to_series",
"(",
"self",
")",
":",
"return",
"pandas",
".",
"Series",
"(",
"self",
".",
"cdf",
",",
"index",
"=",
"[",
"numpy",
".",
"nan",
"]",
"+",
"list",
"(",
"self",
".",
"bin_edges",
")",
"+",
"[",
"numpy",
".",
"nan",
"]",
")"
] |
Serialize the fit to a pandas.Series.
The index on the series gives the bin edges and the values give the CDF.
Returns
-------
pandas.Series
|
[
"Serialize",
"the",
"fit",
"to",
"a",
"pandas",
".",
"Series",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/percent_rank_transform.py#L46-L58
|
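A small sketch of reading the serialized fit back out, assuming `transform` is an already-fit PercentRankTransform instance (the variable itself is illustrative):

series = transform.to_series()
bin_edges = series.index[1:-1]   # interior index entries are the bin edges
cdf = series.values              # the values are the CDF, including the NaN-indexed endpoints
print(len(bin_edges), "bin edges,", len(cdf), "CDF values")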
16,884
|
openvax/mhcflurry
|
mhcflurry/downloads.py
|
get_default_class1_models_dir
|
def get_default_class1_models_dir(test_exists=True):
"""
Return the absolute path to the default class1 models dir.
If environment variable MHCFLURRY_DEFAULT_CLASS1_MODELS is set to an
absolute path, return that path. If it's set to a relative path (i.e. does
not start with /) then return that path taken to be relative to the mhcflurry
downloads dir.
If environment variable MHCFLURRY_DEFAULT_CLASS1_MODELS is NOT set,
then return the path to downloaded models in the "models_class1" download.
Parameters
----------
test_exists : boolean, optional
        Whether to raise an exception if the path does not exist
Returns
-------
string : absolute path
"""
if _MHCFLURRY_DEFAULT_CLASS1_MODELS_DIR:
result = join(get_downloads_dir(), _MHCFLURRY_DEFAULT_CLASS1_MODELS_DIR)
if test_exists and not exists(result):
raise IOError("No such directory: %s" % result)
return result
else:
return get_path("models_class1", "models", test_exists=test_exists)
|
python
|
def get_default_class1_models_dir(test_exists=True):
"""
Return the absolute path to the default class1 models dir.
If environment variable MHCFLURRY_DEFAULT_CLASS1_MODELS is set to an
absolute path, return that path. If it's set to a relative path (i.e. does
not start with /) then return that path taken to be relative to the mhcflurry
downloads dir.
If environment variable MHCFLURRY_DEFAULT_CLASS1_MODELS is NOT set,
then return the path to downloaded models in the "models_class1" download.
Parameters
----------
test_exists : boolean, optional
        Whether to raise an exception if the path does not exist
Returns
-------
string : absolute path
"""
if _MHCFLURRY_DEFAULT_CLASS1_MODELS_DIR:
result = join(get_downloads_dir(), _MHCFLURRY_DEFAULT_CLASS1_MODELS_DIR)
if test_exists and not exists(result):
raise IOError("No such directory: %s" % result)
return result
else:
return get_path("models_class1", "models", test_exists=test_exists)
|
[
"def",
"get_default_class1_models_dir",
"(",
"test_exists",
"=",
"True",
")",
":",
"if",
"_MHCFLURRY_DEFAULT_CLASS1_MODELS_DIR",
":",
"result",
"=",
"join",
"(",
"get_downloads_dir",
"(",
")",
",",
"_MHCFLURRY_DEFAULT_CLASS1_MODELS_DIR",
")",
"if",
"test_exists",
"and",
"not",
"exists",
"(",
"result",
")",
":",
"raise",
"IOError",
"(",
"\"No such directory: %s\"",
"%",
"result",
")",
"return",
"result",
"else",
":",
"return",
"get_path",
"(",
"\"models_class1\"",
",",
"\"models\"",
",",
"test_exists",
"=",
"test_exists",
")"
] |
Return the absolute path to the default class1 models dir.
If environment variable MHCFLURRY_DEFAULT_CLASS1_MODELS is set to an
absolute path, return that path. If it's set to a relative path (i.e. does
not start with /) then return that path taken to be relative to the mhcflurry
downloads dir.
If environment variable MHCFLURRY_DEFAULT_CLASS1_MODELS is NOT set,
then return the path to downloaded models in the "models_class1" download.
Parameters
----------
test_exists : boolean, optional
    Whether to raise an exception if the path does not exist
Returns
-------
string : absolute path
|
[
"Return",
"the",
"absolute",
"path",
"to",
"the",
"default",
"class1",
"models",
"dir",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/downloads.py#L57-L85
|
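A quick usage sketch resolving the default models directory without requiring it to exist (the printed path will differ per machine):

from mhcflurry.downloads import get_default_class1_models_dir

models_dir = get_default_class1_models_dir(test_exists=False)
print("Default class1 models directory:", models_dir)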
16,885
|
openvax/mhcflurry
|
mhcflurry/downloads.py
|
get_current_release_downloads
|
def get_current_release_downloads():
"""
Return a dict of all available downloads in the current release.
The dict keys are the names of the downloads. The values are a dict
with two entries:
downloaded : bool
Whether the download is currently available locally
metadata : dict
Info about the download from downloads.yml such as URL
"""
downloads = (
get_downloads_metadata()
['releases']
[get_current_release()]
['downloads'])
return OrderedDict(
(download["name"], {
'downloaded': exists(join(get_downloads_dir(), download["name"])),
'metadata': download,
}) for download in downloads
)
|
python
|
def get_current_release_downloads():
"""
Return a dict of all available downloads in the current release.
The dict keys are the names of the downloads. The values are a dict
with two entries:
downloaded : bool
Whether the download is currently available locally
metadata : dict
Info about the download from downloads.yml such as URL
"""
downloads = (
get_downloads_metadata()
['releases']
[get_current_release()]
['downloads'])
return OrderedDict(
(download["name"], {
'downloaded': exists(join(get_downloads_dir(), download["name"])),
'metadata': download,
}) for download in downloads
)
|
[
"def",
"get_current_release_downloads",
"(",
")",
":",
"downloads",
"=",
"(",
"get_downloads_metadata",
"(",
")",
"[",
"'releases'",
"]",
"[",
"get_current_release",
"(",
")",
"]",
"[",
"'downloads'",
"]",
")",
"return",
"OrderedDict",
"(",
"(",
"download",
"[",
"\"name\"",
"]",
",",
"{",
"'downloaded'",
":",
"exists",
"(",
"join",
"(",
"get_downloads_dir",
"(",
")",
",",
"download",
"[",
"\"name\"",
"]",
")",
")",
",",
"'metadata'",
":",
"download",
",",
"}",
")",
"for",
"download",
"in",
"downloads",
")"
] |
Return a dict of all available downloads in the current release.
The dict keys are the names of the downloads. The values are a dict
with two entries:
downloaded : bool
Whether the download is currently available locally
metadata : dict
Info about the download from downloads.yml such as URL
|
[
"Return",
"a",
"dict",
"of",
"all",
"available",
"downloads",
"in",
"the",
"current",
"release",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/downloads.py#L88-L111
|
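A short sketch listing every download in the current release and whether it is present locally, using only the documented return structure:

from mhcflurry.downloads import get_current_release_downloads

downloads = get_current_release_downloads()
for name, info in downloads.items():
    status = "downloaded" if info["downloaded"] else "not downloaded"
    print("%-40s %s" % (name, status))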
16,886
|
openvax/mhcflurry
|
mhcflurry/downloads.py
|
get_path
|
def get_path(download_name, filename='', test_exists=True):
"""
Get the local path to a file in a MHCflurry download
Parameters
-----------
download_name : string
filename : string
Relative path within the download to the file of interest
test_exists : boolean
If True (default) throw an error telling the user how to download the
data if the file does not exist
Returns
-----------
string giving local absolute path
"""
assert '/' not in download_name, "Invalid download: %s" % download_name
path = join(get_downloads_dir(), download_name, filename)
if test_exists and not exists(path):
raise RuntimeError(
"Missing MHCflurry downloadable file: %s. "
"To download this data, run:\n\tmhcflurry-downloads fetch %s\n"
"in a shell."
% (quote(path), download_name))
return path
|
python
|
def get_path(download_name, filename='', test_exists=True):
"""
Get the local path to a file in a MHCflurry download
Parameters
-----------
download_name : string
filename : string
Relative path within the download to the file of interest
test_exists : boolean
If True (default) throw an error telling the user how to download the
data if the file does not exist
Returns
-----------
string giving local absolute path
"""
assert '/' not in download_name, "Invalid download: %s" % download_name
path = join(get_downloads_dir(), download_name, filename)
if test_exists and not exists(path):
raise RuntimeError(
"Missing MHCflurry downloadable file: %s. "
"To download this data, run:\n\tmhcflurry-downloads fetch %s\n"
"in a shell."
% (quote(path), download_name))
return path
|
[
"def",
"get_path",
"(",
"download_name",
",",
"filename",
"=",
"''",
",",
"test_exists",
"=",
"True",
")",
":",
"assert",
"'/'",
"not",
"in",
"download_name",
",",
"\"Invalid download: %s\"",
"%",
"download_name",
"path",
"=",
"join",
"(",
"get_downloads_dir",
"(",
")",
",",
"download_name",
",",
"filename",
")",
"if",
"test_exists",
"and",
"not",
"exists",
"(",
"path",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Missing MHCflurry downloadable file: %s. \"",
"\"To download this data, run:\\n\\tmhcflurry-downloads fetch %s\\n\"",
"\"in a shell.\"",
"%",
"(",
"quote",
"(",
"path",
")",
",",
"download_name",
")",
")",
"return",
"path"
] |
Get the local path to a file in a MHCflurry download
Parameters
-----------
download_name : string
filename : string
Relative path within the download to the file of interest
test_exists : boolean
If True (default) throw an error telling the user how to download the
data if the file does not exist
Returns
-----------
string giving local absolute path
|
[
"Get",
"the",
"local",
"path",
"to",
"a",
"file",
"in",
"a",
"MHCflurry",
"download"
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/downloads.py#L114-L141
|
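A usage sketch; the relative filename below is hypothetical, and test_exists=False avoids requiring the download to be present:

from mhcflurry.downloads import get_path

# Hypothetical file inside the "models_class1" download
manifest_path = get_path("models_class1", "models/manifest.csv", test_exists=False)
print(manifest_path)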
16,887
|
openvax/mhcflurry
|
mhcflurry/downloads.py
|
configure
|
def configure():
"""
Setup various global variables based on environment variables.
"""
global _DOWNLOADS_DIR
global _CURRENT_RELEASE
_CURRENT_RELEASE = None
_DOWNLOADS_DIR = environ.get("MHCFLURRY_DOWNLOADS_DIR")
if not _DOWNLOADS_DIR:
metadata = get_downloads_metadata()
_CURRENT_RELEASE = environ.get("MHCFLURRY_DOWNLOADS_CURRENT_RELEASE")
if not _CURRENT_RELEASE:
_CURRENT_RELEASE = metadata['current-release']
current_release_compatability = (
metadata["releases"][_CURRENT_RELEASE]["compatibility-version"])
current_compatability = metadata["current-compatibility-version"]
if current_release_compatability != current_compatability:
logging.warn(
"The specified downloads are not compatible with this version "
"of the MHCflurry codebase. Downloads: release %s, "
"compatability version: %d. Code compatability version: %d" % (
_CURRENT_RELEASE,
current_release_compatability,
current_compatability))
data_dir = environ.get("MHCFLURRY_DATA_DIR")
if not data_dir:
# increase the version every time we make a breaking change in
# how the data is organized. For changes to e.g. just model
# serialization, the downloads release numbers should be used.
data_dir = user_data_dir("mhcflurry", version="4")
_DOWNLOADS_DIR = join(data_dir, _CURRENT_RELEASE)
logging.debug("Configured MHCFLURRY_DOWNLOADS_DIR: %s" % _DOWNLOADS_DIR)
|
python
|
def configure():
"""
Setup various global variables based on environment variables.
"""
global _DOWNLOADS_DIR
global _CURRENT_RELEASE
_CURRENT_RELEASE = None
_DOWNLOADS_DIR = environ.get("MHCFLURRY_DOWNLOADS_DIR")
if not _DOWNLOADS_DIR:
metadata = get_downloads_metadata()
_CURRENT_RELEASE = environ.get("MHCFLURRY_DOWNLOADS_CURRENT_RELEASE")
if not _CURRENT_RELEASE:
_CURRENT_RELEASE = metadata['current-release']
current_release_compatability = (
metadata["releases"][_CURRENT_RELEASE]["compatibility-version"])
current_compatability = metadata["current-compatibility-version"]
if current_release_compatability != current_compatability:
logging.warn(
"The specified downloads are not compatible with this version "
"of the MHCflurry codebase. Downloads: release %s, "
"compatability version: %d. Code compatability version: %d" % (
_CURRENT_RELEASE,
current_release_compatability,
current_compatability))
data_dir = environ.get("MHCFLURRY_DATA_DIR")
if not data_dir:
# increase the version every time we make a breaking change in
# how the data is organized. For changes to e.g. just model
# serialization, the downloads release numbers should be used.
data_dir = user_data_dir("mhcflurry", version="4")
_DOWNLOADS_DIR = join(data_dir, _CURRENT_RELEASE)
logging.debug("Configured MHCFLURRY_DOWNLOADS_DIR: %s" % _DOWNLOADS_DIR)
|
[
"def",
"configure",
"(",
")",
":",
"global",
"_DOWNLOADS_DIR",
"global",
"_CURRENT_RELEASE",
"_CURRENT_RELEASE",
"=",
"None",
"_DOWNLOADS_DIR",
"=",
"environ",
".",
"get",
"(",
"\"MHCFLURRY_DOWNLOADS_DIR\"",
")",
"if",
"not",
"_DOWNLOADS_DIR",
":",
"metadata",
"=",
"get_downloads_metadata",
"(",
")",
"_CURRENT_RELEASE",
"=",
"environ",
".",
"get",
"(",
"\"MHCFLURRY_DOWNLOADS_CURRENT_RELEASE\"",
")",
"if",
"not",
"_CURRENT_RELEASE",
":",
"_CURRENT_RELEASE",
"=",
"metadata",
"[",
"'current-release'",
"]",
"current_release_compatability",
"=",
"(",
"metadata",
"[",
"\"releases\"",
"]",
"[",
"_CURRENT_RELEASE",
"]",
"[",
"\"compatibility-version\"",
"]",
")",
"current_compatability",
"=",
"metadata",
"[",
"\"current-compatibility-version\"",
"]",
"if",
"current_release_compatability",
"!=",
"current_compatability",
":",
"logging",
".",
"warn",
"(",
"\"The specified downloads are not compatible with this version \"",
"\"of the MHCflurry codebase. Downloads: release %s, \"",
"\"compatability version: %d. Code compatability version: %d\"",
"%",
"(",
"_CURRENT_RELEASE",
",",
"current_release_compatability",
",",
"current_compatability",
")",
")",
"data_dir",
"=",
"environ",
".",
"get",
"(",
"\"MHCFLURRY_DATA_DIR\"",
")",
"if",
"not",
"data_dir",
":",
"# increase the version every time we make a breaking change in",
"# how the data is organized. For changes to e.g. just model",
"# serialization, the downloads release numbers should be used.",
"data_dir",
"=",
"user_data_dir",
"(",
"\"mhcflurry\"",
",",
"version",
"=",
"\"4\"",
")",
"_DOWNLOADS_DIR",
"=",
"join",
"(",
"data_dir",
",",
"_CURRENT_RELEASE",
")",
"logging",
".",
"debug",
"(",
"\"Configured MHCFLURRY_DOWNLOADS_DIR: %s\"",
"%",
"_DOWNLOADS_DIR",
")"
] |
Setup various global variables based on environment variables.
|
[
"Setup",
"various",
"global",
"variables",
"based",
"on",
"environment",
"variables",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/downloads.py#L144-L179
|
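A sketch of steering this configuration from the environment. It assumes configure() runs when mhcflurry.downloads is imported (as the module-level globals above suggest); the directory path is purely illustrative.

import os

# Must be set before the module is imported, since configure() reads it at import time.
os.environ["MHCFLURRY_DOWNLOADS_DIR"] = "/data/mhcflurry-downloads"  # hypothetical path

from mhcflurry import downloads
print(downloads.get_downloads_dir())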
16,888
|
openvax/mhcflurry
|
mhcflurry/parallelism.py
|
make_worker_pool
|
def make_worker_pool(
processes=None,
initializer=None,
initializer_kwargs_per_process=None,
max_tasks_per_worker=None):
"""
Convenience wrapper to create a multiprocessing.Pool.
This function adds support for per-worker initializer arguments, which are
not natively supported by the multiprocessing module. The motivation for
this feature is to support allocating each worker to a (different) GPU.
IMPLEMENTATION NOTE:
The per-worker initializer arguments are implemented using a Queue. Each
worker reads its arguments from this queue when it starts. When it
terminates, it adds its initializer arguments back to the queue, so a
future process can initialize itself using these arguments.
There is one issue with this approach, however. If a worker crashes, it
never repopulates the queue of initializer arguments. This will prevent
any future worker from re-using those arguments. To deal with this
issue we add a second 'backup queue'. This queue always contains the
full set of initializer arguments: whenever a worker reads from it, it
always pushes the pop'd args back to the end of the queue immediately.
If the primary arg queue is ever empty, then workers will read
from this backup queue.
Parameters
----------
processes : int
Number of workers. Default: num CPUs.
initializer : function, optional
Init function to call in each worker
initializer_kwargs_per_process : list of dict, optional
Arguments to pass to initializer function for each worker. Length of
list must equal the number of workers.
max_tasks_per_worker : int, optional
Restart workers after this many tasks. Requires Python >=3.2.
Returns
-------
multiprocessing.Pool
"""
if not processes:
processes = cpu_count()
pool_kwargs = {
'processes': processes,
}
if max_tasks_per_worker:
pool_kwargs["maxtasksperchild"] = max_tasks_per_worker
if initializer:
if initializer_kwargs_per_process:
assert len(initializer_kwargs_per_process) == processes
kwargs_queue = Queue()
kwargs_queue_backup = Queue()
for kwargs in initializer_kwargs_per_process:
kwargs_queue.put(kwargs)
kwargs_queue_backup.put(kwargs)
pool_kwargs["initializer"] = worker_init_entry_point
pool_kwargs["initargs"] = (
initializer, kwargs_queue, kwargs_queue_backup)
else:
pool_kwargs["initializer"] = initializer
worker_pool = Pool(**pool_kwargs)
print("Started pool: %s" % str(worker_pool))
pprint(pool_kwargs)
return worker_pool
|
python
|
def make_worker_pool(
processes=None,
initializer=None,
initializer_kwargs_per_process=None,
max_tasks_per_worker=None):
"""
Convenience wrapper to create a multiprocessing.Pool.
This function adds support for per-worker initializer arguments, which are
not natively supported by the multiprocessing module. The motivation for
this feature is to support allocating each worker to a (different) GPU.
IMPLEMENTATION NOTE:
The per-worker initializer arguments are implemented using a Queue. Each
worker reads its arguments from this queue when it starts. When it
terminates, it adds its initializer arguments back to the queue, so a
future process can initialize itself using these arguments.
There is one issue with this approach, however. If a worker crashes, it
never repopulates the queue of initializer arguments. This will prevent
any future worker from re-using those arguments. To deal with this
issue we add a second 'backup queue'. This queue always contains the
full set of initializer arguments: whenever a worker reads from it, it
always pushes the pop'd args back to the end of the queue immediately.
If the primary arg queue is ever empty, then workers will read
from this backup queue.
Parameters
----------
processes : int
Number of workers. Default: num CPUs.
initializer : function, optional
Init function to call in each worker
initializer_kwargs_per_process : list of dict, optional
Arguments to pass to initializer function for each worker. Length of
list must equal the number of workers.
max_tasks_per_worker : int, optional
Restart workers after this many tasks. Requires Python >=3.2.
Returns
-------
multiprocessing.Pool
"""
if not processes:
processes = cpu_count()
pool_kwargs = {
'processes': processes,
}
if max_tasks_per_worker:
pool_kwargs["maxtasksperchild"] = max_tasks_per_worker
if initializer:
if initializer_kwargs_per_process:
assert len(initializer_kwargs_per_process) == processes
kwargs_queue = Queue()
kwargs_queue_backup = Queue()
for kwargs in initializer_kwargs_per_process:
kwargs_queue.put(kwargs)
kwargs_queue_backup.put(kwargs)
pool_kwargs["initializer"] = worker_init_entry_point
pool_kwargs["initargs"] = (
initializer, kwargs_queue, kwargs_queue_backup)
else:
pool_kwargs["initializer"] = initializer
worker_pool = Pool(**pool_kwargs)
print("Started pool: %s" % str(worker_pool))
pprint(pool_kwargs)
return worker_pool
|
[
"def",
"make_worker_pool",
"(",
"processes",
"=",
"None",
",",
"initializer",
"=",
"None",
",",
"initializer_kwargs_per_process",
"=",
"None",
",",
"max_tasks_per_worker",
"=",
"None",
")",
":",
"if",
"not",
"processes",
":",
"processes",
"=",
"cpu_count",
"(",
")",
"pool_kwargs",
"=",
"{",
"'processes'",
":",
"processes",
",",
"}",
"if",
"max_tasks_per_worker",
":",
"pool_kwargs",
"[",
"\"maxtasksperchild\"",
"]",
"=",
"max_tasks_per_worker",
"if",
"initializer",
":",
"if",
"initializer_kwargs_per_process",
":",
"assert",
"len",
"(",
"initializer_kwargs_per_process",
")",
"==",
"processes",
"kwargs_queue",
"=",
"Queue",
"(",
")",
"kwargs_queue_backup",
"=",
"Queue",
"(",
")",
"for",
"kwargs",
"in",
"initializer_kwargs_per_process",
":",
"kwargs_queue",
".",
"put",
"(",
"kwargs",
")",
"kwargs_queue_backup",
".",
"put",
"(",
"kwargs",
")",
"pool_kwargs",
"[",
"\"initializer\"",
"]",
"=",
"worker_init_entry_point",
"pool_kwargs",
"[",
"\"initargs\"",
"]",
"=",
"(",
"initializer",
",",
"kwargs_queue",
",",
"kwargs_queue_backup",
")",
"else",
":",
"pool_kwargs",
"[",
"\"initializer\"",
"]",
"=",
"initializer",
"worker_pool",
"=",
"Pool",
"(",
"*",
"*",
"pool_kwargs",
")",
"print",
"(",
"\"Started pool: %s\"",
"%",
"str",
"(",
"worker_pool",
")",
")",
"pprint",
"(",
"pool_kwargs",
")",
"return",
"worker_pool"
] |
Convenience wrapper to create a multiprocessing.Pool.
This function adds support for per-worker initializer arguments, which are
not natively supported by the multiprocessing module. The motivation for
this feature is to support allocating each worker to a (different) GPU.
IMPLEMENTATION NOTE:
The per-worker initializer arguments are implemented using a Queue. Each
worker reads its arguments from this queue when it starts. When it
terminates, it adds its initializer arguments back to the queue, so a
future process can initialize itself using these arguments.
There is one issue with this approach, however. If a worker crashes, it
never repopulates the queue of initializer arguments. This will prevent
any future worker from re-using those arguments. To deal with this
issue we add a second 'backup queue'. This queue always contains the
full set of initializer arguments: whenever a worker reads from it, it
always pushes the pop'd args back to the end of the queue immediately.
If the primary arg queue is ever empty, then workers will read
from this backup queue.
Parameters
----------
processes : int
Number of workers. Default: num CPUs.
initializer : function, optional
Init function to call in each worker
initializer_kwargs_per_process : list of dict, optional
Arguments to pass to initializer function for each worker. Length of
list must equal the number of workers.
max_tasks_per_worker : int, optional
Restart workers after this many tasks. Requires Python >=3.2.
Returns
-------
multiprocessing.Pool
|
[
"Convenience",
"wrapper",
"to",
"create",
"a",
"multiprocessing",
".",
"Pool",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/parallelism.py#L115-L188
|
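A sketch of the per-worker GPU allocation that the implementation note above motivates: each worker receives its own initializer kwargs pinning it to one GPU. The task function and work items are placeholders; set_keras_backend is the helper from mhcflurry/common.py documented further down in this dump.

from mhcflurry.parallelism import make_worker_pool

def init_worker(gpu_device_nums=None):
    # Pin this worker process to its assigned GPU(s) before any Keras work happens.
    from mhcflurry.common import set_keras_backend
    set_keras_backend(gpu_device_nums=gpu_device_nums)

num_workers = 4
pool = make_worker_pool(
    processes=num_workers,
    initializer=init_worker,
    initializer_kwargs_per_process=[
        {"gpu_device_nums": [i]} for i in range(num_workers)],
    max_tasks_per_worker=50)

results = pool.map(some_task, work_items)  # some_task / work_items are placeholders
pool.close()
pool.join()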
16,889
|
openvax/mhcflurry
|
mhcflurry/calibrate_percentile_ranks_command.py
|
calibrate_percentile_ranks
|
def calibrate_percentile_ranks(allele, predictor, peptides=None):
"""
Private helper function.
"""
global GLOBAL_DATA
if peptides is None:
peptides = GLOBAL_DATA["calibration_peptides"]
predictor.calibrate_percentile_ranks(
peptides=peptides,
alleles=[allele])
return {
allele: predictor.allele_to_percent_rank_transform[allele],
}
|
python
|
def calibrate_percentile_ranks(allele, predictor, peptides=None):
"""
Private helper function.
"""
global GLOBAL_DATA
if peptides is None:
peptides = GLOBAL_DATA["calibration_peptides"]
predictor.calibrate_percentile_ranks(
peptides=peptides,
alleles=[allele])
return {
allele: predictor.allele_to_percent_rank_transform[allele],
}
|
[
"def",
"calibrate_percentile_ranks",
"(",
"allele",
",",
"predictor",
",",
"peptides",
"=",
"None",
")",
":",
"global",
"GLOBAL_DATA",
"if",
"peptides",
"is",
"None",
":",
"peptides",
"=",
"GLOBAL_DATA",
"[",
"\"calibration_peptides\"",
"]",
"predictor",
".",
"calibrate_percentile_ranks",
"(",
"peptides",
"=",
"peptides",
",",
"alleles",
"=",
"[",
"allele",
"]",
")",
"return",
"{",
"allele",
":",
"predictor",
".",
"allele_to_percent_rank_transform",
"[",
"allele",
"]",
",",
"}"
] |
Private helper function.
|
[
"Private",
"helper",
"function",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/calibrate_percentile_ranks_command.py#L140-L152
|
16,890
|
openvax/mhcflurry
|
mhcflurry/common.py
|
set_keras_backend
|
def set_keras_backend(backend=None, gpu_device_nums=None, num_threads=None):
"""
Configure Keras backend to use GPU or CPU. Only tensorflow is supported.
Parameters
----------
backend : string, optional
one of 'tensorflow-default', 'tensorflow-cpu', 'tensorflow-gpu'
gpu_device_nums : list of int, optional
GPU devices to potentially use
num_threads : int, optional
Tensorflow threads to use
"""
os.environ["KERAS_BACKEND"] = "tensorflow"
original_backend = backend
if not backend:
backend = "tensorflow-default"
if gpu_device_nums is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
[str(i) for i in gpu_device_nums])
if backend == "tensorflow-cpu" or gpu_device_nums == []:
print("Forcing tensorflow/CPU backend.")
os.environ["CUDA_VISIBLE_DEVICES"] = ""
device_count = {'CPU': 1, 'GPU': 0}
elif backend == "tensorflow-gpu":
print("Forcing tensorflow/GPU backend.")
device_count = {'CPU': 0, 'GPU': 1}
elif backend == "tensorflow-default":
print("Forcing tensorflow backend.")
device_count = None
else:
raise ValueError("Unsupported backend: %s" % backend)
import tensorflow
from keras import backend as K
if K.backend() == 'tensorflow':
config = tensorflow.ConfigProto(device_count=device_count)
config.gpu_options.allow_growth = True
if num_threads:
config.inter_op_parallelism_threads = num_threads
config.intra_op_parallelism_threads = num_threads
session = tensorflow.Session(config=config)
K.set_session(session)
else:
if original_backend or gpu_device_nums or num_threads:
warnings.warn(
"Only tensorflow backend can be customized. Ignoring "
" customization. Backend: %s" % K.backend())
|
python
|
def set_keras_backend(backend=None, gpu_device_nums=None, num_threads=None):
"""
Configure Keras backend to use GPU or CPU. Only tensorflow is supported.
Parameters
----------
backend : string, optional
one of 'tensorflow-default', 'tensorflow-cpu', 'tensorflow-gpu'
gpu_device_nums : list of int, optional
GPU devices to potentially use
num_threads : int, optional
Tensorflow threads to use
"""
os.environ["KERAS_BACKEND"] = "tensorflow"
original_backend = backend
if not backend:
backend = "tensorflow-default"
if gpu_device_nums is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
[str(i) for i in gpu_device_nums])
if backend == "tensorflow-cpu" or gpu_device_nums == []:
print("Forcing tensorflow/CPU backend.")
os.environ["CUDA_VISIBLE_DEVICES"] = ""
device_count = {'CPU': 1, 'GPU': 0}
elif backend == "tensorflow-gpu":
print("Forcing tensorflow/GPU backend.")
device_count = {'CPU': 0, 'GPU': 1}
elif backend == "tensorflow-default":
print("Forcing tensorflow backend.")
device_count = None
else:
raise ValueError("Unsupported backend: %s" % backend)
import tensorflow
from keras import backend as K
if K.backend() == 'tensorflow':
config = tensorflow.ConfigProto(device_count=device_count)
config.gpu_options.allow_growth = True
if num_threads:
config.inter_op_parallelism_threads = num_threads
config.intra_op_parallelism_threads = num_threads
session = tensorflow.Session(config=config)
K.set_session(session)
else:
if original_backend or gpu_device_nums or num_threads:
warnings.warn(
"Only tensorflow backend can be customized. Ignoring "
" customization. Backend: %s" % K.backend())
|
[
"def",
"set_keras_backend",
"(",
"backend",
"=",
"None",
",",
"gpu_device_nums",
"=",
"None",
",",
"num_threads",
"=",
"None",
")",
":",
"os",
".",
"environ",
"[",
"\"KERAS_BACKEND\"",
"]",
"=",
"\"tensorflow\"",
"original_backend",
"=",
"backend",
"if",
"not",
"backend",
":",
"backend",
"=",
"\"tensorflow-default\"",
"if",
"gpu_device_nums",
"is",
"not",
"None",
":",
"os",
".",
"environ",
"[",
"\"CUDA_VISIBLE_DEVICES\"",
"]",
"=",
"\",\"",
".",
"join",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"gpu_device_nums",
"]",
")",
"if",
"backend",
"==",
"\"tensorflow-cpu\"",
"or",
"gpu_device_nums",
"==",
"[",
"]",
":",
"print",
"(",
"\"Forcing tensorflow/CPU backend.\"",
")",
"os",
".",
"environ",
"[",
"\"CUDA_VISIBLE_DEVICES\"",
"]",
"=",
"\"\"",
"device_count",
"=",
"{",
"'CPU'",
":",
"1",
",",
"'GPU'",
":",
"0",
"}",
"elif",
"backend",
"==",
"\"tensorflow-gpu\"",
":",
"print",
"(",
"\"Forcing tensorflow/GPU backend.\"",
")",
"device_count",
"=",
"{",
"'CPU'",
":",
"0",
",",
"'GPU'",
":",
"1",
"}",
"elif",
"backend",
"==",
"\"tensorflow-default\"",
":",
"print",
"(",
"\"Forcing tensorflow backend.\"",
")",
"device_count",
"=",
"None",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unsupported backend: %s\"",
"%",
"backend",
")",
"import",
"tensorflow",
"from",
"keras",
"import",
"backend",
"as",
"K",
"if",
"K",
".",
"backend",
"(",
")",
"==",
"'tensorflow'",
":",
"config",
"=",
"tensorflow",
".",
"ConfigProto",
"(",
"device_count",
"=",
"device_count",
")",
"config",
".",
"gpu_options",
".",
"allow_growth",
"=",
"True",
"if",
"num_threads",
":",
"config",
".",
"inter_op_parallelism_threads",
"=",
"num_threads",
"config",
".",
"intra_op_parallelism_threads",
"=",
"num_threads",
"session",
"=",
"tensorflow",
".",
"Session",
"(",
"config",
"=",
"config",
")",
"K",
".",
"set_session",
"(",
"session",
")",
"else",
":",
"if",
"original_backend",
"or",
"gpu_device_nums",
"or",
"num_threads",
":",
"warnings",
".",
"warn",
"(",
"\"Only tensorflow backend can be customized. Ignoring \"",
"\" customization. Backend: %s\"",
"%",
"K",
".",
"backend",
"(",
")",
")"
] |
Configure Keras backend to use GPU or CPU. Only tensorflow is supported.
Parameters
----------
backend : string, optional
one of 'tensorflow-default', 'tensorflow-cpu', 'tensorflow-gpu'
gpu_device_nums : list of int, optional
GPU devices to potentially use
num_threads : int, optional
Tensorflow threads to use
|
[
"Configure",
"Keras",
"backend",
"to",
"use",
"GPU",
"or",
"CPU",
".",
"Only",
"tensorflow",
"is",
"supported",
"."
] |
deb7c1629111254b484a2711619eb2347db36524
|
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/common.py#L14-L68
|
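Two illustrative calls (intended as alternatives; the thread count and device number are arbitrary):

from mhcflurry.common import set_keras_backend

# CPU-only tensorflow with a bounded thread count
set_keras_backend("tensorflow-cpu", num_threads=4)

# Or pin the process to GPU 0 and let tensorflow pick the device configuration
set_keras_backend(gpu_device_nums=[0])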
16,891
|
JonathanRaiman/pytreebank
|
pytreebank/labeled_trees.py
|
LabeledTree.uproot
|
def uproot(tree):
"""
        Take a subbranch of a tree and deep-copy the children
of this subbranch into a new LabeledTree
"""
uprooted = tree.copy()
uprooted.parent = None
for child in tree.all_children():
uprooted.add_general_child(child)
return uprooted
|
python
|
def uproot(tree):
"""
        Take a subbranch of a tree and deep-copy the children
of this subbranch into a new LabeledTree
"""
uprooted = tree.copy()
uprooted.parent = None
for child in tree.all_children():
uprooted.add_general_child(child)
return uprooted
|
[
"def",
"uproot",
"(",
"tree",
")",
":",
"uprooted",
"=",
"tree",
".",
"copy",
"(",
")",
"uprooted",
".",
"parent",
"=",
"None",
"for",
"child",
"in",
"tree",
".",
"all_children",
"(",
")",
":",
"uprooted",
".",
"add_general_child",
"(",
"child",
")",
"return",
"uprooted"
] |
Take a subbranch of a tree and deep-copy the children
of this subbranch into a new LabeledTree
|
[
"Take",
"a",
"subranch",
"of",
"a",
"tree",
"and",
"deep",
"-",
"copy",
"the",
"children",
"of",
"this",
"subbranch",
"into",
"a",
"new",
"LabeledTree"
] |
7b4c671d3dff661cc3677e54db817e50c5a1c666
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/labeled_trees.py#L35-L44
|
16,892
|
JonathanRaiman/pytreebank
|
pytreebank/labeled_trees.py
|
LabeledTree.copy
|
def copy(self):
"""
Deep Copy of a LabeledTree
"""
return LabeledTree(
udepth = self.udepth,
depth = self.depth,
text = self.text,
label = self.label,
children = self.children.copy() if self.children != None else [],
parent = self.parent)
|
python
|
def copy(self):
"""
Deep Copy of a LabeledTree
"""
return LabeledTree(
udepth = self.udepth,
depth = self.depth,
text = self.text,
label = self.label,
children = self.children.copy() if self.children != None else [],
parent = self.parent)
|
[
"def",
"copy",
"(",
"self",
")",
":",
"return",
"LabeledTree",
"(",
"udepth",
"=",
"self",
".",
"udepth",
",",
"depth",
"=",
"self",
".",
"depth",
",",
"text",
"=",
"self",
".",
"text",
",",
"label",
"=",
"self",
".",
"label",
",",
"children",
"=",
"self",
".",
"children",
".",
"copy",
"(",
")",
"if",
"self",
".",
"children",
"!=",
"None",
"else",
"[",
"]",
",",
"parent",
"=",
"self",
".",
"parent",
")"
] |
Deep Copy of a LabeledTree
|
[
"Deep",
"Copy",
"of",
"a",
"LabeledTree"
] |
7b4c671d3dff661cc3677e54db817e50c5a1c666
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/labeled_trees.py#L60-L70
|
16,893
|
JonathanRaiman/pytreebank
|
pytreebank/labeled_trees.py
|
LabeledTree.add_child
|
def add_child(self, child):
"""
Adds a branch to the current tree.
"""
self.children.append(child)
child.parent = self
self.udepth = max([child.udepth for child in self.children]) + 1
|
python
|
def add_child(self, child):
"""
Adds a branch to the current tree.
"""
self.children.append(child)
child.parent = self
self.udepth = max([child.udepth for child in self.children]) + 1
|
[
"def",
"add_child",
"(",
"self",
",",
"child",
")",
":",
"self",
".",
"children",
".",
"append",
"(",
"child",
")",
"child",
".",
"parent",
"=",
"self",
"self",
".",
"udepth",
"=",
"max",
"(",
"[",
"child",
".",
"udepth",
"for",
"child",
"in",
"self",
".",
"children",
"]",
")",
"+",
"1"
] |
Adds a branch to the current tree.
|
[
"Adds",
"a",
"branch",
"to",
"the",
"current",
"tree",
"."
] |
7b4c671d3dff661cc3677e54db817e50c5a1c666
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/labeled_trees.py#L72-L78
|
16,894
|
JonathanRaiman/pytreebank
|
pytreebank/labeled_trees.py
|
LabeledTree.lowercase
|
def lowercase(self):
"""
Lowercase all strings in this tree.
Works recursively and in-place.
"""
if len(self.children) > 0:
for child in self.children:
child.lowercase()
else:
self.text = self.text.lower()
|
python
|
def lowercase(self):
"""
Lowercase all strings in this tree.
Works recursively and in-place.
"""
if len(self.children) > 0:
for child in self.children:
child.lowercase()
else:
self.text = self.text.lower()
|
[
"def",
"lowercase",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"children",
")",
">",
"0",
":",
"for",
"child",
"in",
"self",
".",
"children",
":",
"child",
".",
"lowercase",
"(",
")",
"else",
":",
"self",
".",
"text",
"=",
"self",
".",
"text",
".",
"lower",
"(",
")"
] |
Lowercase all strings in this tree.
Works recursively and in-place.
|
[
"Lowercase",
"all",
"strings",
"in",
"this",
"tree",
".",
"Works",
"recursively",
"and",
"in",
"-",
"place",
"."
] |
7b4c671d3dff661cc3677e54db817e50c5a1c666
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/labeled_trees.py#L92-L101
|
16,895
|
JonathanRaiman/pytreebank
|
pytreebank/labeled_trees.py
|
LabeledTree.inject_visualization_javascript
|
def inject_visualization_javascript(tree_width=1200, tree_height=400, tree_node_radius=10):
"""
        In an IPython notebook, show SST trees using the same Javascript
code as used by Jason Chuang's visualisations.
"""
from .javascript import insert_sentiment_markup
insert_sentiment_markup(tree_width=tree_width, tree_height=tree_height, tree_node_radius=tree_node_radius)
|
python
|
def inject_visualization_javascript(tree_width=1200, tree_height=400, tree_node_radius=10):
"""
        In an IPython notebook, show SST trees using the same Javascript
code as used by Jason Chuang's visualisations.
"""
from .javascript import insert_sentiment_markup
insert_sentiment_markup(tree_width=tree_width, tree_height=tree_height, tree_node_radius=tree_node_radius)
|
[
"def",
"inject_visualization_javascript",
"(",
"tree_width",
"=",
"1200",
",",
"tree_height",
"=",
"400",
",",
"tree_node_radius",
"=",
"10",
")",
":",
"from",
".",
"javascript",
"import",
"insert_sentiment_markup",
"insert_sentiment_markup",
"(",
"tree_width",
"=",
"tree_width",
",",
"tree_height",
"=",
"tree_height",
",",
"tree_node_radius",
"=",
"tree_node_radius",
")"
] |
In an IPython notebook, show SST trees using the same Javascript
code as used by Jason Chuang's visualisations.
|
[
"In",
"an",
"Ipython",
"notebook",
"show",
"SST",
"trees",
"using",
"the",
"same",
"Javascript",
"code",
"as",
"used",
"by",
"Jason",
"Chuang",
"s",
"visualisations",
"."
] |
7b4c671d3dff661cc3677e54db817e50c5a1c666
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/labeled_trees.py#L195-L201
|
16,896
|
JonathanRaiman/pytreebank
|
pytreebank/parse.py
|
create_tree_from_string
|
def create_tree_from_string(line):
"""
Parse and convert a string representation
of an example into a LabeledTree datastructure.
Arguments:
----------
line : str, string version of the tree.
Returns:
--------
LabeledTree : parsed tree.
"""
depth = 0
current_word = ""
root = None
current_node = root
for char in line:
if char == '(':
if current_node is not None and len(current_word) > 0:
attribute_text_label(current_node, current_word)
current_word = ""
depth += 1
if depth > 1:
# replace current head node by this node:
child = LabeledTree(depth=depth)
current_node.add_child(child)
current_node = child
root.add_general_child(child)
else:
root = LabeledTree(depth=depth)
root.add_general_child(root)
current_node = root
elif char == ')':
# assign current word:
if len(current_word) > 0:
attribute_text_label(current_node, current_word)
current_word = ""
# go up a level:
depth -= 1
if current_node.parent != None:
current_node.parent.udepth = max(current_node.udepth+1, current_node.parent.udepth)
current_node = current_node.parent
else:
# add to current read word
current_word += char
if depth != 0:
raise ParseError("Not an equal amount of closing and opening parentheses")
return root
|
python
|
def create_tree_from_string(line):
"""
Parse and convert a string representation
of an example into a LabeledTree datastructure.
Arguments:
----------
line : str, string version of the tree.
Returns:
--------
LabeledTree : parsed tree.
"""
depth = 0
current_word = ""
root = None
current_node = root
for char in line:
if char == '(':
if current_node is not None and len(current_word) > 0:
attribute_text_label(current_node, current_word)
current_word = ""
depth += 1
if depth > 1:
# replace current head node by this node:
child = LabeledTree(depth=depth)
current_node.add_child(child)
current_node = child
root.add_general_child(child)
else:
root = LabeledTree(depth=depth)
root.add_general_child(root)
current_node = root
elif char == ')':
# assign current word:
if len(current_word) > 0:
attribute_text_label(current_node, current_word)
current_word = ""
# go up a level:
depth -= 1
if current_node.parent != None:
current_node.parent.udepth = max(current_node.udepth+1, current_node.parent.udepth)
current_node = current_node.parent
else:
# add to current read word
current_word += char
if depth != 0:
raise ParseError("Not an equal amount of closing and opening parentheses")
return root
|
[
"def",
"create_tree_from_string",
"(",
"line",
")",
":",
"depth",
"=",
"0",
"current_word",
"=",
"\"\"",
"root",
"=",
"None",
"current_node",
"=",
"root",
"for",
"char",
"in",
"line",
":",
"if",
"char",
"==",
"'('",
":",
"if",
"current_node",
"is",
"not",
"None",
"and",
"len",
"(",
"current_word",
")",
">",
"0",
":",
"attribute_text_label",
"(",
"current_node",
",",
"current_word",
")",
"current_word",
"=",
"\"\"",
"depth",
"+=",
"1",
"if",
"depth",
">",
"1",
":",
"# replace current head node by this node:",
"child",
"=",
"LabeledTree",
"(",
"depth",
"=",
"depth",
")",
"current_node",
".",
"add_child",
"(",
"child",
")",
"current_node",
"=",
"child",
"root",
".",
"add_general_child",
"(",
"child",
")",
"else",
":",
"root",
"=",
"LabeledTree",
"(",
"depth",
"=",
"depth",
")",
"root",
".",
"add_general_child",
"(",
"root",
")",
"current_node",
"=",
"root",
"elif",
"char",
"==",
"')'",
":",
"# assign current word:",
"if",
"len",
"(",
"current_word",
")",
">",
"0",
":",
"attribute_text_label",
"(",
"current_node",
",",
"current_word",
")",
"current_word",
"=",
"\"\"",
"# go up a level:",
"depth",
"-=",
"1",
"if",
"current_node",
".",
"parent",
"!=",
"None",
":",
"current_node",
".",
"parent",
".",
"udepth",
"=",
"max",
"(",
"current_node",
".",
"udepth",
"+",
"1",
",",
"current_node",
".",
"parent",
".",
"udepth",
")",
"current_node",
"=",
"current_node",
".",
"parent",
"else",
":",
"# add to current read word",
"current_word",
"+=",
"char",
"if",
"depth",
"!=",
"0",
":",
"raise",
"ParseError",
"(",
"\"Not an equal amount of closing and opening parentheses\"",
")",
"return",
"root"
] |
Parse and convert a string representation
of an example into a LabeledTree datastructure.
Arguments:
----------
line : str, string version of the tree.
Returns:
--------
LabeledTree : parsed tree.
|
[
"Parse",
"and",
"convert",
"a",
"string",
"representation",
"of",
"an",
"example",
"into",
"a",
"LabeledTree",
"datastructure",
"."
] |
7b4c671d3dff661cc3677e54db817e50c5a1c666
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/parse.py#L49-L101
|
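A small sketch parsing one PTB-style sentiment string; the example line is made up but follows the "(label child child)" format the parser expects:

from pytreebank.parse import create_tree_from_string

line = "(3 (2 An) (4 (3 (2 enjoyable) (2 example)) (2 .)))"
tree = create_tree_from_string(line)
print(tree.label)            # root sentiment label (3 in this example)
print(len(tree.children))    # number of immediate children (2 in this example)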
16,897
|
JonathanRaiman/pytreebank
|
pytreebank/parse.py
|
import_tree_corpus
|
def import_tree_corpus(path):
"""
Import a text file of treebank trees.
Arguments:
----------
path : str, filename for tree corpus.
Returns:
--------
list<LabeledTree> : loaded examples.
"""
tree_list = LabeledTreeCorpus()
with codecs.open(path, "r", "UTF-8") as f:
for line in f:
tree_list.append(create_tree_from_string(line))
return tree_list
|
python
|
def import_tree_corpus(path):
"""
Import a text file of treebank trees.
Arguments:
----------
path : str, filename for tree corpus.
Returns:
--------
list<LabeledTree> : loaded examples.
"""
tree_list = LabeledTreeCorpus()
with codecs.open(path, "r", "UTF-8") as f:
for line in f:
tree_list.append(create_tree_from_string(line))
return tree_list
|
[
"def",
"import_tree_corpus",
"(",
"path",
")",
":",
"tree_list",
"=",
"LabeledTreeCorpus",
"(",
")",
"with",
"codecs",
".",
"open",
"(",
"path",
",",
"\"r\"",
",",
"\"UTF-8\"",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"tree_list",
".",
"append",
"(",
"create_tree_from_string",
"(",
"line",
")",
")",
"return",
"tree_list"
] |
Import a text file of treebank trees.
Arguments:
----------
path : str, filename for tree corpus.
Returns:
--------
list<LabeledTree> : loaded examples.
|
[
"Import",
"a",
"text",
"file",
"of",
"treebank",
"trees",
"."
] |
7b4c671d3dff661cc3677e54db817e50c5a1c666
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/parse.py#L144-L160
|
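A usage sketch; the path is hypothetical, and to_labeled_lines() is the LabeledTree method used by LabeledTreeCorpus.labels below to flatten a tree into (label, phrase) pairs:

from pytreebank.parse import import_tree_corpus

trees = import_tree_corpus("trees/train.txt")  # hypothetical path to a PTB-style tree file
print(len(trees), "trees loaded")
label, sentence = trees[0].to_labeled_lines()[0]
print(label, sentence)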
16,898
|
JonathanRaiman/pytreebank
|
pytreebank/parse.py
|
load_sst
|
def load_sst(path=None,
url='http://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip'):
"""
Download and read in the Stanford Sentiment Treebank dataset
    into a dictionary with 'train', 'dev', and 'test' keys. The
dictionary keys point to lists of LabeledTrees.
Arguments:
----------
path : str, (optional defaults to ~/stanford_sentiment_treebank),
directory where the corpus should be downloaded (and
imported from).
url : str, where the corpus should be downloaded from (defaults
to nlp.stanford.edu address).
Returns:
--------
dict : loaded dataset
"""
if path is None:
# find a good temporary path
path = os.path.expanduser("~/stanford_sentiment_treebank/")
makedirs(path, exist_ok=True)
fnames = download_sst(path, url)
return {key: import_tree_corpus(value) for key, value in fnames.items()}
|
python
|
def load_sst(path=None,
url='http://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip'):
"""
Download and read in the Stanford Sentiment Treebank dataset
    into a dictionary with 'train', 'dev', and 'test' keys. The
dictionary keys point to lists of LabeledTrees.
Arguments:
----------
path : str, (optional defaults to ~/stanford_sentiment_treebank),
directory where the corpus should be downloaded (and
imported from).
url : str, where the corpus should be downloaded from (defaults
to nlp.stanford.edu address).
Returns:
--------
dict : loaded dataset
"""
if path is None:
# find a good temporary path
path = os.path.expanduser("~/stanford_sentiment_treebank/")
makedirs(path, exist_ok=True)
fnames = download_sst(path, url)
return {key: import_tree_corpus(value) for key, value in fnames.items()}
|
[
"def",
"load_sst",
"(",
"path",
"=",
"None",
",",
"url",
"=",
"'http://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip'",
")",
":",
"if",
"path",
"is",
"None",
":",
"# find a good temporary path",
"path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~/stanford_sentiment_treebank/\"",
")",
"makedirs",
"(",
"path",
",",
"exist_ok",
"=",
"True",
")",
"fnames",
"=",
"download_sst",
"(",
"path",
",",
"url",
")",
"return",
"{",
"key",
":",
"import_tree_corpus",
"(",
"value",
")",
"for",
"key",
",",
"value",
"in",
"fnames",
".",
"items",
"(",
")",
"}"
] |
Download and read in the Stanford Sentiment Treebank dataset
into a dictionary with 'train', 'dev', and 'test' keys. The
dictionary keys point to lists of LabeledTrees.
Arguments:
----------
path : str, (optional defaults to ~/stanford_sentiment_treebank),
directory where the corpus should be downloaded (and
imported from).
url : str, where the corpus should be downloaded from (defaults
to nlp.stanford.edu address).
Returns:
--------
dict : loaded dataset
|
[
"Download",
"and",
"read",
"in",
"the",
"Stanford",
"Sentiment",
"Treebank",
"dataset",
"into",
"a",
"dictionary",
"with",
"a",
"train",
"dev",
"and",
"test",
"keys",
".",
"The",
"dictionary",
"keys",
"point",
"to",
"lists",
"of",
"LabeledTrees",
"."
] |
7b4c671d3dff661cc3677e54db817e50c5a1c666
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/parse.py#L163-L187
|
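A usage sketch, importing from the module path given above; the corpus is downloaded to ~/stanford_sentiment_treebank on first use unless a path is supplied:

from pytreebank.parse import load_sst

dataset = load_sst()            # downloads the corpus on first use
train_trees = dataset["train"]
label, sentence = train_trees[0].to_labeled_lines()[0]
print(label, sentence)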
16,899
|
JonathanRaiman/pytreebank
|
pytreebank/parse.py
|
LabeledTreeCorpus.labels
|
def labels(self):
"""
Construct a dictionary of string -> labels
Returns:
--------
OrderedDict<str, int> : string label pairs.
"""
labelings = OrderedDict()
for tree in self:
for label, line in tree.to_labeled_lines():
labelings[line] = label
return labelings
|
python
|
def labels(self):
"""
Construct a dictionary of string -> labels
Returns:
--------
OrderedDict<str, int> : string label pairs.
"""
labelings = OrderedDict()
for tree in self:
for label, line in tree.to_labeled_lines():
labelings[line] = label
return labelings
|
[
"def",
"labels",
"(",
"self",
")",
":",
"labelings",
"=",
"OrderedDict",
"(",
")",
"for",
"tree",
"in",
"self",
":",
"for",
"label",
",",
"line",
"in",
"tree",
".",
"to_labeled_lines",
"(",
")",
":",
"labelings",
"[",
"line",
"]",
"=",
"label",
"return",
"labelings"
] |
Construct a dictionary of string -> labels
Returns:
--------
OrderedDict<str, int> : string label pairs.
|
[
"Construct",
"a",
"dictionary",
"of",
"string",
"-",
">",
"labels"
] |
7b4c671d3dff661cc3677e54db817e50c5a1c666
|
https://github.com/JonathanRaiman/pytreebank/blob/7b4c671d3dff661cc3677e54db817e50c5a1c666/pytreebank/parse.py#L112-L124
|
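A sketch of using the resulting label mapping, assuming `corpus` is a LabeledTreeCorpus such as one split returned by load_sst above:

phrase_to_label = corpus.labels()
for phrase, label in list(phrase_to_label.items())[:5]:
    print(label, phrase)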