Dataset columns (min-max observed value lengths):

repository_name: string, lengths 7-55
func_path_in_repository: string, lengths 4-223
func_name: string, lengths 1-134
whole_func_string: string, lengths 75-104k
language: string, 1 class
func_code_string: string, lengths 75-104k
func_code_tokens: list, lengths 19-28.4k
func_documentation_string: string, lengths 1-46.9k
func_documentation_tokens: list, lengths 1-1.97k
split_name: string, 1 class
func_code_url: string, lengths 87-315
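Each record pairs a function's source with its docstring and provenance. A minimal sketch of consuming such records, assuming the split has been exported as JSON lines (the file name below is a placeholder):

import json

# Placeholder file name; any JSON-lines export of the split works.
with open("train.jsonl") as fh:
    for line in fh:
        record = json.loads(line)
        # each record carries the raw source plus its extracted docstring
        print(record["func_name"], "::", record["func_code_url"])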
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/interp.py
func_name: PfpInterp._handle_break
language: python
whole_func_string:

def _handle_break(self, node, scope, ctxt, stream):
    """Handle break node

    :node: TODO
    :scope: TODO
    :ctxt: TODO
    :stream: TODO
    :returns: TODO
    """
    self._dlog("handling break")
    raise errors.InterpBreak()
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/interp.py#L2217-L2228
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/interp.py
func_name: PfpInterp._handle_continue
language: python
whole_func_string:

def _handle_continue(self, node, scope, ctxt, stream):
    """Handle continue node

    :node: TODO
    :scope: TODO
    :ctxt: TODO
    :stream: TODO
    :returns: TODO
    """
    self._dlog("handling continue")
    raise errors.InterpContinue()
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/interp.py#L2230-L2241
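Both handlers implement break/continue as exceptions that an enclosing loop handler catches. A self-contained sketch of that control-flow pattern (the class and function names here are illustrative, not pfp's internals):

class InterpBreak(Exception):
    """Illustrative stand-in for pfp's errors.InterpBreak."""

class InterpContinue(Exception):
    """Illustrative stand-in for pfp's errors.InterpContinue."""

def interpret_while(cond, body_nodes, handle_node):
    # Child handlers raise; the loop handler is the only catcher, which is
    # what lets break/continue escape arbitrarily deep statement handling.
    while cond():
        try:
            for node in body_nodes:
                handle_node(node)
        except InterpBreak:
            break
        except InterpContinue:
            continue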
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/interp.py
func_name: PfpInterp._handle_decl_list
language: python
whole_func_string:

def _handle_decl_list(self, node, scope, ctxt, stream):
    """Handle decl-list nodes

    :node: TODO
    :scope: TODO
    :ctxt: TODO
    :stream: TODO
    :returns: TODO
    """
    self._dlog("handling decl list")
    # just handle each declaration
    for decl in node.decls:
        self._handle_node(decl, scope, ctxt, stream)
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/interp.py#L2243-L2256
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/interp.py
func_name: PfpInterp._create_scope
language: python
whole_func_string:

def _create_scope(self):
    """TODO: Docstring for _create_scope.

    :returns: TODO
    """
    res = Scope(self._log)

    for func_name, native_func in six.iteritems(self._natives):
        res.add_local(func_name, native_func)

    return res
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/interp.py#L2306-L2316
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/interp.py
func_name: PfpInterp._get_value
language: python
whole_func_string:

def _get_value(self, node, scope, ctxt, stream):
    """Return the value of the node. It is expected to
    be either an AST.ID instance or a constant

    :node: TODO
    :returns: TODO
    """
    res = self._handle_node(node, scope, ctxt, stream)

    if isinstance(res, fields.Field):
        return res._pfp__value

    # assume it's a constant
    else:
        return res
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/interp.py#L2318-L2334
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/interp.py
func_name: PfpInterp._resolve_to_field_class
language: python
whole_func_string:

def _resolve_to_field_class(self, names, scope):
    """Resolve the names to a class in fields.py, resolving past
    typedefs, etc

    :names: TODO
    :scope: TODO
    :ctxt: TODO
    :returns: TODO
    """
    switch = {
        "char":    "Char",
        "int":     "Int",
        "long":    "Int",
        "int64":   "Int64",
        "uint64":  "UInt64",
        "short":   "Short",
        "double":  "Double",
        "float":   "Float",
        "void":    "Void",
        "string":  "String",
        "wstring": "WString"
    }

    core = names[-1]

    if core not in switch:
        # will return a list of resolved names
        type_info = scope.get_type(core)
        if type(type_info) is type and issubclass(type_info, fields.Field):
            return type_info
        resolved_names = type_info
        if resolved_names is None:
            raise errors.UnresolvedType(self._coord, " ".join(names), " ")
        if resolved_names[-1] not in switch:
            raise errors.UnresolvedType(self._coord, " ".join(names), " ".join(resolved_names))
        names = copy.copy(names)
        names.pop()
        names += resolved_names

    if len(names) >= 2 and names[-1] == names[-2] and names[-1] == "long":
        res = "Int64"
    else:
        res = switch[names[-1]]

    if names[-1] in ["char", "short", "int", "long"] and "unsigned" in names[:-1]:
        res = "U" + res

    cls = getattr(fields, res)
    return cls
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/interp.py#L2336-L2385
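The suffix handling above ("long long" collapsing to Int64, an "unsigned" qualifier adding a "U" prefix) can be exercised in isolation. A standalone sketch of just the name-to-class-name mapping, with typedef resolution omitted:

def field_class_name(names):
    # standalone sketch of the switch logic above (typedefs omitted)
    switch = {
        "char": "Char", "int": "Int", "long": "Int", "int64": "Int64",
        "uint64": "UInt64", "short": "Short", "double": "Double",
        "float": "Float", "void": "Void", "string": "String",
        "wstring": "WString",
    }
    if len(names) >= 2 and names[-1] == names[-2] == "long":
        res = "Int64"          # "long long" maps to a 64-bit int
    else:
        res = switch[names[-1]]
    if names[-1] in ("char", "short", "int", "long") and "unsigned" in names[:-1]:
        res = "U" + res        # unsigned variants get the "U" prefix
    return res

assert field_class_name(["unsigned", "int"]) == "UInt"
assert field_class_name(["long", "long"]) == "Int64"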
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/bitwrap.py
func_name: bits_to_bytes
language: python
whole_func_string:

def bits_to_bytes(bits):
    """Convert the bit list into bytes. (Assumes bits is a list whose
    length is a multiple of 8)
    """
    if len(bits) % 8 != 0:
        raise Exception("num bits must be multiple of 8")

    res = ""
    for x in six.moves.range(0, len(bits), 8):
        byte_bits = bits[x:x + 8]
        byte_val = int(''.join(map(str, byte_bits)), 2)
        res += chr(byte_val)

    return utils.binary(res)
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/bitwrap.py#L15-L29
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/bitwrap.py
func_name: bytes_to_bits
language: python
whole_func_string:

def bytes_to_bits(bytes_):
    """Convert bytes to a list of bits
    """
    res = []
    for x in bytes_:
        if not isinstance(x, int):
            x = ord(x)
        res += byte_to_bits(x)
    return res
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/bitwrap.py#L31-L39
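The two helpers are inverses for byte-aligned input. A round-trip check, assuming pfp is installed so the imports resolve, and that bits are most-significant-first, as the int(..., 2) conversion in bits_to_bytes implies:

from pfp.bitwrap import bits_to_bytes, bytes_to_bits

bits = bytes_to_bits(b"Hi")
assert len(bits) == 16                         # eight bits per input byte
assert bits[:8] == [0, 1, 0, 0, 1, 0, 0, 0]    # 0x48 ("H"), MSB first
assert bits_to_bytes(bits) == b"Hi"            # round-trips to the original bytes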
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/bitwrap.py
func_name: BitwrappedStream.is_eof
language: python
whole_func_string:

def is_eof(self):
    """Return if the stream has reached EOF or not
    without discarding any unflushed bits

    :returns: True/False
    """
    pos = self._stream.tell()
    byte = self._stream.read(1)
    self._stream.seek(pos, 0)

    return utils.binary(byte) == utils.binary("")
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/bitwrap.py#L70-L80
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/bitwrap.py
func_name: BitwrappedStream.close
language: python
whole_func_string:

def close(self):
    """Close the stream
    """
    self.closed = True
    self._flush_bits_to_stream()
    self._stream.close()
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/bitwrap.py#L82-L87
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/bitwrap.py
func_name: BitwrappedStream.read
language: python
whole_func_string:

def read(self, num):
    """Read ``num`` number of bytes from the stream. Note that this will
    automatically reset/end the current bit-reading if it does not end on
    an even byte AND ``self.padded`` is True. If ``self.padded`` is False,
    the entire stream is treated as a bitstream.

    :num: number of bytes to read
    :returns: the read bytes, or empty string if EOF has been reached
    """
    start_pos = self.tell()

    if self.padded:
        # we toss out any uneven bytes
        self._bits.clear()
        res = utils.binary(self._stream.read(num))
    else:
        bits = self.read_bits(num * 8)
        res = bits_to_bytes(bits)
        res = utils.binary(res)

    end_pos = self.tell()
    self._update_consumed_ranges(start_pos, end_pos)

    return res
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/bitwrap.py#L100-L123
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/bitwrap.py
func_name: BitwrappedStream.read_bits
language: python
whole_func_string:

def read_bits(self, num):
    """Read ``num`` number of bits from the stream

    :num: number of bits to read
    :returns: a list of ``num`` bits, or an empty list if EOF has been reached
    """
    if num > len(self._bits):
        needed = num - len(self._bits)
        num_bytes = int(math.ceil(needed / 8.0))
        read_bytes = self._stream.read(num_bytes)

        for bit in bytes_to_bits(read_bytes):
            self._bits.append(bit)

    res = []
    while len(res) < num and len(self._bits) > 0:
        res.append(self._bits.popleft())

    return res
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/bitwrap.py#L125-L143
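A usage sketch of bit-level reads, assuming BitwrappedStream wraps any file-like object (the one-argument constructor form used by FSeek further down):

import io
from pfp.bitwrap import BitwrappedStream

stream = BitwrappedStream(io.BytesIO(b"\xf0\x0f"))
assert stream.read_bits(4) == [1, 1, 1, 1]  # high nibble of 0xf0
assert stream.read_bits(4) == [0, 0, 0, 0]  # low nibble, served from the buffered deque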
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/bitwrap.py
func_name: BitwrappedStream.write
language: python
whole_func_string:

def write(self, data):
    """Write data to the stream

    :data: the data to write to the stream
    :returns: None
    """
    if self.padded:
        # flush out any remaining bits first
        if len(self._bits) > 0:
            self._flush_bits_to_stream()
        self._stream.write(data)
    else:
        # nothing to do here
        if len(data) == 0:
            return
        bits = bytes_to_bits(data)
        self.write_bits(bits)
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/bitwrap.py#L145-L162
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/bitwrap.py
func_name: BitwrappedStream.write_bits
language: python
whole_func_string:

def write_bits(self, bits):
    """Write the bits to the stream.

    Add the bits to the existing unflushed bits and write
    complete bytes to the stream.
    """
    for bit in bits:
        self._bits.append(bit)

    while len(self._bits) >= 8:
        byte_bits = [self._bits.popleft() for x in six.moves.range(8)]
        byte = bits_to_bytes(byte_bits)
        self._stream.write(byte)
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/bitwrap.py#L164-L176
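Writing mirrors reading: bits accumulate in the deque until a whole byte can be emitted. A sketch under the same assumptions as the reading example:

import io
from pfp.bitwrap import BitwrappedStream

out = io.BytesIO()
stream = BitwrappedStream(out)
stream.write_bits([1, 0, 1, 0])   # four bits buffered, nothing written yet
assert out.getvalue() == b""
stream.write_bits([1, 0, 1, 0])   # completes the byte 0b10101010
assert out.getvalue() == b"\xaa"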
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/bitwrap.py
func_name: BitwrappedStream.tell
language: python
whole_func_string:

def tell(self):
    """Return the current position in the stream (ignoring bit
    position)

    :returns: int for the position in the stream
    """
    res = self._stream.tell()
    if len(self._bits) > 0:
        res -= 1
    return res
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/bitwrap.py#L180-L189
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/bitwrap.py
func_name: BitwrappedStream.seek
language: python
whole_func_string:

def seek(self, pos, seek_type=0):
    """Seek to the specified position in the stream with seek_type.
    Unflushed bits will be discarded in the case of a seek.

    The stream will also keep track of which bytes have and have
    not been consumed so that the dom will capture all of the
    bytes in the stream.

    :pos: offset
    :seek_type: direction
    :returns: TODO
    """
    self._bits.clear()
    return self._stream.seek(pos, seek_type)
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/bitwrap.py#L191-L205
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/bitwrap.py
func_name: BitwrappedStream.size
language: python
whole_func_string:

def size(self):
    """Return the size of the stream, or -1 if it cannot
    be determined.
    """
    pos = self._stream.tell()

    # seek to the end of the stream
    self._stream.seek(0, 2)
    size = self._stream.tell()
    self._stream.seek(pos, 0)

    return size
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/bitwrap.py#L207-L217
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/bitwrap.py
func_name: BitwrappedStream.unconsumed_ranges
language: python
whole_func_string:

def unconsumed_ranges(self):
    """Return an IntervalTree of unconsumed ranges, of the format
    [start, end) with the end value not being included
    """
    res = IntervalTree()

    prev = None

    # normal iteration is not in a predictable order
    ranges = sorted([x for x in self.range_set], key=lambda x: x.begin)

    for rng in ranges:
        if prev is None:
            prev = rng
            continue
        res.add(Interval(prev.end, rng.begin))
        prev = rng

    # means we've seeked past the end
    if len(self.range_set[self.tell()]) != 1:
        res.add(Interval(prev.end, self.tell()))

    return res
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/bitwrap.py#L219-L241
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/bitwrap.py
func_name: BitwrappedStream._update_consumed_ranges
language: python
whole_func_string:

def _update_consumed_ranges(self, start_pos, end_pos):
    """Update the ``self.consumed_ranges`` array with which
    byte ranges have been consecutively consumed.
    """
    self.range_set.add(Interval(start_pos, end_pos + 1))
    self.range_set.merge_overlaps()
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/bitwrap.py#L247-L252
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/bitwrap.py
func_name: BitwrappedStream._flush_bits_to_stream
language: python
whole_func_string:

def _flush_bits_to_stream(self):
    """Flush the bits to the stream. This is used when
    a few bits have been read and ``self._bits`` contains
    unconsumed/unflushed bits when data is to be written
    to the stream
    """
    if len(self._bits) == 0:
        return 0

    bits = list(self._bits)

    diff = 8 - (len(bits) % 8)
    padding = [0] * diff

    bits = bits + padding

    self._stream.write(bits_to_bytes(bits))

    self._bits.clear()
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/bitwrap.py#L254-L271
repository_name: expfactory/expfactory
func_path_in_repository: expfactory/validator/library.py
func_name: LibraryValidator._validate_markdown
language: python
whole_func_string:

def _validate_markdown(self, expfile):
    '''ensure that fields are present in markdown file'''

    try:
        import yaml
    except ImportError:
        bot.error('Python yaml is required for testing yml/markdown files.')
        sys.exit(1)

    self.metadata = {}
    uid = os.path.basename(expfile).strip('.md')

    if os.path.exists(expfile):
        with open(expfile, "r") as stream:
            docs = yaml.load_all(stream)
            for doc in docs:
                if isinstance(doc, dict):
                    for k, v in doc.items():
                        print('%s: %s' % (k, v))
                        self.metadata[k] = v
        self.metadata['uid'] = uid

        fields = ['github', 'preview', 'name', 'layout',
                  'tags', 'uid', 'maintainer']

        # Tests for all fields
        for field in fields:
            if field not in self.metadata:
                return False
            if self.metadata[field] in ['', None]:
                return False

        if 'github' not in self.metadata['github']:
            return notvalid('%s: not a valid github repository' % expfile)
        if not isinstance(self.metadata['tags'], list):
            return notvalid('%s: tags must be a list' % expfile)
        if not re.search("(\w+://)(.+@)*([\w\d\.]+)(:[\d]+){0,1}/*(.*)",
                         self.metadata['github']):
            return notvalid('%s is not a valid URL.' % (self.metadata['github']))

    return True
split_name: train
func_code_url: https://github.com/expfactory/expfactory/blob/27ce6cc93e17231df8a8024f18e631336afd3501/expfactory/validator/library.py#L62-L101
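Grounded in the checks above, the front matter must supply all seven fields, a github URL, and a list-valued tags. A sketch of metadata that would pass, parsed here with yaml.safe_load_all (the validator itself calls yaml.load_all; the field values are illustrative):

import yaml

# Illustrative front matter satisfying _validate_markdown's field checks
doc = """
layout: experiment
name: stroop
uid: stroop
github: https://github.com/expfactory-experiments/stroop
preview: https://expfactory-experiments.github.io/stroop
tags:
  - cognitive
maintainer: "@vsoch"
"""
metadata = {}
for parsed in yaml.safe_load_all(doc):
    if isinstance(parsed, dict):
        metadata.update(parsed)

required = ['github', 'preview', 'name', 'layout', 'tags', 'uid', 'maintainer']
assert all(metadata.get(f) not in ('', None) for f in required)
assert isinstance(metadata['tags'], list) and 'github' in metadata['github']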
repository_name: expfactory/expfactory
func_path_in_repository: expfactory/views/utils.py
func_name: perform_checks
language: python
whole_func_string:

def perform_checks(template, do_redirect=False, context=None, next=None, quiet=False):
    '''return all checks for required variables before returning to
    desired view

    Parameters
    ==========
    template: the html template to render
    do_redirect: if True, perform a redirect and not render
    context: dictionary of context variables to pass to render_template
    next: a pre-defined next experiment, will calculate if None
    quiet: decrease verbosity
    '''
    from expfactory.server import app

    username = session.get('username')
    subid = session.get('subid')

    # If redirect, "last" is currently active (about to start)
    # If render, "last" is last completed / active experiment (just finished)
    last = session.get('exp_id')
    if next is None:
        next = app.get_next(session)

    session['exp_id'] = next

    # Headless mode requires token
    if "token" not in session and app.headless is True:
        flash('A token is required for these experiments.')
        return redirect('/')

    # Update the user / log
    if quiet is False:
        app.logger.info("[router] %s --> %s [subid] %s [user] %s" % (last, next, subid, username))

    if username is None and app.headless is False:
        flash('You must start a session before doing experiments.')
        return redirect('/')

    if subid is None:
        flash('You must have a participant identifier before doing experiments')
        return redirect('/')

    if next is None:
        flash('Congratulations, you have finished the battery!')
        return redirect('/finish')

    if do_redirect is True:
        app.logger.debug('Redirecting to %s' % template)
        return redirect(template)

    if context is not None and isinstance(context, dict):
        app.logger.debug('Rendering %s' % template)
        return render_template(template, **context)

    return render_template(template)
split_name: train
func_code_url: https://github.com/expfactory/expfactory/blob/27ce6cc93e17231df8a8024f18e631336afd3501/expfactory/views/utils.py#L45-L105
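A hedged sketch of how a view function might delegate to perform_checks; the route and template names are invented for illustration, not expfactory's actual ones:

from expfactory.views.utils import perform_checks

# Hypothetical Flask view; perform_checks handles the session, token,
# and battery-completion checks before rendering the requested template.
# @app.route('/experiments/next')
def next_experiment():
    return perform_checks('experiment.html', context={'title': 'Next experiment'})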
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/native/compat_io.py
func_name: FSeek
language: python
whole_func_string:

def FSeek(params, ctxt, scope, stream, coord):
    """Returns 0 if successful or -1 if the address is out of range
    """
    if len(params) != 1:
        raise errors.InvalidArguments(coord,
            "{} args".format(len(params)), "FSeek accepts only one argument")

    pos = PYVAL(params[0])
    curr_pos = stream.tell()

    fsize = stream.size()

    if pos > fsize:
        stream.seek(fsize)
        return -1
    elif pos < 0:
        stream.seek(0)
        return -1

    diff = pos - curr_pos
    if diff < 0:
        stream.seek(pos)
        return 0

    data = stream.read(diff)

    # let the ctxt automatically append numbers, as needed, unless the previous
    # child was also a skipped field
    skipped_name = "_skipped"

    if len(ctxt._pfp__children) > 0 and ctxt._pfp__children[-1]._pfp__name.startswith("_skipped"):
        old_name = ctxt._pfp__children[-1]._pfp__name
        data = ctxt._pfp__children[-1].raw_data + data
        skipped_name = old_name

        ctxt._pfp__children = ctxt._pfp__children[:-1]
        del ctxt._pfp__children_map[old_name]

    tmp_stream = bitwrap.BitwrappedStream(six.BytesIO(data))
    new_field = pfp.fields.Array(len(data), pfp.fields.Char, tmp_stream)
    ctxt._pfp__add_child(skipped_name, new_field, stream)
    scope.add_var(skipped_name, new_field)

    return 0
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/native/compat_io.py#L117-L158
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/native/compat_io.py
func_name: FSkip
language: python
whole_func_string:

def FSkip(params, ctxt, scope, stream, coord):
    """Returns 0 if successful or -1 if the address is out of range
    """
    if len(params) != 1:
        raise errors.InvalidArguments(coord,
            "{} args".format(len(params)), "FSkip accepts only one argument")

    skip_amt = PYVAL(params[0])
    pos = skip_amt + stream.tell()
    return FSeek([pos], ctxt, scope, stream, coord)
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/native/compat_io.py#L162-L170
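FSeek and FSkip back the 010 Editor template built-ins of the same names. A sketch exercising FSkip through pfp's top-level parse API, assuming pfp.parse accepts in-memory data and template strings:

import pfp

# FSkip(4) jumps over the first four bytes; per FSeek above, the skipped
# range materializes in the dom as a "_skipped" char array.
dom = pfp.parse(
    data=b"\x00\x01\x02\x03magic",
    template="""
        FSkip(4);
        char magic[5];
    """,
)
print(dom.magic)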
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/native/packers.py
func_name: packer_gzip
language: python
whole_func_string:

def packer_gzip(params, ctxt, scope, stream, coord):
    """``PackerGZip`` - implements both unpacking and packing. Can be used
    as the ``packer`` for a field. When packing, concats the build output
    of all params and gzip-compresses the result. When unpacking, concats
    the build output of all params and gzip-decompresses the result.

    Example:

        The code below specifies that the ``data`` field is gzipped
        and that once decompressed, should be parsed with ``PACK_TYPE``.
        When building the ``PACK_TYPE`` structure, ``data`` will be updated
        with the compressed data.::

            char data[0x100]<packer=PackerGZip, packtype=PACK_TYPE>;

    :pack: True if the data should be packed, false if it should be unpacked
    :data: The data to operate on
    :returns: An array
    """
    if len(params) <= 1:
        raise errors.InvalidArguments(coord,
            "{} args".format(len(params)), "at least two arguments")

    # to gzip it (pack it)
    if params[0]:
        return pack_gzip(params[1:], ctxt, scope, stream, coord)
    else:
        return unpack_gzip(params[1:], ctxt, scope, stream, coord)
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/native/packers.py#L14-L40
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/native/packers.py
func_name: pack_gzip
language: python
whole_func_string:

def pack_gzip(params, ctxt, scope, stream, coord):
    """``PackGZip`` - Concats the build output of all params and gzips the
    resulting data, returning a char array.

    Example: ::

        char data[0x100]<pack=PackGZip, ...>;
    """
    if len(params) == 0:
        raise errors.InvalidArguments(coord,
            "{} args".format(len(params)), "at least one argument")

    built = utils.binary("")
    for param in params:
        if isinstance(param, pfp.fields.Field):
            built += param._pfp__build()
        else:
            built += param

    return zlib.compress(built)
split_name: train
func_code_url: https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/native/packers.py#L43-L61
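Despite the name, the implementation compresses with zlib rather than the gzip container. A round trip showing the inverse relationship the matching unpacker relies on:

import zlib

built = b"example build output"          # concatenated param builds
packed = zlib.compress(built)            # what pack_gzip returns
assert zlib.decompress(packed) == built  # what the unpacker must recover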
repository_name: d0c-s4vage/pfp
func_path_in_repository: pfp/native/watchers.py
func_name: watch_length
language: python
whole_func_string:

def watch_length(params, ctxt, scope, stream, coord):
    """WatchLength - Watch the total length of each of the params.

    Example:
        The code below uses the ``WatchLength`` update function to update
        the ``length`` field to the length of the ``data`` field ::

            int length<watch=data, update=WatchLength>;
            char data[length];
    """
    if len(params) <= 1:
        raise errors.InvalidArguments(coord,
            "{} args".format(len(params)), "at least two arguments")

    to_update = params[0]

    total_size = 0
    for param in params[1:]:
        total_size += param._pfp__width()

    to_update._pfp__set_value(total_size)
train
https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/native/watchers.py#L15-L34
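A minimal sketch of the update step outside the interpreter, using a stand-in field class (FakeField and its two methods are ours, mimicking the _pfp__ interface used above):

class FakeField:
    def __init__(self, width=0):
        self.width = width
        self.value = None
    def _pfp__width(self):
        return self.width
    def _pfp__set_value(self, v):
        self.value = v

length, data = FakeField(), FakeField(width=16)
# watch_length([length, data], ...) boils down to summing the watched widths:
length._pfp__set_value(sum(f._pfp__width() for f in [data]))
print(length.value)  # 16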
d0c-s4vage/pfp
pfp/native/watchers.py
watch_crc
def watch_crc(params, ctxt, scope, stream, coord): """WatchCrc32 - Watch the total crc32 of the params. Example: The code below uses the ``WatchCrc32`` update function to update the ``crc`` field to the crc of the ``length`` and ``data`` fields :: char length; char data[length]; int crc<watch=length;data, update=WatchCrc32>; """ if len(params) <= 1: raise errors.InvalidArguments(coord, "{} args".format(len(params)), "at least two arguments") to_update = params[0] total_data = utils.binary("") for param in params[1:]: total_data += param._pfp__build() to_update._pfp__set_value(binascii.crc32(total_data))
python
def watch_crc(params, ctxt, scope, stream, coord): """WatchCrc32 - Watch the total crc32 of the params. Example: The code below uses the ``WatchCrc32`` update function to update the ``crc`` field to the crc of the ``length`` and ``data`` fields :: char length; char data[length]; int crc<watch=length;data, update=WatchCrc32>; """ if len(params) <= 1: raise errors.InvalidArguments(coord, "{} args".format(len(params)), "at least two arguments") to_update = params[0] total_data = utils.binary("") for param in params[1:]: total_data += param._pfp__build() to_update._pfp__set_value(binascii.crc32(total_data))
[ "def", "watch_crc", "(", "params", ",", "ctxt", ",", "scope", ",", "stream", ",", "coord", ")", ":", "if", "len", "(", "params", ")", "<=", "1", ":", "raise", "errors", ".", "InvalidArguments", "(", "coord", ",", "\"{} args\"", ".", "format", "(", "len", "(", "params", ")", ")", ",", "\"at least two arguments\"", ")", "to_update", "=", "params", "[", "0", "]", "total_data", "=", "utils", ".", "binary", "(", "\"\"", ")", "for", "param", "in", "params", "[", "1", ":", "]", ":", "total_data", "+=", "param", ".", "_pfp__build", "(", ")", "to_update", ".", "_pfp__set_value", "(", "binascii", ".", "crc32", "(", "total_data", ")", ")" ]
WatchCrc32 - Watch the total crc32 of the params. Example: The code below uses the ``WatchCrc32`` update function to update the ``crc`` field to the crc of the ``length`` and ``data`` fields :: char length; char data[length]; int crc<watch=length;data, update=WatchCrc32>;
[ "WatchCrc32", "-", "Watch", "the", "total", "crc32", "of", "the", "params", ".", "Example", ":", "The", "code", "below", "uses", "the", "WatchCrc32", "update", "function", "to", "update", "the", "crc", "field", "to", "the", "crc", "of", "the", "length", "and", "data", "fields", "::" ]
train
https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/native/watchers.py#L37-L57
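The same idea for the crc updater, reduced to plain bytes (the values are invented):

import binascii

length = b"\x05"
data = b"hello"
# watch_crc concatenates the builds of the watched fields, then crc32s them;
# masking with 0xFFFFFFFF gives a stable unsigned value across Python versions.
crc = binascii.crc32(length + data) & 0xFFFFFFFF
print(hex(crc))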
expfactory/expfactory
expfactory/validator/experiments.py
ExperimentValidator._validate_folder
def _validate_folder(self, folder=None): ''' validate folder takes a cloned github repo, ensures the existence of the config.json, and validates it. ''' from expfactory.experiment import load_experiment if folder is None: folder=os.path.abspath(os.getcwd()) config = load_experiment(folder, return_path=True) if not config: return notvalid("%s is not an experiment." %(folder)) return self._validate_config(folder)
python
def _validate_folder(self, folder=None): ''' validate folder takes a cloned github repo, ensures the existence of the config.json, and validates it. ''' from expfactory.experiment import load_experiment if folder is None: folder=os.path.abspath(os.getcwd()) config = load_experiment(folder, return_path=True) if not config: return notvalid("%s is not an experiment." %(folder)) return self._validate_config(folder)
[ "def", "_validate_folder", "(", "self", ",", "folder", "=", "None", ")", ":", "from", "expfactory", ".", "experiment", "import", "load_experiment", "if", "folder", "is", "None", ":", "folder", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "getcwd", "(", ")", ")", "config", "=", "load_experiment", "(", "folder", ",", "return_path", "=", "True", ")", "if", "not", "config", ":", "return", "notvalid", "(", "\"%s is not an experiment.\"", "%", "(", "folder", ")", ")", "return", "self", ".", "_validate_config", "(", "folder", ")" ]
validate folder takes a cloned github repo, ensures the existence of the config.json, and validates it.
[ "validate", "folder", "takes", "a", "cloned", "github", "repo", "ensures", "the", "existence", "of", "the", "config", ".", "json", "and", "validates", "it", "." ]
train
https://github.com/expfactory/expfactory/blob/27ce6cc93e17231df8a8024f18e631336afd3501/expfactory/validator/experiments.py#L52-L66
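The core of the check above is "does the folder carry a config.json that loads"; a framework-free sketch of the existence half (the function name is ours, not the library's):

import os

def looks_like_experiment(folder=None):
    # Mirrors the default-folder and existence logic above.
    if folder is None:
        folder = os.path.abspath(os.getcwd())
    return os.path.exists(os.path.join(folder, "config.json"))

print(looks_like_experiment("/tmp"))  # False unless /tmp/config.json exists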
expfactory/expfactory
expfactory/validator/experiments.py
ExperimentValidator.validate
def validate(self, folder, cleanup=False, validate_folder=True): ''' validate is the entrypoint to all validation, for a folder, config, or url. If a URL is found, it is cloned and cleaned up. :param validate_folder: ensures the folder name (github repo) matches. ''' # Obtain any repository URL provided if folder.startswith('http') or 'github' in folder: folder = clone(folder, tmpdir=self.tmpdir) # Load config.json if provided directly elif os.path.basename(folder) == 'config.json': config = os.path.dirname(folder) return self._validate_config(config, validate_folder) # Otherwise, validate folder and cleanup valid = self._validate_folder(folder) if cleanup is True: shutil.rmtree(folder) return valid
python
def validate(self, folder, cleanup=False, validate_folder=True): ''' validate is the entrypoint to all validation, for a folder, config, or url. If a URL is found, it is cloned and cleaned up. :param validate_folder: ensures the folder name (github repo) matches. ''' # Obtain any repository URL provided if folder.startswith('http') or 'github' in folder: folder = clone(folder, tmpdir=self.tmpdir) # Load config.json if provided directly elif os.path.basename(folder) == 'config.json': config = os.path.dirname(folder) return self._validate_config(config, validate_folder) # Otherwise, validate folder and cleanup valid = self._validate_folder(folder) if cleanup is True: shutil.rmtree(folder) return valid
[ "def", "validate", "(", "self", ",", "folder", ",", "cleanup", "=", "False", ",", "validate_folder", "=", "True", ")", ":", "# Obtain any repository URL provided", "if", "folder", ".", "startswith", "(", "'http'", ")", "or", "'github'", "in", "folder", ":", "folder", "=", "clone", "(", "folder", ",", "tmpdir", "=", "self", ".", "tmpdir", ")", "# Load config.json if provided directly", "elif", "os", ".", "path", ".", "basename", "(", "folder", ")", "==", "'config.json'", ":", "config", "=", "os", ".", "path", ".", "dirname", "(", "folder", ")", "return", "self", ".", "_validate_config", "(", "config", ",", "validate_folder", ")", "# Otherwise, validate folder and cleanup", "valid", "=", "self", ".", "_validate_folder", "(", "folder", ")", "if", "cleanup", "is", "True", ":", "shutil", ".", "rmtree", "(", "folder", ")", "return", "valid" ]
validate is the entrypoint to all validation, for a folder, config, or url. If a URL is found, it is cloned and cleaned up. :param validate_folder: ensures the folder name (github repo) matches.
[ "validate", "is", "the", "entrypoint", "to", "all", "validation", "for", "a", "folder", "config", "or", "url", ".", "If", "a", "URL", "is", "found", "it", "is", "cloned", "and", "cleaned", "up", ".", ":", "param", "validate_folder", ":", "ensures", "the", "folder", "name", "(", "github", "repo", ")", "matches", "." ]
train
https://github.com/expfactory/expfactory/blob/27ce6cc93e17231df8a8024f18e631336afd3501/expfactory/validator/experiments.py#L69-L90
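The three entry paths above can be summarized in a small dispatch sketch (the names here are illustrative, not part of the library):

import os

def classify_target(target):
    # URL -> clone then validate; config.json -> validate the config directly;
    # anything else -> validate as a folder.
    if target.startswith("http") or "github" in target:
        return "clone-and-validate"
    if os.path.basename(target) == "config.json":
        return "validate-config"
    return "validate-folder"

assert classify_target("https://github.com/org/repo") == "clone-and-validate"
assert classify_target("/tmp/exp/config.json") == "validate-config"
assert classify_target("/tmp/exp") == "validate-folder"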
expfactory/expfactory
expfactory/validator/experiments.py
ExperimentValidator._validate_config
def _validate_config(self, folder, validate_folder=True): ''' validate config is the primary validation function that checks for presence and format of required fields. Parameters ========== :folder: full path to folder with config.json :validate_folder: if True, check that the folder name matches the config exp_id ''' config = "%s/config.json" % folder name = os.path.basename(folder) if not os.path.exists(config): return notvalid("%s: config.json not found." %(folder)) # Load the config try: config = read_json(config) except: return notvalid("%s: cannot load json, invalid." %(name)) # Config.json should be single dict if isinstance(config, list): return notvalid("%s: config.json is a list, not valid." %(name)) # Check over required fields fields = self.get_validation_fields() for field,value,ftype in fields: bot.verbose('field: %s, required: %s' %(field,value)) # Field must be in the keys if required if field not in config.keys(): if value == 1: return notvalid("%s: config.json is missing required field %s" %(name,field)) # Field is present, check type else: if not isinstance(config[field], ftype): return notvalid("%s: invalid type, must be %s." %(name,str(ftype))) # Expid gets special treatment if field == "exp_id" and validate_folder is True: if config[field] != name: return notvalid("%s: exp_id parameter %s does not match folder name." %(name,config[field])) # name cannot have special characters, only _ and letters/numbers if not re.match("^[a-z0-9_-]*$", config[field]): message = "%s: exp_id parameter %s has invalid characters" message += "only lowercase [a-z],[0-9], -, and _ allowed." return notvalid(message %(name,config[field])) return True
python
def _validate_config(self, folder, validate_folder=True): ''' validate config is the primary validation function that checks for presence and format of required fields. Parameters ========== :folder: full path to folder with config.json :validate_folder: if True, check that the folder name matches the config exp_id ''' config = "%s/config.json" % folder name = os.path.basename(folder) if not os.path.exists(config): return notvalid("%s: config.json not found." %(folder)) # Load the config try: config = read_json(config) except: return notvalid("%s: cannot load json, invalid." %(name)) # Config.json should be single dict if isinstance(config, list): return notvalid("%s: config.json is a list, not valid." %(name)) # Check over required fields fields = self.get_validation_fields() for field,value,ftype in fields: bot.verbose('field: %s, required: %s' %(field,value)) # Field must be in the keys if required if field not in config.keys(): if value == 1: return notvalid("%s: config.json is missing required field %s" %(name,field)) # Field is present, check type else: if not isinstance(config[field], ftype): return notvalid("%s: invalid type, must be %s." %(name,str(ftype))) # Expid gets special treatment if field == "exp_id" and validate_folder is True: if config[field] != name: return notvalid("%s: exp_id parameter %s does not match folder name." %(name,config[field])) # name cannot have special characters, only _ and letters/numbers if not re.match("^[a-z0-9_-]*$", config[field]): message = "%s: exp_id parameter %s has invalid characters" message += "only lowercase [a-z],[0-9], -, and _ allowed." return notvalid(message %(name,config[field])) return True
[ "def", "_validate_config", "(", "self", ",", "folder", ",", "validate_folder", "=", "True", ")", ":", "config", "=", "\"%s/config.json\"", "%", "folder", "name", "=", "os", ".", "path", ".", "basename", "(", "folder", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "config", ")", ":", "return", "notvalid", "(", "\"%s: config.json not found.\"", "%", "(", "folder", ")", ")", "# Load the config", "try", ":", "config", "=", "read_json", "(", "config", ")", "except", ":", "return", "notvalid", "(", "\"%s: cannot load json, invalid.\"", "%", "(", "name", ")", ")", "# Config.json should be single dict", "if", "isinstance", "(", "config", ",", "list", ")", ":", "return", "notvalid", "(", "\"%s: config.json is a list, not valid.\"", "%", "(", "name", ")", ")", "# Check over required fields", "fields", "=", "self", ".", "get_validation_fields", "(", ")", "for", "field", ",", "value", ",", "ftype", "in", "fields", ":", "bot", ".", "verbose", "(", "'field: %s, required: %s'", "%", "(", "field", ",", "value", ")", ")", "# Field must be in the keys if required", "if", "field", "not", "in", "config", ".", "keys", "(", ")", ":", "if", "value", "==", "1", ":", "return", "notvalid", "(", "\"%s: config.json is missing required field %s\"", "%", "(", "name", ",", "field", ")", ")", "# Field is present, check type", "else", ":", "if", "not", "isinstance", "(", "config", "[", "field", "]", ",", "ftype", ")", ":", "return", "notvalid", "(", "\"%s: invalid type, must be %s.\"", "%", "(", "name", ",", "str", "(", "ftype", ")", ")", ")", "# Expid gets special treatment", "if", "field", "==", "\"exp_id\"", "and", "validate_folder", "is", "True", ":", "if", "config", "[", "field", "]", "!=", "name", ":", "return", "notvalid", "(", "\"%s: exp_id parameter %s does not match folder name.\"", "%", "(", "name", ",", "config", "[", "field", "]", ")", ")", "# name cannot have special characters, only _ and letters/numbers", "if", "not", "re", ".", "match", "(", "\"^[a-z0-9_-]*$\"", ",", "config", "[", "field", "]", ")", ":", "message", "=", "\"%s: exp_id parameter %s has invalid characters\"", "message", "+=", "\"only lowercase [a-z],[0-9], -, and _ allowed.\"", "return", "notvalid", "(", "message", "%", "(", "name", ",", "config", "[", "field", "]", ")", ")", "return", "True" ]
validate config is the primary validation function that checks for presence and format of required fields. Parameters ========== :folder: full path to folder with config.json :validate_folder: if True, check that the folder name matches the config exp_id
[ "validate", "config", "is", "the", "primary", "validation", "function", "that", "checks", "for", "presence", "and", "format", "of", "required", "fields", "." ]
train
https://github.com/expfactory/expfactory/blob/27ce6cc93e17231df8a8024f18e631336afd3501/expfactory/validator/experiments.py#L93-L146
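A hypothetical config.json body that would pass the required-field, type, and exp_id checks above (the values are invented; required fields are taken from get_validation_fields below):

import re

config = {
    "name": "Test Task",
    "time": 5,
    "url": "https://example.com/test-task",
    "description": "A demo task.",
    "instructions": "Press the button.",
    "exp_id": "test-task",   # must equal the folder name
}
# The character whitelist applied to exp_id:
assert re.match("^[a-z0-9_-]*$", config["exp_id"])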
expfactory/expfactory
expfactory/validator/experiments.py
ExperimentValidator.get_validation_fields
def get_validation_fields(self): '''get_validation_fields returns a list of tuples (each a field) we only require the exp_id to coincide with the folder name, for the sake of reproducibility (given that all are served from sample image or Github organization). All other fields are optional. To specify runtime variables, add to "experiment_variables" 0: not required, no warning 1: required, not valid if missing 2: not required, warning if missing type: indicates the variable type ''' return [("name",1,str), # required ("time",1,int), ("url",1,str), ("description",1, str), ("instructions",1, str), ("exp_id",1,str), ("install",0, list), # list of commands to install / build experiment ("contributors",0, list), # not required ("reference",0, list), ("cognitive_atlas_task_id",0,str), ("template",0,str)]
python
def get_validation_fields(self): '''get_validation_fields returns a list of tuples (each a field) we only require the exp_id to coincide with the folder name, for the sake of reproducibility (given that all are served from sample image or Github organization). All other fields are optional. To specify runtime variables, add to "experiment_variables" 0: not required, no warning 1: required, not valid if missing 2: not required, warning if missing type: indicates the variable type ''' return [("name",1,str), # required ("time",1,int), ("url",1,str), ("description",1, str), ("instructions",1, str), ("exp_id",1,str), ("install",0, list), # list of commands to install / build experiment ("contributors",0, list), # not required ("reference",0, list), ("cognitive_atlas_task_id",0,str), ("template",0,str)]
[ "def", "get_validation_fields", "(", "self", ")", ":", "return", "[", "(", "\"name\"", ",", "1", ",", "str", ")", ",", "# required", "(", "\"time\"", ",", "1", ",", "int", ")", ",", "(", "\"url\"", ",", "1", ",", "str", ")", ",", "(", "\"description\"", ",", "1", ",", "str", ")", ",", "(", "\"instructions\"", ",", "1", ",", "str", ")", ",", "(", "\"exp_id\"", ",", "1", ",", "str", ")", ",", "(", "\"install\"", ",", "0", ",", "list", ")", ",", "# list of commands to install / build experiment ", "(", "\"contributors\"", ",", "0", ",", "list", ")", ",", "# not required", "(", "\"reference\"", ",", "0", ",", "list", ")", ",", "(", "\"cognitive_atlas_task_id\"", ",", "0", ",", "str", ")", ",", "(", "\"template\"", ",", "0", ",", "str", ")", "]" ]
get_validation_fields returns a list of tuples (each a field) we only require the exp_id to coincide with the folder name, for the sake of reproducibility (given that all are served from sample image or Github organization). All other fields are optional. To specify runtime variables, add to "experiment_variables" 0: not required, no warning 1: required, not valid if missing 2: not required, warning if missing type: indicates the variable type
[ "get_validation_fields", "returns", "a", "list", "of", "tuples", "(", "each", "a", "field", ")", "we", "only", "require", "the", "exp_id", "to", "coincide", "with", "the", "folder", "name", "for", "the", "sake", "of", "reproducibility", "(", "given", "that", "all", "are", "served", "from", "sample", "image", "or", "Github", "organization", ")", ".", "All", "other", "fields", "are", "optional", ".", "To", "specify", "runtime", "variables", "add", "to", "experiment_variables" ]
train
https://github.com/expfactory/expfactory/blob/27ce6cc93e17231df8a8024f18e631336afd3501/expfactory/validator/experiments.py#L149-L172
expfactory/expfactory
expfactory/variables.py
get_runtime_vars
def get_runtime_vars(varset, experiment, token): '''get_runtime_vars will return the urlparsed string of one or more runtime variables. If none are present, an empty string is returned. Parameters ========== varset: the variable set, a dictionary lookup with exp_id, token, vars experiment: the exp_id to look up token: the participant id (or token) that must be defined. Returns ======= url: the variable portion of the url to be passed to experiment, e.g, '?words=at the thing&color=red&globalname=globalvalue' ''' url = '' if experiment in varset: variables = dict() # Participant set variables if token in varset[experiment]: for k,v in varset[experiment][token].items(): variables[k] = v # Global set variables if "*" in varset[experiment]: for k,v in varset[experiment]['*'].items(): # Only add the variable if not already defined if k not in variables: variables[k] = v # Join together, the first ? is added by calling function varlist = ["%s=%s" %(k,v) for k,v in variables.items()] url = '&'.join(varlist) bot.debug('Parsed url: %s' %url) return url
python
def get_runtime_vars(varset, experiment, token): '''get_runtime_vars will return the urlparsed string of one or more runtime variables. If none are present, an empty string is returned. Parameters ========== varset: the variable set, a dictionary lookup with exp_id, token, vars experiment: the exp_id to look up token: the participant id (or token) that must be defined. Returns ======= url: the variable portion of the url to be passed to experiment, e.g, '?words=at the thing&color=red&globalname=globalvalue' ''' url = '' if experiment in varset: variables = dict() # Participant set variables if token in varset[experiment]: for k,v in varset[experiment][token].items(): variables[k] = v # Global set variables if "*" in varset[experiment]: for k,v in varset[experiment]['*'].items(): # Only add the variable if not already defined if k not in variables: variables[k] = v # Join together, the first ? is added by calling function varlist = ["%s=%s" %(k,v) for k,v in variables.items()] url = '&'.join(varlist) bot.debug('Parsed url: %s' %url) return url
[ "def", "get_runtime_vars", "(", "varset", ",", "experiment", ",", "token", ")", ":", "url", "=", "''", "if", "experiment", "in", "varset", ":", "variables", "=", "dict", "(", ")", "# Participant set variables", "if", "token", "in", "varset", "[", "experiment", "]", ":", "for", "k", ",", "v", "in", "varset", "[", "experiment", "]", "[", "token", "]", ".", "items", "(", ")", ":", "variables", "[", "k", "]", "=", "v", "# Global set variables", "if", "\"*\"", "in", "varset", "[", "experiment", "]", ":", "for", "k", ",", "v", "in", "varset", "[", "experiment", "]", "[", "'*'", "]", ".", "items", "(", ")", ":", "# Only add the variable if not already defined", "if", "k", "not", "in", "variables", ":", "variables", "[", "k", "]", "=", "v", "# Join together, the first ? is added by calling function", "varlist", "=", "[", "\"%s=%s\"", "%", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "variables", ".", "items", "(", ")", "]", "url", "=", "'&'", ".", "join", "(", "varlist", ")", "bot", ".", "debug", "(", "'Parsed url: %s'", "%", "url", ")", "return", "url" ]
get_runtime_vars will return the urlparsed string of one or more runtime variables. If none are present, an empty string is returned. Parameters ========== varset: the variable set, a dictionary lookup with exp_id, token, vars experiment: the exp_id to look up token: the participant id (or token) that must be defined. Returns ======= url: the variable portion of the url to be passed to experiment, e.g, '?words=at the thing&color=red&globalname=globalvalue'
[ "get_runtime_vars", "will", "return", "the", "urlparsed", "string", "of", "one", "or", "more", "runtime", "variables", ".", "If", "None", "are", "present", "None", "is", "returned", ".", "Parameters", "==========", "varset", ":", "the", "variable", "set", "a", "dictionary", "lookup", "with", "exp_id", "token", "vars", "experiment", ":", "the", "exp_id", "to", "look", "up", "token", ":", "the", "participant", "id", "(", "or", "token", ")", "that", "must", "be", "defined", ".", "Returns", "=======", "url", ":", "the", "variable", "portion", "of", "the", "url", "to", "be", "passed", "to", "experiment", "e", ".", "g", "?words", "=", "at", "the", "thing&color", "=", "red&globalname", "=", "globalvalue" ]
train
https://github.com/expfactory/expfactory/blob/27ce6cc93e17231df8a8024f18e631336afd3501/expfactory/variables.py#L45-L85
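A worked sketch of the lookup logic, using the varset shape from the docstring; participant-level variables take precedence, and global ('*') variables only fill gaps (the names and values here are illustrative):

# Hypothetical varset, shaped as the docstring describes.
varset = {
    "test-parse-url": {
        "*": {"globalname": "globalvalue", "color": "green"},
        "123": {"color": "red", "words": "at the thing"},
    }
}
variables = dict(varset["test-parse-url"]["123"])
for k, v in varset["test-parse-url"]["*"].items():
    variables.setdefault(k, v)  # global value is skipped when the participant set one
url = "&".join("%s=%s" % (k, v) for k, v in variables.items())
# On modern Pythons, dicts preserve insertion order, so this prints:
print(url)  # color=red&words=at the thing&globalname=globalvalue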
expfactory/expfactory
expfactory/variables.py
generate_runtime_vars
def generate_runtime_vars(variable_file=None, sep=','): '''generate a lookup data structure from a delimited file. We typically obtain the file name and delimiter from the environment by way of EXPFACTORY_RUNTIME_VARS, and EXPFACTORY_RUNTIME_DELIM, respectively, but the user can also parse from a custom variable file by way of specifying it to the function (preference is given here). The file should be csv, with the required header fields, in order: "exp_id", "var_name", "var_value", and "token", to distinguish the experiment id and the participant ID. The subsequent columns should correspond to experiment variable names. No special parsing of either is done. Parameters ========== variable_file: full path to the tabular file with token, exp_id, etc. sep: the default delimiter to use, if not set in environment. Returns ======= varset: a dictionary lookup by exp_id and then participant ID. { 'test-parse-url': { '123': { 'color': 'red', 'globalname': 'globalvalue', 'words': 'at the thing' }, '456': {'color': 'blue', 'globalname': 'globalvalue', 'words': 'omg tacos'} } } ''' # First preference goes to runtime, then environment, then unset if variable_file is None: if EXPFACTORY_RUNTIME_VARS is not None: variable_file = EXPFACTORY_RUNTIME_VARS if variable_file is not None: if not os.path.exists(variable_file): bot.warning('%s is set, but not found' %variable_file) return variable_file # If still None, no file if variable_file is None: return variable_file # If we get here, we have a variable file that exists delim = sep if EXPFACTORY_RUNTIME_DELIM is not None: delim = EXPFACTORY_RUNTIME_DELIM bot.debug('Delim for variables file set to %s' %delim) # Read in the file, generate config varset = dict() rows = _read_runtime_vars(variable_file, sep=delim) if len(rows) > 0: # When we get here, we are sure to have # 'exp_id', 'var_name', 'var_value', 'token' for row in rows: exp_id = row[0].lower() # exp-id must be lowercase var_name = row[1] var_value = row[2] token = row[3] # Level 1: Experiment ID if exp_id not in varset: varset[exp_id] = {} # Level 2: Participant ID if token not in varset[exp_id]: varset[exp_id][token] = {} # If found global setting, courtesy debug message if token == "*": bot.debug('Found global variable %s' %var_name) # Level 3: is the variable, issue warning if already defined if var_name in varset[exp_id][token]: bot.warning('%s defined twice %s:%s' %(var_name, exp_id, token)) varset[exp_id][token][var_name] = var_value return varset
python
def generate_runtime_vars(variable_file=None, sep=','): '''generate a lookup data structure from a delimited file. We typically obtain the file name and delimiter from the environment by way of EXPFACTORY_RUNTIME_VARS, and EXPFACTORY_RUNTIME_DELIM, respectively, but the user can also parse from a custom variable file by way of specifying it to the function (preference is given here). The file should be csv, with the required header fields, in order: "exp_id", "var_name", "var_value", and "token", to distinguish the experiment id and the participant ID. The subsequent columns should correspond to experiment variable names. No special parsing of either is done. Parameters ========== variable_file: full path to the tabular file with token, exp_id, etc. sep: the default delimiter to use, if not set in environment. Returns ======= varset: a dictionary lookup by exp_id and then participant ID. { 'test-parse-url': { '123': { 'color': 'red', 'globalname': 'globalvalue', 'words': 'at the thing' }, '456': {'color': 'blue', 'globalname': 'globalvalue', 'words': 'omg tacos'} } } ''' # First preference goes to runtime, then environment, then unset if variable_file is None: if EXPFACTORY_RUNTIME_VARS is not None: variable_file = EXPFACTORY_RUNTIME_VARS if variable_file is not None: if not os.path.exists(variable_file): bot.warning('%s is set, but not found' %variable_file) return variable_file # If still None, no file if variable_file is None: return variable_file # If we get here, we have a variable file that exists delim = sep if EXPFACTORY_RUNTIME_DELIM is not None: delim = EXPFACTORY_RUNTIME_DELIM bot.debug('Delim for variables file set to %s' %delim) # Read in the file, generate config varset = dict() rows = _read_runtime_vars(variable_file, sep=delim) if len(rows) > 0: # When we get here, we are sure to have # 'exp_id', 'var_name', 'var_value', 'token' for row in rows: exp_id = row[0].lower() # exp-id must be lowercase var_name = row[1] var_value = row[2] token = row[3] # Level 1: Experiment ID if exp_id not in varset: varset[exp_id] = {} # Level 2: Participant ID if token not in varset[exp_id]: varset[exp_id][token] = {} # If found global setting, courtesy debug message if token == "*": bot.debug('Found global variable %s' %var_name) # Level 3: is the variable, issue warning if already defined if var_name in varset[exp_id][token]: bot.warning('%s defined twice %s:%s' %(var_name, exp_id, token)) varset[exp_id][token][var_name] = var_value return varset
[ "def", "generate_runtime_vars", "(", "variable_file", "=", "None", ",", "sep", "=", "','", ")", ":", "# First preference goes to runtime, then environment, then unset", "if", "variable_file", "is", "None", ":", "if", "EXPFACTORY_RUNTIME_VARS", "is", "not", "None", ":", "variable_file", "=", "EXPFACTORY_RUNTIME_VARS", "if", "variable_file", "is", "not", "None", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "variable_file", ")", ":", "bot", ".", "warning", "(", "'%s is set, but not found'", "%", "variable_file", ")", "return", "variable_file", "# If still None, no file", "if", "variable_file", "is", "None", ":", "return", "variable_file", "# If we get here, we have a variable file that exists", "delim", "=", "sep", "if", "EXPFACTORY_RUNTIME_DELIM", "is", "not", "None", ":", "delim", "=", "EXPFACTORY_RUNTIME_DELIM", "bot", ".", "debug", "(", "'Delim for variables file set to %s'", "%", "sep", ")", "# Read in the file, generate config", "varset", "=", "dict", "(", ")", "rows", "=", "_read_runtime_vars", "(", "variable_file", ")", "if", "len", "(", "rows", ")", ">", "0", ":", "# When we get here, we are sure to have ", "# 'exp_id', 'var_name', 'var_value', 'token'", "for", "row", "in", "rows", ":", "exp_id", "=", "row", "[", "0", "]", ".", "lower", "(", ")", "# exp-id must be lowercase", "var_name", "=", "row", "[", "1", "]", "var_value", "=", "row", "[", "2", "]", "token", "=", "row", "[", "3", "]", "# Level 1: Experiment ID", "if", "exp_id", "not", "in", "varset", ":", "varset", "[", "exp_id", "]", "=", "{", "}", "# Level 2: Participant ID", "if", "token", "not", "in", "varset", "[", "exp_id", "]", ":", "varset", "[", "exp_id", "]", "[", "token", "]", "=", "{", "}", "# If found global setting, courtesy debug message", "if", "token", "==", "\"*\"", ":", "bot", ".", "debug", "(", "'Found global variable %s'", "%", "var_name", ")", "# Level 3: is the variable, issue warning if already defined", "if", "var_name", "in", "varset", "[", "exp_id", "]", "[", "token", "]", ":", "bot", ".", "warning", "(", "'%s defined twice %s:%s'", "%", "(", "var_name", ",", "exp_id", ",", "token", ")", ")", "varset", "[", "exp_id", "]", "[", "token", "]", "[", "var_name", "]", "=", "var_value", "return", "varset" ]
generate a lookup data structure from a delimited file. We typically obtain the file name and delimiter from the environment by way of EXPFACTORY_RUNTIME_VARS, and EXPFACTORY_RUNTIME_DELIM, respectively, but the user can also parse from a custom variable file by way of specifying it to the function (preference is given here). The file should be csv, with the required header fields, in order: "exp_id", "var_name", "var_value", and "token", to distinguish the experiment id and the participant ID. The subsequent columns should correspond to experiment variable names. No special parsing of either is done. Parameters ========== variable_file: full path to the tabular file with token, exp_id, etc. sep: the default delimiter to use, if not set in environment. Returns ======= varset: a dictionary lookup by exp_id and then participant ID. { 'test-parse-url': { '123': { 'color': 'red', 'globalname': 'globalvalue', 'words': 'at the thing' }, '456': {'color': 'blue', 'globalname': 'globalvalue', 'words': 'omg tacos'} } }
[ "generate", "a", "lookup", "data", "structure", "from", "a", "delimited", "file", ".", "We", "typically", "obtain", "the", "file", "name", "and", "delimiter", "from", "the", "environment", "by", "way", "of", "EXPFACTORY_RUNTIME_VARS", "and", "EXPFACTORY_RUNTIME_DELIM", "respectively", "but", "the", "user", "can", "also", "parse", "from", "a", "custom", "variable", "file", "by", "way", "of", "specifying", "it", "to", "the", "function", "(", "preference", "is", "given", "here", ")", ".", "The", "file", "should", "be", "csv", "with", "the", "only", "required", "first", "header", "field", "as", "token", "and", "second", "as", "exp_id", "to", "distinguish", "the", "participant", "ID", "and", "experiment", "id", ".", "The", "subsequent", "columns", "should", "correspond", "to", "experiment", "variable", "names", ".", "No", "special", "parsing", "of", "either", "is", "done", "." ]
train
https://github.com/expfactory/expfactory/blob/27ce6cc93e17231df8a8024f18e631336afd3501/expfactory/variables.py#L88-L180
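A self-contained sketch of the nesting this function produces, starting from the strict header order enforced by validate_header below (the csv text is made up):

csv_text = """exp_id,var_name,var_value,token
test-parse-url,color,red,123
test-parse-url,globalname,globalvalue,*"""

varset = {}
for line in csv_text.splitlines()[1:]:  # skip the header row
    exp_id, var_name, var_value, token = line.split(",")
    varset.setdefault(exp_id.lower(), {}).setdefault(token, {})[var_name] = var_value
print(varset)
# {'test-parse-url': {'123': {'color': 'red'}, '*': {'globalname': 'globalvalue'}}}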
expfactory/expfactory
expfactory/variables.py
_read_runtime_vars
def _read_runtime_vars(variable_file, sep=','): '''read the entire runtime variable file, and return a list of lists, each corresponding to a row. We also check the header, and exit if anything is missing or malformed. Parameters ========== variable_file: full path to the tabular file with token, exp_id, etc. sep: the default delimiter to use, if not set in environment. Returns ======= valid_rows: a list of lists, each a valid row [['test-parse-url', 'globalname', 'globalvalue', '*'], ['test-parse-url', 'color', 'red', '123'], ['test-parse-url', 'color', 'blue', '456'], ['test-parse-url', 'words', 'at the thing', '123'], ['test-parse-url', 'words', 'omg tacos', '456']] ''' rows = [x for x in read_file(variable_file).split('\n') if x.strip()] valid_rows = [] if len(rows) > 0: # Validate header and rows, exit if not valid header = rows.pop(0).split(sep) validate_header(header) for row in rows: row = _validate_row(row, sep=sep, required_length=4) # If the row is returned, it is valid if row: valid_rows.append(row) return valid_rows
python
def _read_runtime_vars(variable_file, sep=','): '''read the entire runtime variable file, and return a list of lists, each corresponding to a row. We also check the header, and exit if anything is missing or malformed. Parameters ========== variable_file: full path to the tabular file with token, exp_id, etc. sep: the default delimiter to use, if not set in environment. Returns ======= valid_rows: a list of lists, each a valid row [['test-parse-url', 'globalname', 'globalvalue', '*'], ['test-parse-url', 'color', 'red', '123'], ['test-parse-url', 'color', 'blue', '456'], ['test-parse-url', 'words', 'at the thing', '123'], ['test-parse-url', 'words', 'omg tacos', '456']] ''' rows = [x for x in read_file(variable_file).split('\n') if x.strip()] valid_rows = [] if len(rows) > 0: # Validate header and rows, exit if not valid header = rows.pop(0).split(sep) validate_header(header) for row in rows: row = _validate_row(row, sep=sep, required_length=4) # If the row is returned, it is valid if row: valid_rows.append(row) return valid_rows
[ "def", "_read_runtime_vars", "(", "variable_file", ",", "sep", "=", "','", ")", ":", "rows", "=", "[", "x", "for", "x", "in", "read_file", "(", "variable_file", ")", ".", "split", "(", "'\\n'", ")", "if", "x", ".", "strip", "(", ")", "]", "valid_rows", "=", "[", "]", "if", "len", "(", "rows", ")", ">", "0", ":", "# Validate header and rows, exit if not valid", "header", "=", "rows", ".", "pop", "(", "0", ")", ".", "split", "(", "sep", ")", "validate_header", "(", "header", ")", "for", "row", "in", "rows", ":", "row", "=", "_validate_row", "(", "row", ",", "sep", "=", "sep", ",", "required_length", "=", "4", ")", "# If the row is returned, it is valid", "if", "row", ":", "valid_rows", ".", "append", "(", "row", ")", "return", "valid_rows" ]
read the entire runtime variable file, and return a list of lists, each corresponding to a row. We also check the header, and exit if anything is missing or malformed. Parameters ========== variable_file: full path to the tabular file with token, exp_id, etc. sep: the default delimiter to use, if not set in environment. Returns ======= valid_rows: a list of lists, each a valid row [['test-parse-url', 'globalname', 'globalvalue', '*'], ['test-parse-url', 'color', 'red', '123'], ['test-parse-url', 'color', 'blue', '456'], ['test-parse-url', 'words', 'at the thing', '123'], ['test-parse-url', 'words', 'omg tacos', '456']]
[ "read", "the", "entire", "runtime", "variable", "file", "and", "return", "a", "list", "of", "lists", "each", "corresponding", "to", "a", "row", ".", "We", "also", "check", "the", "header", "and", "exit", "if", "anything", "is", "missing", "or", "malformed", "." ]
train
https://github.com/expfactory/expfactory/blob/27ce6cc93e17231df8a8024f18e631336afd3501/expfactory/variables.py#L183-L224
expfactory/expfactory
expfactory/variables.py
_validate_row
def _validate_row(row, sep=',', required_length=None): '''validate_row will ensure that a row has the proper length, and is not empty and cleaned of extra spaces. Parameters ========== row: a single row, not yet parsed. Returns a valid row, or None if not valid ''' if not isinstance(row, list): row = _parse_row(row, sep) if required_length: length = len(row) if length != required_length: bot.warning('Row should have length %s (not %s)' %(required_length, length)) bot.warning(row) row = None return row
python
def _validate_row(row, sep=',', required_length=None): '''validate_row will ensure that a row has the proper length, and is not empty and cleaned of extra spaces. Parameters ========== row: a single row, not yet parsed. Returns a valid row, or None if not valid ''' if not isinstance(row, list): row = _parse_row(row, sep) if required_length: length = len(row) if length != required_length: bot.warning('Row should have length %s (not %s)' %(required_length, length)) bot.warning(row) row = None return row
[ "def", "_validate_row", "(", "row", ",", "sep", "=", "','", ",", "required_length", "=", "None", ")", ":", "if", "not", "isinstance", "(", "row", ",", "list", ")", ":", "row", "=", "_parse_row", "(", "row", ",", "sep", ")", "if", "required_length", ":", "length", "=", "len", "(", "row", ")", "if", "length", "!=", "required_length", ":", "bot", ".", "warning", "(", "'Row should have length %s (not %s)'", "%", "(", "required_length", ",", "length", ")", ")", "bot", ".", "warning", "(", "row", ")", "row", "=", "None", "return", "row" ]
validate_row will ensure that a row has the proper length, and is not empty and cleaned of extra spaces. Parameters ========== row: a single row, not yet parsed. Returns a valid row, or None if not valid
[ "validate_row", "will", "ensure", "that", "a", "row", "has", "the", "proper", "length", "and", "is", "not", "empty", "and", "cleaned", "of", "extra", "spaces", ".", "Parameters", "==========", "row", ":", "a", "single", "row", "not", "yet", "parsed", "." ]
train
https://github.com/expfactory/expfactory/blob/27ce6cc93e17231df8a8024f18e631336afd3501/expfactory/variables.py#L227-L249
expfactory/expfactory
expfactory/variables.py
_parse_row
def _parse_row(row, sep=','): '''parse row is a helper function to simply clean up a string, and parse into a row based on a delimiter. If a required length is provided, we check for this too. ''' parsed = row.split(sep) parsed = [x for x in parsed if x.strip()] return parsed
python
def _parse_row(row, sep=','): '''parse row is a helper function to simply clean up a string, and parse into a row based on a delimiter. If a required length is provided, we check for this too. ''' parsed = row.split(sep) parsed = [x for x in parsed if x.strip()] return parsed
[ "def", "_parse_row", "(", "row", ",", "sep", "=", "','", ")", ":", "parsed", "=", "row", ".", "split", "(", "sep", ")", "parsed", "=", "[", "x", "for", "x", "in", "parsed", "if", "x", ".", "strip", "(", ")", "]", "return", "parsed" ]
parse row is a helper function to simply clean up a string, and parse into a row based on a delimiter. If a required length is provided, we check for this too.
[ "parse", "row", "is", "a", "helper", "function", "to", "simply", "clean", "up", "a", "string", "and", "parse", "into", "a", "row", "based", "on", "a", "delimiter", ".", "If", "a", "required", "length", "is", "provided", "we", "check", "for", "this", "too", "." ]
train
https://github.com/expfactory/expfactory/blob/27ce6cc93e17231df8a8024f18e631336afd3501/expfactory/variables.py#L252-L260
expfactory/expfactory
expfactory/variables.py
validate_header
def validate_header(header, required_fields=None): '''validate_header ensures that the first row contains the exp_id, var_name, var_value, and token. Capitalization isn't important, but ordering is. This criterion is very strict, but it's reasonable to require. Parameters ========== header: the header row, as a list required_fields: a list of required fields. We derive the required length from this list. Does not return, instead exits if malformed. Runs silently if OK. ''' if required_fields is None: required_fields = ['exp_id', 'var_name', 'var_value', 'token'] # The required length of the header based on required fields length = len(required_fields) # This is very strict, but no reason not to be header = _validate_row(header, required_length=length) header = [x.lower() for x in header] for idx in range(length): field = header[idx].lower().strip() if required_fields[idx] != field: bot.error('Malformed header field %s, exiting.' %field) sys.exit(1)
python
def validate_header(header, required_fields=None): '''validate_header ensures that the first row contains the exp_id, var_name, var_value, and token. Capitalization isn't important, but ordering is. This criterion is very strict, but it's reasonable to require. Parameters ========== header: the header row, as a list required_fields: a list of required fields. We derive the required length from this list. Does not return, instead exits if malformed. Runs silently if OK. ''' if required_fields is None: required_fields = ['exp_id', 'var_name', 'var_value', 'token'] # The required length of the header based on required fields length = len(required_fields) # This is very strict, but no reason not to be header = _validate_row(header, required_length=length) header = [x.lower() for x in header] for idx in range(length): field = header[idx].lower().strip() if required_fields[idx] != field: bot.error('Malformed header field %s, exiting.' %field) sys.exit(1)
[ "def", "validate_header", "(", "header", ",", "required_fields", "=", "None", ")", ":", "if", "required_fields", "is", "None", ":", "required_fields", "=", "[", "'exp_id'", ",", "'var_name'", ",", "'var_value'", ",", "'token'", "]", "# The required length of the header based on required fields", "length", "=", "len", "(", "required_fields", ")", "# This is very strict, but no reason not to be", "header", "=", "_validate_row", "(", "header", ",", "required_length", "=", "length", ")", "header", "=", "[", "x", ".", "lower", "(", ")", "for", "x", "in", "header", "]", "for", "idx", "in", "range", "(", "length", ")", ":", "field", "=", "header", "[", "idx", "]", ".", "lower", "(", ")", ".", "strip", "(", ")", "if", "required_fields", "[", "idx", "]", "!=", "field", ":", "bot", ".", "error", "(", "'Malformed header field %s, exiting.'", "%", "field", ")", "sys", ".", "exit", "(", "1", ")" ]
validate_header ensures that the first row contains the exp_id, var_name, var_value, and token. Capitalization isn't important, but ordering is. This criterion is very strict, but it's reasonable to require. Parameters ========== header: the header row, as a list required_fields: a list of required fields. We derive the required length from this list. Does not return, instead exits if malformed. Runs silently if OK.
[ "validate_header", "ensures", "that", "the", "first", "row", "contains", "the", "exp_id", "var_name", "var_value", "and", "token", ".", "Capitalization", "isn", "t", "important", "but", "ordering", "is", ".", "This", "criteria", "is", "very", "strict", "but", "it", "s", "reasonable", "to", "require", ".", "Parameters", "==========", "header", ":", "the", "header", "row", "as", "a", "list", "required_fields", ":", "a", "list", "of", "required", "fields", ".", "We", "derive", "the", "required", "length", "from", "this", "list", "." ]
train
https://github.com/expfactory/expfactory/blob/27ce6cc93e17231df8a8024f18e631336afd3501/expfactory/variables.py#L263-L294
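A quick sketch of how strict the check is: names must match exactly and in order, after lowercasing and stripping (the sample header is illustrative):

required = ["exp_id", "var_name", "var_value", "token"]
header = "Exp_ID, var_name, var_value, token".split(",")
normalized = [h.lower().strip() for h in header]
print(normalized == required)  # True; reordering or renaming a field would fail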
littlepea/django-docs
docs/views.py
superuser_required
def superuser_required(view_func): """ Decorator for views that checks that the user is logged in and is a superuser, displaying the login page if necessary. """ @wraps(view_func) def _checklogin(request, *args, **kwargs): if request.user.is_active and request.user.is_superuser: # The user is valid. Continue to the admin page. return view_func(request, *args, **kwargs) assert hasattr(request, 'session'), "The Django admin requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'." defaults = { 'template_name': 'admin/login.html', 'redirect_field_name': request.get_full_path(), 'authentication_form': AdminAuthenticationForm, 'extra_context': { 'title': _('Log in'), 'app_path': request.get_full_path() } } return LoginView(request, **defaults) return _checklogin
python
def superuser_required(view_func): """ Decorator for views that checks that the user is logged in and is a superuser, displaying the login page if necessary. """ @wraps(view_func) def _checklogin(request, *args, **kwargs): if request.user.is_active and request.user.is_superuser: # The user is valid. Continue to the admin page. return view_func(request, *args, **kwargs) assert hasattr(request, 'session'), "The Django admin requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'." defaults = { 'template_name': 'admin/login.html', 'redirect_field_name': request.get_full_path(), 'authentication_form': AdminAuthenticationForm, 'extra_context': { 'title': _('Log in'), 'app_path': request.get_full_path() } } return LoginView(request, **defaults) return _checklogin
[ "def", "superuser_required", "(", "view_func", ")", ":", "@", "wraps", "(", "view_func", ")", "def", "_checklogin", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "request", ".", "user", ".", "is_active", "and", "request", ".", "user", ".", "is_superuser", ":", "# The user is valid. Continue to the admin page.", "return", "view_func", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "assert", "hasattr", "(", "request", ",", "'session'", ")", ",", "\"The Django admin requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'.\"", "defaults", "=", "{", "'template_name'", ":", "'admin/login.html'", ",", "'redirect_field_name'", ":", "request", ".", "get_full_path", "(", ")", ",", "'authentication_form'", ":", "AdminAuthenticationForm", ",", "'extra_context'", ":", "{", "'title'", ":", "_", "(", "'Log in'", ")", ",", "'app_path'", ":", "request", ".", "get_full_path", "(", ")", "}", "}", "return", "LoginView", "(", "request", ",", "*", "*", "defaults", ")", "return", "_checklogin" ]
Decorator for views that checks that the user is logged in and is a superuser, displaying the login page if necessary.
[ "Decorator", "for", "views", "that", "checks", "that", "the", "user", "is", "logged", "in", "and", "is", "a", "staff", "member", "displaying", "the", "login", "page", "if", "necessary", "." ]
train
https://github.com/littlepea/django-docs/blob/8e6c867dab51746c6482bd52c4f1270684af3870/docs/views.py#L18-L40
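A minimal, framework-free sketch of the same guard pattern (the names below are invented; the real decorator checks request.user.is_active and request.user.is_superuser and falls back to the admin login view):

from functools import wraps

def guard(view_func):
    @wraps(view_func)
    def _check(request, *args, **kwargs):
        # Stand-in for the is_active/is_superuser test above.
        if getattr(request, "is_superuser", False):
            return view_func(request, *args, **kwargs)
        return "login-page"
    return _check

@guard
def serve_docs(request):
    return "docs"

class FakeRequest:
    is_superuser = True

print(serve_docs(FakeRequest()))  # docs
print(serve_docs(object()))       # login-page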
cpburnz/python-path-specification
pathspec/pathspec.py
PathSpec.from_lines
def from_lines(cls, pattern_factory, lines): """ Compiles the pattern lines. *pattern_factory* can be either the name of a registered pattern factory (:class:`str`), or a :class:`~collections.abc.Callable` used to compile patterns. It must accept an uncompiled pattern (:class:`str`) and return the compiled pattern (:class:`.Pattern`). *lines* (:class:`~collections.abc.Iterable`) yields each uncompiled pattern (:class:`str`). This simply has to yield each line so it can be a :class:`file` (e.g., from :func:`open` or :class:`io.StringIO`) or the result from :meth:`str.splitlines`. Returns the :class:`PathSpec` instance. """ if isinstance(pattern_factory, string_types): pattern_factory = util.lookup_pattern(pattern_factory) if not callable(pattern_factory): raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory)) if isinstance(lines, (bytes, unicode)): raise TypeError("lines:{!r} is not an iterable.".format(lines)) lines = [pattern_factory(line) for line in lines if line] return cls(lines)
python
def from_lines(cls, pattern_factory, lines): """ Compiles the pattern lines. *pattern_factory* can be either the name of a registered pattern factory (:class:`str`), or a :class:`~collections.abc.Callable` used to compile patterns. It must accept an uncompiled pattern (:class:`str`) and return the compiled pattern (:class:`.Pattern`). *lines* (:class:`~collections.abc.Iterable`) yields each uncompiled pattern (:class:`str`). This simply has to yield each line so it can be a :class:`file` (e.g., from :func:`open` or :class:`io.StringIO`) or the result from :meth:`str.splitlines`. Returns the :class:`PathSpec` instance. """ if isinstance(pattern_factory, string_types): pattern_factory = util.lookup_pattern(pattern_factory) if not callable(pattern_factory): raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory)) if isinstance(lines, (bytes, unicode)): raise TypeError("lines:{!r} is not an iterable.".format(lines)) lines = [pattern_factory(line) for line in lines if line] return cls(lines)
[ "def", "from_lines", "(", "cls", ",", "pattern_factory", ",", "lines", ")", ":", "if", "isinstance", "(", "pattern_factory", ",", "string_types", ")", ":", "pattern_factory", "=", "util", ".", "lookup_pattern", "(", "pattern_factory", ")", "if", "not", "callable", "(", "pattern_factory", ")", ":", "raise", "TypeError", "(", "\"pattern_factory:{!r} is not callable.\"", ".", "format", "(", "pattern_factory", ")", ")", "if", "isinstance", "(", "lines", ",", "(", "bytes", ",", "unicode", ")", ")", ":", "raise", "TypeError", "(", "\"lines:{!r} is not an iterable.\"", ".", "format", "(", "lines", ")", ")", "lines", "=", "[", "pattern_factory", "(", "line", ")", "for", "line", "in", "lines", "if", "line", "]", "return", "cls", "(", "lines", ")" ]
Compiles the pattern lines. *pattern_factory* can be either the name of a registered pattern factory (:class:`str`), or a :class:`~collections.abc.Callable` used to compile patterns. It must accept an uncompiled pattern (:class:`str`) and return the compiled pattern (:class:`.Pattern`). *lines* (:class:`~collections.abc.Iterable`) yields each uncompiled pattern (:class:`str`). This simply has to yield each line so it can be a :class:`file` (e.g., from :func:`open` or :class:`io.StringIO`) or the result from :meth:`str.splitlines`. Returns the :class:`PathSpec` instance.
[ "Compiles", "the", "pattern", "lines", "." ]
train
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/pathspec.py#L50-L75
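Typical usage with the registered "gitwildmatch" factory; the pattern lines below are illustrative:

import pathspec

# Construct from literal lines; any iterable of lines is accepted,
# so an open file handle (e.g. a .gitignore) works just as well.
spec = pathspec.PathSpec.from_lines("gitwildmatch", ["*.pyc", "build/"])
print(spec.match_file("module.pyc"))  # True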
cpburnz/python-path-specification
pathspec/pathspec.py
PathSpec.match_file
def match_file(self, file, separators=None): """ Matches the file to this path-spec. *file* (:class:`str`) is the file path to be matched against :attr:`self.patterns <PathSpec.patterns>`. *separators* (:class:`~collections.abc.Collection` of :class:`str`) optionally contains the path separators to normalize. See :func:`~pathspec.util.normalize_file` for more information. Returns :data:`True` if *file* matched; otherwise, :data:`False`. """ norm_file = util.normalize_file(file, separators=separators) return util.match_file(self.patterns, norm_file)
python
def match_file(self, file, separators=None): """ Matches the file to this path-spec. *file* (:class:`str`) is the file path to be matched against :attr:`self.patterns <PathSpec.patterns>`. *separators* (:class:`~collections.abc.Collection` of :class:`str`) optionally contains the path separators to normalize. See :func:`~pathspec.util.normalize_file` for more information. Returns :data:`True` if *file* matched; otherwise, :data:`False`. """ norm_file = util.normalize_file(file, separators=separators) return util.match_file(self.patterns, norm_file)
[ "def", "match_file", "(", "self", ",", "file", ",", "separators", "=", "None", ")", ":", "norm_file", "=", "util", ".", "normalize_file", "(", "file", ",", "separators", "=", "separators", ")", "return", "util", ".", "match_file", "(", "self", ".", "patterns", ",", "norm_file", ")" ]
Matches the file to this path-spec. *file* (:class:`str`) is the file path to be matched against :attr:`self.patterns <PathSpec.patterns>`. *separators* (:class:`~collections.abc.Collection` of :class:`str`) optionally contains the path separators to normalize. See :func:`~pathspec.util.normalize_file` for more information. Returns :data:`True` if *file* matched; otherwise, :data:`False`.
[ "Matches", "the", "file", "to", "this", "path", "-", "spec", "." ]
train
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/pathspec.py#L77-L91
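The separators argument normalizes alternate path separators before matching; a small sketch (on POSIX, backslashes are not treated as separators unless requested):

import pathspec

spec = pathspec.PathSpec.from_lines("gitwildmatch", ["docs/**"])
print(spec.match_file("docs/index.md"))                      # True
print(spec.match_file("docs\\index.md", separators=["\\"]))  # True: '\' normalized to '/'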
cpburnz/python-path-specification
pathspec/pathspec.py
PathSpec.match_files
def match_files(self, files, separators=None): """ Matches the files to this path-spec. *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains the file paths to be matched against :attr:`self.patterns <PathSpec.patterns>`. *separators* (:class:`~collections.abc.Collection` of :class:`str`; or :data:`None`) optionally contains the path separators to normalize. See :func:`~pathspec.util.normalize_file` for more information. Returns the matched files (:class:`~collections.abc.Iterable` of :class:`str`). """ if isinstance(files, (bytes, unicode)): raise TypeError("files:{!r} is not an iterable.".format(files)) file_map = util.normalize_files(files, separators=separators) matched_files = util.match_files(self.patterns, iterkeys(file_map)) for path in matched_files: yield file_map[path]
python
def match_files(self, files, separators=None): """ Matches the files to this path-spec. *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains the file paths to be matched against :attr:`self.patterns <PathSpec.patterns>`. *separators* (:class:`~collections.abc.Collection` of :class:`str`; or :data:`None`) optionally contains the path separators to normalize. See :func:`~pathspec.util.normalize_file` for more information. Returns the matched files (:class:`~collections.abc.Iterable` of :class:`str`). """ if isinstance(files, (bytes, unicode)): raise TypeError("files:{!r} is not an iterable.".format(files)) file_map = util.normalize_files(files, separators=separators) matched_files = util.match_files(self.patterns, iterkeys(file_map)) for path in matched_files: yield file_map[path]
[ "def", "match_files", "(", "self", ",", "files", ",", "separators", "=", "None", ")", ":", "if", "isinstance", "(", "files", ",", "(", "bytes", ",", "unicode", ")", ")", ":", "raise", "TypeError", "(", "\"files:{!r} is not an iterable.\"", ".", "format", "(", "files", ")", ")", "file_map", "=", "util", ".", "normalize_files", "(", "files", ",", "separators", "=", "separators", ")", "matched_files", "=", "util", ".", "match_files", "(", "self", ".", "patterns", ",", "iterkeys", "(", "file_map", ")", ")", "for", "path", "in", "matched_files", ":", "yield", "file_map", "[", "path", "]" ]
Matches the files to this path-spec. *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains the file paths to be matched against :attr:`self.patterns <PathSpec.patterns>`. *separators* (:class:`~collections.abc.Collection` of :class:`str`; or :data:`None`) optionally contains the path separators to normalize. See :func:`~pathspec.util.normalize_file` for more information. Returns the matched files (:class:`~collections.abc.Iterable` of :class:`str`).
[ "Matches", "the", "files", "to", "this", "path", "-", "spec", "." ]
train
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/pathspec.py#L93-L115
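match_files is a generator over many paths at once; a small sketch (paths are illustrative, and yield order follows input order on modern Pythons):

import pathspec

spec = pathspec.PathSpec.from_lines("gitwildmatch", ["*.log"])
files = ["a.log", "b.txt", "nested/c.log"]
print(list(spec.match_files(files)))  # ['a.log', 'nested/c.log']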
cpburnz/python-path-specification
pathspec/pathspec.py
PathSpec.match_tree
def match_tree(self, root, on_error=None, follow_links=None): """ Walks the specified root path for all files and matches them to this path-spec. *root* (:class:`str`) is the root directory to search for files. *on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally is the error handler for file-system exceptions. See :func:`~pathspec.util.iter_tree` for more information. *follow_links* (:class:`bool` or :data:`None`) optionally is whether to walk symbolic links that resolve to directories. See :func:`~pathspec.util.iter_tree` for more information. Returns the matched files (:class:`~collections.abc.Iterable` of :class:`str`). """ files = util.iter_tree(root, on_error=on_error, follow_links=follow_links) return self.match_files(files)
python
def match_tree(self, root, on_error=None, follow_links=None): """ Walks the specified root path for all files and matches them to this path-spec. *root* (:class:`str`) is the root directory to search for files. *on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally is the error handler for file-system exceptions. See :func:`~pathspec.util.iter_tree` for more information. *follow_links* (:class:`bool` or :data:`None`) optionally is whether to walk symbolic links that resolve to directories. See :func:`~pathspec.util.iter_tree` for more information. Returns the matched files (:class:`~collections.abc.Iterable` of :class:`str`). """ files = util.iter_tree(root, on_error=on_error, follow_links=follow_links) return self.match_files(files)
[ "def", "match_tree", "(", "self", ",", "root", ",", "on_error", "=", "None", ",", "follow_links", "=", "None", ")", ":", "files", "=", "util", ".", "iter_tree", "(", "root", ",", "on_error", "=", "on_error", ",", "follow_links", "=", "follow_links", ")", "return", "self", ".", "match_files", "(", "files", ")" ]
Walks the specified root path for all files and matches them to this path-spec. *root* (:class:`str`) is the root directory to search for files. *on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally is the error handler for file-system exceptions. See :func:`~pathspec.util.iter_tree` for more information. *follow_links* (:class:`bool` or :data:`None`) optionally is whether to walk symbolic links that resolve to directories. See :func:`~pathspec.util.iter_tree` for more information. Returns the matched files (:class:`~collections.abc.Iterable` of :class:`str`).
[ "Walks", "the", "specified", "root", "path", "for", "all", "files", "and", "matches", "them", "to", "this", "path", "-", "spec", "." ]
train
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/pathspec.py#L117-L137
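A short sketch of match_tree(), assuming a hypothetical 'project' directory on disk; it simply feeds iter_tree() output into match_files().

import pathspec

spec = pathspec.PathSpec.from_lines('gitwildmatch', ['*.pyc', '__pycache__/'])
# Walk 'project' recursively; follow_links=False skips symlinked
# directories that iter_tree() would otherwise descend into.
for path in spec.match_tree('project', follow_links=False):
    print(path)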
cpburnz/python-path-specification
pathspec/patterns/gitwildmatch.py
GitWildMatchPattern.pattern_to_regex
def pattern_to_regex(cls, pattern): """ Convert the pattern into a regular expression. *pattern* (:class:`unicode` or :class:`bytes`) is the pattern to convert into a regular expression. Returns the uncompiled regular expression (:class:`unicode`, :class:`bytes`, or :data:`None`), and whether matched files should be included (:data:`True`), excluded (:data:`False`), or if it is a null-operation (:data:`None`). """ if isinstance(pattern, unicode): return_type = unicode elif isinstance(pattern, bytes): return_type = bytes pattern = pattern.decode(_BYTES_ENCODING) else: raise TypeError("pattern:{!r} is not a unicode or byte string.".format(pattern)) pattern = pattern.strip() if pattern.startswith('#'): # A pattern starting with a hash ('#') serves as a comment # (neither includes nor excludes files). Escape the hash with a # back-slash to match a literal hash (i.e., '\#'). regex = None include = None elif pattern == '/': # EDGE CASE: According to `git check-ignore` (v2.4.1), a single # '/' does not match any file. regex = None include = None elif pattern: if pattern.startswith('!'): # A pattern starting with an exclamation mark ('!') negates the # pattern (exclude instead of include). Escape the exclamation # mark with a back-slash to match a literal exclamation mark # (i.e., '\!'). include = False # Remove leading exclamation mark. pattern = pattern[1:] else: include = True if pattern.startswith('\\'): # Remove leading back-slash escape for escaped hash ('#') or # exclamation mark ('!'). pattern = pattern[1:] # Split pattern into segments. pattern_segs = pattern.split('/') # Normalize pattern to make processing easier. if not pattern_segs[0]: # A pattern beginning with a slash ('/') will only match paths # directly on the root directory instead of any descendant # paths. So, remove empty first segment to make pattern relative # to root. del pattern_segs[0] elif len(pattern_segs) == 1 or (len(pattern_segs) == 2 and not pattern_segs[1]): # A single pattern without a beginning slash ('/') will match # any descendant path. This is equivalent to "**/{pattern}". So, # prepend with double-asterisks to make pattern relative to # root. # EDGE CASE: This also holds for a single pattern with a # trailing slash (e.g. dir/). if pattern_segs[0] != '**': pattern_segs.insert(0, '**') else: # EDGE CASE: A pattern without a beginning slash ('/') but # contains at least one prepended directory (e.g. # "dir/{pattern}") should not match "**/dir/{pattern}", # according to `git check-ignore` (v2.4.1). pass if not pattern_segs[-1] and len(pattern_segs) > 1: # A pattern ending with a slash ('/') will match all descendant # paths if it is a directory but not if it is a regular file. # This is equivilent to "{pattern}/**". So, set last segment to # double asterisks to include all descendants. pattern_segs[-1] = '**' # Build regular expression from pattern. output = ['^'] need_slash = False end = len(pattern_segs) - 1 for i, seg in enumerate(pattern_segs): if seg == '**': if i == 0 and i == end: # A pattern consisting solely of double-asterisks ('**') # will match every path. output.append('.+') elif i == 0: # A normalized pattern beginning with double-asterisks # ('**') will match any leading path segments. output.append('(?:.+/)?') need_slash = False elif i == end: # A normalized pattern ending with double-asterisks ('**') # will match any trailing path segments. output.append('/.*') else: # A pattern with inner double-asterisks ('**') will match # multiple (or zero) inner path segments. 
output.append('(?:/.+)?') need_slash = True elif seg == '*': # Match single path segment. if need_slash: output.append('/') output.append('[^/]+') need_slash = True else: # Match segment glob pattern. if need_slash: output.append('/') output.append(cls._translate_segment_glob(seg)) if i == end and include is True: # A pattern ending without a slash ('/') will match a file # or a directory (with paths underneath it). E.g., "foo" # matches "foo", "foo/bar", "foo/bar/baz", etc. # EDGE CASE: However, this does not hold for exclusion cases # according to `git check-ignore` (v2.4.1). output.append('(?:/.*)?') need_slash = True output.append('$') regex = ''.join(output) else: # A blank pattern is a null-operation (neither includes nor # excludes files). regex = None include = None if regex is not None and return_type is bytes: regex = regex.encode(_BYTES_ENCODING) return regex, include
python
def pattern_to_regex(cls, pattern): """ Convert the pattern into a regular expression. *pattern* (:class:`unicode` or :class:`bytes`) is the pattern to convert into a regular expression. Returns the uncompiled regular expression (:class:`unicode`, :class:`bytes`, or :data:`None`), and whether matched files should be included (:data:`True`), excluded (:data:`False`), or if it is a null-operation (:data:`None`). """ if isinstance(pattern, unicode): return_type = unicode elif isinstance(pattern, bytes): return_type = bytes pattern = pattern.decode(_BYTES_ENCODING) else: raise TypeError("pattern:{!r} is not a unicode or byte string.".format(pattern)) pattern = pattern.strip() if pattern.startswith('#'): # A pattern starting with a hash ('#') serves as a comment # (neither includes nor excludes files). Escape the hash with a # back-slash to match a literal hash (i.e., '\#'). regex = None include = None elif pattern == '/': # EDGE CASE: According to `git check-ignore` (v2.4.1), a single # '/' does not match any file. regex = None include = None elif pattern: if pattern.startswith('!'): # A pattern starting with an exclamation mark ('!') negates the # pattern (exclude instead of include). Escape the exclamation # mark with a back-slash to match a literal exclamation mark # (i.e., '\!'). include = False # Remove leading exclamation mark. pattern = pattern[1:] else: include = True if pattern.startswith('\\'): # Remove leading back-slash escape for escaped hash ('#') or # exclamation mark ('!'). pattern = pattern[1:] # Split pattern into segments. pattern_segs = pattern.split('/') # Normalize pattern to make processing easier. if not pattern_segs[0]: # A pattern beginning with a slash ('/') will only match paths # directly on the root directory instead of any descendant # paths. So, remove empty first segment to make pattern relative # to root. del pattern_segs[0] elif len(pattern_segs) == 1 or (len(pattern_segs) == 2 and not pattern_segs[1]): # A single pattern without a beginning slash ('/') will match # any descendant path. This is equivalent to "**/{pattern}". So, # prepend with double-asterisks to make pattern relative to # root. # EDGE CASE: This also holds for a single pattern with a # trailing slash (e.g. dir/). if pattern_segs[0] != '**': pattern_segs.insert(0, '**') else: # EDGE CASE: A pattern without a beginning slash ('/') but # contains at least one prepended directory (e.g. # "dir/{pattern}") should not match "**/dir/{pattern}", # according to `git check-ignore` (v2.4.1). pass if not pattern_segs[-1] and len(pattern_segs) > 1: # A pattern ending with a slash ('/') will match all descendant # paths if it is a directory but not if it is a regular file. # This is equivilent to "{pattern}/**". So, set last segment to # double asterisks to include all descendants. pattern_segs[-1] = '**' # Build regular expression from pattern. output = ['^'] need_slash = False end = len(pattern_segs) - 1 for i, seg in enumerate(pattern_segs): if seg == '**': if i == 0 and i == end: # A pattern consisting solely of double-asterisks ('**') # will match every path. output.append('.+') elif i == 0: # A normalized pattern beginning with double-asterisks # ('**') will match any leading path segments. output.append('(?:.+/)?') need_slash = False elif i == end: # A normalized pattern ending with double-asterisks ('**') # will match any trailing path segments. output.append('/.*') else: # A pattern with inner double-asterisks ('**') will match # multiple (or zero) inner path segments. 
output.append('(?:/.+)?') need_slash = True elif seg == '*': # Match single path segment. if need_slash: output.append('/') output.append('[^/]+') need_slash = True else: # Match segment glob pattern. if need_slash: output.append('/') output.append(cls._translate_segment_glob(seg)) if i == end and include is True: # A pattern ending without a slash ('/') will match a file # or a directory (with paths underneath it). E.g., "foo" # matches "foo", "foo/bar", "foo/bar/baz", etc. # EDGE CASE: However, this does not hold for exclusion cases # according to `git check-ignore` (v2.4.1). output.append('(?:/.*)?') need_slash = True output.append('$') regex = ''.join(output) else: # A blank pattern is a null-operation (neither includes nor # excludes files). regex = None include = None if regex is not None and return_type is bytes: regex = regex.encode(_BYTES_ENCODING) return regex, include
[ "def", "pattern_to_regex", "(", "cls", ",", "pattern", ")", ":", "if", "isinstance", "(", "pattern", ",", "unicode", ")", ":", "return_type", "=", "unicode", "elif", "isinstance", "(", "pattern", ",", "bytes", ")", ":", "return_type", "=", "bytes", "pattern", "=", "pattern", ".", "decode", "(", "_BYTES_ENCODING", ")", "else", ":", "raise", "TypeError", "(", "\"pattern:{!r} is not a unicode or byte string.\"", ".", "format", "(", "pattern", ")", ")", "pattern", "=", "pattern", ".", "strip", "(", ")", "if", "pattern", ".", "startswith", "(", "'#'", ")", ":", "# A pattern starting with a hash ('#') serves as a comment", "# (neither includes nor excludes files). Escape the hash with a", "# back-slash to match a literal hash (i.e., '\\#').", "regex", "=", "None", "include", "=", "None", "elif", "pattern", "==", "'/'", ":", "# EDGE CASE: According to `git check-ignore` (v2.4.1), a single", "# '/' does not match any file.", "regex", "=", "None", "include", "=", "None", "elif", "pattern", ":", "if", "pattern", ".", "startswith", "(", "'!'", ")", ":", "# A pattern starting with an exclamation mark ('!') negates the", "# pattern (exclude instead of include). Escape the exclamation", "# mark with a back-slash to match a literal exclamation mark", "# (i.e., '\\!').", "include", "=", "False", "# Remove leading exclamation mark.", "pattern", "=", "pattern", "[", "1", ":", "]", "else", ":", "include", "=", "True", "if", "pattern", ".", "startswith", "(", "'\\\\'", ")", ":", "# Remove leading back-slash escape for escaped hash ('#') or", "# exclamation mark ('!').", "pattern", "=", "pattern", "[", "1", ":", "]", "# Split pattern into segments.", "pattern_segs", "=", "pattern", ".", "split", "(", "'/'", ")", "# Normalize pattern to make processing easier.", "if", "not", "pattern_segs", "[", "0", "]", ":", "# A pattern beginning with a slash ('/') will only match paths", "# directly on the root directory instead of any descendant", "# paths. So, remove empty first segment to make pattern relative", "# to root.", "del", "pattern_segs", "[", "0", "]", "elif", "len", "(", "pattern_segs", ")", "==", "1", "or", "(", "len", "(", "pattern_segs", ")", "==", "2", "and", "not", "pattern_segs", "[", "1", "]", ")", ":", "# A single pattern without a beginning slash ('/') will match", "# any descendant path. This is equivalent to \"**/{pattern}\". So,", "# prepend with double-asterisks to make pattern relative to", "# root.", "# EDGE CASE: This also holds for a single pattern with a", "# trailing slash (e.g. dir/).", "if", "pattern_segs", "[", "0", "]", "!=", "'**'", ":", "pattern_segs", ".", "insert", "(", "0", ",", "'**'", ")", "else", ":", "# EDGE CASE: A pattern without a beginning slash ('/') but", "# contains at least one prepended directory (e.g.", "# \"dir/{pattern}\") should not match \"**/dir/{pattern}\",", "# according to `git check-ignore` (v2.4.1).", "pass", "if", "not", "pattern_segs", "[", "-", "1", "]", "and", "len", "(", "pattern_segs", ")", ">", "1", ":", "# A pattern ending with a slash ('/') will match all descendant", "# paths if it is a directory but not if it is a regular file.", "# This is equivilent to \"{pattern}/**\". 
So, set last segment to", "# double asterisks to include all descendants.", "pattern_segs", "[", "-", "1", "]", "=", "'**'", "# Build regular expression from pattern.", "output", "=", "[", "'^'", "]", "need_slash", "=", "False", "end", "=", "len", "(", "pattern_segs", ")", "-", "1", "for", "i", ",", "seg", "in", "enumerate", "(", "pattern_segs", ")", ":", "if", "seg", "==", "'**'", ":", "if", "i", "==", "0", "and", "i", "==", "end", ":", "# A pattern consisting solely of double-asterisks ('**')", "# will match every path.", "output", ".", "append", "(", "'.+'", ")", "elif", "i", "==", "0", ":", "# A normalized pattern beginning with double-asterisks", "# ('**') will match any leading path segments.", "output", ".", "append", "(", "'(?:.+/)?'", ")", "need_slash", "=", "False", "elif", "i", "==", "end", ":", "# A normalized pattern ending with double-asterisks ('**')", "# will match any trailing path segments.", "output", ".", "append", "(", "'/.*'", ")", "else", ":", "# A pattern with inner double-asterisks ('**') will match", "# multiple (or zero) inner path segments.", "output", ".", "append", "(", "'(?:/.+)?'", ")", "need_slash", "=", "True", "elif", "seg", "==", "'*'", ":", "# Match single path segment.", "if", "need_slash", ":", "output", ".", "append", "(", "'/'", ")", "output", ".", "append", "(", "'[^/]+'", ")", "need_slash", "=", "True", "else", ":", "# Match segment glob pattern.", "if", "need_slash", ":", "output", ".", "append", "(", "'/'", ")", "output", ".", "append", "(", "cls", ".", "_translate_segment_glob", "(", "seg", ")", ")", "if", "i", "==", "end", "and", "include", "is", "True", ":", "# A pattern ending without a slash ('/') will match a file", "# or a directory (with paths underneath it). E.g., \"foo\"", "# matches \"foo\", \"foo/bar\", \"foo/bar/baz\", etc.", "# EDGE CASE: However, this does not hold for exclusion cases", "# according to `git check-ignore` (v2.4.1).", "output", ".", "append", "(", "'(?:/.*)?'", ")", "need_slash", "=", "True", "output", ".", "append", "(", "'$'", ")", "regex", "=", "''", ".", "join", "(", "output", ")", "else", ":", "# A blank pattern is a null-operation (neither includes nor", "# excludes files).", "regex", "=", "None", "include", "=", "None", "if", "regex", "is", "not", "None", "and", "return_type", "is", "bytes", ":", "regex", "=", "regex", ".", "encode", "(", "_BYTES_ENCODING", ")", "return", "regex", ",", "include" ]
Convert the pattern into a regular expression. *pattern* (:class:`unicode` or :class:`bytes`) is the pattern to convert into a regular expression. Returns the uncompiled regular expression (:class:`unicode`, :class:`bytes`, or :data:`None`), and whether matched files should be included (:data:`True`), excluded (:data:`False`), or if it is a null-operation (:data:`None`).
[ "Convert", "the", "pattern", "into", "a", "regular", "expression", "." ]
train
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/patterns/gitwildmatch.py#L30-L174
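A sketch of calling the classmethod directly; the expected regex below is derived from the translation rules in the code above, not from library documentation.

from pathspec.patterns.gitwildmatch import GitWildMatchPattern

regex, include = GitWildMatchPattern.pattern_to_regex('docs/*.md')
print(include)  # True
# 'docs/*.md' contains a slash, so it stays anchored to the root and no
# leading '**/' is added; roughly: ^docs/[^/]*\.md(?:/.*)?$
print(regex)

Comments, blank patterns, and a bare '/' all come back as (None, None).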
cpburnz/python-path-specification
pathspec/patterns/gitwildmatch.py
GitWildMatchPattern._translate_segment_glob
def _translate_segment_glob(pattern): """ Translates the glob pattern to a regular expression. This is used in the constructor to translate a path segment glob pattern to its corresponding regular expression. *pattern* (:class:`str`) is the glob pattern. Returns the regular expression (:class:`str`). """ # NOTE: This is derived from `fnmatch.translate()` and is similar to # the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set. escape = False regex = '' i, end = 0, len(pattern) while i < end: # Get next character. char = pattern[i] i += 1 if escape: # Escape the character. escape = False regex += re.escape(char) elif char == '\\': # Escape character, escape next character. escape = True elif char == '*': # Multi-character wildcard. Match any string (except slashes), # including an empty string. regex += '[^/]*' elif char == '?': # Single-character wildcard. Match any single character (except # a slash). regex += '[^/]' elif char == '[': # Braket expression wildcard. Except for the beginning # exclamation mark, the whole braket expression can be used # directly as regex but we have to find where the expression # ends. # - "[][!]" matchs ']', '[' and '!'. # - "[]-]" matchs ']' and '-'. # - "[!]a-]" matchs any character except ']', 'a' and '-'. j = i # Pass brack expression negation. if j < end and pattern[j] == '!': j += 1 # Pass first closing braket if it is at the beginning of the # expression. if j < end and pattern[j] == ']': j += 1 # Find closing braket. Stop once we reach the end or find it. while j < end and pattern[j] != ']': j += 1 if j < end: # Found end of braket expression. Increment j to be one past # the closing braket: # # [...] # ^ ^ # i j # j += 1 expr = '[' if pattern[i] == '!': # Braket expression needs to be negated. expr += '^' i += 1 elif pattern[i] == '^': # POSIX declares that the regex braket expression negation # "[^...]" is undefined in a glob pattern. Python's # `fnmatch.translate()` escapes the caret ('^') as a # literal. To maintain consistency with undefined behavior, # I am escaping the '^' as well. expr += '\\^' i += 1 # Build regex braket expression. Escape slashes so they are # treated as literal slashes by regex as defined by POSIX. expr += pattern[i:j].replace('\\', '\\\\') # Add regex braket expression to regex result. regex += expr # Set i to one past the closing braket. i = j else: # Failed to find closing braket, treat opening braket as a # braket literal instead of as an expression. regex += '\\[' else: # Regular character, escape it for regex. regex += re.escape(char) return regex
python
def _translate_segment_glob(pattern): """ Translates the glob pattern to a regular expression. This is used in the constructor to translate a path segment glob pattern to its corresponding regular expression. *pattern* (:class:`str`) is the glob pattern. Returns the regular expression (:class:`str`). """ # NOTE: This is derived from `fnmatch.translate()` and is similar to # the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set. escape = False regex = '' i, end = 0, len(pattern) while i < end: # Get next character. char = pattern[i] i += 1 if escape: # Escape the character. escape = False regex += re.escape(char) elif char == '\\': # Escape character, escape next character. escape = True elif char == '*': # Multi-character wildcard. Match any string (except slashes), # including an empty string. regex += '[^/]*' elif char == '?': # Single-character wildcard. Match any single character (except # a slash). regex += '[^/]' elif char == '[': # Braket expression wildcard. Except for the beginning # exclamation mark, the whole braket expression can be used # directly as regex but we have to find where the expression # ends. # - "[][!]" matchs ']', '[' and '!'. # - "[]-]" matchs ']' and '-'. # - "[!]a-]" matchs any character except ']', 'a' and '-'. j = i # Pass brack expression negation. if j < end and pattern[j] == '!': j += 1 # Pass first closing braket if it is at the beginning of the # expression. if j < end and pattern[j] == ']': j += 1 # Find closing braket. Stop once we reach the end or find it. while j < end and pattern[j] != ']': j += 1 if j < end: # Found end of braket expression. Increment j to be one past # the closing braket: # # [...] # ^ ^ # i j # j += 1 expr = '[' if pattern[i] == '!': # Braket expression needs to be negated. expr += '^' i += 1 elif pattern[i] == '^': # POSIX declares that the regex braket expression negation # "[^...]" is undefined in a glob pattern. Python's # `fnmatch.translate()` escapes the caret ('^') as a # literal. To maintain consistency with undefined behavior, # I am escaping the '^' as well. expr += '\\^' i += 1 # Build regex braket expression. Escape slashes so they are # treated as literal slashes by regex as defined by POSIX. expr += pattern[i:j].replace('\\', '\\\\') # Add regex braket expression to regex result. regex += expr # Set i to one past the closing braket. i = j else: # Failed to find closing braket, treat opening braket as a # braket literal instead of as an expression. regex += '\\[' else: # Regular character, escape it for regex. regex += re.escape(char) return regex
[ "def", "_translate_segment_glob", "(", "pattern", ")", ":", "# NOTE: This is derived from `fnmatch.translate()` and is similar to", "# the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.", "escape", "=", "False", "regex", "=", "''", "i", ",", "end", "=", "0", ",", "len", "(", "pattern", ")", "while", "i", "<", "end", ":", "# Get next character.", "char", "=", "pattern", "[", "i", "]", "i", "+=", "1", "if", "escape", ":", "# Escape the character.", "escape", "=", "False", "regex", "+=", "re", ".", "escape", "(", "char", ")", "elif", "char", "==", "'\\\\'", ":", "# Escape character, escape next character.", "escape", "=", "True", "elif", "char", "==", "'*'", ":", "# Multi-character wildcard. Match any string (except slashes),", "# including an empty string.", "regex", "+=", "'[^/]*'", "elif", "char", "==", "'?'", ":", "# Single-character wildcard. Match any single character (except", "# a slash).", "regex", "+=", "'[^/]'", "elif", "char", "==", "'['", ":", "# Braket expression wildcard. Except for the beginning", "# exclamation mark, the whole braket expression can be used", "# directly as regex but we have to find where the expression", "# ends.", "# - \"[][!]\" matchs ']', '[' and '!'.", "# - \"[]-]\" matchs ']' and '-'.", "# - \"[!]a-]\" matchs any character except ']', 'a' and '-'.", "j", "=", "i", "# Pass brack expression negation.", "if", "j", "<", "end", "and", "pattern", "[", "j", "]", "==", "'!'", ":", "j", "+=", "1", "# Pass first closing braket if it is at the beginning of the", "# expression.", "if", "j", "<", "end", "and", "pattern", "[", "j", "]", "==", "']'", ":", "j", "+=", "1", "# Find closing braket. Stop once we reach the end or find it.", "while", "j", "<", "end", "and", "pattern", "[", "j", "]", "!=", "']'", ":", "j", "+=", "1", "if", "j", "<", "end", ":", "# Found end of braket expression. Increment j to be one past", "# the closing braket:", "#", "# [...]", "# ^ ^", "# i j", "#", "j", "+=", "1", "expr", "=", "'['", "if", "pattern", "[", "i", "]", "==", "'!'", ":", "# Braket expression needs to be negated.", "expr", "+=", "'^'", "i", "+=", "1", "elif", "pattern", "[", "i", "]", "==", "'^'", ":", "# POSIX declares that the regex braket expression negation", "# \"[^...]\" is undefined in a glob pattern. Python's", "# `fnmatch.translate()` escapes the caret ('^') as a", "# literal. To maintain consistency with undefined behavior,", "# I am escaping the '^' as well.", "expr", "+=", "'\\\\^'", "i", "+=", "1", "# Build regex braket expression. Escape slashes so they are", "# treated as literal slashes by regex as defined by POSIX.", "expr", "+=", "pattern", "[", "i", ":", "j", "]", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")", "# Add regex braket expression to regex result.", "regex", "+=", "expr", "# Set i to one past the closing braket.", "i", "=", "j", "else", ":", "# Failed to find closing braket, treat opening braket as a", "# braket literal instead of as an expression.", "regex", "+=", "'\\\\['", "else", ":", "# Regular character, escape it for regex.", "regex", "+=", "re", ".", "escape", "(", "char", ")", "return", "regex" ]
Translates the glob pattern to a regular expression. This is used in the constructor to translate a path segment glob pattern to its corresponding regular expression. *pattern* (:class:`str`) is the glob pattern. Returns the regular expression (:class:`str`).
[ "Translates", "the", "glob", "pattern", "to", "a", "regular", "expression", ".", "This", "is", "used", "in", "the", "constructor", "to", "translate", "a", "path", "segment", "glob", "pattern", "to", "its", "corresponding", "regular", "expression", "." ]
train
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/patterns/gitwildmatch.py#L177-L280
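An illustrative sketch; _translate_segment_glob() is private, so this is only to show the translation rules ('*' and '?' never cross a '/', matching POSIX fnmatch() with FNM_PATHNAME).

from pathspec.patterns.gitwildmatch import GitWildMatchPattern

print(GitWildMatchPattern._translate_segment_glob('*.py'))   # [^/]*\.py
print(GitWildMatchPattern._translate_segment_glob('file?'))  # file[^/]
print(GitWildMatchPattern._translate_segment_glob('[!ab]'))  # [^ab]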
cpburnz/python-path-specification
pathspec/patterns/gitwildmatch.py
GitIgnorePattern.pattern_to_regex
def pattern_to_regex(cls, *args, **kw): """ Warn about deprecation. """ cls._deprecated() return super(GitIgnorePattern, cls).pattern_to_regex(*args, **kw)
python
def pattern_to_regex(cls, *args, **kw): """ Warn about deprecation. """ cls._deprecated() return super(GitIgnorePattern, cls).pattern_to_regex(*args, **kw)
[ "def", "pattern_to_regex", "(", "cls", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "cls", ".", "_deprecated", "(", ")", "return", "super", "(", "GitIgnorePattern", ",", "cls", ")", ".", "pattern_to_regex", "(", "*", "args", ",", "*", "*", "kw", ")" ]
Warn about deprecation.
[ "Warn", "about", "deprecation", "." ]
train
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/patterns/gitwildmatch.py#L306-L311
cpburnz/python-path-specification
pathspec/util.py
iter_tree
def iter_tree(root, on_error=None, follow_links=None): """ Walks the specified directory for all files. *root* (:class:`str`) is the root directory to search for files. *on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally is the error handler for file-system exceptions. It will be called with the exception (:exc:`OSError`). Reraise the exception to abort the walk. Default is :data:`None` to ignore file-system exceptions. *follow_links* (:class:`bool` or :data:`None`) optionally is whether to walk symbolic links that resolve to directories. Default is :data:`None` for :data:`True`. Raises :exc:`RecursionError` if recursion is detected. Returns an :class:`~collections.abc.Iterable` yielding the path to each file (:class:`str`) relative to *root*. """ if on_error is not None and not callable(on_error): raise TypeError("on_error:{!r} is not callable.".format(on_error)) if follow_links is None: follow_links = True for file_rel in _iter_tree_next(os.path.abspath(root), '', {}, on_error, follow_links): yield file_rel
python
def iter_tree(root, on_error=None, follow_links=None): """ Walks the specified directory for all files. *root* (:class:`str`) is the root directory to search for files. *on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally is the error handler for file-system exceptions. It will be called with the exception (:exc:`OSError`). Reraise the exception to abort the walk. Default is :data:`None` to ignore file-system exceptions. *follow_links* (:class:`bool` or :data:`None`) optionally is whether to walk symbolic links that resolve to directories. Default is :data:`None` for :data:`True`. Raises :exc:`RecursionError` if recursion is detected. Returns an :class:`~collections.abc.Iterable` yielding the path to each file (:class:`str`) relative to *root*. """ if on_error is not None and not callable(on_error): raise TypeError("on_error:{!r} is not callable.".format(on_error)) if follow_links is None: follow_links = True for file_rel in _iter_tree_next(os.path.abspath(root), '', {}, on_error, follow_links): yield file_rel
[ "def", "iter_tree", "(", "root", ",", "on_error", "=", "None", ",", "follow_links", "=", "None", ")", ":", "if", "on_error", "is", "not", "None", "and", "not", "callable", "(", "on_error", ")", ":", "raise", "TypeError", "(", "\"on_error:{!r} is not callable.\"", ".", "format", "(", "on_error", ")", ")", "if", "follow_links", "is", "None", ":", "follow_links", "=", "True", "for", "file_rel", "in", "_iter_tree_next", "(", "os", ".", "path", ".", "abspath", "(", "root", ")", ",", "''", ",", "{", "}", ",", "on_error", ",", "follow_links", ")", ":", "yield", "file_rel" ]
Walks the specified directory for all files. *root* (:class:`str`) is the root directory to search for files. *on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally is the error handler for file-system exceptions. It will be called with the exception (:exc:`OSError`). Reraise the exception to abort the walk. Default is :data:`None` to ignore file-system exceptions. *follow_links* (:class:`bool` or :data:`None`) optionally is whether to walk symbolic links that resolve to directories. Default is :data:`None` for :data:`True`. Raises :exc:`RecursionError` if recursion is detected. Returns an :class:`~collections.abc.Iterable` yielding the path to each file (:class:`str`) relative to *root*.
[ "Walks", "the", "specified", "directory", "for", "all", "files", "." ]
train
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/util.py#L27-L55
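A usage sketch, assuming a hypothetical 'project' directory; the log_error handler name is made up.

from pathspec import util

def log_error(err):
    # Called with the OSError; not re-raising means the entry is skipped.
    print('skipped:', err)

for rel_path in util.iter_tree('project', on_error=log_error, follow_links=False):
    print(rel_path)  # paths relative to 'project'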
cpburnz/python-path-specification
pathspec/util.py
_iter_tree_next
def _iter_tree_next(root_full, dir_rel, memo, on_error, follow_links): """ Scan the directory for all descendant files. *root_full* (:class:`str`) the absolute path to the root directory. *dir_rel* (:class:`str`) the path to the directory to scan relative to *root_full*. *memo* (:class:`dict`) keeps track of ancestor directories encountered. Maps each ancestor real path (:class:`str`) to relative path (:class:`str`). *on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally is the error handler for file-system exceptions. *follow_links* (:class:`bool`) is whether to walk symbolic links that resolve to directories. """ dir_full = os.path.join(root_full, dir_rel) dir_real = os.path.realpath(dir_full) # Remember each encountered ancestor directory and its canonical # (real) path. If a canonical path is encountered more than once, # recursion has occurred. if dir_real not in memo: memo[dir_real] = dir_rel else: raise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel) for node in os.listdir(dir_full): node_rel = os.path.join(dir_rel, node) node_full = os.path.join(root_full, node_rel) # Inspect child node. try: node_stat = os.lstat(node_full) except OSError as e: if on_error is not None: on_error(e) continue if stat.S_ISLNK(node_stat.st_mode): # Child node is a link, inspect the target node. is_link = True try: node_stat = os.stat(node_full) except OSError as e: if on_error is not None: on_error(e) continue else: is_link = False if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link): # Child node is a directory, recurse into it and yield its # descendant files. for file_rel in _iter_tree_next(root_full, node_rel, memo, on_error, follow_links): yield file_rel elif stat.S_ISREG(node_stat.st_mode): # Child node is a file, yield it. yield node_rel # NOTE: Make sure to remove the canonical (real) path of the directory # from the ancestors memo once we are done with it. This allows the # same directory to appear multiple times. If this is not done, the # second occurrence of the directory will be incorrectly interpreted as # a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>. del memo[dir_real]
python
def _iter_tree_next(root_full, dir_rel, memo, on_error, follow_links): """ Scan the directory for all descendant files. *root_full* (:class:`str`) the absolute path to the root directory. *dir_rel* (:class:`str`) the path to the directory to scan relative to *root_full*. *memo* (:class:`dict`) keeps track of ancestor directories encountered. Maps each ancestor real path (:class:`str`) to relative path (:class:`str`). *on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally is the error handler for file-system exceptions. *follow_links* (:class:`bool`) is whether to walk symbolic links that resolve to directories. """ dir_full = os.path.join(root_full, dir_rel) dir_real = os.path.realpath(dir_full) # Remember each encountered ancestor directory and its canonical # (real) path. If a canonical path is encountered more than once, # recursion has occurred. if dir_real not in memo: memo[dir_real] = dir_rel else: raise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel) for node in os.listdir(dir_full): node_rel = os.path.join(dir_rel, node) node_full = os.path.join(root_full, node_rel) # Inspect child node. try: node_stat = os.lstat(node_full) except OSError as e: if on_error is not None: on_error(e) continue if stat.S_ISLNK(node_stat.st_mode): # Child node is a link, inspect the target node. is_link = True try: node_stat = os.stat(node_full) except OSError as e: if on_error is not None: on_error(e) continue else: is_link = False if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link): # Child node is a directory, recurse into it and yield its # descendant files. for file_rel in _iter_tree_next(root_full, node_rel, memo, on_error, follow_links): yield file_rel elif stat.S_ISREG(node_stat.st_mode): # Child node is a file, yield it. yield node_rel # NOTE: Make sure to remove the canonical (real) path of the directory # from the ancestors memo once we are done with it. This allows the # same directory to appear multiple times. If this is not done, the # second occurrence of the directory will be incorrectly interpreted as # a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>. del memo[dir_real]
[ "def", "_iter_tree_next", "(", "root_full", ",", "dir_rel", ",", "memo", ",", "on_error", ",", "follow_links", ")", ":", "dir_full", "=", "os", ".", "path", ".", "join", "(", "root_full", ",", "dir_rel", ")", "dir_real", "=", "os", ".", "path", ".", "realpath", "(", "dir_full", ")", "# Remember each encountered ancestor directory and its canonical", "# (real) path. If a canonical path is encountered more than once,", "# recursion has occurred.", "if", "dir_real", "not", "in", "memo", ":", "memo", "[", "dir_real", "]", "=", "dir_rel", "else", ":", "raise", "RecursionError", "(", "real_path", "=", "dir_real", ",", "first_path", "=", "memo", "[", "dir_real", "]", ",", "second_path", "=", "dir_rel", ")", "for", "node", "in", "os", ".", "listdir", "(", "dir_full", ")", ":", "node_rel", "=", "os", ".", "path", ".", "join", "(", "dir_rel", ",", "node", ")", "node_full", "=", "os", ".", "path", ".", "join", "(", "root_full", ",", "node_rel", ")", "# Inspect child node.", "try", ":", "node_stat", "=", "os", ".", "lstat", "(", "node_full", ")", "except", "OSError", "as", "e", ":", "if", "on_error", "is", "not", "None", ":", "on_error", "(", "e", ")", "continue", "if", "stat", ".", "S_ISLNK", "(", "node_stat", ".", "st_mode", ")", ":", "# Child node is a link, inspect the target node.", "is_link", "=", "True", "try", ":", "node_stat", "=", "os", ".", "stat", "(", "node_full", ")", "except", "OSError", "as", "e", ":", "if", "on_error", "is", "not", "None", ":", "on_error", "(", "e", ")", "continue", "else", ":", "is_link", "=", "False", "if", "stat", ".", "S_ISDIR", "(", "node_stat", ".", "st_mode", ")", "and", "(", "follow_links", "or", "not", "is_link", ")", ":", "# Child node is a directory, recurse into it and yield its", "# decendant files.", "for", "file_rel", "in", "_iter_tree_next", "(", "root_full", ",", "node_rel", ",", "memo", ",", "on_error", ",", "follow_links", ")", ":", "yield", "file_rel", "elif", "stat", ".", "S_ISREG", "(", "node_stat", ".", "st_mode", ")", ":", "# Child node is a file, yield it.", "yield", "node_rel", "# NOTE: Make sure to remove the canonical (real) path of the directory", "# from the ancestors memo once we are done with it. This allows the", "# same directory to appear multiple times. If this is not done, the", "# second occurance of the directory will be incorrectly interpreted as", "# a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>.", "del", "memo", "[", "dir_real", "]" ]
Scan the directory for all descendant files. *root_full* (:class:`str`) the absolute path to the root directory. *dir_rel* (:class:`str`) the path to the directory to scan relative to *root_full*. *memo* (:class:`dict`) keeps track of ancestor directories encountered. Maps each ancestor real path (:class:`str`) to relative path (:class:`str`). *on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally is the error handler for file-system exceptions. *follow_links* (:class:`bool`) is whether to walk symbolic links that resolve to directories.
[ "Scan", "the", "directory", "for", "all", "descendant", "files", "." ]
train
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/util.py#L57-L126
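A sketch of the recursion detection this helper provides, using a hypothetical symlink loop (POSIX only); iter_tree() follows links by default, so the second visit to the same real path raises.

import os
from pathspec import util

os.makedirs('loop/sub', exist_ok=True)
if not os.path.lexists('loop/sub/back'):
    os.symlink(os.path.abspath('loop'), 'loop/sub/back')

try:
    list(util.iter_tree('loop'))
except util.RecursionError as err:
    print(err.message)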
cpburnz/python-path-specification
pathspec/util.py
match_file
def match_file(patterns, file): """ Matches the file to the patterns. *patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`) contains the patterns to use. *file* (:class:`str`) is the normalized file path to be matched against *patterns*. Returns :data:`True` if *file* matched; otherwise, :data:`False`. """ matched = False for pattern in patterns: if pattern.include is not None: if file in pattern.match((file,)): matched = pattern.include return matched
python
def match_file(patterns, file): """ Matches the file to the patterns. *patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`) contains the patterns to use. *file* (:class:`str`) is the normalized file path to be matched against *patterns*. Returns :data:`True` if *file* matched; otherwise, :data:`False`. """ matched = False for pattern in patterns: if pattern.include is not None: if file in pattern.match((file,)): matched = pattern.include return matched
[ "def", "match_file", "(", "patterns", ",", "file", ")", ":", "matched", "=", "False", "for", "pattern", "in", "patterns", ":", "if", "pattern", ".", "include", "is", "not", "None", ":", "if", "file", "in", "pattern", ".", "match", "(", "(", "file", ",", ")", ")", ":", "matched", "=", "pattern", ".", "include", "return", "matched" ]
Matches the file to the patterns. *patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`) contains the patterns to use. *file* (:class:`str`) is the normalized file path to be matched against *patterns*. Returns :data:`True` if *file* matched; otherwise, :data:`False`.
[ "Matches", "the", "file", "to", "the", "patterns", "." ]
train
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/util.py#L139-L156
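A sketch of the last-match-wins semantics, assuming GitWildMatchPattern instances as the patterns:

from pathspec import util
from pathspec.patterns.gitwildmatch import GitWildMatchPattern

patterns = [GitWildMatchPattern('*.log'), GitWildMatchPattern('!keep.log')]
# The later negated pattern overrides the earlier include.
print(util.match_file(patterns, 'build/debug.log'))  # True
print(util.match_file(patterns, 'build/keep.log'))   # False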
cpburnz/python-path-specification
pathspec/util.py
match_files
def match_files(patterns, files): """ Matches the files to the patterns. *patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`) contains the patterns to use. *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains the normalized file paths to be matched against *patterns*. Returns the matched files (:class:`set` of :class:`str`). """ all_files = files if isinstance(files, collection_type) else list(files) return_files = set() for pattern in patterns: if pattern.include is not None: result_files = pattern.match(all_files) if pattern.include: return_files.update(result_files) else: return_files.difference_update(result_files) return return_files
python
def match_files(patterns, files): """ Matches the files to the patterns. *patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`) contains the patterns to use. *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains the normalized file paths to be matched against *patterns*. Returns the matched files (:class:`set` of :class:`str`). """ all_files = files if isinstance(files, collection_type) else list(files) return_files = set() for pattern in patterns: if pattern.include is not None: result_files = pattern.match(all_files) if pattern.include: return_files.update(result_files) else: return_files.difference_update(result_files) return return_files
[ "def", "match_files", "(", "patterns", ",", "files", ")", ":", "all_files", "=", "files", "if", "isinstance", "(", "files", ",", "collection_type", ")", "else", "list", "(", "files", ")", "return_files", "=", "set", "(", ")", "for", "pattern", "in", "patterns", ":", "if", "pattern", ".", "include", "is", "not", "None", ":", "result_files", "=", "pattern", ".", "match", "(", "all_files", ")", "if", "pattern", ".", "include", ":", "return_files", ".", "update", "(", "result_files", ")", "else", ":", "return_files", ".", "difference_update", "(", "result_files", ")", "return", "return_files" ]
Matches the files to the patterns. *patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`) contains the patterns to use. *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains the normalized file paths to be matched against *patterns*. Returns the matched files (:class:`set` of :class:`str`).
[ "Matches", "the", "files", "to", "the", "patterns", "." ]
train
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/util.py#L158-L179
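A sketch of the set semantics (include patterns add to the result, exclude patterns subtract), with a hypothetical file list:

from pathspec import util
from pathspec.patterns.gitwildmatch import GitWildMatchPattern

patterns = [GitWildMatchPattern('*.txt'), GitWildMatchPattern('!notes.txt')]
print(util.match_files(patterns, ['a.txt', 'notes.txt', 'b.md']))  # {'a.txt'}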
cpburnz/python-path-specification
pathspec/util.py
normalize_file
def normalize_file(file, separators=None): """ Normalizes the file path to use the POSIX path separator (i.e., ``'/'``). *file* (:class:`str`) is the file path. *separators* (:class:`~collections.abc.Collection` of :class:`str`; or :data:`None`) optionally contains the path separators to normalize. This does not need to include the POSIX path separator (``'/'``), but including it will not affect the results. Default is :data:`None` for :data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty container (e.g., an empty tuple ``()``). Returns the normalized file path (:class:`str`). """ # Normalize path separators. if separators is None: separators = NORMALIZE_PATH_SEPS norm_file = file for sep in separators: norm_file = norm_file.replace(sep, posixpath.sep) # Remove current directory prefix. if norm_file.startswith('./'): norm_file = norm_file[2:] return norm_file
python
def normalize_file(file, separators=None): """ Normalizes the file path to use the POSIX path separator (i.e., ``'/'``). *file* (:class:`str`) is the file path. *separators* (:class:`~collections.abc.Collection` of :class:`str`; or :data:`None`) optionally contains the path separators to normalize. This does not need to include the POSIX path separator (``'/'``), but including it will not affect the results. Default is :data:`None` for :data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty container (e.g., an empty tuple ``()``). Returns the normalized file path (:class:`str`). """ # Normalize path separators. if separators is None: separators = NORMALIZE_PATH_SEPS norm_file = file for sep in separators: norm_file = norm_file.replace(sep, posixpath.sep) # Remove current directory prefix. if norm_file.startswith('./'): norm_file = norm_file[2:] return norm_file
[ "def", "normalize_file", "(", "file", ",", "separators", "=", "None", ")", ":", "# Normalize path separators.", "if", "separators", "is", "None", ":", "separators", "=", "NORMALIZE_PATH_SEPS", "norm_file", "=", "file", "for", "sep", "in", "separators", ":", "norm_file", "=", "norm_file", ".", "replace", "(", "sep", ",", "posixpath", ".", "sep", ")", "# Remove current directory prefix.", "if", "norm_file", ".", "startswith", "(", "'./'", ")", ":", "norm_file", "=", "norm_file", "[", "2", ":", "]", "return", "norm_file" ]
Normalizes the file path to use the POSIX path separator (i.e., ``'/'``). *file* (:class:`str`) is the file path. *separators* (:class:`~collections.abc.Collection` of :class:`str`; or :data:`None`) optionally contains the path separators to normalize. This does not need to include the POSIX path separator (``'/'``), but including it will not affect the results. Default is :data:`None` for :data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty container (e.g., an empty tuple ``()``). Returns the normalized file path (:class:`str`).
[ "Normalizes", "the", "file", "path", "to", "use", "the", "POSIX", "path", "separator", "(", "i", ".", "e", ".", "/", ")", "." ]
train
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/util.py#L181-L207
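A sketch; the default separators depend on the platform (they are derived from the OS path separators), so the backslash is passed explicitly here to keep the example portable:

from pathspec import util

print(util.normalize_file('.\\docs\\api.rst', separators=('\\',)))
# 'docs/api.rst' -- separators replaced, then the './' prefix stripped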
cpburnz/python-path-specification
pathspec/util.py
normalize_files
def normalize_files(files, separators=None): """ Normalizes the file paths to use the POSIX path separator. *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains the file paths to be normalized. *separators* (:class:`~collections.abc.Collection` of :class:`str`; or :data:`None`) optionally contains the path separators to normalize. See :func:`normalize_file` for more information. Returns a :class:`dict` mapping each normalized file path (:class:`str`) to the original file path (:class:`str`). """ norm_files = {} for path in files: norm_files[normalize_file(path, separators=separators)] = path return norm_files
python
def normalize_files(files, separators=None): """ Normalizes the file paths to use the POSIX path separator. *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains the file paths to be normalized. *separators* (:class:`~collections.abc.Collection` of :class:`str`; or :data:`None`) optionally contains the path separators to normalize. See :func:`normalize_file` for more information. Returns a :class:`dict` mapping each normalized file path (:class:`str`) to the original file path (:class:`str`). """ norm_files = {} for path in files: norm_files[normalize_file(path, separators=separators)] = path return norm_files
[ "def", "normalize_files", "(", "files", ",", "separators", "=", "None", ")", ":", "norm_files", "=", "{", "}", "for", "path", "in", "files", ":", "norm_files", "[", "normalize_file", "(", "path", ",", "separators", "=", "separators", ")", "]", "=", "path", "return", "norm_files" ]
Normalizes the file paths to use the POSIX path separator. *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains the file paths to be normalized. *separators* (:class:`~collections.abc.Collection` of :class:`str`; or :data:`None`) optionally contains the path separators to normalize. See :func:`normalize_file` for more information. Returns a :class:`dict` mapping each normalized file path (:class:`str`) to the original file path (:class:`str`).
[ "Normalizes", "the", "file", "paths", "to", "use", "the", "POSIX", "path", "separator", "." ]
train
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/util.py#L209-L226
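A sketch of the returned mapping, which lets callers translate matches back to the paths they originally supplied; the input paths are hypothetical:

from pathspec import util

mapping = util.normalize_files(['.\\a.txt', 'b/c.txt'], separators=('\\',))
print(mapping)  # {'a.txt': '.\\a.txt', 'b/c.txt': 'b/c.txt'}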
cpburnz/python-path-specification
pathspec/util.py
register_pattern
def register_pattern(name, pattern_factory, override=None): """ Registers the specified pattern factory. *name* (:class:`str`) is the name to register the pattern factory under. *pattern_factory* (:class:`~collections.abc.Callable`) is used to compile patterns. It must accept an uncompiled pattern (:class:`str`) and return the compiled pattern (:class:`.Pattern`). *override* (:class:`bool` or :data:`None`) optionally is whether to allow overriding an already registered pattern under the same name (:data:`True`), instead of raising an :exc:`AlreadyRegisteredError` (:data:`False`). Default is :data:`None` for :data:`False`. """ if not isinstance(name, string_types): raise TypeError("name:{!r} is not a string.".format(name)) if not callable(pattern_factory): raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory)) if name in _registered_patterns and not override: raise AlreadyRegisteredError(name, _registered_patterns[name]) _registered_patterns[name] = pattern_factory
python
def register_pattern(name, pattern_factory, override=None): """ Registers the specified pattern factory. *name* (:class:`str`) is the name to register the pattern factory under. *pattern_factory* (:class:`~collections.abc.Callable`) is used to compile patterns. It must accept an uncompiled pattern (:class:`str`) and return the compiled pattern (:class:`.Pattern`). *override* (:class:`bool` or :data:`None`) optionally is whether to allow overriding an already registered pattern under the same name (:data:`True`), instead of raising an :exc:`AlreadyRegisteredError` (:data:`False`). Default is :data:`None` for :data:`False`. """ if not isinstance(name, string_types): raise TypeError("name:{!r} is not a string.".format(name)) if not callable(pattern_factory): raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory)) if name in _registered_patterns and not override: raise AlreadyRegisteredError(name, _registered_patterns[name]) _registered_patterns[name] = pattern_factory
[ "def", "register_pattern", "(", "name", ",", "pattern_factory", ",", "override", "=", "None", ")", ":", "if", "not", "isinstance", "(", "name", ",", "string_types", ")", ":", "raise", "TypeError", "(", "\"name:{!r} is not a string.\"", ".", "format", "(", "name", ")", ")", "if", "not", "callable", "(", "pattern_factory", ")", ":", "raise", "TypeError", "(", "\"pattern_factory:{!r} is not callable.\"", ".", "format", "(", "pattern_factory", ")", ")", "if", "name", "in", "_registered_patterns", "and", "not", "override", ":", "raise", "AlreadyRegisteredError", "(", "name", ",", "_registered_patterns", "[", "name", "]", ")", "_registered_patterns", "[", "name", "]", "=", "pattern_factory" ]
Registers the specified pattern factory. *name* (:class:`str`) is the name to register the pattern factory under. *pattern_factory* (:class:`~collections.abc.Callable`) is used to compile patterns. It must accept an uncompiled pattern (:class:`str`) and return the compiled pattern (:class:`.Pattern`). *override* (:class:`bool` or :data:`None`) optionally is whether to allow overriding an already registered pattern under the same name (:data:`True`), instead of raising an :exc:`AlreadyRegisteredError` (:data:`False`). Default is :data:`None` for :data:`False`.
[ "Registers", "the", "specified", "pattern", "factory", "." ]
train
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/util.py#L228-L250
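A sketch of registering a custom factory; SuffixPattern is hypothetical and subclasses RegexPattern, which accepts an uncompiled expression:

import re

from pathspec import util
from pathspec.pattern import RegexPattern

class SuffixPattern(RegexPattern):
    """Hypothetical pattern: match paths ending with the given suffix."""
    def __init__(self, suffix):
        super(SuffixPattern, self).__init__('^.*' + re.escape(suffix) + '$')

util.register_pattern('suffix', SuffixPattern)
# A second registration under the same name raises AlreadyRegisteredError
# unless override=True is passed.
util.register_pattern('suffix', SuffixPattern, override=True)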
cpburnz/python-path-specification
pathspec/util.py
RecursionError.message
def message(self): """ *message* (:class:`str`) is the error message. """ return "Real path {real!r} was encountered at {first!r} and then {second!r}.".format( real=self.real_path, first=self.first_path, second=self.second_path, )
python
def message(self): """ *message* (:class:`str`) is the error message. """ return "Real path {real!r} was encountered at {first!r} and then {second!r}.".format( real=self.real_path, first=self.first_path, second=self.second_path, )
[ "def", "message", "(", "self", ")", ":", "return", "\"Real path {real!r} was encountered at {first!r} and then {second!r}.\"", ".", "format", "(", "real", "=", "self", ".", "real_path", ",", "first", "=", "self", ".", "first_path", ",", "second", "=", "self", ".", "second_path", ",", ")" ]
*message* (:class:`str`) is the error message.
[ "*", "message", "*", "(", ":", "class", ":", "str", ")", "is", "the", "error", "message", "." ]
train
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/util.py#L326-L334
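A short sketch; the keyword arguments mirror how _iter_tree_next() raises this exception above:

from pathspec.util import RecursionError

err = RecursionError(real_path='/data', first_path='a', second_path='a/link')
print(err.message)
# Real path '/data' was encountered at 'a' and then 'a/link'.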
cpburnz/python-path-specification
pathspec/pattern.py
Pattern.match
def match(self, files): """ Matches this pattern against the specified files. *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains each file relative to the root directory (e.g., ``"relative/path/to/file"``). Returns an :class:`~collections.abc.Iterable` yielding each matched file path (:class:`str`). """ raise NotImplementedError("{}.{} must override match().".format(self.__class__.__module__, self.__class__.__name__))
python
def match(self, files): """ Matches this pattern against the specified files. *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains each file relative to the root directory (e.g., ``"relative/path/to/file"``). Returns an :class:`~collections.abc.Iterable` yielding each matched file path (:class:`str`). """ raise NotImplementedError("{}.{} must override match().".format(self.__class__.__module__, self.__class__.__name__))
[ "def", "match", "(", "self", ",", "files", ")", ":", "raise", "NotImplementedError", "(", "\"{}.{} must override match().\"", ".", "format", "(", "self", ".", "__class__", ".", "__module__", ",", "self", ".", "__class__", ".", "__name__", ")", ")" ]
Matches this pattern against the specified files. *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains each file relative to the root directory (e.g., ``"relative/path/to/file"``). Returns an :class:`~collections.abc.Iterable` yielding each matched file path (:class:`str`).
[ "Matches", "this", "pattern", "against", "the", "specified", "files", "." ]
train
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/pattern.py#L35-L45
cpburnz/python-path-specification
pathspec/pattern.py
RegexPattern.match
def match(self, files): """ Matches this pattern against the specified files. *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains each file relative to the root directory (e.g., "relative/path/to/file"). Returns an :class:`~collections.abc.Iterable` yielding each matched file path (:class:`str`). """ if self.include is not None: for path in files: if self.regex.match(path) is not None: yield path
python
def match(self, files): """ Matches this pattern against the specified files. *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains each file relative to the root directory (e.g., "relative/path/to/file"). Returns an :class:`~collections.abc.Iterable` yielding each matched file path (:class:`str`). """ if self.include is not None: for path in files: if self.regex.match(path) is not None: yield path
[ "def", "match", "(", "self", ",", "files", ")", ":", "if", "self", ".", "include", "is", "not", "None", ":", "for", "path", "in", "files", ":", "if", "self", ".", "regex", ".", "match", "(", "path", ")", "is", "not", "None", ":", "yield", "path" ]
Matches this pattern against the specified files. *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains each file relative to the root directory (e.g., "relative/path/to/file"). Returns an :class:`~collections.abc.Iterable` yielding each matched file path (:class:`str`).
[ "Matches", "this", "pattern", "against", "the", "specified", "files", "." ]
train
https://github.com/cpburnz/python-path-specification/blob/6fc7567a58cb68ec7d72cc287e7fb97dbe22c017/pathspec/pattern.py#L116-L129
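A sketch assuming RegexPattern also accepts a pre-compiled expression with an explicit include flag; note that regex.match() anchors at the start of the path:

import re
from pathspec.pattern import RegexPattern

pattern = RegexPattern(re.compile(r'^src/.*\.py$'), include=True)
print(list(pattern.match(['src/a.py', 'tests/b.py'])))  # ['src/a.py']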
jwhitlock/drf-cached-instances
sample_poll_app/cache.py
SampleCache.user_default_serializer
def user_default_serializer(self, obj): """Convert a User to a cached instance representation.""" if not obj: return None self.user_default_add_related_pks(obj) return dict(( ('id', obj.id), ('username', obj.username), self.field_to_json('DateTime', 'date_joined', obj.date_joined), self.field_to_json( 'PKList', 'votes', model=Choice, pks=obj._votes_pks), ))
python
def user_default_serializer(self, obj): """Convert a User to a cached instance representation.""" if not obj: return None self.user_default_add_related_pks(obj) return dict(( ('id', obj.id), ('username', obj.username), self.field_to_json('DateTime', 'date_joined', obj.date_joined), self.field_to_json( 'PKList', 'votes', model=Choice, pks=obj._votes_pks), ))
[ "def", "user_default_serializer", "(", "self", ",", "obj", ")", ":", "if", "not", "obj", ":", "return", "None", "self", ".", "user_default_add_related_pks", "(", "obj", ")", "return", "dict", "(", "(", "(", "'id'", ",", "obj", ".", "id", ")", ",", "(", "'username'", ",", "obj", ".", "username", ")", ",", "self", ".", "field_to_json", "(", "'DateTime'", ",", "'date_joined'", ",", "obj", ".", "date_joined", ")", ",", "self", ".", "field_to_json", "(", "'PKList'", ",", "'votes'", ",", "model", "=", "Choice", ",", "pks", "=", "obj", ".", "_votes_pks", ")", ",", ")", ")" ]
Convert a User to a cached instance representation.
[ "Convert", "a", "User", "to", "a", "cached", "instance", "representation", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/sample_poll_app/cache.py#L13-L24
jwhitlock/drf-cached-instances
sample_poll_app/cache.py
SampleCache.user_default_loader
def user_default_loader(self, pk):
    """Load a User from the database."""
    try:
        obj = User.objects.get(pk=pk)
    except User.DoesNotExist:
        return None
    else:
        self.user_default_add_related_pks(obj)
        return obj
python
def user_default_loader(self, pk):
    """Load a User from the database."""
    try:
        obj = User.objects.get(pk=pk)
    except User.DoesNotExist:
        return None
    else:
        self.user_default_add_related_pks(obj)
        return obj
[ "def", "user_default_loader", "(", "self", ",", "pk", ")", ":", "try", ":", "obj", "=", "User", ".", "objects", ".", "get", "(", "pk", "=", "pk", ")", "except", "User", ".", "DoesNotExist", ":", "return", "None", "else", ":", "self", ".", "user_default_add_related_pks", "(", "obj", ")", "return", "obj" ]
Load a User from the database.
[ "Load", "a", "User", "from", "the", "database", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/sample_poll_app/cache.py#L26-L34
jwhitlock/drf-cached-instances
sample_poll_app/cache.py
SampleCache.user_default_add_related_pks
def user_default_add_related_pks(self, obj):
    """Add related primary keys to a User instance."""
    if not hasattr(obj, '_votes_pks'):
        obj._votes_pks = list(obj.votes.values_list('pk', flat=True))
python
def user_default_add_related_pks(self, obj):
    """Add related primary keys to a User instance."""
    if not hasattr(obj, '_votes_pks'):
        obj._votes_pks = list(obj.votes.values_list('pk', flat=True))
[ "def", "user_default_add_related_pks", "(", "self", ",", "obj", ")", ":", "if", "not", "hasattr", "(", "obj", ",", "'_votes_pks'", ")", ":", "obj", ".", "_votes_pks", "=", "list", "(", "obj", ".", "votes", ".", "values_list", "(", "'pk'", ",", "flat", "=", "True", ")", ")" ]
Add related primary keys to a User instance.
[ "Add", "related", "primary", "keys", "to", "a", "User", "instance", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/sample_poll_app/cache.py#L36-L39
jwhitlock/drf-cached-instances
sample_poll_app/cache.py
SampleCache.group_default_invalidator
def group_default_invalidator(self, obj):
    """Invalidate cached items when the Group changes."""
    user_pks = User.objects.values_list('pk', flat=True)
    return [('User', pk, False) for pk in user_pks]
python
def group_default_invalidator(self, obj):
    """Invalidate cached items when the Group changes."""
    user_pks = User.objects.values_list('pk', flat=True)
    return [('User', pk, False) for pk in user_pks]
[ "def", "group_default_invalidator", "(", "self", ",", "obj", ")", ":", "user_pks", "=", "User", ".", "objects", ".", "values_list", "(", "'pk'", ",", "flat", "=", "True", ")", "return", "[", "(", "'User'", ",", "pk", ",", "False", ")", "for", "pk", "in", "user_pks", "]" ]
Invalidate cached items when the Group changes.
[ "Invalidate", "cached", "items", "when", "the", "Group", "changes", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/sample_poll_app/cache.py#L51-L54
jwhitlock/drf-cached-instances
sample_poll_app/cache.py
SampleCache.question_default_serializer
def question_default_serializer(self, obj):
    """Convert a Question to a cached instance representation."""
    if not obj:
        return None
    self.question_default_add_related_pks(obj)
    return dict((
        ('id', obj.id),
        ('question_text', obj.question_text),
        self.field_to_json('DateTime', 'pub_date', obj.pub_date),
        self.field_to_json(
            'PKList', 'choices', model=Choice, pks=obj._choice_pks),
    ))
python
def question_default_serializer(self, obj):
    """Convert a Question to a cached instance representation."""
    if not obj:
        return None
    self.question_default_add_related_pks(obj)
    return dict((
        ('id', obj.id),
        ('question_text', obj.question_text),
        self.field_to_json('DateTime', 'pub_date', obj.pub_date),
        self.field_to_json(
            'PKList', 'choices', model=Choice, pks=obj._choice_pks),
    ))
[ "def", "question_default_serializer", "(", "self", ",", "obj", ")", ":", "if", "not", "obj", ":", "return", "None", "self", ".", "question_default_add_related_pks", "(", "obj", ")", "return", "dict", "(", "(", "(", "'id'", ",", "obj", ".", "id", ")", ",", "(", "'question_text'", ",", "obj", ".", "question_text", ")", ",", "self", ".", "field_to_json", "(", "'DateTime'", ",", "'pub_date'", ",", "obj", ".", "pub_date", ")", ",", "self", ".", "field_to_json", "(", "'PKList'", ",", "'choices'", ",", "model", "=", "Choice", ",", "pks", "=", "obj", ".", "_choice_pks", ")", ",", ")", ")" ]
Convert a Question to a cached instance representation.
[ "Convert", "a", "Question", "to", "a", "cached", "instance", "representation", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/sample_poll_app/cache.py#L60-L71
jwhitlock/drf-cached-instances
sample_poll_app/cache.py
SampleCache.question_default_loader
def question_default_loader(self, pk):
    """Load a Question from the database."""
    try:
        obj = Question.objects.get(pk=pk)
    except Question.DoesNotExist:
        return None
    else:
        self.question_default_add_related_pks(obj)
        return obj
python
def question_default_loader(self, pk):
    """Load a Question from the database."""
    try:
        obj = Question.objects.get(pk=pk)
    except Question.DoesNotExist:
        return None
    else:
        self.question_default_add_related_pks(obj)
        return obj
[ "def", "question_default_loader", "(", "self", ",", "pk", ")", ":", "try", ":", "obj", "=", "Question", ".", "objects", ".", "get", "(", "pk", "=", "pk", ")", "except", "Question", ".", "DoesNotExist", ":", "return", "None", "else", ":", "self", ".", "question_default_add_related_pks", "(", "obj", ")", "return", "obj" ]
Load a Question from the database.
[ "Load", "a", "Question", "from", "the", "database", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/sample_poll_app/cache.py#L73-L81
jwhitlock/drf-cached-instances
sample_poll_app/cache.py
SampleCache.question_default_add_related_pks
def question_default_add_related_pks(self, obj):
    """Add related primary keys to a Question instance."""
    if not hasattr(obj, '_choice_pks'):
        obj._choice_pks = list(obj.choices.values_list('pk', flat=True))
python
def question_default_add_related_pks(self, obj):
    """Add related primary keys to a Question instance."""
    if not hasattr(obj, '_choice_pks'):
        obj._choice_pks = list(obj.choices.values_list('pk', flat=True))
[ "def", "question_default_add_related_pks", "(", "self", ",", "obj", ")", ":", "if", "not", "hasattr", "(", "obj", ",", "'_choice_pks'", ")", ":", "obj", ".", "_choice_pks", "=", "list", "(", "obj", ".", "choices", ".", "values_list", "(", "'pk'", ",", "flat", "=", "True", ")", ")" ]
Add related primary keys to a Question instance.
[ "Add", "related", "primary", "keys", "to", "a", "Question", "instance", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/sample_poll_app/cache.py#L83-L86
jwhitlock/drf-cached-instances
sample_poll_app/cache.py
SampleCache.choice_default_serializer
def choice_default_serializer(self, obj):
    """Convert a Choice to a cached instance representation."""
    if not obj:
        return None
    self.choice_default_add_related_pks(obj)
    return dict((
        ('id', obj.id),
        ('choice_text', obj.choice_text),
        self.field_to_json(
            'PK', 'question', model=Question, pk=obj.question_id),
        self.field_to_json(
            'PKList', 'voters', model=User, pks=obj._voter_pks)
    ))
python
def choice_default_serializer(self, obj):
    """Convert a Choice to a cached instance representation."""
    if not obj:
        return None
    self.choice_default_add_related_pks(obj)
    return dict((
        ('id', obj.id),
        ('choice_text', obj.choice_text),
        self.field_to_json(
            'PK', 'question', model=Question, pk=obj.question_id),
        self.field_to_json(
            'PKList', 'voters', model=User, pks=obj._voter_pks)
    ))
[ "def", "choice_default_serializer", "(", "self", ",", "obj", ")", ":", "if", "not", "obj", ":", "return", "None", "self", ".", "choice_default_add_related_pks", "(", "obj", ")", "return", "dict", "(", "(", "(", "'id'", ",", "obj", ".", "id", ")", ",", "(", "'choice_text'", ",", "obj", ".", "choice_text", ")", ",", "self", ".", "field_to_json", "(", "'PK'", ",", "'question'", ",", "model", "=", "Question", ",", "pk", "=", "obj", ".", "question_id", ")", ",", "self", ".", "field_to_json", "(", "'PKList'", ",", "'voters'", ",", "model", "=", "User", ",", "pks", "=", "obj", ".", "_voter_pks", ")", ")", ")" ]
Convert a Choice to a cached instance representation.
[ "Convert", "a", "Choice", "to", "a", "cached", "instance", "representation", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/sample_poll_app/cache.py#L92-L104
jwhitlock/drf-cached-instances
sample_poll_app/cache.py
SampleCache.choice_default_loader
def choice_default_loader(self, pk):
    """Load a Choice from the database."""
    try:
        obj = Choice.objects.get(pk=pk)
    except Choice.DoesNotExist:
        return None
    else:
        self.choice_default_add_related_pks(obj)
        return obj
python
def choice_default_loader(self, pk):
    """Load a Choice from the database."""
    try:
        obj = Choice.objects.get(pk=pk)
    except Choice.DoesNotExist:
        return None
    else:
        self.choice_default_add_related_pks(obj)
        return obj
[ "def", "choice_default_loader", "(", "self", ",", "pk", ")", ":", "try", ":", "obj", "=", "Choice", ".", "objects", ".", "get", "(", "pk", "=", "pk", ")", "except", "Choice", ".", "DoesNotExist", ":", "return", "None", "else", ":", "self", ".", "choice_default_add_related_pks", "(", "obj", ")", "return", "obj" ]
Load a Choice from the database.
[ "Load", "a", "Choice", "from", "the", "database", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/sample_poll_app/cache.py#L106-L114
jwhitlock/drf-cached-instances
sample_poll_app/cache.py
SampleCache.choice_default_add_related_pks
def choice_default_add_related_pks(self, obj):
    """Add related primary keys to a Choice instance."""
    if not hasattr(obj, '_voter_pks'):
        obj._voter_pks = obj.voters.values_list('pk', flat=True)
python
def choice_default_add_related_pks(self, obj):
    """Add related primary keys to a Choice instance."""
    if not hasattr(obj, '_voter_pks'):
        obj._voter_pks = obj.voters.values_list('pk', flat=True)
[ "def", "choice_default_add_related_pks", "(", "self", ",", "obj", ")", ":", "if", "not", "hasattr", "(", "obj", ",", "'_voter_pks'", ")", ":", "obj", ".", "_voter_pks", "=", "obj", ".", "voters", ".", "values_list", "(", "'pk'", ",", "flat", "=", "True", ")" ]
Add related primary keys to a Choice instance.
[ "Add", "related", "primary", "keys", "to", "a", "Choice", "instance", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/sample_poll_app/cache.py#L116-L119
jwhitlock/drf-cached-instances
sample_poll_app/cache.py
SampleCache.choice_default_invalidator
def choice_default_invalidator(self, obj):
    """Invalidate cached items when the Choice changes."""
    invalid = [('Question', obj.question_id, True)]
    for pk in obj.voters.values_list('pk', flat=True):
        invalid.append(('User', pk, False))
    return invalid
python
def choice_default_invalidator(self, obj):
    """Invalidate cached items when the Choice changes."""
    invalid = [('Question', obj.question_id, True)]
    for pk in obj.voters.values_list('pk', flat=True):
        invalid.append(('User', pk, False))
    return invalid
[ "def", "choice_default_invalidator", "(", "self", ",", "obj", ")", ":", "invalid", "=", "[", "(", "'Question'", ",", "obj", ".", "question_id", ",", "True", ")", "]", "for", "pk", "in", "obj", ".", "voters", ".", "values_list", "(", "'pk'", ",", "flat", "=", "True", ")", ":", "invalid", ".", "append", "(", "(", "'User'", ",", "pk", ",", "False", ")", ")", "return", "invalid" ]
Invalidate cached items when the Choice changes.
[ "Invalidate", "cached", "items", "when", "the", "Choice", "changes", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/sample_poll_app/cache.py#L121-L126
jwhitlock/drf-cached-instances
drf_cached_instances/cache.py
BaseCache.cache
def cache(self):
    """Get the Django cache interface.

    This allows disabling the cache with
    settings.USE_DRF_INSTANCE_CACHE=False. It also delays import so that
    Django Debug Toolbar will record cache requests.
    """
    if not self._cache:
        use_cache = getattr(settings, 'USE_DRF_INSTANCE_CACHE', True)
        if use_cache:
            from django.core.cache import cache
            self._cache = cache
    return self._cache
python
def cache(self):
    """Get the Django cache interface.

    This allows disabling the cache with
    settings.USE_DRF_INSTANCE_CACHE=False. It also delays import so that
    Django Debug Toolbar will record cache requests.
    """
    if not self._cache:
        use_cache = getattr(settings, 'USE_DRF_INSTANCE_CACHE', True)
        if use_cache:
            from django.core.cache import cache
            self._cache = cache
    return self._cache
[ "def", "cache", "(", "self", ")", ":", "if", "not", "self", ".", "_cache", ":", "use_cache", "=", "getattr", "(", "settings", ",", "'USE_DRF_INSTANCE_CACHE'", ",", "True", ")", "if", "use_cache", ":", "from", "django", ".", "core", ".", "cache", "import", "cache", "self", ".", "_cache", "=", "cache", "return", "self", ".", "_cache" ]
Get the Django cache interface. This allows disabling the cache with settings.USE_DRF_INSTANCE_CACHE=False. It also delays import so that Django Debug Toolbar will record cache requests.
[ "Get", "the", "Django", "cache", "interface", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/cache.py#L33-L45
jwhitlock/drf-cached-instances
drf_cached_instances/cache.py
BaseCache.delete_all_versions
def delete_all_versions(self, model_name, obj_pk):
    """Delete all versions of a cached instance."""
    if self.cache:
        for version in self.versions:
            key = self.key_for(version, model_name, obj_pk)
            self.cache.delete(key)
python
def delete_all_versions(self, model_name, obj_pk):
    """Delete all versions of a cached instance."""
    if self.cache:
        for version in self.versions:
            key = self.key_for(version, model_name, obj_pk)
            self.cache.delete(key)
[ "def", "delete_all_versions", "(", "self", ",", "model_name", ",", "obj_pk", ")", ":", "if", "self", ".", "cache", ":", "for", "version", "in", "self", ".", "versions", ":", "key", "=", "self", ".", "key_for", "(", "version", ",", "model_name", ",", "obj_pk", ")", "self", ".", "cache", ".", "delete", "(", "key", ")" ]
Delete all versions of a cached instance.
[ "Delete", "all", "versions", "of", "a", "cached", "instance", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/cache.py#L51-L56
jwhitlock/drf-cached-instances
drf_cached_instances/cache.py
BaseCache.model_function
def model_function(self, model_name, version, func_name):
    """Return the model-specific caching function."""
    assert func_name in ('serializer', 'loader', 'invalidator')
    name = "%s_%s_%s" % (model_name.lower(), version, func_name)
    return getattr(self, name)
python
def model_function(self, model_name, version, func_name):
    """Return the model-specific caching function."""
    assert func_name in ('serializer', 'loader', 'invalidator')
    name = "%s_%s_%s" % (model_name.lower(), version, func_name)
    return getattr(self, name)
[ "def", "model_function", "(", "self", ",", "model_name", ",", "version", ",", "func_name", ")", ":", "assert", "func_name", "in", "(", "'serializer'", ",", "'loader'", ",", "'invalidator'", ")", "name", "=", "\"%s_%s_%s\"", "%", "(", "model_name", ".", "lower", "(", ")", ",", "version", ",", "func_name", ")", "return", "getattr", "(", "self", ",", "name", ")" ]
Return the model-specific caching function.
[ "Return", "the", "model", "-", "specific", "caching", "function", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/cache.py#L58-L62
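The lookup above is purely name-based, so a cache subclass only has to follow the model_version_function naming convention. A quick illustration of the attribute name it resolves (values are examples):

# model_function('Choice', 'default', 'loader') builds this attribute name
# and fetches it with getattr:
name = "%s_%s_%s" % ('Choice'.lower(), 'default', 'loader')
print(name)  # 'choice_default_loader'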
jwhitlock/drf-cached-instances
drf_cached_instances/cache.py
BaseCache.field_function
def field_function(self, type_code, func_name):
    """Return the field function."""
    assert func_name in ('to_json', 'from_json')
    name = "field_%s_%s" % (type_code.lower(), func_name)
    return getattr(self, name)
python
def field_function(self, type_code, func_name):
    """Return the field function."""
    assert func_name in ('to_json', 'from_json')
    name = "field_%s_%s" % (type_code.lower(), func_name)
    return getattr(self, name)
[ "def", "field_function", "(", "self", ",", "type_code", ",", "func_name", ")", ":", "assert", "func_name", "in", "(", "'to_json'", ",", "'from_json'", ")", "name", "=", "\"field_%s_%s\"", "%", "(", "type_code", ".", "lower", "(", ")", ",", "func_name", ")", "return", "getattr", "(", "self", ",", "name", ")" ]
Return the field function.
[ "Return", "the", "field", "function", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/cache.py#L64-L68
jwhitlock/drf-cached-instances
drf_cached_instances/cache.py
BaseCache.field_to_json
def field_to_json(self, type_code, key, *args, **kwargs):
    """Convert a field to a JSON-serializable representation."""
    assert ':' not in key
    to_json = self.field_function(type_code, 'to_json')
    key_and_type = "%s:%s" % (key, type_code)
    json_value = to_json(*args, **kwargs)
    return key_and_type, json_value
python
def field_to_json(self, type_code, key, *args, **kwargs):
    """Convert a field to a JSON-serializable representation."""
    assert ':' not in key
    to_json = self.field_function(type_code, 'to_json')
    key_and_type = "%s:%s" % (key, type_code)
    json_value = to_json(*args, **kwargs)
    return key_and_type, json_value
[ "def", "field_to_json", "(", "self", ",", "type_code", ",", "key", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "assert", "':'", "not", "in", "key", "to_json", "=", "self", ".", "field_function", "(", "type_code", ",", "'to_json'", ")", "key_and_type", "=", "\"%s:%s\"", "%", "(", "key", ",", "type_code", ")", "json_value", "=", "to_json", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "key_and_type", ",", "json_value" ]
Convert a field to a JSON-serializable representation.
[ "Convert", "a", "field", "to", "a", "JSON", "-", "serializable", "representation", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/cache.py#L70-L76
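To make the key-and-type encoding concrete: the field's type code rides along in the dict key, separated by a colon, so field_from_json can later pick the matching decoder. A sketch of the pair produced for a DateTime field (the timestamp value is illustrative):

key_and_type = "%s:%s" % ('date_joined', 'DateTime')
print(key_and_type)        # 'date_joined:DateTime'
json_value = 1577934245    # whatever field_datetime_to_json returned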
jwhitlock/drf-cached-instances
drf_cached_instances/cache.py
BaseCache.field_from_json
def field_from_json(self, key_and_type, json_value):
    """Convert a JSON-serializable representation back to a field."""
    assert ':' in key_and_type
    key, type_code = key_and_type.split(':', 1)
    from_json = self.field_function(type_code, 'from_json')
    value = from_json(json_value)
    return key, value
python
def field_from_json(self, key_and_type, json_value):
    """Convert a JSON-serializable representation back to a field."""
    assert ':' in key_and_type
    key, type_code = key_and_type.split(':', 1)
    from_json = self.field_function(type_code, 'from_json')
    value = from_json(json_value)
    return key, value
[ "def", "field_from_json", "(", "self", ",", "key_and_type", ",", "json_value", ")", ":", "assert", "':'", "in", "key_and_type", "key", ",", "type_code", "=", "key_and_type", ".", "split", "(", "':'", ",", "1", ")", "from_json", "=", "self", ".", "field_function", "(", "type_code", ",", "'from_json'", ")", "value", "=", "from_json", "(", "json_value", ")", "return", "key", ",", "value" ]
Convert a JSON-serializable representation back to a field.
[ "Convert", "a", "JSON", "-", "serializable", "representation", "back", "to", "a", "field", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/cache.py#L78-L84
jwhitlock/drf-cached-instances
drf_cached_instances/cache.py
BaseCache.get_instances
def get_instances(self, object_specs, version=None):
    """Get the cached native representation for one or more objects.

    Keyword arguments:
    object_specs - A sequence of triples (model name, pk, obj):
    - model name - the name of the model
    - pk - the primary key of the instance
    - obj - the instance, or None to load it
    version - The cache version to use, or None for default

    To get the 'new object' representation, set pk and obj to None

    Return is a dictionary:
    key - (model name, pk)
    value - (native representation, cache key, object or None)
    """
    ret = dict()
    spec_keys = set()
    cache_keys = []
    version = version or self.default_version

    # Construct all the cache keys to fetch
    for model_name, obj_pk, obj in object_specs:
        assert model_name
        assert obj_pk

        # Get cache keys to fetch
        obj_key = self.key_for(version, model_name, obj_pk)
        spec_keys.add((model_name, obj_pk, obj, obj_key))
        cache_keys.append(obj_key)

    # Fetch the cache keys
    if cache_keys and self.cache:
        cache_vals = self.cache.get_many(cache_keys)
    else:
        cache_vals = {}

    # Use cached representations, or recreate
    cache_to_set = {}
    for model_name, obj_pk, obj, obj_key in spec_keys:
        # Load cached objects
        obj_val = cache_vals.get(obj_key)
        obj_native = json.loads(obj_val) if obj_val else None

        # Invalid or not set - load from database
        if not obj_native:
            if not obj:
                loader = self.model_function(model_name, version, 'loader')
                obj = loader(obj_pk)
            serializer = self.model_function(
                model_name, version, 'serializer')
            obj_native = serializer(obj) or {}
            if obj_native:
                cache_to_set[obj_key] = json.dumps(obj_native)

        # Get fields to convert
        keys = [key for key in obj_native.keys() if ':' in key]
        for key in keys:
            json_value = obj_native.pop(key)
            name, value = self.field_from_json(key, json_value)
            assert name not in obj_native
            obj_native[name] = value

        if obj_native:
            ret[(model_name, obj_pk)] = (obj_native, obj_key, obj)

    # Save any new cached representations
    if cache_to_set and self.cache:
        self.cache.set_many(cache_to_set)

    return ret
python
def get_instances(self, object_specs, version=None):
    """Get the cached native representation for one or more objects.

    Keyword arguments:
    object_specs - A sequence of triples (model name, pk, obj):
    - model name - the name of the model
    - pk - the primary key of the instance
    - obj - the instance, or None to load it
    version - The cache version to use, or None for default

    To get the 'new object' representation, set pk and obj to None

    Return is a dictionary:
    key - (model name, pk)
    value - (native representation, cache key, object or None)
    """
    ret = dict()
    spec_keys = set()
    cache_keys = []
    version = version or self.default_version

    # Construct all the cache keys to fetch
    for model_name, obj_pk, obj in object_specs:
        assert model_name
        assert obj_pk

        # Get cache keys to fetch
        obj_key = self.key_for(version, model_name, obj_pk)
        spec_keys.add((model_name, obj_pk, obj, obj_key))
        cache_keys.append(obj_key)

    # Fetch the cache keys
    if cache_keys and self.cache:
        cache_vals = self.cache.get_many(cache_keys)
    else:
        cache_vals = {}

    # Use cached representations, or recreate
    cache_to_set = {}
    for model_name, obj_pk, obj, obj_key in spec_keys:
        # Load cached objects
        obj_val = cache_vals.get(obj_key)
        obj_native = json.loads(obj_val) if obj_val else None

        # Invalid or not set - load from database
        if not obj_native:
            if not obj:
                loader = self.model_function(model_name, version, 'loader')
                obj = loader(obj_pk)
            serializer = self.model_function(
                model_name, version, 'serializer')
            obj_native = serializer(obj) or {}
            if obj_native:
                cache_to_set[obj_key] = json.dumps(obj_native)

        # Get fields to convert
        keys = [key for key in obj_native.keys() if ':' in key]
        for key in keys:
            json_value = obj_native.pop(key)
            name, value = self.field_from_json(key, json_value)
            assert name not in obj_native
            obj_native[name] = value

        if obj_native:
            ret[(model_name, obj_pk)] = (obj_native, obj_key, obj)

    # Save any new cached representations
    if cache_to_set and self.cache:
        self.cache.set_many(cache_to_set)

    return ret
[ "def", "get_instances", "(", "self", ",", "object_specs", ",", "version", "=", "None", ")", ":", "ret", "=", "dict", "(", ")", "spec_keys", "=", "set", "(", ")", "cache_keys", "=", "[", "]", "version", "=", "version", "or", "self", ".", "default_version", "# Construct all the cache keys to fetch", "for", "model_name", ",", "obj_pk", ",", "obj", "in", "object_specs", ":", "assert", "model_name", "assert", "obj_pk", "# Get cache keys to fetch", "obj_key", "=", "self", ".", "key_for", "(", "version", ",", "model_name", ",", "obj_pk", ")", "spec_keys", ".", "add", "(", "(", "model_name", ",", "obj_pk", ",", "obj", ",", "obj_key", ")", ")", "cache_keys", ".", "append", "(", "obj_key", ")", "# Fetch the cache keys", "if", "cache_keys", "and", "self", ".", "cache", ":", "cache_vals", "=", "self", ".", "cache", ".", "get_many", "(", "cache_keys", ")", "else", ":", "cache_vals", "=", "{", "}", "# Use cached representations, or recreate", "cache_to_set", "=", "{", "}", "for", "model_name", ",", "obj_pk", ",", "obj", ",", "obj_key", "in", "spec_keys", ":", "# Load cached objects", "obj_val", "=", "cache_vals", ".", "get", "(", "obj_key", ")", "obj_native", "=", "json", ".", "loads", "(", "obj_val", ")", "if", "obj_val", "else", "None", "# Invalid or not set - load from database", "if", "not", "obj_native", ":", "if", "not", "obj", ":", "loader", "=", "self", ".", "model_function", "(", "model_name", ",", "version", ",", "'loader'", ")", "obj", "=", "loader", "(", "obj_pk", ")", "serializer", "=", "self", ".", "model_function", "(", "model_name", ",", "version", ",", "'serializer'", ")", "obj_native", "=", "serializer", "(", "obj", ")", "or", "{", "}", "if", "obj_native", ":", "cache_to_set", "[", "obj_key", "]", "=", "json", ".", "dumps", "(", "obj_native", ")", "# Get fields to convert", "keys", "=", "[", "key", "for", "key", "in", "obj_native", ".", "keys", "(", ")", "if", "':'", "in", "key", "]", "for", "key", "in", "keys", ":", "json_value", "=", "obj_native", ".", "pop", "(", "key", ")", "name", ",", "value", "=", "self", ".", "field_from_json", "(", "key", ",", "json_value", ")", "assert", "name", "not", "in", "obj_native", "obj_native", "[", "name", "]", "=", "value", "if", "obj_native", ":", "ret", "[", "(", "model_name", ",", "obj_pk", ")", "]", "=", "(", "obj_native", ",", "obj_key", ",", "obj", ")", "# Save any new cached representations", "if", "cache_to_set", "and", "self", ".", "cache", ":", "self", ".", "cache", ".", "set_many", "(", "cache_to_set", ")", "return", "ret" ]
Get the cached native representation for one or more objects. Keyword arguments: object_specs - A sequence of triples (model name, pk, obj): - model name - the name of the model - pk - the primary key of the instance - obj - the instance, or None to load it version - The cache version to use, or None for default To get the 'new object' representation, set pk and obj to None Return is a dictionary: key - (model name, pk) value - (native representation, cache key, object or None)
[ "Get", "the", "cached", "native", "representation", "for", "one", "or", "more", "objects", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/cache.py#L86-L157
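A hedged usage sketch of the method above: fetching one cached Question by primary key. Here `cache` stands for a concrete BaseCache subclass instance, and the model name and pk are illustrative; passing None in the obj slot lets the loader run on a cache miss.

specs = [('Question', 1, None)]
result = cache.get_instances(specs)
native, cache_key, obj = result[('Question', 1)]
print(native['question_text'])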
jwhitlock/drf-cached-instances
drf_cached_instances/cache.py
BaseCache.update_instance
def update_instance(
        self, model_name, pk, instance=None, version=None,
        update_only=False):
    """Create or update a cached instance.

    Keyword arguments are:
    model_name - The name of the model
    pk - The primary key of the instance
    instance - The Django model instance, or None to load it
    version - Version to update, or None for all
    update_only - If False (default), then missing cache entries will be
        populated and will cause follow-on invalidation. If True, then
        only entries already in the cache will be updated and cause
        follow-on invalidation.

    Return is a list of tuples (model name, pk, version) that also need
    to be updated.
    """
    versions = [version] if version else self.versions
    invalid = []
    for version in versions:
        serializer = self.model_function(model_name, version, 'serializer')
        loader = self.model_function(model_name, version, 'loader')
        invalidator = self.model_function(
            model_name, version, 'invalidator')
        if serializer is None and loader is None and invalidator is None:
            continue

        if self.cache is None:
            continue

        # Try to load the instance
        if not instance:
            instance = loader(pk)

        if serializer:
            # Get current value, if in cache
            key = self.key_for(version, model_name, pk)
            current_raw = self.cache.get(key)
            current = json.loads(current_raw) if current_raw else None

            # Get new value
            if update_only and current_raw is None:
                new = None
            else:
                new = serializer(instance)
            deleted = not instance

            # If cache is invalid, update cache
            invalidate = (current != new) or deleted
            if invalidate:
                if deleted:
                    self.cache.delete(key)
                else:
                    self.cache.set(key, json.dumps(new))
        else:
            invalidate = True

        # Invalidate upstream caches
        if instance and invalidate:
            for upstream in invalidator(instance):
                if isinstance(upstream, str):
                    self.cache.delete(upstream)
                else:
                    m, i, immediate = upstream
                    if immediate:
                        invalidate_key = self.key_for(version, m, i)
                        self.cache.delete(invalidate_key)
                    invalid.append((m, i, version))
    return invalid
python
def update_instance(
        self, model_name, pk, instance=None, version=None,
        update_only=False):
    """Create or update a cached instance.

    Keyword arguments are:
    model_name - The name of the model
    pk - The primary key of the instance
    instance - The Django model instance, or None to load it
    version - Version to update, or None for all
    update_only - If False (default), then missing cache entries will be
        populated and will cause follow-on invalidation. If True, then
        only entries already in the cache will be updated and cause
        follow-on invalidation.

    Return is a list of tuples (model name, pk, version) that also need
    to be updated.
    """
    versions = [version] if version else self.versions
    invalid = []
    for version in versions:
        serializer = self.model_function(model_name, version, 'serializer')
        loader = self.model_function(model_name, version, 'loader')
        invalidator = self.model_function(
            model_name, version, 'invalidator')
        if serializer is None and loader is None and invalidator is None:
            continue

        if self.cache is None:
            continue

        # Try to load the instance
        if not instance:
            instance = loader(pk)

        if serializer:
            # Get current value, if in cache
            key = self.key_for(version, model_name, pk)
            current_raw = self.cache.get(key)
            current = json.loads(current_raw) if current_raw else None

            # Get new value
            if update_only and current_raw is None:
                new = None
            else:
                new = serializer(instance)
            deleted = not instance

            # If cache is invalid, update cache
            invalidate = (current != new) or deleted
            if invalidate:
                if deleted:
                    self.cache.delete(key)
                else:
                    self.cache.set(key, json.dumps(new))
        else:
            invalidate = True

        # Invalidate upstream caches
        if instance and invalidate:
            for upstream in invalidator(instance):
                if isinstance(upstream, str):
                    self.cache.delete(upstream)
                else:
                    m, i, immediate = upstream
                    if immediate:
                        invalidate_key = self.key_for(version, m, i)
                        self.cache.delete(invalidate_key)
                    invalid.append((m, i, version))
    return invalid
[ "def", "update_instance", "(", "self", ",", "model_name", ",", "pk", ",", "instance", "=", "None", ",", "version", "=", "None", ",", "update_only", "=", "False", ")", ":", "versions", "=", "[", "version", "]", "if", "version", "else", "self", ".", "versions", "invalid", "=", "[", "]", "for", "version", "in", "versions", ":", "serializer", "=", "self", ".", "model_function", "(", "model_name", ",", "version", ",", "'serializer'", ")", "loader", "=", "self", ".", "model_function", "(", "model_name", ",", "version", ",", "'loader'", ")", "invalidator", "=", "self", ".", "model_function", "(", "model_name", ",", "version", ",", "'invalidator'", ")", "if", "serializer", "is", "None", "and", "loader", "is", "None", "and", "invalidator", "is", "None", ":", "continue", "if", "self", ".", "cache", "is", "None", ":", "continue", "# Try to load the instance", "if", "not", "instance", ":", "instance", "=", "loader", "(", "pk", ")", "if", "serializer", ":", "# Get current value, if in cache", "key", "=", "self", ".", "key_for", "(", "version", ",", "model_name", ",", "pk", ")", "current_raw", "=", "self", ".", "cache", ".", "get", "(", "key", ")", "current", "=", "json", ".", "loads", "(", "current_raw", ")", "if", "current_raw", "else", "None", "# Get new value", "if", "update_only", "and", "current_raw", "is", "None", ":", "new", "=", "None", "else", ":", "new", "=", "serializer", "(", "instance", ")", "deleted", "=", "not", "instance", "# If cache is invalid, update cache", "invalidate", "=", "(", "current", "!=", "new", ")", "or", "deleted", "if", "invalidate", ":", "if", "deleted", ":", "self", ".", "cache", ".", "delete", "(", "key", ")", "else", ":", "self", ".", "cache", ".", "set", "(", "key", ",", "json", ".", "dumps", "(", "new", ")", ")", "else", ":", "invalidate", "=", "True", "# Invalidate upstream caches", "if", "instance", "and", "invalidate", ":", "for", "upstream", "in", "invalidator", "(", "instance", ")", ":", "if", "isinstance", "(", "upstream", ",", "str", ")", ":", "self", ".", "cache", ".", "delete", "(", "upstream", ")", "else", ":", "m", ",", "i", ",", "immediate", "=", "upstream", "if", "immediate", ":", "invalidate_key", "=", "self", ".", "key_for", "(", "version", ",", "m", ",", "i", ")", "self", ".", "cache", ".", "delete", "(", "invalidate_key", ")", "invalid", ".", "append", "(", "(", "m", ",", "i", ",", "version", ")", ")", "return", "invalid" ]
Create or update a cached instance. Keyword arguments are: model_name - The name of the model pk - The primary key of the instance instance - The Django model instance, or None to load it version - Version to update, or None for all update_only - If False (default), then missing cache entries will be populated and will cause follow-on invalidation. If True, then only entries already in the cache will be updated and cause follow-on invalidation. Return is a list of tuples (model name, pk, version) that also need to be updated.
[ "Create", "or", "update", "a", "cached", "instance", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/cache.py#L159-L228
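Because invalidators can name further (model, pk) pairs, the return value is meant to be drained by the caller. A sketch of such a loop, under the assumption that follow-on passes use update_only=True so they only refresh entries that were already cached (the model name and pk are illustrative):

pending = cache.update_instance('Choice', 3)
while pending:
    model_name, pk, version = pending.pop()
    pending.extend(cache.update_instance(
        model_name, pk, version=version, update_only=True))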
jwhitlock/drf-cached-instances
drf_cached_instances/cache.py
BaseCache.field_date_to_json
def field_date_to_json(self, day):
    """Convert a date to a date triple."""
    if isinstance(day, six.string_types):
        day = parse_date(day)
    return [day.year, day.month, day.day] if day else None
python
def field_date_to_json(self, day):
    """Convert a date to a date triple."""
    if isinstance(day, six.string_types):
        day = parse_date(day)
    return [day.year, day.month, day.day] if day else None
[ "def", "field_date_to_json", "(", "self", ",", "day", ")", ":", "if", "isinstance", "(", "day", ",", "six", ".", "string_types", ")", ":", "day", "=", "parse_date", "(", "day", ")", "return", "[", "day", ".", "year", ",", "day", ".", "month", ",", "day", ".", "day", "]", "if", "day", "else", "None" ]
Convert a date to a date triple.
[ "Convert", "a", "date", "to", "a", "date", "triple", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/cache.py#L238-L242
jwhitlock/drf-cached-instances
drf_cached_instances/cache.py
BaseCache.field_datetime_from_json
def field_datetime_from_json(self, json_val):
    """Convert a UTC timestamp to a UTC datetime."""
    if type(json_val) == int:
        seconds = int(json_val)
        dt = datetime.fromtimestamp(seconds, utc)
    elif json_val is None:
        dt = None
    else:
        seconds, microseconds = [int(x) for x in json_val.split('.')]
        dt = datetime.fromtimestamp(seconds, utc)
        dt += timedelta(microseconds=microseconds)
    return dt
python
def field_datetime_from_json(self, json_val):
    """Convert a UTC timestamp to a UTC datetime."""
    if type(json_val) == int:
        seconds = int(json_val)
        dt = datetime.fromtimestamp(seconds, utc)
    elif json_val is None:
        dt = None
    else:
        seconds, microseconds = [int(x) for x in json_val.split('.')]
        dt = datetime.fromtimestamp(seconds, utc)
        dt += timedelta(microseconds=microseconds)
    return dt
[ "def", "field_datetime_from_json", "(", "self", ",", "json_val", ")", ":", "if", "type", "(", "json_val", ")", "==", "int", ":", "seconds", "=", "int", "(", "json_val", ")", "dt", "=", "datetime", ".", "fromtimestamp", "(", "seconds", ",", "utc", ")", "elif", "json_val", "is", "None", ":", "dt", "=", "None", "else", ":", "seconds", ",", "microseconds", "=", "[", "int", "(", "x", ")", "for", "x", "in", "json_val", ".", "split", "(", "'.'", ")", "]", "dt", "=", "datetime", ".", "fromtimestamp", "(", "seconds", ",", "utc", ")", "dt", "+=", "timedelta", "(", "microseconds", "=", "microseconds", ")", "return", "dt" ]
Convert a UTC timestamp to a UTC datetime.
[ "Convert", "a", "UTC", "timestamp", "to", "a", "UTC", "datetime", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/cache.py#L244-L255
jwhitlock/drf-cached-instances
drf_cached_instances/cache.py
BaseCache.field_datetime_to_json
def field_datetime_to_json(self, dt):
    """Convert a datetime to a UTC timestamp w/ microsecond resolution.

    datetimes w/o timezone will be assumed to be in UTC
    """
    if isinstance(dt, six.string_types):
        dt = parse_datetime(dt)
    if not dt:
        return None
    ts = timegm(dt.utctimetuple())
    if dt.microsecond:
        return "{0}.{1:0>6d}".format(ts, dt.microsecond)
    else:
        return ts
python
def field_datetime_to_json(self, dt):
    """Convert a datetime to a UTC timestamp w/ microsecond resolution.

    datetimes w/o timezone will be assumed to be in UTC
    """
    if isinstance(dt, six.string_types):
        dt = parse_datetime(dt)
    if not dt:
        return None
    ts = timegm(dt.utctimetuple())
    if dt.microsecond:
        return "{0}.{1:0>6d}".format(ts, dt.microsecond)
    else:
        return ts
[ "def", "field_datetime_to_json", "(", "self", ",", "dt", ")", ":", "if", "isinstance", "(", "dt", ",", "six", ".", "string_types", ")", ":", "dt", "=", "parse_datetime", "(", "dt", ")", "if", "not", "dt", ":", "return", "None", "ts", "=", "timegm", "(", "dt", ".", "utctimetuple", "(", ")", ")", "if", "dt", ".", "microsecond", ":", "return", "\"{0}.{1:0>6d}\"", ".", "format", "(", "ts", ",", "dt", ".", "microsecond", ")", "else", ":", "return", "ts" ]
Convert a datetime to a UTC timestamp w/ microsecond resolution. datetimes w/o timezone will be assumed to be in UTC
[ "Convert", "a", "datetime", "to", "a", "UTC", "timestamp", "w", "/", "microsecond", "resolution", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/cache.py#L257-L270
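A worked example of the encoding above, replicating its arithmetic directly so it runs standalone (timezone.utc stands in for whatever utc tzinfo the module actually imports):

from calendar import timegm
from datetime import datetime, timezone

dt = datetime(2020, 1, 2, 3, 4, 5, 123456, tzinfo=timezone.utc)
ts = timegm(dt.utctimetuple())
print(ts)                                         # 1577934245
print("{0}.{1:0>6d}".format(ts, dt.microsecond))  # 1577934245.123456
# With microsecond=0 the method would return the bare int 1577934245,
# and field_datetime_from_json above reverses either form.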
jwhitlock/drf-cached-instances
drf_cached_instances/cache.py
BaseCache.field_timedelta_from_json
def field_timedelta_from_json(self, json_val):
    """Convert json_val to a timedelta object.

    json_val contains total number of seconds in the timedelta.
    If json_val is a string it will be converted to a float.
    """
    if isinstance(json_val, str):
        return timedelta(seconds=float(json_val))
    elif json_val is None:
        return None
    else:
        return timedelta(seconds=json_val)
python
def field_timedelta_from_json(self, json_val):
    """Convert json_val to a timedelta object.

    json_val contains total number of seconds in the timedelta.
    If json_val is a string it will be converted to a float.
    """
    if isinstance(json_val, str):
        return timedelta(seconds=float(json_val))
    elif json_val is None:
        return None
    else:
        return timedelta(seconds=json_val)
[ "def", "field_timedelta_from_json", "(", "self", ",", "json_val", ")", ":", "if", "isinstance", "(", "json_val", ",", "str", ")", ":", "return", "timedelta", "(", "seconds", "=", "float", "(", "json_val", ")", ")", "elif", "json_val", "is", "None", ":", "return", "None", "else", ":", "return", "timedelta", "(", "seconds", "=", "json_val", ")" ]
Convert json_val to a timedelta object. json_val contains total number of seconds in the timedelta. If json_val is a string it will be converted to a float.
[ "Convert", "json_val", "to", "a", "timedelta", "object", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/cache.py#L272-L283
jwhitlock/drf-cached-instances
drf_cached_instances/cache.py
BaseCache.field_timedelta_to_json
def field_timedelta_to_json(self, td):
    """Convert timedelta to value containing total number of seconds.

    If there are fractions of a second the return value will be a
    string, otherwise it will be an int.
    """
    if isinstance(td, six.string_types):
        td = parse_duration(td)
    if not td:
        return None
    if td.microseconds > 0:
        return str(td.total_seconds())
    else:
        return int(td.total_seconds())
python
def field_timedelta_to_json(self, td):
    """Convert timedelta to value containing total number of seconds.

    If there are fractions of a second the return value will be a
    string, otherwise it will be an int.
    """
    if isinstance(td, six.string_types):
        td = parse_duration(td)
    if not td:
        return None
    if td.microseconds > 0:
        return str(td.total_seconds())
    else:
        return int(td.total_seconds())
[ "def", "field_timedelta_to_json", "(", "self", ",", "td", ")", ":", "if", "isinstance", "(", "td", ",", "six", ".", "string_types", ")", ":", "td", "=", "parse_duration", "(", "td", ")", "if", "not", "td", ":", "return", "None", "if", "td", ".", "microseconds", ">", "0", ":", "return", "str", "(", "td", ".", "total_seconds", "(", ")", ")", "else", ":", "return", "int", "(", "td", ".", "total_seconds", "(", ")", ")" ]
Convert timedelta to value containing total number of seconds. If there are fractions of a second the return value will be a string, otherwise it will be an int.
[ "Convert", "timedelta", "to", "value", "containing", "total", "number", "of", "seconds", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/cache.py#L285-L298
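A quick illustration of the int-versus-string split described in the docstring:

from datetime import timedelta

print(int(timedelta(seconds=90).total_seconds()))  # 90 -> stored as int
td = timedelta(seconds=1, microseconds=500000)
print(str(td.total_seconds()))                     # '1.5' -> stored as str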
jwhitlock/drf-cached-instances
drf_cached_instances/cache.py
BaseCache.field_pklist_from_json
def field_pklist_from_json(self, data):
    """Load a PkOnlyQueryset from a JSON dict.

    This uses the same format as cached_queryset_from_json
    """
    model = get_model(data['app'], data['model'])
    return PkOnlyQueryset(self, model, data['pks'])
python
def field_pklist_from_json(self, data):
    """Load a PkOnlyQueryset from a JSON dict.

    This uses the same format as cached_queryset_from_json
    """
    model = get_model(data['app'], data['model'])
    return PkOnlyQueryset(self, model, data['pks'])
[ "def", "field_pklist_from_json", "(", "self", ",", "data", ")", ":", "model", "=", "get_model", "(", "data", "[", "'app'", "]", ",", "data", "[", "'model'", "]", ")", "return", "PkOnlyQueryset", "(", "self", ",", "model", ",", "data", "[", "'pks'", "]", ")" ]
Load a PkOnlyQueryset from a JSON dict. This uses the same format as cached_queryset_from_json
[ "Load", "a", "PkOnlyQueryset", "from", "a", "JSON", "dict", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/cache.py#L300-L306
jwhitlock/drf-cached-instances
drf_cached_instances/cache.py
BaseCache.field_pklist_to_json
def field_pklist_to_json(self, model, pks):
    """Convert a list of primary keys to a JSON dict.

    This uses the same format as cached_queryset_to_json
    """
    app_label = model._meta.app_label
    model_name = model._meta.model_name
    return {
        'app': app_label,
        'model': model_name,
        'pks': list(pks),
    }
python
def field_pklist_to_json(self, model, pks):
    """Convert a list of primary keys to a JSON dict.

    This uses the same format as cached_queryset_to_json
    """
    app_label = model._meta.app_label
    model_name = model._meta.model_name
    return {
        'app': app_label,
        'model': model_name,
        'pks': list(pks),
    }
[ "def", "field_pklist_to_json", "(", "self", ",", "model", ",", "pks", ")", ":", "app_label", "=", "model", ".", "_meta", ".", "app_label", "model_name", "=", "model", ".", "_meta", ".", "model_name", "return", "{", "'app'", ":", "app_label", ",", "'model'", ":", "model_name", ",", "'pks'", ":", "list", "(", "pks", ")", ",", "}" ]
Convert a list of primary keys to a JSON dict. This uses the same format as cached_queryset_to_json
[ "Convert", "a", "list", "of", "primary", "keys", "to", "a", "JSON", "dict", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/cache.py#L308-L319
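For a hypothetical polls.Choice model, the method above would emit a dict of this shape, which field_pklist_from_json can turn back into a PkOnlyQueryset:

# Illustrative output; app label and pks are made up.
pks_json = {'app': 'polls', 'model': 'choice', 'pks': [1, 2, 3]}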
jwhitlock/drf-cached-instances
drf_cached_instances/cache.py
BaseCache.field_pk_from_json
def field_pk_from_json(self, data):
    """Load a PkOnlyModel from a JSON dict."""
    model = get_model(data['app'], data['model'])
    return PkOnlyModel(self, model, data['pk'])
python
def field_pk_from_json(self, data):
    """Load a PkOnlyModel from a JSON dict."""
    model = get_model(data['app'], data['model'])
    return PkOnlyModel(self, model, data['pk'])
[ "def", "field_pk_from_json", "(", "self", ",", "data", ")", ":", "model", "=", "get_model", "(", "data", "[", "'app'", "]", ",", "data", "[", "'model'", "]", ")", "return", "PkOnlyModel", "(", "self", ",", "model", ",", "data", "[", "'pk'", "]", ")" ]
Load a PkOnlyModel from a JSON dict.
[ "Load", "a", "PkOnlyModel", "from", "a", "JSON", "dict", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/cache.py#L321-L324
jwhitlock/drf-cached-instances
drf_cached_instances/cache.py
BaseCache.field_pk_to_json
def field_pk_to_json(self, model, pk):
    """Convert a primary key to a JSON dict."""
    app_label = model._meta.app_label
    model_name = model._meta.model_name
    return {
        'app': app_label,
        'model': model_name,
        'pk': pk,
    }
python
def field_pk_to_json(self, model, pk):
    """Convert a primary key to a JSON dict."""
    app_label = model._meta.app_label
    model_name = model._meta.model_name
    return {
        'app': app_label,
        'model': model_name,
        'pk': pk,
    }
[ "def", "field_pk_to_json", "(", "self", ",", "model", ",", "pk", ")", ":", "app_label", "=", "model", ".", "_meta", ".", "app_label", "model_name", "=", "model", ".", "_meta", ".", "model_name", "return", "{", "'app'", ":", "app_label", ",", "'model'", ":", "model_name", ",", "'pk'", ":", "pk", ",", "}" ]
Convert a primary key to a JSON dict.
[ "Convert", "a", "primary", "key", "to", "a", "JSON", "dict", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/cache.py#L326-L334
jwhitlock/drf-cached-instances
sample_poll_app/models.py
choice_voters_changed_update_cache
def choice_voters_changed_update_cache(
        sender, instance, action, reverse, model, pk_set, **kwargs):
    """Update cache when choice.voters changes."""
    if action not in ('post_add', 'post_remove', 'post_clear'):
        # post_clear is not handled, because clear is called in
        # django.db.models.fields.related.ReverseManyRelatedObjects.__set__
        # before setting the new order
        return

    if model == User:
        assert type(instance) == Choice
        choices = [instance]
        if pk_set:
            users = list(User.objects.filter(pk__in=pk_set))
        else:
            users = []
    else:
        if pk_set:
            choices = list(Choice.objects.filter(pk__in=pk_set))
        else:
            choices = []
        users = [instance]

    from .tasks import update_cache_for_instance
    for choice in choices:
        update_cache_for_instance('Choice', choice.pk, choice)
    for user in users:
        update_cache_for_instance('User', user.pk, user)
python
def choice_voters_changed_update_cache(
        sender, instance, action, reverse, model, pk_set, **kwargs):
    """Update cache when choice.voters changes."""
    if action not in ('post_add', 'post_remove', 'post_clear'):
        # post_clear is not handled, because clear is called in
        # django.db.models.fields.related.ReverseManyRelatedObjects.__set__
        # before setting the new order
        return

    if model == User:
        assert type(instance) == Choice
        choices = [instance]
        if pk_set:
            users = list(User.objects.filter(pk__in=pk_set))
        else:
            users = []
    else:
        if pk_set:
            choices = list(Choice.objects.filter(pk__in=pk_set))
        else:
            choices = []
        users = [instance]

    from .tasks import update_cache_for_instance
    for choice in choices:
        update_cache_for_instance('Choice', choice.pk, choice)
    for user in users:
        update_cache_for_instance('User', user.pk, user)
[ "def", "choice_voters_changed_update_cache", "(", "sender", ",", "instance", ",", "action", ",", "reverse", ",", "model", ",", "pk_set", ",", "*", "*", "kwargs", ")", ":", "if", "action", "not", "in", "(", "'post_add'", ",", "'post_remove'", ",", "'post_clear'", ")", ":", "# post_clear is not handled, because clear is called in", "# django.db.models.fields.related.ReverseManyRelatedObjects.__set__", "# before setting the new order", "return", "if", "model", "==", "User", ":", "assert", "type", "(", "instance", ")", "==", "Choice", "choices", "=", "[", "instance", "]", "if", "pk_set", ":", "users", "=", "list", "(", "User", ".", "objects", ".", "filter", "(", "pk__in", "=", "pk_set", ")", ")", "else", ":", "users", "=", "[", "]", "else", ":", "if", "pk_set", ":", "choices", "=", "list", "(", "Choice", ".", "objects", ".", "filter", "(", "pk__in", "=", "pk_set", ")", ")", "else", ":", "choices", "=", "[", "]", "users", "=", "[", "instance", "]", "from", ".", "tasks", "import", "update_cache_for_instance", "for", "choice", "in", "choices", ":", "update_cache_for_instance", "(", "'Choice'", ",", "choice", ".", "pk", ",", "choice", ")", "for", "user", "in", "users", ":", "update_cache_for_instance", "(", "'User'", ",", "user", ".", "pk", ",", "user", ")" ]
Update cache when choice.voters changes.
[ "Update", "cache", "when", "choice", ".", "voters", "changes", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/sample_poll_app/models.py#L33-L60
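This handler has the standard m2m_changed signature, so wiring it up presumably follows the usual Django pattern of connecting on the through model (a sketch, not necessarily the sample app's actual registration code):

from django.db.models.signals import m2m_changed

m2m_changed.connect(
    choice_voters_changed_update_cache, sender=Choice.voters.through)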
jwhitlock/drf-cached-instances
sample_poll_app/models.py
post_delete_update_cache
def post_delete_update_cache(sender, instance, **kwargs):
    """Update the cache when an instance is deleted."""
    name = sender.__name__
    if name in cached_model_names:
        from .tasks import update_cache_for_instance
        update_cache_for_instance(name, instance.pk, instance)
python
def post_delete_update_cache(sender, instance, **kwargs):
    """Update the cache when an instance is deleted."""
    name = sender.__name__
    if name in cached_model_names:
        from .tasks import update_cache_for_instance
        update_cache_for_instance(name, instance.pk, instance)
[ "def", "post_delete_update_cache", "(", "sender", ",", "instance", ",", "*", "*", "kwargs", ")", ":", "name", "=", "sender", ".", "__name__", "if", "name", "in", "cached_model_names", ":", "from", ".", "tasks", "import", "update_cache_for_instance", "update_cache_for_instance", "(", "name", ",", "instance", ".", "pk", ",", "instance", ")" ]
Update the cache when an instance is deleted.
[ "Update", "the", "cache", "when", "an", "instance", "is", "deleted", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/sample_poll_app/models.py#L64-L69
jwhitlock/drf-cached-instances
sample_poll_app/models.py
post_save_update_cache
def post_save_update_cache(sender, instance, created, raw, **kwargs):
    """Update the cache when an instance is created or modified."""
    if raw:
        return
    name = sender.__name__
    if name in cached_model_names:
        delay_cache = getattr(instance, '_delay_cache', False)
        if not delay_cache:
            from .tasks import update_cache_for_instance
            update_cache_for_instance(name, instance.pk, instance)
python
def post_save_update_cache(sender, instance, created, raw, **kwargs):
    """Update the cache when an instance is created or modified."""
    if raw:
        return
    name = sender.__name__
    if name in cached_model_names:
        delay_cache = getattr(instance, '_delay_cache', False)
        if not delay_cache:
            from .tasks import update_cache_for_instance
            update_cache_for_instance(name, instance.pk, instance)
[ "def", "post_save_update_cache", "(", "sender", ",", "instance", ",", "created", ",", "raw", ",", "*", "*", "kwargs", ")", ":", "if", "raw", ":", "return", "name", "=", "sender", ".", "__name__", "if", "name", "in", "cached_model_names", ":", "delay_cache", "=", "getattr", "(", "instance", ",", "'_delay_cache'", ",", "False", ")", "if", "not", "delay_cache", ":", "from", ".", "tasks", "import", "update_cache_for_instance", "update_cache_for_instance", "(", "name", ",", "instance", ".", "pk", ",", "instance", ")" ]
Update the cache when an instance is created or modified.
[ "Update", "the", "cache", "when", "an", "instance", "is", "created", "or", "modified", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/sample_poll_app/models.py#L73-L82
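The _delay_cache check gives bulk writers an escape hatch: set the flag before save() and the handler skips the per-instance cache update. A hedged sketch (the Question fields and rows variable are illustrative):

for text, pub_date in rows:
    question = Question(question_text=text, pub_date=pub_date)
    question._delay_cache = True  # handler sees this and does nothing
    question.save()
# caches can then be rebuilt in one pass afterwards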
jwhitlock/drf-cached-instances
drf_cached_instances/mixins.py
CachedViewMixin.get_queryset
def get_queryset(self):
    """Get the queryset for the action.

    If action is read action, return a CachedQueryset
    Otherwise, return a Django queryset
    """
    queryset = super(CachedViewMixin, self).get_queryset()
    if self.action in ('list', 'retrieve'):
        return CachedQueryset(self.get_queryset_cache(), queryset=queryset)
    else:
        return queryset
python
def get_queryset(self):
    """Get the queryset for the action.

    If action is read action, return a CachedQueryset
    Otherwise, return a Django queryset
    """
    queryset = super(CachedViewMixin, self).get_queryset()
    if self.action in ('list', 'retrieve'):
        return CachedQueryset(self.get_queryset_cache(), queryset=queryset)
    else:
        return queryset
[ "def", "get_queryset", "(", "self", ")", ":", "queryset", "=", "super", "(", "CachedViewMixin", ",", "self", ")", ".", "get_queryset", "(", ")", "if", "self", ".", "action", "in", "(", "'list'", ",", "'retrieve'", ")", ":", "return", "CachedQueryset", "(", "self", ".", "get_queryset_cache", "(", ")", ",", "queryset", "=", "queryset", ")", "else", ":", "return", "queryset" ]
Get the queryset for the action. If action is read action, return a CachedQueryset Otherwise, return a Django queryset
[ "Get", "the", "queryset", "for", "the", "action", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/mixins.py#L17-L27
jwhitlock/drf-cached-instances
drf_cached_instances/mixins.py
CachedViewMixin.get_object
def get_object(self, queryset=None):
    """
    Return the object the view is displaying.

    Same as rest_framework.generics.GenericAPIView, but:

    - Failed assertions instead of deprecations
    """
    # Determine the base queryset to use.
    assert queryset is None, "Passing a queryset is disabled"
    queryset = self.filter_queryset(self.get_queryset())

    # Perform the lookup filtering.
    lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
    lookup = self.kwargs.get(lookup_url_kwarg, None)
    assert lookup is not None, "Other lookup methods are disabled"
    filter_kwargs = {self.lookup_field: lookup}
    obj = self.get_object_or_404(queryset, **filter_kwargs)

    # May raise a permission denied
    self.check_object_permissions(self.request, obj)

    return obj
python
def get_object(self, queryset=None):
    """
    Return the object the view is displaying.

    Same as rest_framework.generics.GenericAPIView, but:

    - Failed assertions instead of deprecations
    """
    # Determine the base queryset to use.
    assert queryset is None, "Passing a queryset is disabled"
    queryset = self.filter_queryset(self.get_queryset())

    # Perform the lookup filtering.
    lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
    lookup = self.kwargs.get(lookup_url_kwarg, None)
    assert lookup is not None, "Other lookup methods are disabled"
    filter_kwargs = {self.lookup_field: lookup}
    obj = self.get_object_or_404(queryset, **filter_kwargs)

    # May raise a permission denied
    self.check_object_permissions(self.request, obj)

    return obj
[ "def", "get_object", "(", "self", ",", "queryset", "=", "None", ")", ":", "# Determine the base queryset to use.", "assert", "queryset", "is", "None", ",", "\"Passing a queryset is disabled\"", "queryset", "=", "self", ".", "filter_queryset", "(", "self", ".", "get_queryset", "(", ")", ")", "# Perform the lookup filtering.", "lookup_url_kwarg", "=", "self", ".", "lookup_url_kwarg", "or", "self", ".", "lookup_field", "lookup", "=", "self", ".", "kwargs", ".", "get", "(", "lookup_url_kwarg", ",", "None", ")", "assert", "lookup", "is", "not", "None", ",", "\"Other lookup methods are disabled\"", "filter_kwargs", "=", "{", "self", ".", "lookup_field", ":", "lookup", "}", "obj", "=", "self", ".", "get_object_or_404", "(", "queryset", ",", "*", "*", "filter_kwargs", ")", "# May raise a permission denied", "self", ".", "check_object_permissions", "(", "self", ".", "request", ",", "obj", ")", "return", "obj" ]
Return the object the view is displaying. Same as rest_framework.generics.GenericAPIView, but: - Failed assertions instead of deprecations
[ "Return", "the", "object", "the", "view", "is", "displaying", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/mixins.py#L33-L54
jwhitlock/drf-cached-instances
drf_cached_instances/mixins.py
CachedViewMixin.get_object_or_404
def get_object_or_404(self, queryset, *filter_args, **filter_kwargs):
    """Return an object or raise a 404.

    Same as Django's standard shortcut, but make sure to raise 404
    if the filter_kwargs don't match the required types.
    """
    if isinstance(queryset, CachedQueryset):
        try:
            return queryset.get(*filter_args, **filter_kwargs)
        except queryset.model.DoesNotExist:
            raise Http404(
                'No %s matches the given query.' % queryset.model)
    else:
        return get_object_or_404(queryset, *filter_args, **filter_kwargs)
python
def get_object_or_404(self, queryset, *filter_args, **filter_kwargs):
    """Return an object or raise a 404.

    Same as Django's standard shortcut, but make sure to raise 404
    if the filter_kwargs don't match the required types.
    """
    if isinstance(queryset, CachedQueryset):
        try:
            return queryset.get(*filter_args, **filter_kwargs)
        except queryset.model.DoesNotExist:
            raise Http404(
                'No %s matches the given query.' % queryset.model)
    else:
        return get_object_or_404(queryset, *filter_args, **filter_kwargs)
[ "def", "get_object_or_404", "(", "self", ",", "queryset", ",", "*", "filter_args", ",", "*", "*", "filter_kwargs", ")", ":", "if", "isinstance", "(", "queryset", ",", "CachedQueryset", ")", ":", "try", ":", "return", "queryset", ".", "get", "(", "*", "filter_args", ",", "*", "*", "filter_kwargs", ")", "except", "queryset", ".", "model", ".", "DoesNotExist", ":", "raise", "Http404", "(", "'No %s matches the given query.'", "%", "queryset", ".", "model", ")", "else", ":", "return", "get_object_or_404", "(", "queryset", ",", "*", "filter_args", ",", "*", "*", "filter_kwargs", ")" ]
Return an object or raise a 404.

Same as Django's standard shortcut, but make sure to raise 404
if the filter_kwargs don't match the required types.
[ "Return", "an", "object", "or", "raise", "a", "404", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/mixins.py#L56-L69
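A hedged sketch of the dispatch, again reusing the hypothetical PollViewSet: a CachedQueryset lookup converts the model's DoesNotExist into an Http404 itself, while a plain queryset defers to Django's shortcut. Assumes a configured project where pk 999999 does not exist:

from django.http import Http404

view = PollViewSet()
view.action = 'retrieve'               # read action -> CachedQueryset
cached_qs = view.get_queryset()

try:
    view.get_object_or_404(cached_qs, pk=999999)
except Http404 as exc:
    print(exc)  # No <class '...Poll'> matches the given query.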
skorokithakis/jsane
jsane/traversable.py
Empty.r
def r(self, **kwargs): """ Resolve the object. This returns default (if present) or fails on an Empty. """ # by using kwargs we ensure that usage of positional arguments, as if # this object were another kind of function, will fail-fast and raise # a TypeError if 'default' in kwargs: default = kwargs.pop('default') if kwargs: raise TypeError( "Unexpected argument: {}".format(repr(next(iter(kwargs)))) ) return default else: raise JSaneException( "Key does not exist: {}".format(repr(self._key_name)) )
python
def r(self, **kwargs): """ Resolve the object. This returns default (if present) or fails on an Empty. """ # by using kwargs we ensure that usage of positional arguments, as if # this object were another kind of function, will fail-fast and raise # a TypeError if 'default' in kwargs: default = kwargs.pop('default') if kwargs: raise TypeError( "Unexpected argument: {}".format(repr(next(iter(kwargs)))) ) return default else: raise JSaneException( "Key does not exist: {}".format(repr(self._key_name)) )
[ "def", "r", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# by using kwargs we ensure that usage of positional arguments, as if", "# this object were another kind of function, will fail-fast and raise", "# a TypeError", "if", "'default'", "in", "kwargs", ":", "default", "=", "kwargs", ".", "pop", "(", "'default'", ")", "if", "kwargs", ":", "raise", "TypeError", "(", "\"Unexpected argument: {}\"", ".", "format", "(", "repr", "(", "next", "(", "iter", "(", "kwargs", ")", ")", ")", ")", ")", "return", "default", "else", ":", "raise", "JSaneException", "(", "\"Key does not exist: {}\"", ".", "format", "(", "repr", "(", "self", ".", "_key_name", ")", ")", ")" ]
Resolve the object.

This returns default (if present) or fails on an Empty.
[ "Resolve", "the", "object", "." ]
train
https://github.com/skorokithakis/jsane/blob/f63c01ab50899e207dae8427e4faef0e9d35e41e/jsane/traversable.py#L35-L54
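A hedged usage sketch of how an Empty typically arises and resolves; it assumes jsane's top-level loads helper, which wraps parsed JSON so that missing keys yield Empty instances:

import jsane
from jsane.traversable import JSaneException  # in scope of the module shown

j = jsane.loads('{"user": {"name": "alice"}}')

# A missing key produces an Empty; r() with a default swallows the miss.
print(j.user.email.r(default="n/a"))   # "n/a"

try:
    j.user.email.r()                   # no default: the lookup failure surfaces
except JSaneException as exc:
    print(exc)                         # Key does not exist: 'email'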
skorokithakis/jsane
jsane/traversable.py
Traversable.r
def r(self, **kwargs): """ Resolve the object. This will always succeed, since, if a lookup fails, an Empty instance will be returned farther upstream. """ # by using kwargs we ensure that usage of positional arguments, as if # this object were another kind of function, will fail-fast and raise # a TypeError kwargs.pop('default', None) if kwargs: raise TypeError( "Unexpected argument: {}".format(repr(next(iter(kwargs)))) ) return self._obj
python
def r(self, **kwargs): """ Resolve the object. This will always succeed, since, if a lookup fails, an Empty instance will be returned farther upstream. """ # by using kwargs we ensure that usage of positional arguments, as if # this object were another kind of function, will fail-fast and raise # a TypeError kwargs.pop('default', None) if kwargs: raise TypeError( "Unexpected argument: {}".format(repr(next(iter(kwargs)))) ) return self._obj
[ "def", "r", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# by using kwargs we ensure that usage of positional arguments, as if", "# this object were another kind of function, will fail-fast and raise", "# a TypeError", "kwargs", ".", "pop", "(", "'default'", ",", "None", ")", "if", "kwargs", ":", "raise", "TypeError", "(", "\"Unexpected argument: {}\"", ".", "format", "(", "repr", "(", "next", "(", "iter", "(", "kwargs", ")", ")", ")", ")", ")", "return", "self", ".", "_obj" ]
Resolve the object.

This will always succeed, since, if a lookup fails, an Empty
instance will be returned farther upstream.
[ "Resolve", "the", "object", "." ]
train
https://github.com/skorokithakis/jsane/blob/f63c01ab50899e207dae8427e4faef0e9d35e41e/jsane/traversable.py#L125-L140
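The counterpart sketch for the success path, under the same assumptions: when the chain stays on real data, r() simply unwraps the value, a default is accepted but ignored, and the kwargs-only signature rejects positional arguments:

import jsane

j = jsane.loads('{"config": {"retries": 3}}')

print(j.config.retries.r())            # 3
print(j.config.retries.r(default=0))   # still 3 -- the lookup succeeded

try:
    j.config.retries.r(0)              # positional args fail fast
except TypeError as exc:
    print(exc)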
jwhitlock/drf-cached-instances
sample_poll_app/tasks.py
update_cache_for_instance
def update_cache_for_instance(
        model_name, instance_pk, instance=None, version=None):
    """Update the cache for an instance, with cascading updates."""
    cache = SampleCache()
    invalid = cache.update_instance(model_name, instance_pk, instance, version)
    for invalid_name, invalid_pk, invalid_version in invalid:
        update_cache_for_instance.delay(
            invalid_name, invalid_pk, version=invalid_version)
python
def update_cache_for_instance(
        model_name, instance_pk, instance=None, version=None):
    """Update the cache for an instance, with cascading updates."""
    cache = SampleCache()
    invalid = cache.update_instance(model_name, instance_pk, instance, version)
    for invalid_name, invalid_pk, invalid_version in invalid:
        update_cache_for_instance.delay(
            invalid_name, invalid_pk, version=invalid_version)
[ "def", "update_cache_for_instance", "(", "model_name", ",", "instance_pk", ",", "instance", "=", "None", ",", "version", "=", "None", ")", ":", "cache", "=", "SampleCache", "(", ")", "invalid", "=", "cache", ".", "update_instance", "(", "model_name", ",", "instance_pk", ",", "instance", ",", "version", ")", "for", "invalid_name", ",", "invalid_pk", ",", "invalid_version", "in", "invalid", ":", "update_cache_for_instance", ".", "delay", "(", "invalid_name", ",", "invalid_pk", ",", "version", "=", "invalid_version", ")" ]
Update the cache for an instance, with cascading updates.
[ "Update", "the", "cache", "for", "an", "instance", "with", "cascading", "updates", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/sample_poll_app/tasks.py#L8-L15
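A hedged sketch of how this task is typically queued, for example from a post_save signal; the Poll model is illustrative, and the task is assumed to be registered with Celery as in the sample app:

from django.db.models.signals import post_save
from django.dispatch import receiver

from sample_poll_app.tasks import update_cache_for_instance
from .models import Poll  # hypothetical model


@receiver(post_save, sender=Poll)
def queue_cache_update(sender, instance, **kwargs):
    # Only the model name and pk are sent; cascading invalidations
    # re-queue themselves via .delay() inside the task.
    update_cache_for_instance.delay('Poll', instance.pk)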
jwhitlock/drf-cached-instances
drf_cached_instances/models.py
PkOnlyQueryset.values_list
def values_list(self, *args, **kwargs):
    """Return the primary keys as a list.

    The only valid call is values_list('pk', flat=True)
    """
    flat = kwargs.pop('flat', False)
    assert flat is True
    assert len(args) == 1
    assert args[0] == self.model._meta.pk.name
    return self.pks
python
def values_list(self, *args, **kwargs):
    """Return the primary keys as a list.

    The only valid call is values_list('pk', flat=True)
    """
    flat = kwargs.pop('flat', False)
    assert flat is True
    assert len(args) == 1
    assert args[0] == self.model._meta.pk.name
    return self.pks
[ "def", "values_list", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "flat", "=", "kwargs", ".", "pop", "(", "'flat'", ",", "False", ")", "assert", "flat", "is", "True", "assert", "len", "(", "args", ")", "==", "1", "assert", "args", "[", "0", "]", "==", "self", ".", "model", ".", "_meta", ".", "pk", ".", "name", "return", "self", ".", "pks" ]
Return the primary keys as a list. The only valid call is values_list('pk', flat=True)
[ "Return", "the", "primary", "keys", "as", "a", "list", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/models.py#L46-L55
jwhitlock/drf-cached-instances
drf_cached_instances/models.py
CachedQueryset.pks
def pks(self): """Lazy-load the primary keys.""" if self._primary_keys is None: self._primary_keys = list( self.queryset.values_list('pk', flat=True)) return self._primary_keys
python
def pks(self): """Lazy-load the primary keys.""" if self._primary_keys is None: self._primary_keys = list( self.queryset.values_list('pk', flat=True)) return self._primary_keys
[ "def", "pks", "(", "self", ")", ":", "if", "self", ".", "_primary_keys", "is", "None", ":", "self", ".", "_primary_keys", "=", "list", "(", "self", ".", "queryset", ".", "values_list", "(", "'pk'", ",", "flat", "=", "True", ")", ")", "return", "self", ".", "_primary_keys" ]
Lazy-load the primary keys.
[ "Lazy", "-", "load", "the", "primary", "keys", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/models.py#L95-L100
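These two records interlock: CachedQueryset.pks calls values_list('pk', flat=True), which is exactly the one call shape PkOnlyQueryset.values_list() permits, so a pk-only stand-in can replace a real queryset here without a database hit. A minimal, self-contained mimic of that contract (the real constructors are not shown in these records):

class FakeModel:
    class _meta:
        class pk:
            name = 'pk'


class PkOnlyMimic:
    """Stand-in mirroring the values_list() contract above."""
    model = FakeModel

    def __init__(self, pks):
        self.pks = pks

    def values_list(self, *args, **kwargs):
        flat = kwargs.pop('flat', False)
        assert flat is True
        assert len(args) == 1
        assert args[0] == self.model._meta.pk.name
        return self.pks


qs = PkOnlyMimic([1, 2, 3])
print(list(qs.values_list('pk', flat=True)))  # [1, 2, 3]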
jwhitlock/drf-cached-instances
drf_cached_instances/models.py
CachedQueryset.count
def count(self): """Return a count of instances.""" if self._primary_keys is None: return self.queryset.count() else: return len(self.pks)
python
def count(self): """Return a count of instances.""" if self._primary_keys is None: return self.queryset.count() else: return len(self.pks)
[ "def", "count", "(", "self", ")", ":", "if", "self", ".", "_primary_keys", "is", "None", ":", "return", "self", ".", "queryset", ".", "count", "(", ")", "else", ":", "return", "len", "(", "self", ".", "pks", ")" ]
Return a count of instances.
[ "Return", "a", "count", "of", "instances", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/models.py#L119-L124
jwhitlock/drf-cached-instances
drf_cached_instances/models.py
CachedQueryset.filter
def filter(self, **kwargs): """Filter the base queryset.""" assert not self._primary_keys self.queryset = self.queryset.filter(**kwargs) return self
python
def filter(self, **kwargs): """Filter the base queryset.""" assert not self._primary_keys self.queryset = self.queryset.filter(**kwargs) return self
[ "def", "filter", "(", "self", ",", "*", "*", "kwargs", ")", ":", "assert", "not", "self", ".", "_primary_keys", "self", ".", "queryset", "=", "self", ".", "queryset", ".", "filter", "(", "*", "*", "kwargs", ")", "return", "self" ]
Filter the base queryset.
[ "Filter", "the", "base", "queryset", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/models.py#L126-L130
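A hedged sketch of how filter(), count(), and pks interact on a CachedQueryset, continuing the hypothetical PollViewSet wiring (the is_open field is illustrative). The assertion in filter() is what forbids filtering once the pk list has been materialized:

view = PollViewSet()
view.action = 'list'

cached = view.get_queryset()                 # CachedQueryset
open_polls = cached.filter(is_open=True)     # delegates to the Django queryset

print(open_polls.count())   # SQL COUNT -- pks not loaded yet
_ = open_polls.pks          # materializes the primary-key list
print(open_polls.count())   # now len(pks), no extra query

# Filtering after the pks are loaded would trip the assertion:
# open_polls.filter(question__contains='?')  -> AssertionError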
jwhitlock/drf-cached-instances
drf_cached_instances/models.py
CachedQueryset.get
def get(self, *args, **kwargs):
    """Return the single item from the filtered queryset."""
    assert not args
    assert list(kwargs.keys()) == ['pk']
    pk = kwargs['pk']
    model_name = self.model.__name__
    object_spec = (model_name, pk, None)
    instances = self.cache.get_instances((object_spec,))
    try:
        model_data = instances[(model_name, pk)][0]
    except KeyError:
        raise self.model.DoesNotExist(
            "No match for %r with args %r, kwargs %r"
            % (self.model, args, kwargs))
    else:
        return CachedModel(self.model, model_data)
python
def get(self, *args, **kwargs):
    """Return the single item from the filtered queryset."""
    assert not args
    assert list(kwargs.keys()) == ['pk']
    pk = kwargs['pk']
    model_name = self.model.__name__
    object_spec = (model_name, pk, None)
    instances = self.cache.get_instances((object_spec,))
    try:
        model_data = instances[(model_name, pk)][0]
    except KeyError:
        raise self.model.DoesNotExist(
            "No match for %r with args %r, kwargs %r"
            % (self.model, args, kwargs))
    else:
        return CachedModel(self.model, model_data)
[ "def", "get", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "assert", "not", "args", "assert", "list", "(", "kwargs", ".", "keys", "(", ")", ")", "==", "[", "'pk'", "]", "pk", "=", "kwargs", "[", "'pk'", "]", "model_name", "=", "self", ".", "model", ".", "__name__", "object_spec", "=", "(", "model_name", ",", "pk", ",", "None", ")", "instances", "=", "self", ".", "cache", ".", "get_instances", "(", "(", "object_spec", ",", ")", ")", "try", ":", "model_data", "=", "instances", "[", "(", "model_name", ",", "pk", ")", "]", "[", "0", "]", "except", "KeyError", ":", "raise", "self", ".", "model", ".", "DoesNotExist", "(", "\"No match for %r with args %r, kwargs %r\"", "%", "(", "self", ".", "model", ",", "args", ",", "kwargs", ")", ")", "else", ":", "return", "CachedModel", "(", "self", ".", "model", ",", "model_data", ")" ]
Return the single item from the filtered queryset.
[ "Return", "the", "single", "item", "from", "the", "filtered", "queryset", "." ]
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/models.py#L132-L147
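A sketch of the pk-only contract of CachedQueryset.get(), under the same hypothetical wiring; attribute-style access on the returned CachedModel is assumed from its role as a model stand-in, and question is an illustrative field:

cached = view.get_queryset()

poll = cached.get(pk=1)     # CachedModel built from cached data
print(poll.question)        # assumed attribute access on cached fields

try:
    cached.get(pk=999999)   # miss in cache and database alike
except Poll.DoesNotExist as exc:
    print(exc)

# Anything other than a single pk kwarg fails the assertions:
# cached.get(question='x')  -> AssertionError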
thorgate/tg-react
tg_react/webpack.py
WebpackConstants.collect
def collect(cls): """ Load all constant generators from settings.WEBPACK_CONSTANT_PROCESSORS and concat their values. """ constants = {} for method_path in WebpackConstants.get_constant_processors(): method = import_string(method_path) if not callable(method): raise ImproperlyConfigured('Constant processor "%s" is not callable' % method_path) result = method(constants) if isinstance(result, dict): constants.update(result) return constants
python
def collect(cls): """ Load all constant generators from settings.WEBPACK_CONSTANT_PROCESSORS and concat their values. """ constants = {} for method_path in WebpackConstants.get_constant_processors(): method = import_string(method_path) if not callable(method): raise ImproperlyConfigured('Constant processor "%s" is not callable' % method_path) result = method(constants) if isinstance(result, dict): constants.update(result) return constants
[ "def", "collect", "(", "cls", ")", ":", "constants", "=", "{", "}", "for", "method_path", "in", "WebpackConstants", ".", "get_constant_processors", "(", ")", ":", "method", "=", "import_string", "(", "method_path", ")", "if", "not", "callable", "(", "method", ")", ":", "raise", "ImproperlyConfigured", "(", "'Constant processor \"%s\" is not callable'", "%", "method_path", ")", "result", "=", "method", "(", "constants", ")", "if", "isinstance", "(", "result", ",", "dict", ")", ":", "constants", ".", "update", "(", "result", ")", "return", "constants" ]
Load all constant generators from settings.WEBPACK_CONSTANT_PROCESSORS and concat their values.
[ "Load", "all", "constant", "generators", "from", "settings", ".", "WEBPACK_CONSTANT_PROCESSORS", "and", "concat", "their", "values", "." ]
train
https://github.com/thorgate/tg-react/blob/5a6e83d5a5c883f1a5ee4fda2226e81a468bdee3/tg_react/webpack.py#L23-L40
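A hedged sketch of a constant processor this collector could load; the WEBPACK_CONSTANT_PROCESSORS settings key comes from the docstring, and the processor signature (it receives the constants gathered so far and may return a dict to merge) is read directly off the loop body:

# myapp/webpack_constants.py (illustrative module)
def site_constants(existing_constants):
    # May inspect what earlier processors have already set.
    return {
        'SITE_NAME': 'Example',
        'API_ROOT': '/api/v1/',
    }

# settings.py
WEBPACK_CONSTANT_PROCESSORS = [
    'myapp.webpack_constants.site_constants',
]

# WebpackConstants.collect() then imports each dotted path, calls it
# with the accumulating dict, and merges any dict it returns.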
thorgate/tg-react
tg_react/api/accounts/serializers.py
phonenumber_validation
def phonenumber_validation(data):
    """
    Validates phonenumber

    Similar to phonenumber_field.validators.validate_international_phonenumber()
    but uses a different message if the country prefix is absent.
    """
    from phonenumber_field.phonenumber import to_python

    phone_number = to_python(data)
    if not phone_number:
        return data
    elif not phone_number.country_code:
        raise serializers.ValidationError(_("Phone number needs to include valid country code (E.g +37255555555)."))
    elif not phone_number.is_valid():
        raise serializers.ValidationError(_('The phone number entered is not valid.'))
    return data
python
def phonenumber_validation(data):
    """
    Validates phonenumber

    Similar to phonenumber_field.validators.validate_international_phonenumber()
    but uses a different message if the country prefix is absent.
    """
    from phonenumber_field.phonenumber import to_python

    phone_number = to_python(data)
    if not phone_number:
        return data
    elif not phone_number.country_code:
        raise serializers.ValidationError(_("Phone number needs to include valid country code (E.g +37255555555)."))
    elif not phone_number.is_valid():
        raise serializers.ValidationError(_('The phone number entered is not valid.'))
    return data
[ "def", "phonenumber_validation", "(", "data", ")", ":", "from", "phonenumber_field", ".", "phonenumber", "import", "to_python", "phone_number", "=", "to_python", "(", "data", ")", "if", "not", "phone_number", ":", "return", "data", "elif", "not", "phone_number", ".", "country_code", ":", "raise", "serializers", ".", "ValidationError", "(", "_", "(", "\"Phone number needs to include valid country code (E.g +37255555555).\"", ")", ")", "elif", "not", "phone_number", ".", "is_valid", "(", ")", ":", "raise", "serializers", ".", "ValidationError", "(", "_", "(", "'The phone number entered is not valid.'", ")", ")", "return", "data" ]
Validates phonenumber

Similar to phonenumber_field.validators.validate_international_phonenumber()
but uses a different message if the country prefix is absent.
[ "Validates", "phonenumber" ]
train
https://github.com/thorgate/tg-react/blob/5a6e83d5a5c883f1a5ee4fda2226e81a468bdee3/tg_react/api/accounts/serializers.py#L98-L113
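A hedged sketch of attaching this validator to a DRF field; the PhoneSerializer name is illustrative, and the import path follows the file location shown above:

from rest_framework import serializers

from tg_react.api.accounts.serializers import phonenumber_validation


class PhoneSerializer(serializers.Serializer):  # illustrative name
    phone = serializers.CharField(validators=[phonenumber_validation])


s = PhoneSerializer(data={'phone': '+37255555555'})
print(s.is_valid())   # expected True: country code present, number valid

s = PhoneSerializer(data={'phone': '55555555'})
print(s.is_valid())   # expected False: missing country code
print(s.errors)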