id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
243,600
jrheard/madison_wcb
madison_wcb/wcb.py
move_to
def move_to(x, y):
    """Move the brush to an absolute position.

    Arguments:
        x - a number between -250 and 250.
        y - a number between -180 and 180.
    """
    # Drive the physical bot first, then mirror the move on-screen.
    _make_cnc_request("coord/{0}/{1}".format(x, y))
    state['turtle'].goto(x, y)
python
def move_to(x, y):
    """Move the brush to an absolute position.

    Arguments:
        x - a number between -250 and 250.
        y - a number between -180 and 180.
    """
    # Drive the physical bot first, then mirror the move on-screen.
    _make_cnc_request("coord/{0}/{1}".format(x, y))
    state['turtle'].goto(x, y)
[ "def", "move_to", "(", "x", ",", "y", ")", ":", "_make_cnc_request", "(", "\"coord/{0}/{1}\"", ".", "format", "(", "x", ",", "y", ")", ")", "state", "[", "'turtle'", "]", ".", "goto", "(", "x", ",", "y", ")" ]
Moves the brush to a particular position. Arguments: x - a number between -250 and 250. y - a number between -180 and 180.
[ "Moves", "the", "brush", "to", "a", "particular", "position", "." ]
545e92e13c8fb46e0d805edef6b6146ab25373e9
https://github.com/jrheard/madison_wcb/blob/545e92e13c8fb46e0d805edef6b6146ab25373e9/madison_wcb/wcb.py#L101-L109
243,601
jrheard/madison_wcb
madison_wcb/wcb.py
turn_left
def turn_left(relative_angle):
    """Turn the brush's "turtle" to the left.

    Arguments:
        relative_angle - a number like 10. A bigger number makes the
            turtle turn farther to the left.
    """
    # Reject fractional angles up front with a beginner-friendly message.
    assert int(relative_angle) == relative_angle, \
        "turn_left() only accepts integers, but you gave it " + str(relative_angle)
    _make_cnc_request("move.left./" + str(relative_angle))
    state['turtle'].left(relative_angle)
python
def turn_left(relative_angle):
    """Turn the brush's "turtle" to the left.

    Arguments:
        relative_angle - a number like 10. A bigger number makes the
            turtle turn farther to the left.
    """
    # Reject fractional angles up front with a beginner-friendly message.
    assert int(relative_angle) == relative_angle, \
        "turn_left() only accepts integers, but you gave it " + str(relative_angle)
    _make_cnc_request("move.left./" + str(relative_angle))
    state['turtle'].left(relative_angle)
[ "def", "turn_left", "(", "relative_angle", ")", ":", "assert", "int", "(", "relative_angle", ")", "==", "relative_angle", ",", "\"turn_left() only accepts integers, but you gave it \"", "+", "str", "(", "relative_angle", ")", "_make_cnc_request", "(", "\"move.left./\"", "+", "str", "(", "relative_angle", ")", ")", "state", "[", "'turtle'", "]", ".", "left", "(", "relative_angle", ")" ]
Turns the brush's "turtle" to the left. Arguments: relative_angle - a number like 10. A bigger number makes the turtle turn farther to the left.
[ "Turns", "the", "brush", "s", "turtle", "to", "the", "left", "." ]
545e92e13c8fb46e0d805edef6b6146ab25373e9
https://github.com/jrheard/madison_wcb/blob/545e92e13c8fb46e0d805edef6b6146ab25373e9/madison_wcb/wcb.py#L135-L145
243,602
jrheard/madison_wcb
madison_wcb/wcb.py
turn_right
def turn_right(relative_angle):
    """Turn the brush's "turtle" to the right.

    Arguments:
        relative_angle - a number like 10. A bigger number makes the
            turtle turn farther to the right.
    """
    # Reject fractional angles up front with a beginner-friendly message.
    assert int(relative_angle) == relative_angle, \
        "turn_right() only accepts integers, but you gave it " + str(relative_angle)
    _make_cnc_request("move.right./" + str(relative_angle))
    state['turtle'].right(relative_angle)
python
def turn_right(relative_angle):
    """Turn the brush's "turtle" to the right.

    Arguments:
        relative_angle - a number like 10. A bigger number makes the
            turtle turn farther to the right.
    """
    # Reject fractional angles up front with a beginner-friendly message.
    assert int(relative_angle) == relative_angle, \
        "turn_right() only accepts integers, but you gave it " + str(relative_angle)
    _make_cnc_request("move.right./" + str(relative_angle))
    state['turtle'].right(relative_angle)
[ "def", "turn_right", "(", "relative_angle", ")", ":", "assert", "int", "(", "relative_angle", ")", "==", "relative_angle", ",", "\"turn_right() only accepts integers, but you gave it \"", "+", "str", "(", "relative_angle", ")", "_make_cnc_request", "(", "\"move.right./\"", "+", "str", "(", "relative_angle", ")", ")", "state", "[", "'turtle'", "]", ".", "right", "(", "relative_angle", ")" ]
Turns the brush's "turtle" to the right. Arguments: relative_angle - a number like 10. A bigger number makes the turtle turn farther to the right.
[ "Turns", "the", "brush", "s", "turtle", "to", "the", "right", "." ]
545e92e13c8fb46e0d805edef6b6146ab25373e9
https://github.com/jrheard/madison_wcb/blob/545e92e13c8fb46e0d805edef6b6146ab25373e9/madison_wcb/wcb.py#L147-L157
243,603
rosenbrockc/acorn
acorn/logging/descriptors.py
describe
def describe(o):
    """Describe the object using developer-specified attributes specific
    to each main object type.

    Returns:
        dict: keys are specific attributes tailored to the specific object
          type, though `fqdn` is common to all descriptions; values are the
          corresponding attribute values which are *simple* types that can
          easily be serialized to JSON.
    """
    # The fqdn decides which descriptor entry (if any) in the package's
    # config file applies to this object.
    from inspect import getmodule
    from acorn.logging.decoration import _fqdn
    fqdn = _fqdn(o, False)
    if fqdn is None:
        # Without an FQDN there is nothing to look up; fall back to the raw
        # type name. (Should not happen for objects we actually logged.)
        return json_describe(o, str(type(o)))

    package = fqdn.split('.')[0]
    global _package_desc
    if package not in _package_desc:
        # Lazily load and memoize this package's descriptor config; cache
        # None explicitly so we don't retry a missing config every call.
        from acorn.config import descriptors
        spack = descriptors(package)
        if spack is None:
            _package_desc[package] = None
            return json_describe(o, fqdn)
        else:
            _package_desc[package] = spack

    if _package_desc[package] is None:
        return json_describe(o, fqdn)
    elif fqdn in _package_desc[package]:
        return json_describe(o, fqdn, _package_desc[package][fqdn])
    else:
        return json_describe(o, fqdn)
python
def describe(o):
    """Describe the object using developer-specified attributes specific
    to each main object type.

    Returns:
        dict: keys are specific attributes tailored to the specific object
          type, though `fqdn` is common to all descriptions; values are the
          corresponding attribute values which are *simple* types that can
          easily be serialized to JSON.
    """
    # The fqdn decides which descriptor entry (if any) in the package's
    # config file applies to this object.
    from inspect import getmodule
    from acorn.logging.decoration import _fqdn
    fqdn = _fqdn(o, False)
    if fqdn is None:
        # Without an FQDN there is nothing to look up; fall back to the raw
        # type name. (Should not happen for objects we actually logged.)
        return json_describe(o, str(type(o)))

    package = fqdn.split('.')[0]
    global _package_desc
    if package not in _package_desc:
        # Lazily load and memoize this package's descriptor config; cache
        # None explicitly so we don't retry a missing config every call.
        from acorn.config import descriptors
        spack = descriptors(package)
        if spack is None:
            _package_desc[package] = None
            return json_describe(o, fqdn)
        else:
            _package_desc[package] = spack

    if _package_desc[package] is None:
        return json_describe(o, fqdn)
    elif fqdn in _package_desc[package]:
        return json_describe(o, fqdn, _package_desc[package][fqdn])
    else:
        return json_describe(o, fqdn)
[ "def", "describe", "(", "o", ")", ":", "#First, we need to determine the fqdn, so that we can lookup the format for", "#this object in the config file for the package.", "from", "inspect", "import", "getmodule", "from", "acorn", ".", "logging", ".", "decoration", "import", "_fqdn", "fqdn", "=", "_fqdn", "(", "o", ",", "False", ")", "if", "fqdn", "is", "None", ":", "#This should not have happened; if the FQDN couldn't be determined, then", "#we should have never logged it.", "return", "json_describe", "(", "o", ",", "str", "(", "type", "(", "o", ")", ")", ")", "package", "=", "fqdn", ".", "split", "(", "'.'", ")", "[", "0", "]", "global", "_package_desc", "if", "package", "not", "in", "_package_desc", ":", "from", "acorn", ".", "config", "import", "descriptors", "spack", "=", "descriptors", "(", "package", ")", "if", "spack", "is", "None", ":", "_package_desc", "[", "package", "]", "=", "None", "return", "json_describe", "(", "o", ",", "fqdn", ")", "else", ":", "_package_desc", "[", "package", "]", "=", "spack", "if", "_package_desc", "[", "package", "]", "is", "None", ":", "return", "json_describe", "(", "o", ",", "fqdn", ")", "elif", "fqdn", "in", "_package_desc", "[", "package", "]", ":", "return", "json_describe", "(", "o", ",", "fqdn", ",", "_package_desc", "[", "package", "]", "[", "fqdn", "]", ")", "else", ":", "return", "json_describe", "(", "o", ",", "fqdn", ")" ]
Describes the object using developer-specified attributes specific to each main object type. Returns: dict: keys are specific attributes tailored to the specific object type, though `fqdn` is common to all descriptions; values are the corresponding attribute values which are *simple* types that can easily be serialized to JSON.
[ "Describes", "the", "object", "using", "developer", "-", "specified", "attributes", "specific", "to", "each", "main", "object", "type", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/descriptors.py#L10-L46
243,604
rosenbrockc/acorn
acorn/logging/descriptors.py
_obj_getattr
def _obj_getattr(obj, fqdn, start=1): """Returns the attribute specified by the fqdn list from obj. """ node = obj for chain in fqdn.split('.')[start:]: if hasattr(node, chain): node = getattr(node, chain) else: node = None break return node
python
def _obj_getattr(obj, fqdn, start=1): """Returns the attribute specified by the fqdn list from obj. """ node = obj for chain in fqdn.split('.')[start:]: if hasattr(node, chain): node = getattr(node, chain) else: node = None break return node
[ "def", "_obj_getattr", "(", "obj", ",", "fqdn", ",", "start", "=", "1", ")", ":", "node", "=", "obj", "for", "chain", "in", "fqdn", ".", "split", "(", "'.'", ")", "[", "start", ":", "]", ":", "if", "hasattr", "(", "node", ",", "chain", ")", ":", "node", "=", "getattr", "(", "node", ",", "chain", ")", "else", ":", "node", "=", "None", "break", "return", "node" ]
Returns the attribute specified by the fqdn list from obj.
[ "Returns", "the", "attribute", "specified", "by", "the", "fqdn", "list", "from", "obj", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/descriptors.py#L48-L58
243,605
rosenbrockc/acorn
acorn/logging/descriptors.py
_package_transform
def _package_transform(package, fqdn, start=1, *args, **kwargs):
    """Apply the specified package transform with `fqdn` to the package.

    Args:
        package: imported package object.
        fqdn (str): fully-qualified domain name of function in the package.
          If it does not include the package name, then set `start=0`.
        start (int): in the '.'-split list of identifiers in `fqdn`, where
          to start looking in the package. E.g., `numpy.linalg.norm` has
          `start=1` since `package=numpy`; however, `linalg.norm` would
          have `start=0`.
    """
    # Package names can be chained; _obj_getattr walks the chain for us.
    # The first item was already checked by the calling method.
    node = _obj_getattr(package, fqdn, start)
    # If the config pointed at a real callable, apply it; otherwise hand
    # the arguments back untouched.
    if node is not None and hasattr(node, "__call__"):
        return node(*args, **kwargs)
    return args
python
def _package_transform(package, fqdn, start=1, *args, **kwargs):
    """Apply the specified package transform with `fqdn` to the package.

    Args:
        package: imported package object.
        fqdn (str): fully-qualified domain name of function in the package.
          If it does not include the package name, then set `start=0`.
        start (int): in the '.'-split list of identifiers in `fqdn`, where
          to start looking in the package. E.g., `numpy.linalg.norm` has
          `start=1` since `package=numpy`; however, `linalg.norm` would
          have `start=0`.
    """
    # Package names can be chained; _obj_getattr walks the chain for us.
    # The first item was already checked by the calling method.
    node = _obj_getattr(package, fqdn, start)
    # If the config pointed at a real callable, apply it; otherwise hand
    # the arguments back untouched.
    if node is not None and hasattr(node, "__call__"):
        return node(*args, **kwargs)
    return args
[ "def", "_package_transform", "(", "package", ",", "fqdn", ",", "start", "=", "1", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "#Our only difficulty here is that package names can be chained. We ignore", "#the first item since that was already checked for us by the calling", "#method.", "node", "=", "_obj_getattr", "(", "package", ",", "fqdn", ",", "start", ")", "#By the time this loop is finished, we should have a function to apply if", "#the developer setting up the config did a good job.", "if", "node", "is", "not", "None", "and", "hasattr", "(", "node", ",", "\"__call__\"", ")", ":", "return", "node", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "return", "args" ]
Applies the specified package transform with `fqdn` to the package. Args: package: imported package object. fqdn (str): fully-qualified domain name of function in the package. If it does not include the package name, then set `start=0`. start (int): in the '.'-split list of identifiers in `fqdn`, where to start looking in the package. E.g., `numpy.linalg.norm` has `start=1` since `package=numpy`; however, `linalg.norm` would have `start=0`.
[ "Applies", "the", "specified", "package", "transform", "with", "fqdn", "to", "the", "package", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/descriptors.py#L60-L81
243,606
rosenbrockc/acorn
acorn/logging/descriptors.py
_instance_transform
def _instance_transform(fqdn, o, *args, **kwargs):
    """Apply an instance method with name `fqdn` to `o`.

    Args:
        fqdn (str): fully-qualified domain name of the object.
        o: object to apply instance method to.
    """
    # Pass `start` positionally: the original `start=0, *args` form made
    # any extra positional *args also bind to `start`, raising
    # TypeError("got multiple values for argument 'start'").
    return _package_transform(o, fqdn, 0, *args, **kwargs)
python
def _instance_transform(fqdn, o, *args, **kwargs):
    """Apply an instance method with name `fqdn` to `o`.

    Args:
        fqdn (str): fully-qualified domain name of the object.
        o: object to apply instance method to.
    """
    # Pass `start` positionally: the original `start=0, *args` form made
    # any extra positional *args also bind to `start`, raising
    # TypeError("got multiple values for argument 'start'").
    return _package_transform(o, fqdn, 0, *args, **kwargs)
[ "def", "_instance_transform", "(", "fqdn", ",", "o", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_package_transform", "(", "o", ",", "fqdn", ",", "start", "=", "0", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Applies an instance method with name `fqdn` to `o`. Args: fqdn (str): fully-qualified domain name of the object. o: object to apply instance method to.
[ "Applies", "an", "instance", "method", "with", "name", "fqdn", "to", "o", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/descriptors.py#L117-L124
243,607
rosenbrockc/acorn
acorn/logging/descriptors.py
json_describe
def json_describe(o, fqdn, descriptor=None):
    """Describe the specified object using the directives in the JSON
    `descriptor`, if available.

    Args:
        o: object to describe.
        fqdn (str): fully-qualified domain name of the object.
        descriptor (dict): keys are attributes of `o`; values are transform
          functions to apply to the attribute so that only a single value
          is returned.

    Returns:
        dict: keys are specific attributes tailored to the specific object
          type, though `fqdn` is common to all descriptions; values are the
          corresponding attribute values which are *simple* types that can
          easily be serialized to JSON.
    """
    if descriptor is None or not isinstance(descriptor, dict):
        return {"fqdn": fqdn}

    result = {"fqdn": fqdn}
    for attr, desc in descriptor.items():
        if attr == "instance":
            # For instance methods, we repeatedly call instance methods on
            # `value`, assuming that the methods belong to `value`.
            value = o
        elif '.' in attr:
            # Walk the chain of attribute values.
            value = o
            for cattr in attr.split('.'):
                if hasattr(value, cattr):
                    value = getattr(value, cattr, "")
                else:
                    break
        else:
            # Just a one-level getattr.
            value = getattr(o, attr, "")

        if "transform" in desc:
            # Apply each configured transform in order, dispatching on its
            # name prefix.
            for transform in desc["transform"]:
                if transform[0:len("numpy")] == "numpy":
                    value = _numpy_transform(transform, value)
                elif transform[0:len("scipy")] == "scipy":
                    value = _scipy_transform(transform, value)
                elif transform[0:len("math")] == "math":
                    value = _math_transform(transform, value)
                elif "self" in transform:
                    args = desc["args"] if "args" in desc else []
                    kwds = desc["kwargs"] if "kwargs" in desc else {}
                    method = transform[len("self."):]
                    value = _instance_transform(method, value, *args, **kwds)

        if "slice" in desc:
            # Each slice spec is "name:indices" or bare "indices" (then the
            # positional index becomes the name); indices drill into value.
            for si, sl in enumerate(desc["slice"]):
                if ':' in sl:
                    name, spec = sl.split(':')
                else:
                    name, spec = str(si), sl
                slvalue = value
                for i in map(int, spec.split(',')):
                    slvalue = slvalue[i]
                result[name] = _array_convert(slvalue)
        elif "rename" in desc:
            result[desc["rename"]] = _array_convert(value)
        else:
            result[attr] = _array_convert(value)

    return result
python
def json_describe(o, fqdn, descriptor=None):
    """Describe the specified object using the directives in the JSON
    `descriptor`, if available.

    Args:
        o: object to describe.
        fqdn (str): fully-qualified domain name of the object.
        descriptor (dict): keys are attributes of `o`; values are transform
          functions to apply to the attribute so that only a single value
          is returned.

    Returns:
        dict: keys are specific attributes tailored to the specific object
          type, though `fqdn` is common to all descriptions; values are the
          corresponding attribute values which are *simple* types that can
          easily be serialized to JSON.
    """
    if descriptor is None or not isinstance(descriptor, dict):
        return {"fqdn": fqdn}

    result = {"fqdn": fqdn}
    for attr, desc in descriptor.items():
        if attr == "instance":
            # For instance methods, we repeatedly call instance methods on
            # `value`, assuming that the methods belong to `value`.
            value = o
        elif '.' in attr:
            # Walk the chain of attribute values.
            value = o
            for cattr in attr.split('.'):
                if hasattr(value, cattr):
                    value = getattr(value, cattr, "")
                else:
                    break
        else:
            # Just a one-level getattr.
            value = getattr(o, attr, "")

        if "transform" in desc:
            # Apply each configured transform in order, dispatching on its
            # name prefix.
            for transform in desc["transform"]:
                if transform[0:len("numpy")] == "numpy":
                    value = _numpy_transform(transform, value)
                elif transform[0:len("scipy")] == "scipy":
                    value = _scipy_transform(transform, value)
                elif transform[0:len("math")] == "math":
                    value = _math_transform(transform, value)
                elif "self" in transform:
                    args = desc["args"] if "args" in desc else []
                    kwds = desc["kwargs"] if "kwargs" in desc else {}
                    method = transform[len("self."):]
                    value = _instance_transform(method, value, *args, **kwds)

        if "slice" in desc:
            # Each slice spec is "name:indices" or bare "indices" (then the
            # positional index becomes the name); indices drill into value.
            for si, sl in enumerate(desc["slice"]):
                if ':' in sl:
                    name, spec = sl.split(':')
                else:
                    name, spec = str(si), sl
                slvalue = value
                for i in map(int, spec.split(',')):
                    slvalue = slvalue[i]
                result[name] = _array_convert(slvalue)
        elif "rename" in desc:
            result[desc["rename"]] = _array_convert(value)
        else:
            result[attr] = _array_convert(value)

    return result
[ "def", "json_describe", "(", "o", ",", "fqdn", ",", "descriptor", "=", "None", ")", ":", "if", "descriptor", "is", "None", "or", "not", "isinstance", "(", "descriptor", ",", "dict", ")", ":", "return", "{", "\"fqdn\"", ":", "fqdn", "}", "else", ":", "result", "=", "{", "\"fqdn\"", ":", "fqdn", "}", "for", "attr", ",", "desc", "in", "descriptor", ".", "items", "(", ")", ":", "if", "attr", "==", "\"instance\"", ":", "#For instance methods, we repeatedly call instance methods on", "#`value`, assuming that the methods belong to `value`.", "value", "=", "o", "else", ":", "if", "'.'", "in", "attr", ":", "#We get the chain of attribute values.", "value", "=", "o", "for", "cattr", "in", "attr", ".", "split", "(", "'.'", ")", ":", "if", "hasattr", "(", "value", ",", "cattr", ")", ":", "value", "=", "getattr", "(", "value", ",", "cattr", ",", "\"\"", ")", "else", ":", "break", "else", ":", "#There is just a one-level getattr. ", "value", "=", "getattr", "(", "o", ",", "attr", ",", "\"\"", ")", "if", "\"transform\"", "in", "desc", ":", "for", "transform", "in", "desc", "[", "\"transform\"", "]", ":", "if", "\"numpy\"", "==", "transform", "[", "0", ":", "len", "(", "\"numpy\"", ")", "]", ":", "value", "=", "_numpy_transform", "(", "transform", ",", "value", ")", "elif", "\"scipy\"", "==", "transform", "[", "0", ":", "len", "(", "\"scipy\"", ")", "]", ":", "value", "=", "_scipy_transform", "(", "transform", ",", "value", ")", "elif", "\"math\"", "==", "transform", "[", "0", ":", "len", "(", "\"math\"", ")", "]", ":", "value", "=", "_math_transform", "(", "transform", ",", "value", ")", "elif", "\"self\"", "in", "transform", ":", "args", "=", "desc", "[", "\"args\"", "]", "if", "\"args\"", "in", "desc", "else", "[", "]", "kwds", "=", "desc", "[", "\"kwargs\"", "]", "if", "\"kwargs\"", "in", "desc", "else", "{", "}", "method", "=", "transform", "[", "len", "(", "\"self.\"", ")", ":", "]", "value", "=", "_instance_transform", "(", "method", ",", "value", ",", "*", "args", ",", 
"*", "*", "kwds", ")", "if", "\"slice\"", "in", "desc", ":", "for", "si", ",", "sl", "in", "enumerate", "(", "desc", "[", "\"slice\"", "]", ")", ":", "if", "':'", "in", "sl", ":", "name", ",", "slice", "=", "sl", ".", "split", "(", "':'", ")", "else", ":", "name", ",", "slice", "=", "str", "(", "si", ")", ",", "sl", "slvalue", "=", "value", "for", "i", "in", "map", "(", "int", ",", "slice", ".", "split", "(", "','", ")", ")", ":", "slvalue", "=", "slvalue", "[", "i", "]", "result", "[", "name", "]", "=", "_array_convert", "(", "slvalue", ")", "else", ":", "if", "\"rename\"", "in", "desc", ":", "result", "[", "desc", "[", "\"rename\"", "]", "]", "=", "_array_convert", "(", "value", ")", "else", ":", "result", "[", "attr", "]", "=", "_array_convert", "(", "value", ")", "return", "result" ]
Describes the specified object using the directives in the JSON `descriptor`, if available. Args: o: object to describe. fqdn (str): fully-qualified domain name of the object. descriptor (dict): keys are attributes of `o`; values are transform functions to apply to the attribute so that only a single value is returned. Returns: dict: keys are specific attributes tailored to the specific object type, though `fqdn` is common to all descriptions; values are the corresponding attribute values which are *simple* types that can easily be serialized to JSON.
[ "Describes", "the", "specified", "object", "using", "the", "directives", "in", "the", "JSON", "descriptor", "if", "available", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/descriptors.py#L140-L211
243,608
dayski/requestlogs
requestlogs/mongo.py
MongoConnection.get_database_name
def get_database_name(self):
    """Extract the database name from the connection string.

    Returns:
        str: the database component of the parsed Mongo URI.

    Raises:
        ValueError: if the URI does not name a database.
    """
    uri_dict = uri_parser.parse_uri(self.host)
    database = uri_dict.get('database', None)
    if not database:
        # The original `raise "database name is missing"` raised a string,
        # which is itself a TypeError in Python 3; raise a real exception.
        raise ValueError("database name is missing")
    return database
python
def get_database_name(self):
    """Extract the database name from the connection string.

    Returns:
        str: the database component of the parsed Mongo URI.

    Raises:
        ValueError: if the URI does not name a database.
    """
    uri_dict = uri_parser.parse_uri(self.host)
    database = uri_dict.get('database', None)
    if not database:
        # The original `raise "database name is missing"` raised a string,
        # which is itself a TypeError in Python 3; raise a real exception.
        raise ValueError("database name is missing")
    return database
[ "def", "get_database_name", "(", "self", ")", ":", "uri_dict", "=", "uri_parser", ".", "parse_uri", "(", "self", ".", "host", ")", "database", "=", "uri_dict", ".", "get", "(", "'database'", ",", "None", ")", "if", "not", "database", ":", "raise", "\"database name is missing\"", "return", "database" ]
extract database from connection string
[ "extract", "database", "from", "connection", "string" ]
c22bba4ddfe2a514c1cec08cf49b2e29373db537
https://github.com/dayski/requestlogs/blob/c22bba4ddfe2a514c1cec08cf49b2e29373db537/requestlogs/mongo.py#L35-L43
243,609
dayski/requestlogs
requestlogs/mongo.py
MongoConnection.find
def find(self, start, end, limit=50, *args, **kwargs):
    """Find documents by creation date, using `start` and `end` as a range.

    Arguments:
        start: lower bound (datetime) of the creation window.
        end: upper bound (datetime) of the creation window.
        limit (int): maximum number of documents to return.

    Keyword arguments `spec` (a base filter dict to build on) and
    `collection_name` are honored when supplied.
    """
    # Build on top of any caller-supplied filter spec.
    criteria = kwargs.get('spec', dict())
    # Filter _id on the start/end dates; ObjectId.from_datetime builds the
    # boundary ids from the timestamps.
    criteria['_id'] = {
        '$gte': ObjectId.from_datetime(start),
        '$lte': ObjectId.from_datetime(end),
    }
    if not self.collection:
        # Lazily bind a collection if none was set yet.
        collection_name = kwargs.get('collection_name', MONGODB_DEFAULT_COLLECTION)
        self.set_collection(collection_name)
    return self.collection.find(criteria, limit=limit)
python
def find(self, start, end, limit=50, *args, **kwargs):
    """Find documents by creation date, using `start` and `end` as a range.

    Arguments:
        start: lower bound (datetime) of the creation window.
        end: upper bound (datetime) of the creation window.
        limit (int): maximum number of documents to return.

    Keyword arguments `spec` (a base filter dict to build on) and
    `collection_name` are honored when supplied.
    """
    # Build on top of any caller-supplied filter spec.
    criteria = kwargs.get('spec', dict())
    # Filter _id on the start/end dates; ObjectId.from_datetime builds the
    # boundary ids from the timestamps.
    criteria['_id'] = {
        '$gte': ObjectId.from_datetime(start),
        '$lte': ObjectId.from_datetime(end),
    }
    if not self.collection:
        # Lazily bind a collection if none was set yet.
        collection_name = kwargs.get('collection_name', MONGODB_DEFAULT_COLLECTION)
        self.set_collection(collection_name)
    return self.collection.find(criteria, limit=limit)
[ "def", "find", "(", "self", ",", "start", ",", "end", ",", "limit", "=", "50", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# check if spec has been specified, build on top of it", "fc", "=", "kwargs", ".", "get", "(", "'spec'", ",", "dict", "(", ")", ")", "# filter _id on start and end dates", "fc", "[", "'_id'", "]", "=", "{", "'$gte'", ":", "ObjectId", ".", "from_datetime", "(", "start", ")", ",", "'$lte'", ":", "ObjectId", ".", "from_datetime", "(", "end", ")", "}", "if", "not", "self", ".", "collection", ":", "collection_name", "=", "kwargs", ".", "get", "(", "'collection_name'", ",", "MONGODB_DEFAULT_COLLECTION", ")", "self", ".", "set_collection", "(", "collection_name", ")", "return", "self", ".", "collection", ".", "find", "(", "fc", ",", "limit", "=", "limit", ")" ]
find by creation date, using start and end dates as range
[ "find", "by", "creation", "date", "using", "start", "and", "end", "dates", "as", "range" ]
c22bba4ddfe2a514c1cec08cf49b2e29373db537
https://github.com/dayski/requestlogs/blob/c22bba4ddfe2a514c1cec08cf49b2e29373db537/requestlogs/mongo.py#L62-L78
243,610
sassoo/goldman
goldman/stores/postgres/store.py
handle_exc
def handle_exc(exc):
    """Given a database exception determine how to fail.

    Attempt to lookup a known error & abort on a meaningful error.
    Otherwise issue a generic DatabaseUnavailable exception.

    :param exc: psycopg2 exception
    """
    err = ERRORS_TABLE.get(exc.pgcode)
    if err:
        # Known pgcode: surface it as an invalid `filter` query param.
        abort(exceptions.InvalidQueryParams(detail=err, parameter='filter'))
    # Anything unrecognized becomes a generic failure.
    abort(exceptions.DatabaseUnavailable)
python
def handle_exc(exc):
    """Given a database exception determine how to fail.

    Attempt to lookup a known error & abort on a meaningful error.
    Otherwise issue a generic DatabaseUnavailable exception.

    :param exc: psycopg2 exception
    """
    err = ERRORS_TABLE.get(exc.pgcode)
    if err:
        # Known pgcode: surface it as an invalid `filter` query param.
        abort(exceptions.InvalidQueryParams(detail=err, parameter='filter'))
    # Anything unrecognized becomes a generic failure.
    abort(exceptions.DatabaseUnavailable)
[ "def", "handle_exc", "(", "exc", ")", ":", "err", "=", "ERRORS_TABLE", ".", "get", "(", "exc", ".", "pgcode", ")", "if", "err", ":", "abort", "(", "exceptions", ".", "InvalidQueryParams", "(", "*", "*", "{", "'detail'", ":", "err", ",", "'parameter'", ":", "'filter'", ",", "}", ")", ")", "abort", "(", "exceptions", ".", "DatabaseUnavailable", ")" ]
Given a database exception determine how to fail Attempt to lookup a known error & abort on a meaningful error. Otherwise issue a generic DatabaseUnavailable exception. :param exc: psycopg2 exception
[ "Given", "a", "database", "exception", "determine", "how", "to", "fail" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/stores/postgres/store.py#L56-L73
243,611
sassoo/goldman
goldman/stores/postgres/store.py
Store.dirty_vals
def dirty_vals(model):
    """Get the model's dirty values in a friendly SQL format.

    This will be a string of comma separated field names in a format for
    psycopg to substitute with parameterized inputs.

    An example, if the `rid` & `created` fields are dirty then they'll be
    converted into:

        '%(rid)s, %(created)s'

    :return: str or None
    """
    placeholders = ', '.join(
        '%({0})s'.format(field) for field in model.dirty_fields
    )
    # An empty join means nothing was dirty.
    return placeholders or None
python
def dirty_vals(model):
    """Get the model's dirty values in a friendly SQL format.

    This will be a string of comma separated field names in a format for
    psycopg to substitute with parameterized inputs.

    An example, if the `rid` & `created` fields are dirty then they'll be
    converted into:

        '%(rid)s, %(created)s'

    :return: str or None
    """
    placeholders = ', '.join(
        '%({0})s'.format(field) for field in model.dirty_fields
    )
    # An empty join means nothing was dirty.
    return placeholders or None
[ "def", "dirty_vals", "(", "model", ")", ":", "vals", "=", "[", "]", "for", "field", "in", "model", ".", "dirty_fields", ":", "vals", ".", "append", "(", "'%({0})s'", ".", "format", "(", "field", ")", ")", "vals", "=", "', '", ".", "join", "(", "vals", ")", "return", "vals", "or", "None" ]
Get the models dirty values in a friendly SQL format This will be a string of comma separated field names in a format for psycopg to substitute with parameterized inputs. An example, if the `rid` & `created` fields are dirty then they'll be converted into: '%(rid)s, %(created)s' :return: str or None
[ "Get", "the", "models", "dirty", "values", "in", "a", "friendly", "SQL", "format" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/stores/postgres/store.py#L101-L123
243,612
sassoo/goldman
goldman/stores/postgres/store.py
Store.field_cols
def field_cols(model):
    """Get the model's columns in a friendly SQL format.

    This will be a string of comma separated field names.

    TIP: to_manys are not located on the table in Postgres & are instead
         application references, so any reference to their column names
         should be pruned!

    :return: str or None
    """
    to_many = model.to_many
    # Drop the to_many fields; they have no backing column.
    cols = ', '.join(f for f in model.all_fields if f not in to_many)
    return cols or None
python
def field_cols(model):
    """Get the model's columns in a friendly SQL format.

    This will be a string of comma separated field names.

    TIP: to_manys are not located on the table in Postgres & are instead
         application references, so any reference to their column names
         should be pruned!

    :return: str or None
    """
    to_many = model.to_many
    # Drop the to_many fields; they have no backing column.
    cols = ', '.join(f for f in model.all_fields if f not in to_many)
    return cols or None
[ "def", "field_cols", "(", "model", ")", ":", "to_many", "=", "model", ".", "to_many", "cols", "=", "[", "f", "for", "f", "in", "model", ".", "all_fields", "if", "f", "not", "in", "to_many", "]", "cols", "=", "', '", ".", "join", "(", "cols", ")", "return", "cols", "or", "None" ]
Get the models columns in a friendly SQL format This will be a string of comma separated field names prefixed by the models resource type. TIP: to_manys are not located on the table in Postgres & are instead application references, so any reference to there column names should be pruned! :return: str
[ "Get", "the", "models", "columns", "in", "a", "friendly", "SQL", "format" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/stores/postgres/store.py#L126-L143
243,613
sassoo/goldman
goldman/stores/postgres/store.py
Store.sorts_query
def sorts_query(sortables):
    """Turn the Sortables into a SQL ORDER BY query.

    :param sortables: iterable of objects with `field` and `desc` attrs
    :return: str beginning with ' ORDER BY '
    """
    # Each sortable contributes "<field> ASC" or "<field> DESC".
    clauses = [
        '{} DESC'.format(s.field) if s.desc else '{} ASC'.format(s.field)
        for s in sortables
    ]
    return ' ORDER BY {}'.format(', '.join(clauses))
python
def sorts_query(sortables): """ Turn the Sortables into a SQL ORDER BY query """ stmts = [] for sortable in sortables: if sortable.desc: stmts.append('{} DESC'.format(sortable.field)) else: stmts.append('{} ASC'.format(sortable.field)) return ' ORDER BY {}'.format(', '.join(stmts))
[ "def", "sorts_query", "(", "sortables", ")", ":", "stmts", "=", "[", "]", "for", "sortable", "in", "sortables", ":", "if", "sortable", ".", "desc", ":", "stmts", ".", "append", "(", "'{} DESC'", ".", "format", "(", "sortable", ".", "field", ")", ")", "else", ":", "stmts", ".", "append", "(", "'{} ASC'", ".", "format", "(", "sortable", ".", "field", ")", ")", "return", "' ORDER BY {}'", ".", "format", "(", "', '", ".", "join", "(", "stmts", ")", ")" ]
Turn the Sortables into a SQL ORDER BY query
[ "Turn", "the", "Sortables", "into", "a", "SQL", "ORDER", "BY", "query" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/stores/postgres/store.py#L274-L285
243,614
sassoo/goldman
goldman/stores/postgres/store.py
Store.create
def create(self, model): """ Given a model object instance create it """ signals.pre_create.send(model.__class__, model=model) signals.pre_save.send(model.__class__, model=model) param = self.to_pg(model) query = """ INSERT INTO {table} ({dirty_cols}) VALUES ({dirty_vals}) RETURNING {cols}; """ query = query.format( cols=self.field_cols(model), dirty_cols=self.dirty_cols(model), dirty_vals=self.dirty_vals(model), table=model.rtype, ) result = self.query(query, param=param) signals.post_create.send(model.__class__, model=model) signals.post_save.send(model.__class__, model=model) return model.merge(result[0], clean=True)
python
def create(self, model): """ Given a model object instance create it """ signals.pre_create.send(model.__class__, model=model) signals.pre_save.send(model.__class__, model=model) param = self.to_pg(model) query = """ INSERT INTO {table} ({dirty_cols}) VALUES ({dirty_vals}) RETURNING {cols}; """ query = query.format( cols=self.field_cols(model), dirty_cols=self.dirty_cols(model), dirty_vals=self.dirty_vals(model), table=model.rtype, ) result = self.query(query, param=param) signals.post_create.send(model.__class__, model=model) signals.post_save.send(model.__class__, model=model) return model.merge(result[0], clean=True)
[ "def", "create", "(", "self", ",", "model", ")", ":", "signals", ".", "pre_create", ".", "send", "(", "model", ".", "__class__", ",", "model", "=", "model", ")", "signals", ".", "pre_save", ".", "send", "(", "model", ".", "__class__", ",", "model", "=", "model", ")", "param", "=", "self", ".", "to_pg", "(", "model", ")", "query", "=", "\"\"\"\n INSERT INTO {table} ({dirty_cols})\n VALUES ({dirty_vals})\n RETURNING {cols};\n \"\"\"", "query", "=", "query", ".", "format", "(", "cols", "=", "self", ".", "field_cols", "(", "model", ")", ",", "dirty_cols", "=", "self", ".", "dirty_cols", "(", "model", ")", ",", "dirty_vals", "=", "self", ".", "dirty_vals", "(", "model", ")", ",", "table", "=", "model", ".", "rtype", ",", ")", "result", "=", "self", ".", "query", "(", "query", ",", "param", "=", "param", ")", "signals", ".", "post_create", ".", "send", "(", "model", ".", "__class__", ",", "model", "=", "model", ")", "signals", ".", "post_save", ".", "send", "(", "model", ".", "__class__", ",", "model", "=", "model", ")", "return", "model", ".", "merge", "(", "result", "[", "0", "]", ",", "clean", "=", "True", ")" ]
Given a model object instance create it
[ "Given", "a", "model", "object", "instance", "create", "it" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/stores/postgres/store.py#L296-L321
243,615
sassoo/goldman
goldman/stores/postgres/store.py
Store.delete
def delete(self, model): """ Given a model object instance delete it """ signals.pre_delete.send(model.__class__, model=model) param = {'rid_value': self.to_pg(model)[model.rid_field]} query = """ DELETE FROM {table} WHERE {rid_field} = %(rid_value)s RETURNING {cols}; """ query = query.format( cols=self.field_cols(model), rid_field=model.rid_field, table=model.rtype, ) result = self.query(query, param=param) signals.post_delete.send(model.__class__, model=model) return result
python
def delete(self, model): """ Given a model object instance delete it """ signals.pre_delete.send(model.__class__, model=model) param = {'rid_value': self.to_pg(model)[model.rid_field]} query = """ DELETE FROM {table} WHERE {rid_field} = %(rid_value)s RETURNING {cols}; """ query = query.format( cols=self.field_cols(model), rid_field=model.rid_field, table=model.rtype, ) result = self.query(query, param=param) signals.post_delete.send(model.__class__, model=model) return result
[ "def", "delete", "(", "self", ",", "model", ")", ":", "signals", ".", "pre_delete", ".", "send", "(", "model", ".", "__class__", ",", "model", "=", "model", ")", "param", "=", "{", "'rid_value'", ":", "self", ".", "to_pg", "(", "model", ")", "[", "model", ".", "rid_field", "]", "}", "query", "=", "\"\"\"\n DELETE FROM {table}\n WHERE {rid_field} = %(rid_value)s\n RETURNING {cols};\n \"\"\"", "query", "=", "query", ".", "format", "(", "cols", "=", "self", ".", "field_cols", "(", "model", ")", ",", "rid_field", "=", "model", ".", "rid_field", ",", "table", "=", "model", ".", "rtype", ",", ")", "result", "=", "self", ".", "query", "(", "query", ",", "param", "=", "param", ")", "signals", ".", "post_delete", ".", "send", "(", "model", ".", "__class__", ",", "model", "=", "model", ")", "return", "result" ]
Given a model object instance delete it
[ "Given", "a", "model", "object", "instance", "delete", "it" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/stores/postgres/store.py#L323-L345
243,616
sassoo/goldman
goldman/stores/postgres/store.py
Store.query
def query(self, query, param=None): """ Perform a SQL based query This will abort on a failure to communicate with the database. :query: string query :params: parameters for the query :return: RecordList from psycopg2 """ with self.conn.cursor() as curs: print 'XXX QUERY', curs.mogrify(query, param) try: curs.execute(query, param) except BaseException as exc: msg = 'query: {}, param: {}, exc: {}'.format(query, param, exc) if hasattr(exc, 'pgcode'): msg = '{}, exc code: {}'.format(msg, exc.pgcode) print msg handle_exc(exc) result = curs.fetchall() return result
python
def query(self, query, param=None): """ Perform a SQL based query This will abort on a failure to communicate with the database. :query: string query :params: parameters for the query :return: RecordList from psycopg2 """ with self.conn.cursor() as curs: print 'XXX QUERY', curs.mogrify(query, param) try: curs.execute(query, param) except BaseException as exc: msg = 'query: {}, param: {}, exc: {}'.format(query, param, exc) if hasattr(exc, 'pgcode'): msg = '{}, exc code: {}'.format(msg, exc.pgcode) print msg handle_exc(exc) result = curs.fetchall() return result
[ "def", "query", "(", "self", ",", "query", ",", "param", "=", "None", ")", ":", "with", "self", ".", "conn", ".", "cursor", "(", ")", "as", "curs", ":", "print", "'XXX QUERY'", ",", "curs", ".", "mogrify", "(", "query", ",", "param", ")", "try", ":", "curs", ".", "execute", "(", "query", ",", "param", ")", "except", "BaseException", "as", "exc", ":", "msg", "=", "'query: {}, param: {}, exc: {}'", ".", "format", "(", "query", ",", "param", ",", "exc", ")", "if", "hasattr", "(", "exc", ",", "'pgcode'", ")", ":", "msg", "=", "'{}, exc code: {}'", ".", "format", "(", "msg", ",", "exc", ".", "pgcode", ")", "print", "msg", "handle_exc", "(", "exc", ")", "result", "=", "curs", ".", "fetchall", "(", ")", "return", "result" ]
Perform a SQL based query This will abort on a failure to communicate with the database. :query: string query :params: parameters for the query :return: RecordList from psycopg2
[ "Perform", "a", "SQL", "based", "query" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/stores/postgres/store.py#L381-L407
243,617
sassoo/goldman
goldman/stores/postgres/store.py
Store.search
def search(self, rtype, **kwargs): """ Search for the model by assorted criteria Quite a bit needs to happen for a search processing! The breakdown is we need to apply the query parameters where applicable. These include pagination, sorting, & filtering. Additionally, models can declare there own static filters or static query that should be applied. For static filters a model can opt-in with a `search_filters` property & for a static query to append a `search_query` property must be present. """ model = rtype_to_model(rtype) param = {} pages = self.pages_query(kwargs.get('pages')) sorts = self.sorts_query(kwargs.get( 'sorts', [Sortable(goldman.config.SORT)] )) query = """ SELECT {cols}, count(*) OVER() as _count FROM {table} """ query = query.format( cols=self.field_cols(model), table=rtype, ) filters = kwargs.get('filters', []) filters += getattr(model, 'search_filters', []) or [] if filters: where, param = self.filters_query(filters) query += where model_query = getattr(model, 'search_query', '') or '' if filters and model_query: model_query = ' AND ' + model_query elif model_query: model_query = ' WHERE ' + model_query query += model_query query += sorts query += pages signals.pre_search.send(model.__class__, model=model) result = self.query(query, param=param) models = [model(res) for res in result] if models: signals.post_search.send(model.__class__, models=result) pages = kwargs.get('pages') if pages and result: pages.total = result[0]['_count'] return models
python
def search(self, rtype, **kwargs): """ Search for the model by assorted criteria Quite a bit needs to happen for a search processing! The breakdown is we need to apply the query parameters where applicable. These include pagination, sorting, & filtering. Additionally, models can declare there own static filters or static query that should be applied. For static filters a model can opt-in with a `search_filters` property & for a static query to append a `search_query` property must be present. """ model = rtype_to_model(rtype) param = {} pages = self.pages_query(kwargs.get('pages')) sorts = self.sorts_query(kwargs.get( 'sorts', [Sortable(goldman.config.SORT)] )) query = """ SELECT {cols}, count(*) OVER() as _count FROM {table} """ query = query.format( cols=self.field_cols(model), table=rtype, ) filters = kwargs.get('filters', []) filters += getattr(model, 'search_filters', []) or [] if filters: where, param = self.filters_query(filters) query += where model_query = getattr(model, 'search_query', '') or '' if filters and model_query: model_query = ' AND ' + model_query elif model_query: model_query = ' WHERE ' + model_query query += model_query query += sorts query += pages signals.pre_search.send(model.__class__, model=model) result = self.query(query, param=param) models = [model(res) for res in result] if models: signals.post_search.send(model.__class__, models=result) pages = kwargs.get('pages') if pages and result: pages.total = result[0]['_count'] return models
[ "def", "search", "(", "self", ",", "rtype", ",", "*", "*", "kwargs", ")", ":", "model", "=", "rtype_to_model", "(", "rtype", ")", "param", "=", "{", "}", "pages", "=", "self", ".", "pages_query", "(", "kwargs", ".", "get", "(", "'pages'", ")", ")", "sorts", "=", "self", ".", "sorts_query", "(", "kwargs", ".", "get", "(", "'sorts'", ",", "[", "Sortable", "(", "goldman", ".", "config", ".", "SORT", ")", "]", ")", ")", "query", "=", "\"\"\"\n SELECT {cols}, count(*) OVER() as _count\n FROM {table}\n \"\"\"", "query", "=", "query", ".", "format", "(", "cols", "=", "self", ".", "field_cols", "(", "model", ")", ",", "table", "=", "rtype", ",", ")", "filters", "=", "kwargs", ".", "get", "(", "'filters'", ",", "[", "]", ")", "filters", "+=", "getattr", "(", "model", ",", "'search_filters'", ",", "[", "]", ")", "or", "[", "]", "if", "filters", ":", "where", ",", "param", "=", "self", ".", "filters_query", "(", "filters", ")", "query", "+=", "where", "model_query", "=", "getattr", "(", "model", ",", "'search_query'", ",", "''", ")", "or", "''", "if", "filters", "and", "model_query", ":", "model_query", "=", "' AND '", "+", "model_query", "elif", "model_query", ":", "model_query", "=", "' WHERE '", "+", "model_query", "query", "+=", "model_query", "query", "+=", "sorts", "query", "+=", "pages", "signals", ".", "pre_search", ".", "send", "(", "model", ".", "__class__", ",", "model", "=", "model", ")", "result", "=", "self", ".", "query", "(", "query", ",", "param", "=", "param", ")", "models", "=", "[", "model", "(", "res", ")", "for", "res", "in", "result", "]", "if", "models", ":", "signals", ".", "post_search", ".", "send", "(", "model", ".", "__class__", ",", "models", "=", "result", ")", "pages", "=", "kwargs", ".", "get", "(", "'pages'", ")", "if", "pages", "and", "result", ":", "pages", ".", "total", "=", "result", "[", "0", "]", "[", "'_count'", "]", "return", "models" ]
Search for the model by assorted criteria Quite a bit needs to happen for a search processing! The breakdown is we need to apply the query parameters where applicable. These include pagination, sorting, & filtering. Additionally, models can declare there own static filters or static query that should be applied. For static filters a model can opt-in with a `search_filters` property & for a static query to append a `search_query` property must be present.
[ "Search", "for", "the", "model", "by", "assorted", "criteria" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/stores/postgres/store.py#L409-L470
243,618
sassoo/goldman
goldman/stores/postgres/store.py
Store.update
def update(self, model): """ Given a model object instance update it """ signals.pre_update.send(model.__class__, model=model) signals.pre_save.send(model.__class__, model=model) param = self.to_pg(model) param['rid_value'] = param[model.rid_field] query = """ UPDATE {table} SET ({dirty_cols}) = ({dirty_vals}) WHERE {rid_field} = %(rid_value)s RETURNING {cols}; """ query = query.format( cols=self.field_cols(model), dirty_cols=self.dirty_cols(model), dirty_vals=self.dirty_vals(model), rid_field=model.rid_field, table=model.rtype, ) result = self.query(query, param=param) signals.post_update.send(model.__class__, model=model) signals.post_save.send(model.__class__, model=model) return model.merge(result[0], clean=True)
python
def update(self, model): """ Given a model object instance update it """ signals.pre_update.send(model.__class__, model=model) signals.pre_save.send(model.__class__, model=model) param = self.to_pg(model) param['rid_value'] = param[model.rid_field] query = """ UPDATE {table} SET ({dirty_cols}) = ({dirty_vals}) WHERE {rid_field} = %(rid_value)s RETURNING {cols}; """ query = query.format( cols=self.field_cols(model), dirty_cols=self.dirty_cols(model), dirty_vals=self.dirty_vals(model), rid_field=model.rid_field, table=model.rtype, ) result = self.query(query, param=param) signals.post_update.send(model.__class__, model=model) signals.post_save.send(model.__class__, model=model) return model.merge(result[0], clean=True)
[ "def", "update", "(", "self", ",", "model", ")", ":", "signals", ".", "pre_update", ".", "send", "(", "model", ".", "__class__", ",", "model", "=", "model", ")", "signals", ".", "pre_save", ".", "send", "(", "model", ".", "__class__", ",", "model", "=", "model", ")", "param", "=", "self", ".", "to_pg", "(", "model", ")", "param", "[", "'rid_value'", "]", "=", "param", "[", "model", ".", "rid_field", "]", "query", "=", "\"\"\"\n UPDATE {table}\n SET ({dirty_cols}) = ({dirty_vals})\n WHERE {rid_field} = %(rid_value)s\n RETURNING {cols};\n \"\"\"", "query", "=", "query", ".", "format", "(", "cols", "=", "self", ".", "field_cols", "(", "model", ")", ",", "dirty_cols", "=", "self", ".", "dirty_cols", "(", "model", ")", ",", "dirty_vals", "=", "self", ".", "dirty_vals", "(", "model", ")", ",", "rid_field", "=", "model", ".", "rid_field", ",", "table", "=", "model", ".", "rtype", ",", ")", "result", "=", "self", ".", "query", "(", "query", ",", "param", "=", "param", ")", "signals", ".", "post_update", ".", "send", "(", "model", ".", "__class__", ",", "model", "=", "model", ")", "signals", ".", "post_save", ".", "send", "(", "model", ".", "__class__", ",", "model", "=", "model", ")", "return", "model", ".", "merge", "(", "result", "[", "0", "]", ",", "clean", "=", "True", ")" ]
Given a model object instance update it
[ "Given", "a", "model", "object", "instance", "update", "it" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/stores/postgres/store.py#L472-L501
243,619
callowayproject/Transmogrify
transmogrify/network.py
get_path
def get_path(environ): """ Get the path """ from wsgiref import util request_uri = environ.get('REQUEST_URI', environ.get('RAW_URI', '')) if request_uri == '': uri = util.request_uri(environ) host = environ.get('HTTP_HOST', '') scheme = util.guess_scheme(environ) prefix = "{scheme}://{host}".format(scheme=scheme, host=host) request_uri = uri.replace(prefix, '') return request_uri
python
def get_path(environ): """ Get the path """ from wsgiref import util request_uri = environ.get('REQUEST_URI', environ.get('RAW_URI', '')) if request_uri == '': uri = util.request_uri(environ) host = environ.get('HTTP_HOST', '') scheme = util.guess_scheme(environ) prefix = "{scheme}://{host}".format(scheme=scheme, host=host) request_uri = uri.replace(prefix, '') return request_uri
[ "def", "get_path", "(", "environ", ")", ":", "from", "wsgiref", "import", "util", "request_uri", "=", "environ", ".", "get", "(", "'REQUEST_URI'", ",", "environ", ".", "get", "(", "'RAW_URI'", ",", "''", ")", ")", "if", "request_uri", "==", "''", ":", "uri", "=", "util", ".", "request_uri", "(", "environ", ")", "host", "=", "environ", ".", "get", "(", "'HTTP_HOST'", ",", "''", ")", "scheme", "=", "util", ".", "guess_scheme", "(", "environ", ")", "prefix", "=", "\"{scheme}://{host}\"", ".", "format", "(", "scheme", "=", "scheme", ",", "host", "=", "host", ")", "request_uri", "=", "uri", ".", "replace", "(", "prefix", ",", "''", ")", "return", "request_uri" ]
Get the path
[ "Get", "the", "path" ]
f1f891b8b923b3a1ede5eac7f60531c1c472379e
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/network.py#L9-L21
243,620
callowayproject/Transmogrify
transmogrify/network.py
handle_purge
def handle_purge(environ, start_response): """ Handle a PURGE request. """ from utils import is_valid_security, get_cached_files from settings import DEBUG server = environ['SERVER_NAME'] try: request_uri = get_path(environ) path_and_query = request_uri.lstrip("/") query_string = environ.get('QUERY_STRING', '') if is_valid_security('PURGE', query_string): cached_files = get_cached_files(path_and_query, server) for i in cached_files: try: os.remove(i) except OSError as e: return do_500(environ, start_response, e.message) start_response("204 No Content", []) return [] else: return do_405(environ, start_response) except Http404 as e: return do_404(environ, start_response, e.message, DEBUG)
python
def handle_purge(environ, start_response): """ Handle a PURGE request. """ from utils import is_valid_security, get_cached_files from settings import DEBUG server = environ['SERVER_NAME'] try: request_uri = get_path(environ) path_and_query = request_uri.lstrip("/") query_string = environ.get('QUERY_STRING', '') if is_valid_security('PURGE', query_string): cached_files = get_cached_files(path_and_query, server) for i in cached_files: try: os.remove(i) except OSError as e: return do_500(environ, start_response, e.message) start_response("204 No Content", []) return [] else: return do_405(environ, start_response) except Http404 as e: return do_404(environ, start_response, e.message, DEBUG)
[ "def", "handle_purge", "(", "environ", ",", "start_response", ")", ":", "from", "utils", "import", "is_valid_security", ",", "get_cached_files", "from", "settings", "import", "DEBUG", "server", "=", "environ", "[", "'SERVER_NAME'", "]", "try", ":", "request_uri", "=", "get_path", "(", "environ", ")", "path_and_query", "=", "request_uri", ".", "lstrip", "(", "\"/\"", ")", "query_string", "=", "environ", ".", "get", "(", "'QUERY_STRING'", ",", "''", ")", "if", "is_valid_security", "(", "'PURGE'", ",", "query_string", ")", ":", "cached_files", "=", "get_cached_files", "(", "path_and_query", ",", "server", ")", "for", "i", "in", "cached_files", ":", "try", ":", "os", ".", "remove", "(", "i", ")", "except", "OSError", "as", "e", ":", "return", "do_500", "(", "environ", ",", "start_response", ",", "e", ".", "message", ")", "start_response", "(", "\"204 No Content\"", ",", "[", "]", ")", "return", "[", "]", "else", ":", "return", "do_405", "(", "environ", ",", "start_response", ")", "except", "Http404", "as", "e", ":", "return", "do_404", "(", "environ", ",", "start_response", ",", "e", ".", "message", ",", "DEBUG", ")" ]
Handle a PURGE request.
[ "Handle", "a", "PURGE", "request", "." ]
f1f891b8b923b3a1ede5eac7f60531c1c472379e
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/network.py#L24-L47
243,621
nefarioustim/parker
parker/crawlmodel.py
get_instance
def get_instance(page_to_crawl): """Return an instance of CrawlModel.""" global _instances if isinstance(page_to_crawl, basestring): uri = page_to_crawl page_to_crawl = crawlpage.get_instance(uri) elif isinstance(page_to_crawl, crawlpage.CrawlPage): uri = page_to_crawl.uri else: raise TypeError( "get_instance() expects a parker.CrawlPage " "or basestring derivative." ) try: instance = _instances[uri] except KeyError: instance = CrawlModel(page_to_crawl) _instances[uri] = instance return instance
python
def get_instance(page_to_crawl): """Return an instance of CrawlModel.""" global _instances if isinstance(page_to_crawl, basestring): uri = page_to_crawl page_to_crawl = crawlpage.get_instance(uri) elif isinstance(page_to_crawl, crawlpage.CrawlPage): uri = page_to_crawl.uri else: raise TypeError( "get_instance() expects a parker.CrawlPage " "or basestring derivative." ) try: instance = _instances[uri] except KeyError: instance = CrawlModel(page_to_crawl) _instances[uri] = instance return instance
[ "def", "get_instance", "(", "page_to_crawl", ")", ":", "global", "_instances", "if", "isinstance", "(", "page_to_crawl", ",", "basestring", ")", ":", "uri", "=", "page_to_crawl", "page_to_crawl", "=", "crawlpage", ".", "get_instance", "(", "uri", ")", "elif", "isinstance", "(", "page_to_crawl", ",", "crawlpage", ".", "CrawlPage", ")", ":", "uri", "=", "page_to_crawl", ".", "uri", "else", ":", "raise", "TypeError", "(", "\"get_instance() expects a parker.CrawlPage \"", "\"or basestring derivative.\"", ")", "try", ":", "instance", "=", "_instances", "[", "uri", "]", "except", "KeyError", ":", "instance", "=", "CrawlModel", "(", "page_to_crawl", ")", "_instances", "[", "uri", "]", "=", "instance", "return", "instance" ]
Return an instance of CrawlModel.
[ "Return", "an", "instance", "of", "CrawlModel", "." ]
ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6
https://github.com/nefarioustim/parker/blob/ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6/parker/crawlmodel.py#L9-L29
243,622
callowayproject/Transmogrify
transmogrify/filesystem/s3.py
file_exists
def file_exists(original_file): """ Validate the original file is in the S3 bucket """ s3 = boto3.resource('s3') bucket_name, object_key = _parse_s3_file(original_file) bucket = s3.Bucket(bucket_name) bucket_iterator = bucket.objects.filter(Prefix=object_key) bucket_list = [x for x in bucket_iterator] logger.debug("Bucket List: {0}".format(", ".join([x.key for x in bucket_list]))) logger.debug("bucket_list length: {0}".format(len(bucket_list))) return len(bucket_list) == 1
python
def file_exists(original_file): """ Validate the original file is in the S3 bucket """ s3 = boto3.resource('s3') bucket_name, object_key = _parse_s3_file(original_file) bucket = s3.Bucket(bucket_name) bucket_iterator = bucket.objects.filter(Prefix=object_key) bucket_list = [x for x in bucket_iterator] logger.debug("Bucket List: {0}".format(", ".join([x.key for x in bucket_list]))) logger.debug("bucket_list length: {0}".format(len(bucket_list))) return len(bucket_list) == 1
[ "def", "file_exists", "(", "original_file", ")", ":", "s3", "=", "boto3", ".", "resource", "(", "'s3'", ")", "bucket_name", ",", "object_key", "=", "_parse_s3_file", "(", "original_file", ")", "bucket", "=", "s3", ".", "Bucket", "(", "bucket_name", ")", "bucket_iterator", "=", "bucket", ".", "objects", ".", "filter", "(", "Prefix", "=", "object_key", ")", "bucket_list", "=", "[", "x", "for", "x", "in", "bucket_iterator", "]", "logger", ".", "debug", "(", "\"Bucket List: {0}\"", ".", "format", "(", "\", \"", ".", "join", "(", "[", "x", ".", "key", "for", "x", "in", "bucket_list", "]", ")", ")", ")", "logger", ".", "debug", "(", "\"bucket_list length: {0}\"", ".", "format", "(", "len", "(", "bucket_list", ")", ")", ")", "return", "len", "(", "bucket_list", ")", "==", "1" ]
Validate the original file is in the S3 bucket
[ "Validate", "the", "original", "file", "is", "in", "the", "S3", "bucket" ]
f1f891b8b923b3a1ede5eac7f60531c1c472379e
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/filesystem/s3.py#L19-L30
243,623
callowayproject/Transmogrify
transmogrify/filesystem/s3.py
put_file
def put_file(buffer, modified_file): """ write the buffer to modified_file. modified_file should be in the format 's3://bucketname/path/to/file.txt' """ import mimetypes import boto3 file_type, _ = mimetypes.guess_type(modified_file) s3 = boto3.resource('s3') bucket_name, object_key = _parse_s3_file(modified_file) extra_args = { 'ACL': 'public-read', 'ContentType': file_type } bucket = s3.Bucket(bucket_name) logger.info("Uploading {0} to {1}".format(object_key, bucket_name)) bucket.upload_fileobj(buffer, object_key, ExtraArgs=extra_args)
python
def put_file(buffer, modified_file): """ write the buffer to modified_file. modified_file should be in the format 's3://bucketname/path/to/file.txt' """ import mimetypes import boto3 file_type, _ = mimetypes.guess_type(modified_file) s3 = boto3.resource('s3') bucket_name, object_key = _parse_s3_file(modified_file) extra_args = { 'ACL': 'public-read', 'ContentType': file_type } bucket = s3.Bucket(bucket_name) logger.info("Uploading {0} to {1}".format(object_key, bucket_name)) bucket.upload_fileobj(buffer, object_key, ExtraArgs=extra_args)
[ "def", "put_file", "(", "buffer", ",", "modified_file", ")", ":", "import", "mimetypes", "import", "boto3", "file_type", ",", "_", "=", "mimetypes", ".", "guess_type", "(", "modified_file", ")", "s3", "=", "boto3", ".", "resource", "(", "'s3'", ")", "bucket_name", ",", "object_key", "=", "_parse_s3_file", "(", "modified_file", ")", "extra_args", "=", "{", "'ACL'", ":", "'public-read'", ",", "'ContentType'", ":", "file_type", "}", "bucket", "=", "s3", ".", "Bucket", "(", "bucket_name", ")", "logger", ".", "info", "(", "\"Uploading {0} to {1}\"", ".", "format", "(", "object_key", ",", "bucket_name", ")", ")", "bucket", ".", "upload_fileobj", "(", "buffer", ",", "object_key", ",", "ExtraArgs", "=", "extra_args", ")" ]
write the buffer to modified_file. modified_file should be in the format 's3://bucketname/path/to/file.txt'
[ "write", "the", "buffer", "to", "modified_file", "." ]
f1f891b8b923b3a1ede5eac7f60531c1c472379e
https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/filesystem/s3.py#L60-L78
243,624
moertle/pyaas
pyaas/skel.py
add_data_files
def add_data_files(*include_dirs): 'called from setup.py in skeleton projects' data_files = [] for include_dir in include_dirs: for root, directories, filenames in os.walk(include_dir): include_files = [] for filename in filenames: # do not bring along certain files if filename.endswith('.local'): continue include_files.append(os.path.join(root, filename)) if include_files: data_files.append((root, include_files)) return data_files
python
def add_data_files(*include_dirs): 'called from setup.py in skeleton projects' data_files = [] for include_dir in include_dirs: for root, directories, filenames in os.walk(include_dir): include_files = [] for filename in filenames: # do not bring along certain files if filename.endswith('.local'): continue include_files.append(os.path.join(root, filename)) if include_files: data_files.append((root, include_files)) return data_files
[ "def", "add_data_files", "(", "*", "include_dirs", ")", ":", "data_files", "=", "[", "]", "for", "include_dir", "in", "include_dirs", ":", "for", "root", ",", "directories", ",", "filenames", "in", "os", ".", "walk", "(", "include_dir", ")", ":", "include_files", "=", "[", "]", "for", "filename", "in", "filenames", ":", "# do not bring along certain files", "if", "filename", ".", "endswith", "(", "'.local'", ")", ":", "continue", "include_files", ".", "append", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")", "if", "include_files", ":", "data_files", ".", "append", "(", "(", "root", ",", "include_files", ")", ")", "return", "data_files" ]
called from setup.py in skeleton projects
[ "called", "from", "setup", ".", "py", "in", "skeleton", "projects" ]
fc6a4cc94d4d6767df449fb3293d0ecb97e7e268
https://github.com/moertle/pyaas/blob/fc6a4cc94d4d6767df449fb3293d0ecb97e7e268/pyaas/skel.py#L11-L24
243,625
ronaldguillen/wave
wave/utils/html.py
parse_html_dict
def parse_html_dict(dictionary, prefix=''): """ Used to support dictionary values in HTML forms. { 'profile.username': 'example', 'profile.email': 'example@example.com', } --> { 'profile': { 'username': 'example', 'email': 'example@example.com' } } """ ret = MultiValueDict() regex = re.compile(r'^%s\.(.+)$' % re.escape(prefix)) for field, value in dictionary.items(): match = regex.match(field) if not match: continue key = match.groups()[0] ret[key] = value return ret
python
def parse_html_dict(dictionary, prefix=''): """ Used to support dictionary values in HTML forms. { 'profile.username': 'example', 'profile.email': 'example@example.com', } --> { 'profile': { 'username': 'example', 'email': 'example@example.com' } } """ ret = MultiValueDict() regex = re.compile(r'^%s\.(.+)$' % re.escape(prefix)) for field, value in dictionary.items(): match = regex.match(field) if not match: continue key = match.groups()[0] ret[key] = value return ret
[ "def", "parse_html_dict", "(", "dictionary", ",", "prefix", "=", "''", ")", ":", "ret", "=", "MultiValueDict", "(", ")", "regex", "=", "re", ".", "compile", "(", "r'^%s\\.(.+)$'", "%", "re", ".", "escape", "(", "prefix", ")", ")", "for", "field", ",", "value", "in", "dictionary", ".", "items", "(", ")", ":", "match", "=", "regex", ".", "match", "(", "field", ")", "if", "not", "match", ":", "continue", "key", "=", "match", ".", "groups", "(", ")", "[", "0", "]", "ret", "[", "key", "]", "=", "value", "return", "ret" ]
Used to support dictionary values in HTML forms. { 'profile.username': 'example', 'profile.email': 'example@example.com', } --> { 'profile': { 'username': 'example', 'email': 'example@example.com' } }
[ "Used", "to", "support", "dictionary", "values", "in", "HTML", "forms", "." ]
20bb979c917f7634d8257992e6d449dc751256a9
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/utils/html.py#L65-L89
243,626
ravenac95/lxc4u
lxc4u/overlayutils.py
OverlayGroup.meta
def meta(self): """Data for loading later""" mount_points = [] for overlay in self.overlays: mount_points.append(overlay.mount_point) return [self.end_dir, self.start_dir, mount_points]
python
def meta(self): """Data for loading later""" mount_points = [] for overlay in self.overlays: mount_points.append(overlay.mount_point) return [self.end_dir, self.start_dir, mount_points]
[ "def", "meta", "(", "self", ")", ":", "mount_points", "=", "[", "]", "for", "overlay", "in", "self", ".", "overlays", ":", "mount_points", ".", "append", "(", "overlay", ".", "mount_point", ")", "return", "[", "self", ".", "end_dir", ",", "self", ".", "start_dir", ",", "mount_points", "]" ]
Data for loading later
[ "Data", "for", "loading", "later" ]
4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32
https://github.com/ravenac95/lxc4u/blob/4b5a9c8e25af97e5637db2f4c0c67d319ab0ed32/lxc4u/overlayutils.py#L62-L67
243,627
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/logging.py
InvenioUpgraderLogFormatter.get_level_fmt
def get_level_fmt(self, level): """Get format for log level.""" key = None if level == logging.DEBUG: key = 'debug' elif level == logging.INFO: key = 'info' elif level == logging.WARNING: key = 'warning' elif level == logging.ERROR: key = 'error' elif level == logging.CRITICAL: key = 'critical' return self.overwrites.get(key, self.fmt)
python
def get_level_fmt(self, level): """Get format for log level.""" key = None if level == logging.DEBUG: key = 'debug' elif level == logging.INFO: key = 'info' elif level == logging.WARNING: key = 'warning' elif level == logging.ERROR: key = 'error' elif level == logging.CRITICAL: key = 'critical' return self.overwrites.get(key, self.fmt)
[ "def", "get_level_fmt", "(", "self", ",", "level", ")", ":", "key", "=", "None", "if", "level", "==", "logging", ".", "DEBUG", ":", "key", "=", "'debug'", "elif", "level", "==", "logging", ".", "INFO", ":", "key", "=", "'info'", "elif", "level", "==", "logging", ".", "WARNING", ":", "key", "=", "'warning'", "elif", "level", "==", "logging", ".", "ERROR", ":", "key", "=", "'error'", "elif", "level", "==", "logging", ".", "CRITICAL", ":", "key", "=", "'critical'", "return", "self", ".", "overwrites", ".", "get", "(", "key", ",", "self", ".", "fmt", ")" ]
Get format for log level.
[ "Get", "format", "for", "log", "level", "." ]
cee4bcb118515463ecf6de1421642007f79a9fcd
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/logging.py#L41-L54
243,628
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/logging.py
InvenioUpgraderLogFormatter.format
def format(self, record): """Format log record.""" format_orig = self._fmt self._fmt = self.get_level_fmt(record.levelno) record.prefix = self.prefix record.plugin_id = self.plugin_id result = logging.Formatter.format(self, record) self._fmt = format_orig return result
python
def format(self, record): """Format log record.""" format_orig = self._fmt self._fmt = self.get_level_fmt(record.levelno) record.prefix = self.prefix record.plugin_id = self.plugin_id result = logging.Formatter.format(self, record) self._fmt = format_orig return result
[ "def", "format", "(", "self", ",", "record", ")", ":", "format_orig", "=", "self", ".", "_fmt", "self", ".", "_fmt", "=", "self", ".", "get_level_fmt", "(", "record", ".", "levelno", ")", "record", ".", "prefix", "=", "self", ".", "prefix", "record", ".", "plugin_id", "=", "self", ".", "plugin_id", "result", "=", "logging", ".", "Formatter", ".", "format", "(", "self", ",", "record", ")", "self", ".", "_fmt", "=", "format_orig", "return", "result" ]
Format log record.
[ "Format", "log", "record", "." ]
cee4bcb118515463ecf6de1421642007f79a9fcd
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/logging.py#L56-L64
243,629
jaraco/jaraco.email
jaraco/email/smtp.py
start_simple_server
def start_simple_server(): "A simple mail server that sends a simple response" args = _get_args() addr = ('', args.port) DebuggingServer(addr, None) asyncore.loop()
python
def start_simple_server(): "A simple mail server that sends a simple response" args = _get_args() addr = ('', args.port) DebuggingServer(addr, None) asyncore.loop()
[ "def", "start_simple_server", "(", ")", ":", "args", "=", "_get_args", "(", ")", "addr", "=", "(", "''", ",", "args", ".", "port", ")", "DebuggingServer", "(", "addr", ",", "None", ")", "asyncore", ".", "loop", "(", ")" ]
A simple mail server that sends a simple response
[ "A", "simple", "mail", "server", "that", "sends", "a", "simple", "response" ]
3a43a0a23931b2c9b4a22a5213fcd5edf3baec53
https://github.com/jaraco/jaraco.email/blob/3a43a0a23931b2c9b4a22a5213fcd5edf3baec53/jaraco/email/smtp.py#L25-L30
243,630
sassoo/goldman
goldman/resources/oauth_revoke.py
Resource.on_post
def on_post(self, req, resp): """ Validate the token revocation request for spec compliance The spec also dictates the JSON based error response on failure & is handled in this responder. """ token = req.get_param('token') token_type_hint = req.get_param('token_type_hint') # errors or not, disable client caching along the way # per the spec resp.disable_caching() if not token: resp.status = falcon.HTTP_400 resp.serialize({ 'error': 'invalid_request', 'error_description': 'A token parameter is required during ' 'revocation according to RFC 7009.', 'error_uri': 'tools.ietf.org/html/rfc7009#section-2.1', }) elif token_type_hint == 'refresh_token': resp.status = falcon.HTTP_400 resp.serialize({ 'error': 'unsupported_token_type', 'error_description': 'Currently only access_token types can ' 'be revoked, NOT refresh_token types.', 'error_uri': 'tools.ietf.org/html/rfc7009#section-2.2.1', }) else: # ignore return code per section 2.2 self.revoke_token(token) resp.status = falcon.HTTP_200
python
def on_post(self, req, resp): """ Validate the token revocation request for spec compliance The spec also dictates the JSON based error response on failure & is handled in this responder. """ token = req.get_param('token') token_type_hint = req.get_param('token_type_hint') # errors or not, disable client caching along the way # per the spec resp.disable_caching() if not token: resp.status = falcon.HTTP_400 resp.serialize({ 'error': 'invalid_request', 'error_description': 'A token parameter is required during ' 'revocation according to RFC 7009.', 'error_uri': 'tools.ietf.org/html/rfc7009#section-2.1', }) elif token_type_hint == 'refresh_token': resp.status = falcon.HTTP_400 resp.serialize({ 'error': 'unsupported_token_type', 'error_description': 'Currently only access_token types can ' 'be revoked, NOT refresh_token types.', 'error_uri': 'tools.ietf.org/html/rfc7009#section-2.2.1', }) else: # ignore return code per section 2.2 self.revoke_token(token) resp.status = falcon.HTTP_200
[ "def", "on_post", "(", "self", ",", "req", ",", "resp", ")", ":", "token", "=", "req", ".", "get_param", "(", "'token'", ")", "token_type_hint", "=", "req", ".", "get_param", "(", "'token_type_hint'", ")", "# errors or not, disable client caching along the way", "# per the spec", "resp", ".", "disable_caching", "(", ")", "if", "not", "token", ":", "resp", ".", "status", "=", "falcon", ".", "HTTP_400", "resp", ".", "serialize", "(", "{", "'error'", ":", "'invalid_request'", ",", "'error_description'", ":", "'A token parameter is required during '", "'revocation according to RFC 7009.'", ",", "'error_uri'", ":", "'tools.ietf.org/html/rfc7009#section-2.1'", ",", "}", ")", "elif", "token_type_hint", "==", "'refresh_token'", ":", "resp", ".", "status", "=", "falcon", ".", "HTTP_400", "resp", ".", "serialize", "(", "{", "'error'", ":", "'unsupported_token_type'", ",", "'error_description'", ":", "'Currently only access_token types can '", "'be revoked, NOT refresh_token types.'", ",", "'error_uri'", ":", "'tools.ietf.org/html/rfc7009#section-2.2.1'", ",", "}", ")", "else", ":", "# ignore return code per section 2.2", "self", ".", "revoke_token", "(", "token", ")", "resp", ".", "status", "=", "falcon", ".", "HTTP_200" ]
Validate the token revocation request for spec compliance The spec also dictates the JSON based error response on failure & is handled in this responder.
[ "Validate", "the", "token", "revocation", "request", "for", "spec", "compliance" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/resources/oauth_revoke.py#L41-L74
243,631
Othernet-Project/bottle-fdsend
fdsend/rangewrapper.py
emulate_seek
def emulate_seek(fd, offset, chunk=CHUNK): """ Emulates a seek on an object that does not support it The seek is emulated by reading and discarding bytes until specified offset is reached. The ``offset`` argument is in bytes from start of file. The ``chunk`` argument can be used to adjust the size of the chunks in which read operation is performed. Larger chunks will reach the offset in less reads and cost less CPU but use more memory. Conversely, smaller chunks will be more memory efficient, but cause more read operations and more CPU usage. If chunk is set to None, then the ``offset`` amount of bytes is read at once. This is fastest but depending on the offset size, may use a lot of memory. Default chunk size is controlled by the ``fsend.rangewrapper.CHUNK`` constant, which is 8KB by default. This function has no return value. """ while chunk and offset > CHUNK: fd.read(chunk) offset -= chunk fd.read(offset)
python
def emulate_seek(fd, offset, chunk=CHUNK): """ Emulates a seek on an object that does not support it The seek is emulated by reading and discarding bytes until specified offset is reached. The ``offset`` argument is in bytes from start of file. The ``chunk`` argument can be used to adjust the size of the chunks in which read operation is performed. Larger chunks will reach the offset in less reads and cost less CPU but use more memory. Conversely, smaller chunks will be more memory efficient, but cause more read operations and more CPU usage. If chunk is set to None, then the ``offset`` amount of bytes is read at once. This is fastest but depending on the offset size, may use a lot of memory. Default chunk size is controlled by the ``fsend.rangewrapper.CHUNK`` constant, which is 8KB by default. This function has no return value. """ while chunk and offset > CHUNK: fd.read(chunk) offset -= chunk fd.read(offset)
[ "def", "emulate_seek", "(", "fd", ",", "offset", ",", "chunk", "=", "CHUNK", ")", ":", "while", "chunk", "and", "offset", ">", "CHUNK", ":", "fd", ".", "read", "(", "chunk", ")", "offset", "-=", "chunk", "fd", ".", "read", "(", "offset", ")" ]
Emulates a seek on an object that does not support it The seek is emulated by reading and discarding bytes until specified offset is reached. The ``offset`` argument is in bytes from start of file. The ``chunk`` argument can be used to adjust the size of the chunks in which read operation is performed. Larger chunks will reach the offset in less reads and cost less CPU but use more memory. Conversely, smaller chunks will be more memory efficient, but cause more read operations and more CPU usage. If chunk is set to None, then the ``offset`` amount of bytes is read at once. This is fastest but depending on the offset size, may use a lot of memory. Default chunk size is controlled by the ``fsend.rangewrapper.CHUNK`` constant, which is 8KB by default. This function has no return value.
[ "Emulates", "a", "seek", "on", "an", "object", "that", "does", "not", "support", "it" ]
5ff27e605e8cf878e24c71c1446dcf5c8caf4898
https://github.com/Othernet-Project/bottle-fdsend/blob/5ff27e605e8cf878e24c71c1446dcf5c8caf4898/fdsend/rangewrapper.py#L17-L41
243,632
Othernet-Project/bottle-fdsend
fdsend/rangewrapper.py
force_seek
def force_seek(fd, offset, chunk=CHUNK): """ Force adjustment of read cursort to specified offset This function takes a file descriptor ``fd`` and tries to seek to position specified by ``offset`` argument. If the descriptor does not support the ``seek()`` method, it will fall back to ``emulate_seek()``. The optional ``chunk`` argument can be used to adjust the chunk size for ``emulate_seek()``. """ try: fd.seek(offset) except (AttributeError, io.UnsupportedOperation): # This file handle probably has no seek() emulate_seek(fd, offset, chunk)
python
def force_seek(fd, offset, chunk=CHUNK): """ Force adjustment of read cursort to specified offset This function takes a file descriptor ``fd`` and tries to seek to position specified by ``offset`` argument. If the descriptor does not support the ``seek()`` method, it will fall back to ``emulate_seek()``. The optional ``chunk`` argument can be used to adjust the chunk size for ``emulate_seek()``. """ try: fd.seek(offset) except (AttributeError, io.UnsupportedOperation): # This file handle probably has no seek() emulate_seek(fd, offset, chunk)
[ "def", "force_seek", "(", "fd", ",", "offset", ",", "chunk", "=", "CHUNK", ")", ":", "try", ":", "fd", ".", "seek", "(", "offset", ")", "except", "(", "AttributeError", ",", "io", ".", "UnsupportedOperation", ")", ":", "# This file handle probably has no seek()", "emulate_seek", "(", "fd", ",", "offset", ",", "chunk", ")" ]
Force adjustment of read cursort to specified offset This function takes a file descriptor ``fd`` and tries to seek to position specified by ``offset`` argument. If the descriptor does not support the ``seek()`` method, it will fall back to ``emulate_seek()``. The optional ``chunk`` argument can be used to adjust the chunk size for ``emulate_seek()``.
[ "Force", "adjustment", "of", "read", "cursort", "to", "specified", "offset" ]
5ff27e605e8cf878e24c71c1446dcf5c8caf4898
https://github.com/Othernet-Project/bottle-fdsend/blob/5ff27e605e8cf878e24c71c1446dcf5c8caf4898/fdsend/rangewrapper.py#L44-L58
243,633
Othernet-Project/bottle-fdsend
fdsend/rangewrapper.py
range_iter
def range_iter(fd, offset, length, chunk=CHUNK): """ Iterator generator that iterates over chunks in specified range This generator is meant to be used when returning file descriptor as a response to Range request (byte serving). It limits the reads to the region specified by ``offset`` (in bytes form start of the file) and ``limit`` (number of bytes to read), and returns the file contents in chunks of ``chunk`` bytes. The read offset is set either by using the file descriptor's ``seek()`` method, or by using ``emulate_seek()`` function if file descriptor does not implement ``seek()``. The file descriptor is automatically closed when iteration is finished. """ force_seek(fd, offset, chunk) while length > 0: ret = fd.read(chunk) if not ret: return length -= chunk yield ret fd.close()
python
def range_iter(fd, offset, length, chunk=CHUNK): """ Iterator generator that iterates over chunks in specified range This generator is meant to be used when returning file descriptor as a response to Range request (byte serving). It limits the reads to the region specified by ``offset`` (in bytes form start of the file) and ``limit`` (number of bytes to read), and returns the file contents in chunks of ``chunk`` bytes. The read offset is set either by using the file descriptor's ``seek()`` method, or by using ``emulate_seek()`` function if file descriptor does not implement ``seek()``. The file descriptor is automatically closed when iteration is finished. """ force_seek(fd, offset, chunk) while length > 0: ret = fd.read(chunk) if not ret: return length -= chunk yield ret fd.close()
[ "def", "range_iter", "(", "fd", ",", "offset", ",", "length", ",", "chunk", "=", "CHUNK", ")", ":", "force_seek", "(", "fd", ",", "offset", ",", "chunk", ")", "while", "length", ">", "0", ":", "ret", "=", "fd", ".", "read", "(", "chunk", ")", "if", "not", "ret", ":", "return", "length", "-=", "chunk", "yield", "ret", "fd", ".", "close", "(", ")" ]
Iterator generator that iterates over chunks in specified range This generator is meant to be used when returning file descriptor as a response to Range request (byte serving). It limits the reads to the region specified by ``offset`` (in bytes form start of the file) and ``limit`` (number of bytes to read), and returns the file contents in chunks of ``chunk`` bytes. The read offset is set either by using the file descriptor's ``seek()`` method, or by using ``emulate_seek()`` function if file descriptor does not implement ``seek()``. The file descriptor is automatically closed when iteration is finished.
[ "Iterator", "generator", "that", "iterates", "over", "chunks", "in", "specified", "range" ]
5ff27e605e8cf878e24c71c1446dcf5c8caf4898
https://github.com/Othernet-Project/bottle-fdsend/blob/5ff27e605e8cf878e24c71c1446dcf5c8caf4898/fdsend/rangewrapper.py#L61-L83
243,634
Othernet-Project/bottle-fdsend
fdsend/rangewrapper.py
RangeWrapper.read
def read(self, size=None): """ Read a specified number of bytes from the file descriptor This method emulates the normal file descriptor's ``read()`` method and restricts the total number of bytes readable. If file descriptor is not present (e.g., ``close()`` method had been called), ``ValueError`` is raised. If ``size`` is omitted, or ``None``, or any other falsy value, read will be done up to the remaining length (constructor's ``length`` argument minus the bytes that have been read previously). This method internally invokes the file descriptor's ``read()`` method, and the method must accept a single integer positional argument. """ if not self.fd: raise ValueError('I/O on closed file') if not size: size = self.remaining size = min([self.remaining, size]) if not size: return '' data = self.fd.read(size) self.remaining -= size return data
python
def read(self, size=None): """ Read a specified number of bytes from the file descriptor This method emulates the normal file descriptor's ``read()`` method and restricts the total number of bytes readable. If file descriptor is not present (e.g., ``close()`` method had been called), ``ValueError`` is raised. If ``size`` is omitted, or ``None``, or any other falsy value, read will be done up to the remaining length (constructor's ``length`` argument minus the bytes that have been read previously). This method internally invokes the file descriptor's ``read()`` method, and the method must accept a single integer positional argument. """ if not self.fd: raise ValueError('I/O on closed file') if not size: size = self.remaining size = min([self.remaining, size]) if not size: return '' data = self.fd.read(size) self.remaining -= size return data
[ "def", "read", "(", "self", ",", "size", "=", "None", ")", ":", "if", "not", "self", ".", "fd", ":", "raise", "ValueError", "(", "'I/O on closed file'", ")", "if", "not", "size", ":", "size", "=", "self", ".", "remaining", "size", "=", "min", "(", "[", "self", ".", "remaining", ",", "size", "]", ")", "if", "not", "size", ":", "return", "''", "data", "=", "self", ".", "fd", ".", "read", "(", "size", ")", "self", ".", "remaining", "-=", "size", "return", "data" ]
Read a specified number of bytes from the file descriptor This method emulates the normal file descriptor's ``read()`` method and restricts the total number of bytes readable. If file descriptor is not present (e.g., ``close()`` method had been called), ``ValueError`` is raised. If ``size`` is omitted, or ``None``, or any other falsy value, read will be done up to the remaining length (constructor's ``length`` argument minus the bytes that have been read previously). This method internally invokes the file descriptor's ``read()`` method, and the method must accept a single integer positional argument.
[ "Read", "a", "specified", "number", "of", "bytes", "from", "the", "file", "descriptor" ]
5ff27e605e8cf878e24c71c1446dcf5c8caf4898
https://github.com/Othernet-Project/bottle-fdsend/blob/5ff27e605e8cf878e24c71c1446dcf5c8caf4898/fdsend/rangewrapper.py#L126-L151
243,635
datadesk/django-greeking
greeking/templatetags/greeking_tags.py
placeholdit
def placeholdit( width, height, background_color="cccccc", text_color="969696", text=None, random_background_color=False ): """ Creates a placeholder image using placehold.it Usage format: {% placeholdit [width] [height] [background_color] [text_color] [text] %} Example usage: Default image at 250 square {% placeholdit 250 %} 100 wide and 200 high {% placeholdit 100 200 %} Custom background and text colors {% placeholdit 100 200 background_color='fff' text_color=000' %} Custom text {% placeholdit 100 200 text='Hello LA' %} """ url = get_placeholdit_url( width, height, background_color=background_color, text_color=text_color, text=text, ) return format_html('<img src="{}"/>', url)
python
def placeholdit( width, height, background_color="cccccc", text_color="969696", text=None, random_background_color=False ): """ Creates a placeholder image using placehold.it Usage format: {% placeholdit [width] [height] [background_color] [text_color] [text] %} Example usage: Default image at 250 square {% placeholdit 250 %} 100 wide and 200 high {% placeholdit 100 200 %} Custom background and text colors {% placeholdit 100 200 background_color='fff' text_color=000' %} Custom text {% placeholdit 100 200 text='Hello LA' %} """ url = get_placeholdit_url( width, height, background_color=background_color, text_color=text_color, text=text, ) return format_html('<img src="{}"/>', url)
[ "def", "placeholdit", "(", "width", ",", "height", ",", "background_color", "=", "\"cccccc\"", ",", "text_color", "=", "\"969696\"", ",", "text", "=", "None", ",", "random_background_color", "=", "False", ")", ":", "url", "=", "get_placeholdit_url", "(", "width", ",", "height", ",", "background_color", "=", "background_color", ",", "text_color", "=", "text_color", ",", "text", "=", "text", ",", ")", "return", "format_html", "(", "'<img src=\"{}\"/>'", ",", "url", ")" ]
Creates a placeholder image using placehold.it Usage format: {% placeholdit [width] [height] [background_color] [text_color] [text] %} Example usage: Default image at 250 square {% placeholdit 250 %} 100 wide and 200 high {% placeholdit 100 200 %} Custom background and text colors {% placeholdit 100 200 background_color='fff' text_color=000' %} Custom text {% placeholdit 100 200 text='Hello LA' %}
[ "Creates", "a", "placeholder", "image", "using", "placehold", ".", "it" ]
72509c94952279503bbe8d5a710c1fd344da0670
https://github.com/datadesk/django-greeking/blob/72509c94952279503bbe8d5a710c1fd344da0670/greeking/templatetags/greeking_tags.py#L111-L147
243,636
datadesk/django-greeking
greeking/templatetags/greeking_tags.py
pangram
def pangram(language='en'): """ Prints a pangram in the specified language. A pangram is a phrase that includes every letter of an alphabet. Default is English. For a full list of available languages, refer to pangrams.py Usage format:: {% pangram [language] %} ``language`` is the two-letter abbreviation the desired language. Examples: * ``{% pangram %}`` will output the default English pangram. * ``{% pangram 'fr' %}`` will output a French pangram. """ try: pangram = get_pangram(language) except KeyError: raise template.TemplateSyntaxError( "Could not find a pangram for %r abbreviation" % language ) return get_pangram_html(pangram)
python
def pangram(language='en'): """ Prints a pangram in the specified language. A pangram is a phrase that includes every letter of an alphabet. Default is English. For a full list of available languages, refer to pangrams.py Usage format:: {% pangram [language] %} ``language`` is the two-letter abbreviation the desired language. Examples: * ``{% pangram %}`` will output the default English pangram. * ``{% pangram 'fr' %}`` will output a French pangram. """ try: pangram = get_pangram(language) except KeyError: raise template.TemplateSyntaxError( "Could not find a pangram for %r abbreviation" % language ) return get_pangram_html(pangram)
[ "def", "pangram", "(", "language", "=", "'en'", ")", ":", "try", ":", "pangram", "=", "get_pangram", "(", "language", ")", "except", "KeyError", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"Could not find a pangram for %r abbreviation\"", "%", "language", ")", "return", "get_pangram_html", "(", "pangram", ")" ]
Prints a pangram in the specified language. A pangram is a phrase that includes every letter of an alphabet. Default is English. For a full list of available languages, refer to pangrams.py Usage format:: {% pangram [language] %} ``language`` is the two-letter abbreviation the desired language. Examples: * ``{% pangram %}`` will output the default English pangram. * ``{% pangram 'fr' %}`` will output a French pangram.
[ "Prints", "a", "pangram", "in", "the", "specified", "language", "." ]
72509c94952279503bbe8d5a710c1fd344da0670
https://github.com/datadesk/django-greeking/blob/72509c94952279503bbe8d5a710c1fd344da0670/greeking/templatetags/greeking_tags.py#L193-L218
243,637
kemingy/cnprep
cnprep/extractor.py
Extractor._get_result
def _get_result(self): """ get the result """ info = {} self.options2attr = { 'email': self._email, 'telephone': self._telephone, 'QQ' : self._QQ, 'wechat': self._wechat, 'url': self._url, 'emoji': self._emoji, 'tex': self._tex, 'blur': self._blur, 'message': self.m, } for item in self.option: info[item] = self.options2attr[item] return info
python
def _get_result(self): """ get the result """ info = {} self.options2attr = { 'email': self._email, 'telephone': self._telephone, 'QQ' : self._QQ, 'wechat': self._wechat, 'url': self._url, 'emoji': self._emoji, 'tex': self._tex, 'blur': self._blur, 'message': self.m, } for item in self.option: info[item] = self.options2attr[item] return info
[ "def", "_get_result", "(", "self", ")", ":", "info", "=", "{", "}", "self", ".", "options2attr", "=", "{", "'email'", ":", "self", ".", "_email", ",", "'telephone'", ":", "self", ".", "_telephone", ",", "'QQ'", ":", "self", ".", "_QQ", ",", "'wechat'", ":", "self", ".", "_wechat", ",", "'url'", ":", "self", ".", "_url", ",", "'emoji'", ":", "self", ".", "_emoji", ",", "'tex'", ":", "self", ".", "_tex", ",", "'blur'", ":", "self", ".", "_blur", ",", "'message'", ":", "self", ".", "m", ",", "}", "for", "item", "in", "self", ".", "option", ":", "info", "[", "item", "]", "=", "self", ".", "options2attr", "[", "item", "]", "return", "info" ]
get the result
[ "get", "the", "result" ]
076ea185167adb7e652bea3b81fb6830e162e880
https://github.com/kemingy/cnprep/blob/076ea185167adb7e652bea3b81fb6830e162e880/cnprep/extractor.py#L63-L83
243,638
kemingy/cnprep
cnprep/extractor.py
Extractor.extract
def extract(self, m): """ extract info specified in option """ self._clear() self.m = m # self._preprocess() if self.option != []: self._url_filter() self._email_filter() if 'tex' in self.option: self._tex_filter() # if 'email' in self.option: # self._email_filter() if 'telephone' in self.option: self._telephone_filter() if 'QQ' in self.option: self._QQ_filter() if 'emoji' in self.option: self._emoji_filter() if 'wechat' in self.option: self._wechat_filter() self._filter() if 'blur' in self.option: self._blur = get_number(self.m, self._limit) return self._get_result()
python
def extract(self, m): """ extract info specified in option """ self._clear() self.m = m # self._preprocess() if self.option != []: self._url_filter() self._email_filter() if 'tex' in self.option: self._tex_filter() # if 'email' in self.option: # self._email_filter() if 'telephone' in self.option: self._telephone_filter() if 'QQ' in self.option: self._QQ_filter() if 'emoji' in self.option: self._emoji_filter() if 'wechat' in self.option: self._wechat_filter() self._filter() if 'blur' in self.option: self._blur = get_number(self.m, self._limit) return self._get_result()
[ "def", "extract", "(", "self", ",", "m", ")", ":", "self", ".", "_clear", "(", ")", "self", ".", "m", "=", "m", "# self._preprocess()", "if", "self", ".", "option", "!=", "[", "]", ":", "self", ".", "_url_filter", "(", ")", "self", ".", "_email_filter", "(", ")", "if", "'tex'", "in", "self", ".", "option", ":", "self", ".", "_tex_filter", "(", ")", "# if 'email' in self.option:", "# self._email_filter()", "if", "'telephone'", "in", "self", ".", "option", ":", "self", ".", "_telephone_filter", "(", ")", "if", "'QQ'", "in", "self", ".", "option", ":", "self", ".", "_QQ_filter", "(", ")", "if", "'emoji'", "in", "self", ".", "option", ":", "self", ".", "_emoji_filter", "(", ")", "if", "'wechat'", "in", "self", ".", "option", ":", "self", ".", "_wechat_filter", "(", ")", "self", ".", "_filter", "(", ")", "if", "'blur'", "in", "self", ".", "option", ":", "self", ".", "_blur", "=", "get_number", "(", "self", ".", "m", ",", "self", ".", "_limit", ")", "return", "self", ".", "_get_result", "(", ")" ]
extract info specified in option
[ "extract", "info", "specified", "in", "option" ]
076ea185167adb7e652bea3b81fb6830e162e880
https://github.com/kemingy/cnprep/blob/076ea185167adb7e652bea3b81fb6830e162e880/cnprep/extractor.py#L98-L125
243,639
kemingy/cnprep
cnprep/extractor.py
Extractor._filter
def _filter(self): """ delete the punctuation """ pattern = u"[\s+\.\!\-\/_,$%^*(+\"\']+|[+——!】【,。??:、:~@#¥%……&*“”()]+" self.m = re.sub(pattern, "", self.m)
python
def _filter(self): """ delete the punctuation """ pattern = u"[\s+\.\!\-\/_,$%^*(+\"\']+|[+——!】【,。??:、:~@#¥%……&*“”()]+" self.m = re.sub(pattern, "", self.m)
[ "def", "_filter", "(", "self", ")", ":", "pattern", "=", "u\"[\\s+\\.\\!\\-\\/_,$%^*(+\\\"\\']+|[+——!】【,。??:、:~@#¥%……&*“”()]+\"", "self", ".", "m", "=", "re", ".", "sub", "(", "pattern", ",", "\"\"", ",", "self", ".", "m", ")" ]
delete the punctuation
[ "delete", "the", "punctuation" ]
076ea185167adb7e652bea3b81fb6830e162e880
https://github.com/kemingy/cnprep/blob/076ea185167adb7e652bea3b81fb6830e162e880/cnprep/extractor.py#L127-L132
243,640
jonDel/loggers
loggers/loggers.py
Loggers.set_log_level
def set_log_level(self, log_level): '''Configures class log level Arguments: log_level (:obj:`str`): log level ('NOTSET','DEBUG','INFO' 'WARNING', 'ERROR', 'CRITICAL') ''' if log_level == 'DEBUG': self.log.setLevel(logging.DEBUG) self.log.debug("Changing log level to "+log_level) elif log_level == 'INFO': self.log.setLevel(logging.INFO) self.log.info("Changing log level to "+log_level) elif log_level == 'WARNING': self.log.setLevel(logging.WARNING) self.log.warning("Changing log level to "+log_level) elif log_level == 'ERROR': self.log.setLevel(logging.ERROR) self.log.error("Changing log level to "+log_level) elif log_level == 'CRITICAL': self.log.setLevel(logging.CRITICAL) self.log.critical("Changing log level to "+log_level) elif log_level == 'NOTSET': self.log.setLevel(logging.NOTSET) else: raise NotImplementedError('Not implemented log level '+str(log_level))
python
def set_log_level(self, log_level): '''Configures class log level Arguments: log_level (:obj:`str`): log level ('NOTSET','DEBUG','INFO' 'WARNING', 'ERROR', 'CRITICAL') ''' if log_level == 'DEBUG': self.log.setLevel(logging.DEBUG) self.log.debug("Changing log level to "+log_level) elif log_level == 'INFO': self.log.setLevel(logging.INFO) self.log.info("Changing log level to "+log_level) elif log_level == 'WARNING': self.log.setLevel(logging.WARNING) self.log.warning("Changing log level to "+log_level) elif log_level == 'ERROR': self.log.setLevel(logging.ERROR) self.log.error("Changing log level to "+log_level) elif log_level == 'CRITICAL': self.log.setLevel(logging.CRITICAL) self.log.critical("Changing log level to "+log_level) elif log_level == 'NOTSET': self.log.setLevel(logging.NOTSET) else: raise NotImplementedError('Not implemented log level '+str(log_level))
[ "def", "set_log_level", "(", "self", ",", "log_level", ")", ":", "if", "log_level", "==", "'DEBUG'", ":", "self", ".", "log", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "self", ".", "log", ".", "debug", "(", "\"Changing log level to \"", "+", "log_level", ")", "elif", "log_level", "==", "'INFO'", ":", "self", ".", "log", ".", "setLevel", "(", "logging", ".", "INFO", ")", "self", ".", "log", ".", "info", "(", "\"Changing log level to \"", "+", "log_level", ")", "elif", "log_level", "==", "'WARNING'", ":", "self", ".", "log", ".", "setLevel", "(", "logging", ".", "WARNING", ")", "self", ".", "log", ".", "warning", "(", "\"Changing log level to \"", "+", "log_level", ")", "elif", "log_level", "==", "'ERROR'", ":", "self", ".", "log", ".", "setLevel", "(", "logging", ".", "ERROR", ")", "self", ".", "log", ".", "error", "(", "\"Changing log level to \"", "+", "log_level", ")", "elif", "log_level", "==", "'CRITICAL'", ":", "self", ".", "log", ".", "setLevel", "(", "logging", ".", "CRITICAL", ")", "self", ".", "log", ".", "critical", "(", "\"Changing log level to \"", "+", "log_level", ")", "elif", "log_level", "==", "'NOTSET'", ":", "self", ".", "log", ".", "setLevel", "(", "logging", ".", "NOTSET", ")", "else", ":", "raise", "NotImplementedError", "(", "'Not implemented log level '", "+", "str", "(", "log_level", ")", ")" ]
Configures class log level Arguments: log_level (:obj:`str`): log level ('NOTSET','DEBUG','INFO' 'WARNING', 'ERROR', 'CRITICAL')
[ "Configures", "class", "log", "level" ]
f03ff7231535c87bfa5b97fdab5ae201be503dbc
https://github.com/jonDel/loggers/blob/f03ff7231535c87bfa5b97fdab5ae201be503dbc/loggers/loggers.py#L85-L111
243,641
jonDel/loggers
loggers/loggers.py
Loggers.set_log_format
def set_log_format(self, log_type, log_format): '''Configures log format Arguments: log_type (:obj:`str`): log type (error, debug or stream) log_format (:obj:`str`): log format (ex:"Log: %(message)s | Log level:%(levelname)s | Date:%(asctime)s',datefmt='%m/%d/%Y %I:%M:%S") ''' if not (log_type == 'error' or log_type == 'stream' or log_type == 'debug'): self.log.debug('Log type must be error, stream, or debug') else: self.default_formatter = logging.Formatter(log_format) if log_type == 'error': self.error_handler.setFormatter(self.default_formatter) elif log_type == 'debug': self.debug_handler.setFormatter(self.default_formatter) elif log_type == 'stream': self.stream_handler.setFormatter(self.default_formatter)
python
def set_log_format(self, log_type, log_format): '''Configures log format Arguments: log_type (:obj:`str`): log type (error, debug or stream) log_format (:obj:`str`): log format (ex:"Log: %(message)s | Log level:%(levelname)s | Date:%(asctime)s',datefmt='%m/%d/%Y %I:%M:%S") ''' if not (log_type == 'error' or log_type == 'stream' or log_type == 'debug'): self.log.debug('Log type must be error, stream, or debug') else: self.default_formatter = logging.Formatter(log_format) if log_type == 'error': self.error_handler.setFormatter(self.default_formatter) elif log_type == 'debug': self.debug_handler.setFormatter(self.default_formatter) elif log_type == 'stream': self.stream_handler.setFormatter(self.default_formatter)
[ "def", "set_log_format", "(", "self", ",", "log_type", ",", "log_format", ")", ":", "if", "not", "(", "log_type", "==", "'error'", "or", "log_type", "==", "'stream'", "or", "log_type", "==", "'debug'", ")", ":", "self", ".", "log", ".", "debug", "(", "'Log type must be error, stream, or debug'", ")", "else", ":", "self", ".", "default_formatter", "=", "logging", ".", "Formatter", "(", "log_format", ")", "if", "log_type", "==", "'error'", ":", "self", ".", "error_handler", ".", "setFormatter", "(", "self", ".", "default_formatter", ")", "elif", "log_type", "==", "'debug'", ":", "self", ".", "debug_handler", ".", "setFormatter", "(", "self", ".", "default_formatter", ")", "elif", "log_type", "==", "'stream'", ":", "self", ".", "stream_handler", ".", "setFormatter", "(", "self", ".", "default_formatter", ")" ]
Configures log format Arguments: log_type (:obj:`str`): log type (error, debug or stream) log_format (:obj:`str`): log format (ex:"Log: %(message)s | Log level:%(levelname)s | Date:%(asctime)s',datefmt='%m/%d/%Y %I:%M:%S")
[ "Configures", "log", "format" ]
f03ff7231535c87bfa5b97fdab5ae201be503dbc
https://github.com/jonDel/loggers/blob/f03ff7231535c87bfa5b97fdab5ae201be503dbc/loggers/loggers.py#L113-L131
243,642
Kjwon15/autotweet
autotweet/learning.py
DataCollection.add_document
def add_document(self, question, answer): """Add question answer set to DB. :param question: A question to an answer :type question: :class:`str` :param answer: An answer to a question :type answer: :class:`str` """ question = question.strip() answer = answer.strip() session = self.Session() if session.query(Document) \ .filter_by(text=question, answer=answer).count(): logger.info('Already here: {0} -> {1}'.format(question, answer)) return logger.info('add document: {0} -> {1}'.format(question, answer)) grams = self._get_grams(session, question, make=True) doc = Document(question, answer) doc.grams = list(grams) self._recalc_idfs(session, grams) session.add(doc) session.commit()
python
def add_document(self, question, answer): """Add question answer set to DB. :param question: A question to an answer :type question: :class:`str` :param answer: An answer to a question :type answer: :class:`str` """ question = question.strip() answer = answer.strip() session = self.Session() if session.query(Document) \ .filter_by(text=question, answer=answer).count(): logger.info('Already here: {0} -> {1}'.format(question, answer)) return logger.info('add document: {0} -> {1}'.format(question, answer)) grams = self._get_grams(session, question, make=True) doc = Document(question, answer) doc.grams = list(grams) self._recalc_idfs(session, grams) session.add(doc) session.commit()
[ "def", "add_document", "(", "self", ",", "question", ",", "answer", ")", ":", "question", "=", "question", ".", "strip", "(", ")", "answer", "=", "answer", ".", "strip", "(", ")", "session", "=", "self", ".", "Session", "(", ")", "if", "session", ".", "query", "(", "Document", ")", ".", "filter_by", "(", "text", "=", "question", ",", "answer", "=", "answer", ")", ".", "count", "(", ")", ":", "logger", ".", "info", "(", "'Already here: {0} -> {1}'", ".", "format", "(", "question", ",", "answer", ")", ")", "return", "logger", ".", "info", "(", "'add document: {0} -> {1}'", ".", "format", "(", "question", ",", "answer", ")", ")", "grams", "=", "self", ".", "_get_grams", "(", "session", ",", "question", ",", "make", "=", "True", ")", "doc", "=", "Document", "(", "question", ",", "answer", ")", "doc", ".", "grams", "=", "list", "(", "grams", ")", "self", ".", "_recalc_idfs", "(", "session", ",", "grams", ")", "session", ".", "add", "(", "doc", ")", "session", ".", "commit", "(", ")" ]
Add question answer set to DB. :param question: A question to an answer :type question: :class:`str` :param answer: An answer to a question :type answer: :class:`str`
[ "Add", "question", "answer", "set", "to", "DB", "." ]
c35b68ee1814916fbe9e5a5bd6ea6e75b3cc596e
https://github.com/Kjwon15/autotweet/blob/c35b68ee1814916fbe9e5a5bd6ea6e75b3cc596e/autotweet/learning.py#L30-L58
243,643
Kjwon15/autotweet
autotweet/learning.py
DataCollection.get_best_answer
def get_best_answer(self, query): """Get best answer to a question. :param query: A question to get an answer :type query: :class:`str` :returns: An answer to a question :rtype: :class:`str` :raises: :class:`NoAnswerError` when can not found answer to a question """ query = to_unicode(query) session = self.Session() grams = self._get_grams(session, query) if not grams: raise NoAnswerError('Can not found answer') documents = set([doc for gram in grams for doc in gram.documents]) self._recalc_idfs(session, grams) idfs = dict((gram.gram, gram.idf) for gram in grams) docs = dict( (doc.answer, _cosine_measure(idfs, self._get_tf_idfs(doc))) for doc in documents) docs = dict((key, val) for (key, val) in docs.items() if val) session.commit() try: max_ratio = max(docs.values()) answers = [answer for answer in docs.keys() if docs.get(answer) == max_ratio] answer = random.choice(answers) logger.debug('{0} -> {1} ({2})'.format(query, answer, max_ratio)) return (answer, max_ratio) except ValueError: raise NoAnswerError('Can not found answer') finally: session.commit()
python
def get_best_answer(self, query): """Get best answer to a question. :param query: A question to get an answer :type query: :class:`str` :returns: An answer to a question :rtype: :class:`str` :raises: :class:`NoAnswerError` when can not found answer to a question """ query = to_unicode(query) session = self.Session() grams = self._get_grams(session, query) if not grams: raise NoAnswerError('Can not found answer') documents = set([doc for gram in grams for doc in gram.documents]) self._recalc_idfs(session, grams) idfs = dict((gram.gram, gram.idf) for gram in grams) docs = dict( (doc.answer, _cosine_measure(idfs, self._get_tf_idfs(doc))) for doc in documents) docs = dict((key, val) for (key, val) in docs.items() if val) session.commit() try: max_ratio = max(docs.values()) answers = [answer for answer in docs.keys() if docs.get(answer) == max_ratio] answer = random.choice(answers) logger.debug('{0} -> {1} ({2})'.format(query, answer, max_ratio)) return (answer, max_ratio) except ValueError: raise NoAnswerError('Can not found answer') finally: session.commit()
[ "def", "get_best_answer", "(", "self", ",", "query", ")", ":", "query", "=", "to_unicode", "(", "query", ")", "session", "=", "self", ".", "Session", "(", ")", "grams", "=", "self", ".", "_get_grams", "(", "session", ",", "query", ")", "if", "not", "grams", ":", "raise", "NoAnswerError", "(", "'Can not found answer'", ")", "documents", "=", "set", "(", "[", "doc", "for", "gram", "in", "grams", "for", "doc", "in", "gram", ".", "documents", "]", ")", "self", ".", "_recalc_idfs", "(", "session", ",", "grams", ")", "idfs", "=", "dict", "(", "(", "gram", ".", "gram", ",", "gram", ".", "idf", ")", "for", "gram", "in", "grams", ")", "docs", "=", "dict", "(", "(", "doc", ".", "answer", ",", "_cosine_measure", "(", "idfs", ",", "self", ".", "_get_tf_idfs", "(", "doc", ")", ")", ")", "for", "doc", "in", "documents", ")", "docs", "=", "dict", "(", "(", "key", ",", "val", ")", "for", "(", "key", ",", "val", ")", "in", "docs", ".", "items", "(", ")", "if", "val", ")", "session", ".", "commit", "(", ")", "try", ":", "max_ratio", "=", "max", "(", "docs", ".", "values", "(", ")", ")", "answers", "=", "[", "answer", "for", "answer", "in", "docs", ".", "keys", "(", ")", "if", "docs", ".", "get", "(", "answer", ")", "==", "max_ratio", "]", "answer", "=", "random", ".", "choice", "(", "answers", ")", "logger", ".", "debug", "(", "'{0} -> {1} ({2})'", ".", "format", "(", "query", ",", "answer", ",", "max_ratio", ")", ")", "return", "(", "answer", ",", "max_ratio", ")", "except", "ValueError", ":", "raise", "NoAnswerError", "(", "'Can not found answer'", ")", "finally", ":", "session", ".", "commit", "(", ")" ]
Get best answer to a question. :param query: A question to get an answer :type query: :class:`str` :returns: An answer to a question :rtype: :class:`str` :raises: :class:`NoAnswerError` when can not found answer to a question
[ "Get", "best", "answer", "to", "a", "question", "." ]
c35b68ee1814916fbe9e5a5bd6ea6e75b3cc596e
https://github.com/Kjwon15/autotweet/blob/c35b68ee1814916fbe9e5a5bd6ea6e75b3cc596e/autotweet/learning.py#L60-L103
243,644
Kjwon15/autotweet
autotweet/learning.py
DataCollection.recreate_grams
def recreate_grams(self): """Re-create grams for database. In normal situations, you never need to call this method. But after migrate DB, this method is useful. :param session: DB session :type session: :class:`sqlalchemt.orm.Session` """ session = self.Session() for document in session.query(Document).all(): logger.info(document.text) grams = self._get_grams(session, document.text, make=True) document.grams = list(grams) broken_links = session.query(Gram) \ .filter(~Gram.documents.any()).all() for gram in broken_links: session.delete(gram) session.commit()
python
def recreate_grams(self): """Re-create grams for database. In normal situations, you never need to call this method. But after migrate DB, this method is useful. :param session: DB session :type session: :class:`sqlalchemt.orm.Session` """ session = self.Session() for document in session.query(Document).all(): logger.info(document.text) grams = self._get_grams(session, document.text, make=True) document.grams = list(grams) broken_links = session.query(Gram) \ .filter(~Gram.documents.any()).all() for gram in broken_links: session.delete(gram) session.commit()
[ "def", "recreate_grams", "(", "self", ")", ":", "session", "=", "self", ".", "Session", "(", ")", "for", "document", "in", "session", ".", "query", "(", "Document", ")", ".", "all", "(", ")", ":", "logger", ".", "info", "(", "document", ".", "text", ")", "grams", "=", "self", ".", "_get_grams", "(", "session", ",", "document", ".", "text", ",", "make", "=", "True", ")", "document", ".", "grams", "=", "list", "(", "grams", ")", "broken_links", "=", "session", ".", "query", "(", "Gram", ")", ".", "filter", "(", "~", "Gram", ".", "documents", ".", "any", "(", ")", ")", ".", "all", "(", ")", "for", "gram", "in", "broken_links", ":", "session", ".", "delete", "(", "gram", ")", "session", ".", "commit", "(", ")" ]
Re-create grams for database. In normal situations, you never need to call this method. But after migrate DB, this method is useful. :param session: DB session :type session: :class:`sqlalchemt.orm.Session`
[ "Re", "-", "create", "grams", "for", "database", "." ]
c35b68ee1814916fbe9e5a5bd6ea6e75b3cc596e
https://github.com/Kjwon15/autotweet/blob/c35b68ee1814916fbe9e5a5bd6ea6e75b3cc596e/autotweet/learning.py#L105-L128
243,645
Kjwon15/autotweet
autotweet/learning.py
DataCollection._recalc_idfs
def _recalc_idfs(self, session, grams=None): """Re-calculate idfs for database. calculating idfs for gram is taking long time. So I made it calculates idfs for some grams. If you want make accuracy higher, use this with grams=None. :param session: DB session :type session: :class:`sqlalchemt.orm.Session` :param grams: grams that you want to re-calculating idfs :type grams: A set of :class:`Gram` """ if not grams: grams = session.query(Gram) for gram in grams: orig_idf = gram.idf gram.idf = self._get_idf(session, gram) logger.debug('Recalculating {} {} -> {}'.format( gram.gram, orig_idf, gram.idf))
python
def _recalc_idfs(self, session, grams=None): """Re-calculate idfs for database. calculating idfs for gram is taking long time. So I made it calculates idfs for some grams. If you want make accuracy higher, use this with grams=None. :param session: DB session :type session: :class:`sqlalchemt.orm.Session` :param grams: grams that you want to re-calculating idfs :type grams: A set of :class:`Gram` """ if not grams: grams = session.query(Gram) for gram in grams: orig_idf = gram.idf gram.idf = self._get_idf(session, gram) logger.debug('Recalculating {} {} -> {}'.format( gram.gram, orig_idf, gram.idf))
[ "def", "_recalc_idfs", "(", "self", ",", "session", ",", "grams", "=", "None", ")", ":", "if", "not", "grams", ":", "grams", "=", "session", ".", "query", "(", "Gram", ")", "for", "gram", "in", "grams", ":", "orig_idf", "=", "gram", ".", "idf", "gram", ".", "idf", "=", "self", ".", "_get_idf", "(", "session", ",", "gram", ")", "logger", ".", "debug", "(", "'Recalculating {} {} -> {}'", ".", "format", "(", "gram", ".", "gram", ",", "orig_idf", ",", "gram", ".", "idf", ")", ")" ]
Re-calculate idfs for database. calculating idfs for gram is taking long time. So I made it calculates idfs for some grams. If you want make accuracy higher, use this with grams=None. :param session: DB session :type session: :class:`sqlalchemt.orm.Session` :param grams: grams that you want to re-calculating idfs :type grams: A set of :class:`Gram`
[ "Re", "-", "calculate", "idfs", "for", "database", "." ]
c35b68ee1814916fbe9e5a5bd6ea6e75b3cc596e
https://github.com/Kjwon15/autotweet/blob/c35b68ee1814916fbe9e5a5bd6ea6e75b3cc596e/autotweet/learning.py#L135-L156
243,646
maxfischer2781/chainlet
chainlet/genlink.py
GeneratorLink.throw
def throw(self, type, value=None, traceback=None): # pylint: disable=redefined-builtin """Raise an exception in this element""" return self.__wrapped__.throw(type, value, traceback)
python
def throw(self, type, value=None, traceback=None): # pylint: disable=redefined-builtin """Raise an exception in this element""" return self.__wrapped__.throw(type, value, traceback)
[ "def", "throw", "(", "self", ",", "type", ",", "value", "=", "None", ",", "traceback", "=", "None", ")", ":", "# pylint: disable=redefined-builtin", "return", "self", ".", "__wrapped__", ".", "throw", "(", "type", ",", "value", ",", "traceback", ")" ]
Raise an exception in this element
[ "Raise", "an", "exception", "in", "this", "element" ]
4e17f9992b4780bd0d9309202e2847df640bffe8
https://github.com/maxfischer2781/chainlet/blob/4e17f9992b4780bd0d9309202e2847df640bffe8/chainlet/genlink.py#L194-L196
243,647
maxfischer2781/chainlet
chainlet/protolink.py
enumeratelet
def enumeratelet(iterable=None, start=0): r""" Enumerate chunks of data from an iterable or a chain :param iterable: object supporting iteration, or an index :type iterable: iterable, None or int :param start: an index to start counting from :type start: int :raises TypeError: if both parameters are set and ``iterable`` does not support iteration In pull mode, :py:func:`~.enumeratelet` works similar to the builtin :py:func:`~.enumerate` but is chainable: .. code:: chain = enumeratelet(['Paul', 'Thomas', 'Brian']) >> printlet(sep=':\t') for value in chain: pass # prints `0: Paul`, `1: Thomas`, `2: Brian` By default, :py:func:`~.enumeratelet` enumerates chunks passed in from a pipeline. To use a different starting index, *either* set the ``start`` keyword parameter *or* set the first positional parameter. .. code:: chain = iteratelet(['Paul', 'Thomas', 'Brian']) >> enumeratelet() >> printlet(sep=':\t') for value in chain: pass # prints `0: Paul`, `1: Thomas`, `2: Brian` """ # shortcut directly to chain enumeration if iterable is None: return _enumeratelet(start=start) try: iterator = iter(iterable) except TypeError: if start != 0: raise # first arg is not iterable but start is explicitly set return _enumeratelet(start=iterable) # first arg is not iterable, try short notation else: return iterlet(enumerate(iterator, start=start))
python
def enumeratelet(iterable=None, start=0): r""" Enumerate chunks of data from an iterable or a chain :param iterable: object supporting iteration, or an index :type iterable: iterable, None or int :param start: an index to start counting from :type start: int :raises TypeError: if both parameters are set and ``iterable`` does not support iteration In pull mode, :py:func:`~.enumeratelet` works similar to the builtin :py:func:`~.enumerate` but is chainable: .. code:: chain = enumeratelet(['Paul', 'Thomas', 'Brian']) >> printlet(sep=':\t') for value in chain: pass # prints `0: Paul`, `1: Thomas`, `2: Brian` By default, :py:func:`~.enumeratelet` enumerates chunks passed in from a pipeline. To use a different starting index, *either* set the ``start`` keyword parameter *or* set the first positional parameter. .. code:: chain = iteratelet(['Paul', 'Thomas', 'Brian']) >> enumeratelet() >> printlet(sep=':\t') for value in chain: pass # prints `0: Paul`, `1: Thomas`, `2: Brian` """ # shortcut directly to chain enumeration if iterable is None: return _enumeratelet(start=start) try: iterator = iter(iterable) except TypeError: if start != 0: raise # first arg is not iterable but start is explicitly set return _enumeratelet(start=iterable) # first arg is not iterable, try short notation else: return iterlet(enumerate(iterator, start=start))
[ "def", "enumeratelet", "(", "iterable", "=", "None", ",", "start", "=", "0", ")", ":", "# shortcut directly to chain enumeration", "if", "iterable", "is", "None", ":", "return", "_enumeratelet", "(", "start", "=", "start", ")", "try", ":", "iterator", "=", "iter", "(", "iterable", ")", "except", "TypeError", ":", "if", "start", "!=", "0", ":", "raise", "# first arg is not iterable but start is explicitly set", "return", "_enumeratelet", "(", "start", "=", "iterable", ")", "# first arg is not iterable, try short notation", "else", ":", "return", "iterlet", "(", "enumerate", "(", "iterator", ",", "start", "=", "start", ")", ")" ]
r""" Enumerate chunks of data from an iterable or a chain :param iterable: object supporting iteration, or an index :type iterable: iterable, None or int :param start: an index to start counting from :type start: int :raises TypeError: if both parameters are set and ``iterable`` does not support iteration In pull mode, :py:func:`~.enumeratelet` works similar to the builtin :py:func:`~.enumerate` but is chainable: .. code:: chain = enumeratelet(['Paul', 'Thomas', 'Brian']) >> printlet(sep=':\t') for value in chain: pass # prints `0: Paul`, `1: Thomas`, `2: Brian` By default, :py:func:`~.enumeratelet` enumerates chunks passed in from a pipeline. To use a different starting index, *either* set the ``start`` keyword parameter *or* set the first positional parameter. .. code:: chain = iteratelet(['Paul', 'Thomas', 'Brian']) >> enumeratelet() >> printlet(sep=':\t') for value in chain: pass # prints `0: Paul`, `1: Thomas`, `2: Brian`
[ "r", "Enumerate", "chunks", "of", "data", "from", "an", "iterable", "or", "a", "chain" ]
4e17f9992b4780bd0d9309202e2847df640bffe8
https://github.com/maxfischer2781/chainlet/blob/4e17f9992b4780bd0d9309202e2847df640bffe8/chainlet/protolink.py#L75-L114
243,648
maxfischer2781/chainlet
chainlet/protolink.py
filterlet
def filterlet(function=bool, iterable=None): """ Filter chunks of data from an iterable or a chain :param function: callable selecting valid elements :type function: callable :param iterable: object providing chunks via iteration :type iterable: iterable or None For any chunk in ``iterable`` or the chain, it is passed on only if ``function(chunk)`` returns true. .. code:: chain = iterlet(range(10)) >> filterlet(lambda chunk: chunk % 2 == 0) for value in chain: print(value) # prints 0, 2, 4, 6, 8 """ if iterable is None: return _filterlet(function=function) else: return iterlet(elem for elem in iterable if function(elem))
python
def filterlet(function=bool, iterable=None): """ Filter chunks of data from an iterable or a chain :param function: callable selecting valid elements :type function: callable :param iterable: object providing chunks via iteration :type iterable: iterable or None For any chunk in ``iterable`` or the chain, it is passed on only if ``function(chunk)`` returns true. .. code:: chain = iterlet(range(10)) >> filterlet(lambda chunk: chunk % 2 == 0) for value in chain: print(value) # prints 0, 2, 4, 6, 8 """ if iterable is None: return _filterlet(function=function) else: return iterlet(elem for elem in iterable if function(elem))
[ "def", "filterlet", "(", "function", "=", "bool", ",", "iterable", "=", "None", ")", ":", "if", "iterable", "is", "None", ":", "return", "_filterlet", "(", "function", "=", "function", ")", "else", ":", "return", "iterlet", "(", "elem", "for", "elem", "in", "iterable", "if", "function", "(", "elem", ")", ")" ]
Filter chunks of data from an iterable or a chain :param function: callable selecting valid elements :type function: callable :param iterable: object providing chunks via iteration :type iterable: iterable or None For any chunk in ``iterable`` or the chain, it is passed on only if ``function(chunk)`` returns true. .. code:: chain = iterlet(range(10)) >> filterlet(lambda chunk: chunk % 2 == 0) for value in chain: print(value) # prints 0, 2, 4, 6, 8
[ "Filter", "chunks", "of", "data", "from", "an", "iterable", "or", "a", "chain" ]
4e17f9992b4780bd0d9309202e2847df640bffe8
https://github.com/maxfischer2781/chainlet/blob/4e17f9992b4780bd0d9309202e2847df640bffe8/chainlet/protolink.py#L126-L147
243,649
maxfischer2781/chainlet
chainlet/protolink.py
printlet
def printlet(flatten=False, **kwargs): """ Print chunks of data from a chain :param flatten: whether to flatten data chunks :param kwargs: keyword arguments as for :py:func:`print` If ``flatten`` is :py:const:`True`, every chunk received is unpacked. This is useful when passing around connected data, e.g. from :py:func:`~.enumeratelet`. Keyword arguments via ``kwargs`` are equivalent to those of :py:func:`print`. For example, passing ``file=sys.stderr`` is a simple way of creating a debugging element in a chain: .. code:: debug_chain = chain[:i] >> printlet(file=sys.stderr) >> chain[i:] """ chunk = yield if flatten: while True: print(*chunk, **kwargs) chunk = yield chunk else: while True: print(chunk, **kwargs) chunk = yield chunk
python
def printlet(flatten=False, **kwargs): """ Print chunks of data from a chain :param flatten: whether to flatten data chunks :param kwargs: keyword arguments as for :py:func:`print` If ``flatten`` is :py:const:`True`, every chunk received is unpacked. This is useful when passing around connected data, e.g. from :py:func:`~.enumeratelet`. Keyword arguments via ``kwargs`` are equivalent to those of :py:func:`print`. For example, passing ``file=sys.stderr`` is a simple way of creating a debugging element in a chain: .. code:: debug_chain = chain[:i] >> printlet(file=sys.stderr) >> chain[i:] """ chunk = yield if flatten: while True: print(*chunk, **kwargs) chunk = yield chunk else: while True: print(chunk, **kwargs) chunk = yield chunk
[ "def", "printlet", "(", "flatten", "=", "False", ",", "*", "*", "kwargs", ")", ":", "chunk", "=", "yield", "if", "flatten", ":", "while", "True", ":", "print", "(", "*", "chunk", ",", "*", "*", "kwargs", ")", "chunk", "=", "yield", "chunk", "else", ":", "while", "True", ":", "print", "(", "chunk", ",", "*", "*", "kwargs", ")", "chunk", "=", "yield", "chunk" ]
Print chunks of data from a chain :param flatten: whether to flatten data chunks :param kwargs: keyword arguments as for :py:func:`print` If ``flatten`` is :py:const:`True`, every chunk received is unpacked. This is useful when passing around connected data, e.g. from :py:func:`~.enumeratelet`. Keyword arguments via ``kwargs`` are equivalent to those of :py:func:`print`. For example, passing ``file=sys.stderr`` is a simple way of creating a debugging element in a chain: .. code:: debug_chain = chain[:i] >> printlet(file=sys.stderr) >> chain[i:]
[ "Print", "chunks", "of", "data", "from", "a", "chain" ]
4e17f9992b4780bd0d9309202e2847df640bffe8
https://github.com/maxfischer2781/chainlet/blob/4e17f9992b4780bd0d9309202e2847df640bffe8/chainlet/protolink.py#L158-L183
243,650
delfick/aws_syncr
aws_syncr/option_spec/lambdas.py
Lambdas.sync_one
def sync_one(self, aws_syncr, amazon, function): """Make sure this function exists and has only attributes we want it to have""" function_info = amazon.lambdas.function_info(function.name, function.location) if not function_info: amazon.lambdas.create_function(function.name, function.description, function.location, function.runtime, function.role, function.handler, function.timeout, function.memory_size, function.code) else: amazon.lambdas.modify_function(function_info, function.name, function.description, function.location, function.runtime, function.role, function.handler, function.timeout, function.memory_size, function.code)
python
def sync_one(self, aws_syncr, amazon, function): """Make sure this function exists and has only attributes we want it to have""" function_info = amazon.lambdas.function_info(function.name, function.location) if not function_info: amazon.lambdas.create_function(function.name, function.description, function.location, function.runtime, function.role, function.handler, function.timeout, function.memory_size, function.code) else: amazon.lambdas.modify_function(function_info, function.name, function.description, function.location, function.runtime, function.role, function.handler, function.timeout, function.memory_size, function.code)
[ "def", "sync_one", "(", "self", ",", "aws_syncr", ",", "amazon", ",", "function", ")", ":", "function_info", "=", "amazon", ".", "lambdas", ".", "function_info", "(", "function", ".", "name", ",", "function", ".", "location", ")", "if", "not", "function_info", ":", "amazon", ".", "lambdas", ".", "create_function", "(", "function", ".", "name", ",", "function", ".", "description", ",", "function", ".", "location", ",", "function", ".", "runtime", ",", "function", ".", "role", ",", "function", ".", "handler", ",", "function", ".", "timeout", ",", "function", ".", "memory_size", ",", "function", ".", "code", ")", "else", ":", "amazon", ".", "lambdas", ".", "modify_function", "(", "function_info", ",", "function", ".", "name", ",", "function", ".", "description", ",", "function", ".", "location", ",", "function", ".", "runtime", ",", "function", ".", "role", ",", "function", ".", "handler", ",", "function", ".", "timeout", ",", "function", ".", "memory_size", ",", "function", ".", "code", ")" ]
Make sure this function exists and has only attributes we want it to have
[ "Make", "sure", "this", "function", "exists", "and", "has", "only", "attributes", "we", "want", "it", "to", "have" ]
8cd214b27c1eee98dfba4632cbb8bc0ae36356bd
https://github.com/delfick/aws_syncr/blob/8cd214b27c1eee98dfba4632cbb8bc0ae36356bd/aws_syncr/option_spec/lambdas.py#L162-L168
243,651
mbodenhamer/syn
syn/base_utils/dict.py
AssocDict.update
def update(self, *args, **kwargs): '''Preserves order if given an assoc list. ''' arg = dict_arg(*args, **kwargs) if isinstance(arg, list): for key, val in arg: self[key] = val else: super(AssocDict, self).update(arg)
python
def update(self, *args, **kwargs): '''Preserves order if given an assoc list. ''' arg = dict_arg(*args, **kwargs) if isinstance(arg, list): for key, val in arg: self[key] = val else: super(AssocDict, self).update(arg)
[ "def", "update", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "arg", "=", "dict_arg", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "arg", ",", "list", ")", ":", "for", "key", ",", "val", "in", "arg", ":", "self", "[", "key", "]", "=", "val", "else", ":", "super", "(", "AssocDict", ",", "self", ")", ".", "update", "(", "arg", ")" ]
Preserves order if given an assoc list.
[ "Preserves", "order", "if", "given", "an", "assoc", "list", "." ]
aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258
https://github.com/mbodenhamer/syn/blob/aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258/syn/base_utils/dict.py#L207-L215
243,652
uw-it-aca/uw-restclients-mailman
uw_mailman/basic_list.py
_get_url_path
def _get_url_path(list_name): """ Live Dao requires RESTCLIENTS_MAILMAN_KEY in the settings.py """ access_key = getattr(settings, "RESTCLIENTS_MAILMAN_KEY", "__mock_key__") return URL.format(key=access_key, uwnetid=list_name)
python
def _get_url_path(list_name): """ Live Dao requires RESTCLIENTS_MAILMAN_KEY in the settings.py """ access_key = getattr(settings, "RESTCLIENTS_MAILMAN_KEY", "__mock_key__") return URL.format(key=access_key, uwnetid=list_name)
[ "def", "_get_url_path", "(", "list_name", ")", ":", "access_key", "=", "getattr", "(", "settings", ",", "\"RESTCLIENTS_MAILMAN_KEY\"", ",", "\"__mock_key__\"", ")", "return", "URL", ".", "format", "(", "key", "=", "access_key", ",", "uwnetid", "=", "list_name", ")" ]
Live Dao requires RESTCLIENTS_MAILMAN_KEY in the settings.py
[ "Live", "Dao", "requires", "RESTCLIENTS_MAILMAN_KEY", "in", "the", "settings", ".", "py" ]
ef077f2cc945871422fcd66391e82264e2384b2c
https://github.com/uw-it-aca/uw-restclients-mailman/blob/ef077f2cc945871422fcd66391e82264e2384b2c/uw_mailman/basic_list.py#L14-L21
243,653
quasipedia/swaggery
swaggery/api.py
operations
def operations(*operations): '''Decorator for marking Resource methods as HTTP operations. This decorator does a number of different things: - It transfer onto itself docstring and annotations from the decorated method, so as to be "transparent" with regards to introspection. - It tranform the method so as to make it a classmethod. - It invokes the method within a try-except condition, so as to intercept and populate the Fail(<code>) conditions.''' def decorator(method): def wrapper(cls, request, start_response, **kwargs): result_cache = [] try: yield from method(cls, request, **kwargs) except Respond as e: # Inject messages as taken from signature status = e.status msg = utils.parse_return_annotation(method)[status]['message'] if status / 100 == 2: # All 2xx HTTP codes e.description = msg raise e else: # HTTP Errors --> use werkzeug exceptions raise CODES_TO_EXCEPTIONS[status](msg) # Add operation-specific attributes to the method. method.swagger_ops = operations method.signature = inspect.signature(method) method.source = inspect.getsource(method) method.path_vars = utils.extract_pathvars(method) # "Backport" the method introspective attributes to the wrapper. wrapper.__name__ = method.__name__ wrapper.__doc__ = method.__doc__ wrapper.__annotations__ = method.__annotations__ wrapper.swagger_ops = method.swagger_ops wrapper.signature = method.signature wrapper.source = method.source wrapper.path_vars = method.path_vars return classmethod(wrapper) return decorator
python
def operations(*operations): '''Decorator for marking Resource methods as HTTP operations. This decorator does a number of different things: - It transfer onto itself docstring and annotations from the decorated method, so as to be "transparent" with regards to introspection. - It tranform the method so as to make it a classmethod. - It invokes the method within a try-except condition, so as to intercept and populate the Fail(<code>) conditions.''' def decorator(method): def wrapper(cls, request, start_response, **kwargs): result_cache = [] try: yield from method(cls, request, **kwargs) except Respond as e: # Inject messages as taken from signature status = e.status msg = utils.parse_return_annotation(method)[status]['message'] if status / 100 == 2: # All 2xx HTTP codes e.description = msg raise e else: # HTTP Errors --> use werkzeug exceptions raise CODES_TO_EXCEPTIONS[status](msg) # Add operation-specific attributes to the method. method.swagger_ops = operations method.signature = inspect.signature(method) method.source = inspect.getsource(method) method.path_vars = utils.extract_pathvars(method) # "Backport" the method introspective attributes to the wrapper. wrapper.__name__ = method.__name__ wrapper.__doc__ = method.__doc__ wrapper.__annotations__ = method.__annotations__ wrapper.swagger_ops = method.swagger_ops wrapper.signature = method.signature wrapper.source = method.source wrapper.path_vars = method.path_vars return classmethod(wrapper) return decorator
[ "def", "operations", "(", "*", "operations", ")", ":", "def", "decorator", "(", "method", ")", ":", "def", "wrapper", "(", "cls", ",", "request", ",", "start_response", ",", "*", "*", "kwargs", ")", ":", "result_cache", "=", "[", "]", "try", ":", "yield", "from", "method", "(", "cls", ",", "request", ",", "*", "*", "kwargs", ")", "except", "Respond", "as", "e", ":", "# Inject messages as taken from signature", "status", "=", "e", ".", "status", "msg", "=", "utils", ".", "parse_return_annotation", "(", "method", ")", "[", "status", "]", "[", "'message'", "]", "if", "status", "/", "100", "==", "2", ":", "# All 2xx HTTP codes", "e", ".", "description", "=", "msg", "raise", "e", "else", ":", "# HTTP Errors --> use werkzeug exceptions", "raise", "CODES_TO_EXCEPTIONS", "[", "status", "]", "(", "msg", ")", "# Add operation-specific attributes to the method.", "method", ".", "swagger_ops", "=", "operations", "method", ".", "signature", "=", "inspect", ".", "signature", "(", "method", ")", "method", ".", "source", "=", "inspect", ".", "getsource", "(", "method", ")", "method", ".", "path_vars", "=", "utils", ".", "extract_pathvars", "(", "method", ")", "# \"Backport\" the method introspective attributes to the wrapper.", "wrapper", ".", "__name__", "=", "method", ".", "__name__", "wrapper", ".", "__doc__", "=", "method", ".", "__doc__", "wrapper", ".", "__annotations__", "=", "method", ".", "__annotations__", "wrapper", ".", "swagger_ops", "=", "method", ".", "swagger_ops", "wrapper", ".", "signature", "=", "method", ".", "signature", "wrapper", ".", "source", "=", "method", ".", "source", "wrapper", ".", "path_vars", "=", "method", ".", "path_vars", "return", "classmethod", "(", "wrapper", ")", "return", "decorator" ]
Decorator for marking Resource methods as HTTP operations. This decorator does a number of different things: - It transfer onto itself docstring and annotations from the decorated method, so as to be "transparent" with regards to introspection. - It tranform the method so as to make it a classmethod. - It invokes the method within a try-except condition, so as to intercept and populate the Fail(<code>) conditions.
[ "Decorator", "for", "marking", "Resource", "methods", "as", "HTTP", "operations", "." ]
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/api.py#L197-L234
243,654
quasipedia/swaggery
swaggery/api.py
Resource.parse_signature
def parse_signature(cls, function): '''Parses the signature of a method and its annotations to swagger. Return a dictionary {arg_name: info}. ''' annotations = function.__annotations__.copy() del annotations['return'] result = [] for param_name, (param_type, param_obj) in annotations.items(): sig_param = function.signature.parameters[param_name] param_description = { 'paramType': param_type, 'name': param_name, 'required': sig_param.default is inspect.Parameter.empty} param_description.update(param_obj.describe()) result.append(param_description) return result
python
def parse_signature(cls, function): '''Parses the signature of a method and its annotations to swagger. Return a dictionary {arg_name: info}. ''' annotations = function.__annotations__.copy() del annotations['return'] result = [] for param_name, (param_type, param_obj) in annotations.items(): sig_param = function.signature.parameters[param_name] param_description = { 'paramType': param_type, 'name': param_name, 'required': sig_param.default is inspect.Parameter.empty} param_description.update(param_obj.describe()) result.append(param_description) return result
[ "def", "parse_signature", "(", "cls", ",", "function", ")", ":", "annotations", "=", "function", ".", "__annotations__", ".", "copy", "(", ")", "del", "annotations", "[", "'return'", "]", "result", "=", "[", "]", "for", "param_name", ",", "(", "param_type", ",", "param_obj", ")", "in", "annotations", ".", "items", "(", ")", ":", "sig_param", "=", "function", ".", "signature", ".", "parameters", "[", "param_name", "]", "param_description", "=", "{", "'paramType'", ":", "param_type", ",", "'name'", ":", "param_name", ",", "'required'", ":", "sig_param", ".", "default", "is", "inspect", ".", "Parameter", ".", "empty", "}", "param_description", ".", "update", "(", "param_obj", ".", "describe", "(", ")", ")", "result", ".", "append", "(", "param_description", ")", "return", "result" ]
Parses the signature of a method and its annotations to swagger. Return a dictionary {arg_name: info}.
[ "Parses", "the", "signature", "of", "a", "method", "and", "its", "annotations", "to", "swagger", "." ]
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/api.py#L91-L107
243,655
quasipedia/swaggery
swaggery/api.py
Resource.get_swagger_fragment
def get_swagger_fragment(cls): '''Return the swagger-formatted fragment for the Resource Listing.''' if cls.__swagger_fragment: return cls.__swagger_fragment cls.__swagger_fragment = { 'path': cls.endpoint_path.replace('<', '{').replace('>', '}'), 'description': cls.description, 'operations': cls.get_resource_operations(), } return cls.__swagger_fragment
python
def get_swagger_fragment(cls): '''Return the swagger-formatted fragment for the Resource Listing.''' if cls.__swagger_fragment: return cls.__swagger_fragment cls.__swagger_fragment = { 'path': cls.endpoint_path.replace('<', '{').replace('>', '}'), 'description': cls.description, 'operations': cls.get_resource_operations(), } return cls.__swagger_fragment
[ "def", "get_swagger_fragment", "(", "cls", ")", ":", "if", "cls", ".", "__swagger_fragment", ":", "return", "cls", ".", "__swagger_fragment", "cls", ".", "__swagger_fragment", "=", "{", "'path'", ":", "cls", ".", "endpoint_path", ".", "replace", "(", "'<'", ",", "'{'", ")", ".", "replace", "(", "'>'", ",", "'}'", ")", ",", "'description'", ":", "cls", ".", "description", ",", "'operations'", ":", "cls", ".", "get_resource_operations", "(", ")", ",", "}", "return", "cls", ".", "__swagger_fragment" ]
Return the swagger-formatted fragment for the Resource Listing.
[ "Return", "the", "swagger", "-", "formatted", "fragment", "for", "the", "Resource", "Listing", "." ]
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/api.py#L110-L119
243,656
quasipedia/swaggery
swaggery/api.py
Resource.get_resource_operations
def get_resource_operations(cls): '''Return the swagger-formatted method descriptions''' operations = [] for http, callback in cls.implemented_methods.items(): # Parse docstring summary, notes = utils.parse_docstring(callback) # Parse return annotations responses = utils.parse_return_annotation(callback) ok_result_model = responses[200]['responseModel'] operations.append({ 'method': http, 'nickname': callback.__name__, 'type': ok_result_model, 'parameters': cls.parse_signature(callback), 'summary': summary.strip(), 'notes': notes.strip(), 'responseMessages': list(responses.values()) }) return operations
python
def get_resource_operations(cls): '''Return the swagger-formatted method descriptions''' operations = [] for http, callback in cls.implemented_methods.items(): # Parse docstring summary, notes = utils.parse_docstring(callback) # Parse return annotations responses = utils.parse_return_annotation(callback) ok_result_model = responses[200]['responseModel'] operations.append({ 'method': http, 'nickname': callback.__name__, 'type': ok_result_model, 'parameters': cls.parse_signature(callback), 'summary': summary.strip(), 'notes': notes.strip(), 'responseMessages': list(responses.values()) }) return operations
[ "def", "get_resource_operations", "(", "cls", ")", ":", "operations", "=", "[", "]", "for", "http", ",", "callback", "in", "cls", ".", "implemented_methods", ".", "items", "(", ")", ":", "# Parse docstring", "summary", ",", "notes", "=", "utils", ".", "parse_docstring", "(", "callback", ")", "# Parse return annotations", "responses", "=", "utils", ".", "parse_return_annotation", "(", "callback", ")", "ok_result_model", "=", "responses", "[", "200", "]", "[", "'responseModel'", "]", "operations", ".", "append", "(", "{", "'method'", ":", "http", ",", "'nickname'", ":", "callback", ".", "__name__", ",", "'type'", ":", "ok_result_model", ",", "'parameters'", ":", "cls", ".", "parse_signature", "(", "callback", ")", ",", "'summary'", ":", "summary", ".", "strip", "(", ")", ",", "'notes'", ":", "notes", ".", "strip", "(", ")", ",", "'responseMessages'", ":", "list", "(", "responses", ".", "values", "(", ")", ")", "}", ")", "return", "operations" ]
Return the swagger-formatted method descriptions
[ "Return", "the", "swagger", "-", "formatted", "method", "descriptions" ]
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/api.py#L122-L140
243,657
quasipedia/swaggery
swaggery/api.py
Resource.callbacks
def callbacks(cls): '''Return all the methods that are actually a request callback.''' if cls.__callbacks is not None: return cls.__callbacks cls.__callbacks = [] for mname in dir(cls): # Avoid recursion by excluding all methods of this prototype class if mname in dir(Resource): continue callback = getattr(cls, mname) if not hasattr(callback, 'swagger_ops'): continue cls.__callbacks.append(callback) return cls.__callbacks
python
def callbacks(cls): '''Return all the methods that are actually a request callback.''' if cls.__callbacks is not None: return cls.__callbacks cls.__callbacks = [] for mname in dir(cls): # Avoid recursion by excluding all methods of this prototype class if mname in dir(Resource): continue callback = getattr(cls, mname) if not hasattr(callback, 'swagger_ops'): continue cls.__callbacks.append(callback) return cls.__callbacks
[ "def", "callbacks", "(", "cls", ")", ":", "if", "cls", ".", "__callbacks", "is", "not", "None", ":", "return", "cls", ".", "__callbacks", "cls", ".", "__callbacks", "=", "[", "]", "for", "mname", "in", "dir", "(", "cls", ")", ":", "# Avoid recursion by excluding all methods of this prototype class", "if", "mname", "in", "dir", "(", "Resource", ")", ":", "continue", "callback", "=", "getattr", "(", "cls", ",", "mname", ")", "if", "not", "hasattr", "(", "callback", ",", "'swagger_ops'", ")", ":", "continue", "cls", ".", "__callbacks", ".", "append", "(", "callback", ")", "return", "cls", ".", "__callbacks" ]
Return all the methods that are actually a request callback.
[ "Return", "all", "the", "methods", "that", "are", "actually", "a", "request", "callback", "." ]
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/api.py#L160-L173
243,658
quasipedia/swaggery
swaggery/api.py
Resource.implemented_methods
def implemented_methods(cls): '''Return a mapping of implemented HTTP methods vs. their callbacks.''' if cls.__implemented_methods: return cls.__implemented_methods cls.__implemented_methods = {} for method in cls.callbacks: for op in getattr(method, 'swagger_ops'): cls.__implemented_methods[op] = method return cls.__implemented_methods
python
def implemented_methods(cls): '''Return a mapping of implemented HTTP methods vs. their callbacks.''' if cls.__implemented_methods: return cls.__implemented_methods cls.__implemented_methods = {} for method in cls.callbacks: for op in getattr(method, 'swagger_ops'): cls.__implemented_methods[op] = method return cls.__implemented_methods
[ "def", "implemented_methods", "(", "cls", ")", ":", "if", "cls", ".", "__implemented_methods", ":", "return", "cls", ".", "__implemented_methods", "cls", ".", "__implemented_methods", "=", "{", "}", "for", "method", "in", "cls", ".", "callbacks", ":", "for", "op", "in", "getattr", "(", "method", ",", "'swagger_ops'", ")", ":", "cls", ".", "__implemented_methods", "[", "op", "]", "=", "method", "return", "cls", ".", "__implemented_methods" ]
Return a mapping of implemented HTTP methods vs. their callbacks.
[ "Return", "a", "mapping", "of", "implemented", "HTTP", "methods", "vs", ".", "their", "callbacks", "." ]
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/swaggery/api.py#L186-L194
243,659
radjkarl/fancyTools
fancytools/fit/fit2dArrayToFn.py
fit2dArrayToFn
def fit2dArrayToFn(arr, fn, mask=None, down_scale_factor=None, output_shape=None, guess=None, outgrid=None): """Fit a 2d array to a 2d function USE ONLY MASKED VALUES * [down_scale_factor] map to speed up fitting procedure, set value smaller than 1 * [output_shape] shape of the output array * [guess] must be scaled using [scale_factor] Returns: Fitted map, fitting params (scaled), error """ if mask is None: #assert outgrid is not None mask = np.ones(shape=arr.shape, dtype=bool) if down_scale_factor is None: if mask.sum() > 1000: down_scale_factor = 0.3 else: down_scale_factor = 1 if down_scale_factor != 1: # SCALE TO DECREASE AMOUNT OF POINTS TO FIT: arr2 = zoom(arr, down_scale_factor) mask = zoom(mask, down_scale_factor, output=bool) else: arr2 = arr # USE ONLY VALID POINTS: x, y = np.where(mask) z = arr2[mask] # FIT: parameters, cov_matrix = curve_fit(fn, (x, y), z, p0=guess) # ERROR: perr = np.sqrt(np.diag(cov_matrix)) if outgrid is not None: yy,xx = outgrid rebuilt = fn((yy,xx), *parameters) else: if output_shape is None: output_shape = arr.shape fx = arr2.shape[0] / output_shape[0] fy = arr2.shape[1] / output_shape[1] rebuilt = np.fromfunction(lambda x, y: fn((x * fx, y * fy), *parameters), output_shape) return rebuilt, parameters, perr
python
def fit2dArrayToFn(arr, fn, mask=None, down_scale_factor=None, output_shape=None, guess=None, outgrid=None): """Fit a 2d array to a 2d function USE ONLY MASKED VALUES * [down_scale_factor] map to speed up fitting procedure, set value smaller than 1 * [output_shape] shape of the output array * [guess] must be scaled using [scale_factor] Returns: Fitted map, fitting params (scaled), error """ if mask is None: #assert outgrid is not None mask = np.ones(shape=arr.shape, dtype=bool) if down_scale_factor is None: if mask.sum() > 1000: down_scale_factor = 0.3 else: down_scale_factor = 1 if down_scale_factor != 1: # SCALE TO DECREASE AMOUNT OF POINTS TO FIT: arr2 = zoom(arr, down_scale_factor) mask = zoom(mask, down_scale_factor, output=bool) else: arr2 = arr # USE ONLY VALID POINTS: x, y = np.where(mask) z = arr2[mask] # FIT: parameters, cov_matrix = curve_fit(fn, (x, y), z, p0=guess) # ERROR: perr = np.sqrt(np.diag(cov_matrix)) if outgrid is not None: yy,xx = outgrid rebuilt = fn((yy,xx), *parameters) else: if output_shape is None: output_shape = arr.shape fx = arr2.shape[0] / output_shape[0] fy = arr2.shape[1] / output_shape[1] rebuilt = np.fromfunction(lambda x, y: fn((x * fx, y * fy), *parameters), output_shape) return rebuilt, parameters, perr
[ "def", "fit2dArrayToFn", "(", "arr", ",", "fn", ",", "mask", "=", "None", ",", "down_scale_factor", "=", "None", ",", "output_shape", "=", "None", ",", "guess", "=", "None", ",", "outgrid", "=", "None", ")", ":", "if", "mask", "is", "None", ":", "#assert outgrid is not None", "mask", "=", "np", ".", "ones", "(", "shape", "=", "arr", ".", "shape", ",", "dtype", "=", "bool", ")", "if", "down_scale_factor", "is", "None", ":", "if", "mask", ".", "sum", "(", ")", ">", "1000", ":", "down_scale_factor", "=", "0.3", "else", ":", "down_scale_factor", "=", "1", "if", "down_scale_factor", "!=", "1", ":", "# SCALE TO DECREASE AMOUNT OF POINTS TO FIT:", "arr2", "=", "zoom", "(", "arr", ",", "down_scale_factor", ")", "mask", "=", "zoom", "(", "mask", ",", "down_scale_factor", ",", "output", "=", "bool", ")", "else", ":", "arr2", "=", "arr", "# USE ONLY VALID POINTS:", "x", ",", "y", "=", "np", ".", "where", "(", "mask", ")", "z", "=", "arr2", "[", "mask", "]", "# FIT:", "parameters", ",", "cov_matrix", "=", "curve_fit", "(", "fn", ",", "(", "x", ",", "y", ")", ",", "z", ",", "p0", "=", "guess", ")", "# ERROR:", "perr", "=", "np", ".", "sqrt", "(", "np", ".", "diag", "(", "cov_matrix", ")", ")", "if", "outgrid", "is", "not", "None", ":", "yy", ",", "xx", "=", "outgrid", "rebuilt", "=", "fn", "(", "(", "yy", ",", "xx", ")", ",", "*", "parameters", ")", "else", ":", "if", "output_shape", "is", "None", ":", "output_shape", "=", "arr", ".", "shape", "fx", "=", "arr2", ".", "shape", "[", "0", "]", "/", "output_shape", "[", "0", "]", "fy", "=", "arr2", ".", "shape", "[", "1", "]", "/", "output_shape", "[", "1", "]", "rebuilt", "=", "np", ".", "fromfunction", "(", "lambda", "x", ",", "y", ":", "fn", "(", "(", "x", "*", "fx", ",", "y", "*", "fy", ")", ",", "*", "parameters", ")", ",", "output_shape", ")", "return", "rebuilt", ",", "parameters", ",", "perr" ]
Fit a 2d array to a 2d function USE ONLY MASKED VALUES * [down_scale_factor] map to speed up fitting procedure, set value smaller than 1 * [output_shape] shape of the output array * [guess] must be scaled using [scale_factor] Returns: Fitted map, fitting params (scaled), error
[ "Fit", "a", "2d", "array", "to", "a", "2d", "function" ]
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/fit/fit2dArrayToFn.py#L9-L60
243,660
sassoo/goldman
goldman/middleware/bearer_token/__init__.py
Middleware._validate_auth_scheme
def _validate_auth_scheme(self, req): """ Check if the request has auth & the proper scheme Remember NOT to include the error related info in the WWW-Authenticate header for these conditions. :raise: AuthRequired """ if not req.auth: raise AuthRequired(**{ 'detail': 'You must first login to access the requested ' 'resource(s). Please retry your request using ' 'OAuth 2.0 Bearer Token Authentication as ' 'documented in RFC 6750. If you do not have an ' 'access_token then request one at the token ' 'endpdoint of: %s' % self.token_endpoint, 'headers': self._error_headers, 'links': 'tools.ietf.org/html/rfc6750#section-2.1', }) elif req.auth_scheme != 'bearer': raise AuthRequired(**{ 'detail': 'Your Authorization header is using an unsupported ' 'authentication scheme. Please modify your scheme ' 'to be a string of: "Bearer".', 'headers': self._error_headers, 'links': 'tools.ietf.org/html/rfc6750#section-2.1', })
python
def _validate_auth_scheme(self, req): """ Check if the request has auth & the proper scheme Remember NOT to include the error related info in the WWW-Authenticate header for these conditions. :raise: AuthRequired """ if not req.auth: raise AuthRequired(**{ 'detail': 'You must first login to access the requested ' 'resource(s). Please retry your request using ' 'OAuth 2.0 Bearer Token Authentication as ' 'documented in RFC 6750. If you do not have an ' 'access_token then request one at the token ' 'endpdoint of: %s' % self.token_endpoint, 'headers': self._error_headers, 'links': 'tools.ietf.org/html/rfc6750#section-2.1', }) elif req.auth_scheme != 'bearer': raise AuthRequired(**{ 'detail': 'Your Authorization header is using an unsupported ' 'authentication scheme. Please modify your scheme ' 'to be a string of: "Bearer".', 'headers': self._error_headers, 'links': 'tools.ietf.org/html/rfc6750#section-2.1', })
[ "def", "_validate_auth_scheme", "(", "self", ",", "req", ")", ":", "if", "not", "req", ".", "auth", ":", "raise", "AuthRequired", "(", "*", "*", "{", "'detail'", ":", "'You must first login to access the requested '", "'resource(s). Please retry your request using '", "'OAuth 2.0 Bearer Token Authentication as '", "'documented in RFC 6750. If you do not have an '", "'access_token then request one at the token '", "'endpdoint of: %s'", "%", "self", ".", "token_endpoint", ",", "'headers'", ":", "self", ".", "_error_headers", ",", "'links'", ":", "'tools.ietf.org/html/rfc6750#section-2.1'", ",", "}", ")", "elif", "req", ".", "auth_scheme", "!=", "'bearer'", ":", "raise", "AuthRequired", "(", "*", "*", "{", "'detail'", ":", "'Your Authorization header is using an unsupported '", "'authentication scheme. Please modify your scheme '", "'to be a string of: \"Bearer\".'", ",", "'headers'", ":", "self", ".", "_error_headers", ",", "'links'", ":", "'tools.ietf.org/html/rfc6750#section-2.1'", ",", "}", ")" ]
Check if the request has auth & the proper scheme Remember NOT to include the error related info in the WWW-Authenticate header for these conditions. :raise: AuthRequired
[ "Check", "if", "the", "request", "has", "auth", "&", "the", "proper", "scheme" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/middleware/bearer_token/__init__.py#L93-L121
243,661
sassoo/goldman
goldman/middleware/bearer_token/__init__.py
Middleware._get_token
def _get_token(self, req): """ Get the token from the Authorization header If the header is actually malformed where Bearer Auth was indicated by the request then an InvalidAuthSyntax exception is raised. Otherwise an AuthRequired exception since it's unclear in this scenario if the requestor was even aware Authentication was required & if so which "scheme". Calls _validate_auth_scheme first & bubbles up it's exceptions. :return: string token :raise: AuthRequired, InvalidAuthSyntax """ self._validate_auth_scheme(req) try: return naked(req.auth.split(' ')[1]) except IndexError: desc = 'You are using the Bearer Authentication scheme as ' \ 'required to login but your Authorization header is ' \ 'completely missing the access_token.' raise InvalidAuthSyntax(**{ 'detail': desc, 'headers': self._get_invalid_token_headers(desc), 'links': 'tools.ietf.org/html/rfc6750#section-2.1', })
python
def _get_token(self, req): """ Get the token from the Authorization header If the header is actually malformed where Bearer Auth was indicated by the request then an InvalidAuthSyntax exception is raised. Otherwise an AuthRequired exception since it's unclear in this scenario if the requestor was even aware Authentication was required & if so which "scheme". Calls _validate_auth_scheme first & bubbles up it's exceptions. :return: string token :raise: AuthRequired, InvalidAuthSyntax """ self._validate_auth_scheme(req) try: return naked(req.auth.split(' ')[1]) except IndexError: desc = 'You are using the Bearer Authentication scheme as ' \ 'required to login but your Authorization header is ' \ 'completely missing the access_token.' raise InvalidAuthSyntax(**{ 'detail': desc, 'headers': self._get_invalid_token_headers(desc), 'links': 'tools.ietf.org/html/rfc6750#section-2.1', })
[ "def", "_get_token", "(", "self", ",", "req", ")", ":", "self", ".", "_validate_auth_scheme", "(", "req", ")", "try", ":", "return", "naked", "(", "req", ".", "auth", ".", "split", "(", "' '", ")", "[", "1", "]", ")", "except", "IndexError", ":", "desc", "=", "'You are using the Bearer Authentication scheme as '", "'required to login but your Authorization header is '", "'completely missing the access_token.'", "raise", "InvalidAuthSyntax", "(", "*", "*", "{", "'detail'", ":", "desc", ",", "'headers'", ":", "self", ".", "_get_invalid_token_headers", "(", "desc", ")", ",", "'links'", ":", "'tools.ietf.org/html/rfc6750#section-2.1'", ",", "}", ")" ]
Get the token from the Authorization header If the header is actually malformed where Bearer Auth was indicated by the request then an InvalidAuthSyntax exception is raised. Otherwise an AuthRequired exception since it's unclear in this scenario if the requestor was even aware Authentication was required & if so which "scheme". Calls _validate_auth_scheme first & bubbles up it's exceptions. :return: string token :raise: AuthRequired, InvalidAuthSyntax
[ "Get", "the", "token", "from", "the", "Authorization", "header" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/middleware/bearer_token/__init__.py#L123-L154
243,662
voidpp/python-tools
voidpp_tools/job_delayer.py
JobDelayer.start
def start(self): """Starts the delayed execution""" if self._timer: self._timer.cancel() self._timer = Timer(self._timeout, self._fire) self._timer.start()
python
def start(self): """Starts the delayed execution""" if self._timer: self._timer.cancel() self._timer = Timer(self._timeout, self._fire) self._timer.start()
[ "def", "start", "(", "self", ")", ":", "if", "self", ".", "_timer", ":", "self", ".", "_timer", ".", "cancel", "(", ")", "self", ".", "_timer", "=", "Timer", "(", "self", ".", "_timeout", ",", "self", ".", "_fire", ")", "self", ".", "_timer", ".", "start", "(", ")" ]
Starts the delayed execution
[ "Starts", "the", "delayed", "execution" ]
0fc7460c827b02d8914411cedddadc23ccb3cc73
https://github.com/voidpp/python-tools/blob/0fc7460c827b02d8914411cedddadc23ccb3cc73/voidpp_tools/job_delayer.py#L25-L32
243,663
todddeluca/temps
temps.py
tmpfile
def tmpfile(root=TEMPS_DIR, prefix=TEMPS_PREFIX, suffix=TEMPS_SUFFIX): ''' For use in a with statement, this function returns a context manager that yields a path directly under root guaranteed to be unique by using the uuid module. This path is not created. However if the path is an existing file when the with statement is exited, the file will be removed. This function is useful if you want to use a file temporarily but do not want to write boilerplate to make sure it is removed when you are done with it. Example: with temps.tmpfile() as path: do_stuff(path) ''' path = tmppath(root, prefix, suffix) try: yield path finally: if os.path.isfile(path): # try to delete the file os.unlink(path)
python
def tmpfile(root=TEMPS_DIR, prefix=TEMPS_PREFIX, suffix=TEMPS_SUFFIX): ''' For use in a with statement, this function returns a context manager that yields a path directly under root guaranteed to be unique by using the uuid module. This path is not created. However if the path is an existing file when the with statement is exited, the file will be removed. This function is useful if you want to use a file temporarily but do not want to write boilerplate to make sure it is removed when you are done with it. Example: with temps.tmpfile() as path: do_stuff(path) ''' path = tmppath(root, prefix, suffix) try: yield path finally: if os.path.isfile(path): # try to delete the file os.unlink(path)
[ "def", "tmpfile", "(", "root", "=", "TEMPS_DIR", ",", "prefix", "=", "TEMPS_PREFIX", ",", "suffix", "=", "TEMPS_SUFFIX", ")", ":", "path", "=", "tmppath", "(", "root", ",", "prefix", ",", "suffix", ")", "try", ":", "yield", "path", "finally", ":", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "# try to delete the file", "os", ".", "unlink", "(", "path", ")" ]
For use in a with statement, this function returns a context manager that yields a path directly under root guaranteed to be unique by using the uuid module. This path is not created. However if the path is an existing file when the with statement is exited, the file will be removed. This function is useful if you want to use a file temporarily but do not want to write boilerplate to make sure it is removed when you are done with it. Example: with temps.tmpfile() as path: do_stuff(path)
[ "For", "use", "in", "a", "with", "statement", "this", "function", "returns", "a", "context", "manager", "that", "yields", "a", "path", "directly", "under", "root", "guaranteed", "to", "be", "unique", "by", "using", "the", "uuid", "module", ".", "This", "path", "is", "not", "created", ".", "However", "if", "the", "path", "is", "an", "existing", "file", "when", "the", "with", "statement", "is", "exited", "the", "file", "will", "be", "removed", "." ]
10bf4e71a6b2e8ad10fa8a272145968b6c84f61b
https://github.com/todddeluca/temps/blob/10bf4e71a6b2e8ad10fa8a272145968b6c84f61b/temps.py#L88-L110
243,664
todddeluca/temps
temps.py
tmppath
def tmppath(root=TEMPS_DIR, prefix=TEMPS_PREFIX, suffix=TEMPS_SUFFIX): ''' Returns a path directly under root that is guaranteed to be unique by using the uuid module. ''' return os.path.join(root, prefix + uuid.uuid4().hex + suffix)
python
def tmppath(root=TEMPS_DIR, prefix=TEMPS_PREFIX, suffix=TEMPS_SUFFIX): ''' Returns a path directly under root that is guaranteed to be unique by using the uuid module. ''' return os.path.join(root, prefix + uuid.uuid4().hex + suffix)
[ "def", "tmppath", "(", "root", "=", "TEMPS_DIR", ",", "prefix", "=", "TEMPS_PREFIX", ",", "suffix", "=", "TEMPS_SUFFIX", ")", ":", "return", "os", ".", "path", ".", "join", "(", "root", ",", "prefix", "+", "uuid", ".", "uuid4", "(", ")", ".", "hex", "+", "suffix", ")" ]
Returns a path directly under root that is guaranteed to be unique by using the uuid module.
[ "Returns", "a", "path", "directly", "under", "root", "that", "is", "guaranteed", "to", "be", "unique", "by", "using", "the", "uuid", "module", "." ]
10bf4e71a6b2e8ad10fa8a272145968b6c84f61b
https://github.com/todddeluca/temps/blob/10bf4e71a6b2e8ad10fa8a272145968b6c84f61b/temps.py#L149-L154
243,665
TC01/calcpkg
calcrepo/repos/cemetech.py
CemetechRepository.getComplexFileData
def getComplexFileData(self, fileInfo, data): """Function to initialize the slightly more complicated data for file info""" result = fileInfo[fileInfo.find(data + "</td>") + len(data + "</td>"):] result = result[:result.find("</td>")] result = result[result.rfind(">") + 1:] return result
python
def getComplexFileData(self, fileInfo, data): """Function to initialize the slightly more complicated data for file info""" result = fileInfo[fileInfo.find(data + "</td>") + len(data + "</td>"):] result = result[:result.find("</td>")] result = result[result.rfind(">") + 1:] return result
[ "def", "getComplexFileData", "(", "self", ",", "fileInfo", ",", "data", ")", ":", "result", "=", "fileInfo", "[", "fileInfo", ".", "find", "(", "data", "+", "\"</td>\"", ")", "+", "len", "(", "data", "+", "\"</td>\"", ")", ":", "]", "result", "=", "result", "[", ":", "result", ".", "find", "(", "\"</td>\"", ")", "]", "result", "=", "result", "[", "result", ".", "rfind", "(", "\">\"", ")", "+", "1", ":", "]", "return", "result" ]
Function to initialize the slightly more complicated data for file info
[ "Function", "to", "initialize", "the", "slightly", "more", "complicated", "data", "for", "file", "info" ]
5168f606264620a090b42a64354331d208b00d5f
https://github.com/TC01/calcpkg/blob/5168f606264620a090b42a64354331d208b00d5f/calcrepo/repos/cemetech.py#L100-L105
243,666
TC01/calcpkg
calcrepo/repos/cemetech.py
CemetechRepository.getFileDescription
def getFileDescription(self, fileInfo): """Function to get the description of a file.""" data = 'Description' result = fileInfo[fileInfo.find(data + "</td>") + len(data + "</td>"):] result.lstrip() result = result[:result.find("</td>")] result = result[result.rfind("<"):] if "<td" in result: result = result[result.find(">") + 1:] return result
python
def getFileDescription(self, fileInfo): """Function to get the description of a file.""" data = 'Description' result = fileInfo[fileInfo.find(data + "</td>") + len(data + "</td>"):] result.lstrip() result = result[:result.find("</td>")] result = result[result.rfind("<"):] if "<td" in result: result = result[result.find(">") + 1:] return result
[ "def", "getFileDescription", "(", "self", ",", "fileInfo", ")", ":", "data", "=", "'Description'", "result", "=", "fileInfo", "[", "fileInfo", ".", "find", "(", "data", "+", "\"</td>\"", ")", "+", "len", "(", "data", "+", "\"</td>\"", ")", ":", "]", "result", ".", "lstrip", "(", ")", "result", "=", "result", "[", ":", "result", ".", "find", "(", "\"</td>\"", ")", "]", "result", "=", "result", "[", "result", ".", "rfind", "(", "\"<\"", ")", ":", "]", "if", "\"<td\"", "in", "result", ":", "result", "=", "result", "[", "result", ".", "find", "(", "\">\"", ")", "+", "1", ":", "]", "return", "result" ]
Function to get the description of a file.
[ "Function", "to", "get", "the", "description", "of", "a", "file", "." ]
5168f606264620a090b42a64354331d208b00d5f
https://github.com/TC01/calcpkg/blob/5168f606264620a090b42a64354331d208b00d5f/calcrepo/repos/cemetech.py#L107-L116
243,667
quasipedia/swaggery
examples/calc/calc.py
TwoNumbers.two_numbers
def two_numbers( cls, request, operation: (Ptypes.path, String('One of the 4 arithmetic operations.', enum=['add', 'sub', 'mul', 'div'])), first: (Ptypes.path, Float('The first operand.')), second: (Ptypes.path, Float('The second operand.'))) -> [ (200, 'Ok', Float), (400, 'Wrong number format or invalid operation'), (422, 'NaN')]: '''Any of the four arithmetic operation on two numbers.''' log.info('Performing {} on {} and {}'.format(operation, first, second)) try: first = float(first) second = float(second) except ValueError: Respond(400) if operation == 'add': Respond(200, first + second) elif operation == 'sub': Respond(200, first - second) elif operation == 'mul': Respond(200, first * second) elif operation == 'div': if second == 0: Respond(422) Respond(200, first / second) else: Respond(400)
python
def two_numbers( cls, request, operation: (Ptypes.path, String('One of the 4 arithmetic operations.', enum=['add', 'sub', 'mul', 'div'])), first: (Ptypes.path, Float('The first operand.')), second: (Ptypes.path, Float('The second operand.'))) -> [ (200, 'Ok', Float), (400, 'Wrong number format or invalid operation'), (422, 'NaN')]: '''Any of the four arithmetic operation on two numbers.''' log.info('Performing {} on {} and {}'.format(operation, first, second)) try: first = float(first) second = float(second) except ValueError: Respond(400) if operation == 'add': Respond(200, first + second) elif operation == 'sub': Respond(200, first - second) elif operation == 'mul': Respond(200, first * second) elif operation == 'div': if second == 0: Respond(422) Respond(200, first / second) else: Respond(400)
[ "def", "two_numbers", "(", "cls", ",", "request", ",", "operation", ":", "(", "Ptypes", ".", "path", ",", "String", "(", "'One of the 4 arithmetic operations.'", ",", "enum", "=", "[", "'add'", ",", "'sub'", ",", "'mul'", ",", "'div'", "]", ")", ")", ",", "first", ":", "(", "Ptypes", ".", "path", ",", "Float", "(", "'The first operand.'", ")", ")", ",", "second", ":", "(", "Ptypes", ".", "path", ",", "Float", "(", "'The second operand.'", ")", ")", ")", "->", "[", "(", "200", ",", "'Ok'", ",", "Float", ")", ",", "(", "400", ",", "'Wrong number format or invalid operation'", ")", ",", "(", "422", ",", "'NaN'", ")", "]", ":", "log", ".", "info", "(", "'Performing {} on {} and {}'", ".", "format", "(", "operation", ",", "first", ",", "second", ")", ")", "try", ":", "first", "=", "float", "(", "first", ")", "second", "=", "float", "(", "second", ")", "except", "ValueError", ":", "Respond", "(", "400", ")", "if", "operation", "==", "'add'", ":", "Respond", "(", "200", ",", "first", "+", "second", ")", "elif", "operation", "==", "'sub'", ":", "Respond", "(", "200", ",", "first", "-", "second", ")", "elif", "operation", "==", "'mul'", ":", "Respond", "(", "200", ",", "first", "*", "second", ")", "elif", "operation", "==", "'div'", ":", "if", "second", "==", "0", ":", "Respond", "(", "422", ")", "Respond", "(", "200", ",", "first", "/", "second", ")", "else", ":", "Respond", "(", "400", ")" ]
Any of the four arithmetic operation on two numbers.
[ "Any", "of", "the", "four", "arithmetic", "operation", "on", "two", "numbers", "." ]
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/examples/calc/calc.py#L57-L87
243,668
quasipedia/swaggery
examples/calc/calc.py
AddVectors.length
def length( cls, request, vector: (Ptypes.body, Vector('The vector to analyse.'))) -> [ (200, 'Ok', Float), (400, 'Wrong vector format')]: '''Return the modulo of a vector.''' log.info('Computing the length of vector {}'.format(vector)) try: Respond(200, sqrt(vector['x'] ** 2 + vector['y'] ** 2 + vector.get('z', 0) ** 2)) except ValueError: Respond(400)
python
def length( cls, request, vector: (Ptypes.body, Vector('The vector to analyse.'))) -> [ (200, 'Ok', Float), (400, 'Wrong vector format')]: '''Return the modulo of a vector.''' log.info('Computing the length of vector {}'.format(vector)) try: Respond(200, sqrt(vector['x'] ** 2 + vector['y'] ** 2 + vector.get('z', 0) ** 2)) except ValueError: Respond(400)
[ "def", "length", "(", "cls", ",", "request", ",", "vector", ":", "(", "Ptypes", ".", "body", ",", "Vector", "(", "'The vector to analyse.'", ")", ")", ")", "->", "[", "(", "200", ",", "'Ok'", ",", "Float", ")", ",", "(", "400", ",", "'Wrong vector format'", ")", "]", ":", "log", ".", "info", "(", "'Computing the length of vector {}'", ".", "format", "(", "vector", ")", ")", "try", ":", "Respond", "(", "200", ",", "sqrt", "(", "vector", "[", "'x'", "]", "**", "2", "+", "vector", "[", "'y'", "]", "**", "2", "+", "vector", ".", "get", "(", "'z'", ",", "0", ")", "**", "2", ")", ")", "except", "ValueError", ":", "Respond", "(", "400", ")" ]
Return the modulo of a vector.
[ "Return", "the", "modulo", "of", "a", "vector", "." ]
89a2e1b2bebbc511c781c9e63972f65aef73cc2f
https://github.com/quasipedia/swaggery/blob/89a2e1b2bebbc511c781c9e63972f65aef73cc2f/examples/calc/calc.py#L98-L111
243,669
kankiri/pabiana
demos/collection/timer.py
place
def place(slot_name, dttime): """ Set a timer to be published at the specified minute. """ dttime = datetime.strptime(dttime, '%Y-%m-%d %H:%M:%S') dttime = dttime.replace(second=0, microsecond=0) try: area.context['timers'][dttime].add(slot_name) except KeyError: area.context['timers'][dttime] = {slot_name} area.publish({'status': 'placed'}, slot=slot_name)
python
def place(slot_name, dttime): """ Set a timer to be published at the specified minute. """ dttime = datetime.strptime(dttime, '%Y-%m-%d %H:%M:%S') dttime = dttime.replace(second=0, microsecond=0) try: area.context['timers'][dttime].add(slot_name) except KeyError: area.context['timers'][dttime] = {slot_name} area.publish({'status': 'placed'}, slot=slot_name)
[ "def", "place", "(", "slot_name", ",", "dttime", ")", ":", "dttime", "=", "datetime", ".", "strptime", "(", "dttime", ",", "'%Y-%m-%d %H:%M:%S'", ")", "dttime", "=", "dttime", ".", "replace", "(", "second", "=", "0", ",", "microsecond", "=", "0", ")", "try", ":", "area", ".", "context", "[", "'timers'", "]", "[", "dttime", "]", ".", "add", "(", "slot_name", ")", "except", "KeyError", ":", "area", ".", "context", "[", "'timers'", "]", "[", "dttime", "]", "=", "{", "slot_name", "}", "area", ".", "publish", "(", "{", "'status'", ":", "'placed'", "}", ",", "slot", "=", "slot_name", ")" ]
Set a timer to be published at the specified minute.
[ "Set", "a", "timer", "to", "be", "published", "at", "the", "specified", "minute", "." ]
74acfdd81e2a1cc411c37b9ee3d6905ce4b1a39b
https://github.com/kankiri/pabiana/blob/74acfdd81e2a1cc411c37b9ee3d6905ce4b1a39b/demos/collection/timer.py#L18-L28
243,670
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/operations.py
has_table
def has_table(table_name): """Return True if table exists, False otherwise.""" return db.engine.dialect.has_table( db.engine.connect(), table_name )
python
def has_table(table_name): """Return True if table exists, False otherwise.""" return db.engine.dialect.has_table( db.engine.connect(), table_name )
[ "def", "has_table", "(", "table_name", ")", ":", "return", "db", ".", "engine", ".", "dialect", ".", "has_table", "(", "db", ".", "engine", ".", "connect", "(", ")", ",", "table_name", ")" ]
Return True if table exists, False otherwise.
[ "Return", "True", "if", "table", "exists", "False", "otherwise", "." ]
cee4bcb118515463ecf6de1421642007f79a9fcd
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/operations.py#L36-L41
243,671
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/operations.py
create_migration_ctx
def create_migration_ctx(**kwargs): """Create an alembic migration context.""" env = EnvironmentContext(Config(), None) env.configure( connection=db.engine.connect(), sqlalchemy_module_prefix='db.', **kwargs ) return env.get_context()
python
def create_migration_ctx(**kwargs): """Create an alembic migration context.""" env = EnvironmentContext(Config(), None) env.configure( connection=db.engine.connect(), sqlalchemy_module_prefix='db.', **kwargs ) return env.get_context()
[ "def", "create_migration_ctx", "(", "*", "*", "kwargs", ")", ":", "env", "=", "EnvironmentContext", "(", "Config", "(", ")", ",", "None", ")", "env", ".", "configure", "(", "connection", "=", "db", ".", "engine", ".", "connect", "(", ")", ",", "sqlalchemy_module_prefix", "=", "'db.'", ",", "*", "*", "kwargs", ")", "return", "env", ".", "get_context", "(", ")" ]
Create an alembic migration context.
[ "Create", "an", "alembic", "migration", "context", "." ]
cee4bcb118515463ecf6de1421642007f79a9fcd
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/operations.py#L44-L52
243,672
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/operations.py
create_operations
def create_operations(ctx=None, **kwargs): """Create an alembic operations object.""" if ctx is None: ctx = create_migration_ctx(**kwargs) operations = Operations(ctx) operations.has_table = has_table return operations
python
def create_operations(ctx=None, **kwargs): """Create an alembic operations object.""" if ctx is None: ctx = create_migration_ctx(**kwargs) operations = Operations(ctx) operations.has_table = has_table return operations
[ "def", "create_operations", "(", "ctx", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "ctx", "is", "None", ":", "ctx", "=", "create_migration_ctx", "(", "*", "*", "kwargs", ")", "operations", "=", "Operations", "(", "ctx", ")", "operations", ".", "has_table", "=", "has_table", "return", "operations" ]
Create an alembic operations object.
[ "Create", "an", "alembic", "operations", "object", "." ]
cee4bcb118515463ecf6de1421642007f79a9fcd
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/operations.py#L55-L61
243,673
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/operations.py
produce_upgrade_operations
def produce_upgrade_operations( ctx=None, metadata=None, include_symbol=None, include_object=None, **kwargs): """Produce a list of upgrade statements.""" if metadata is None: # Note, all SQLAlchemy models must have been loaded to produce # accurate results. metadata = db.metadata if ctx is None: ctx = create_migration_ctx(target_metadata=metadata, **kwargs) template_args = {} imports = set() _produce_migration_diffs( ctx, template_args, imports, include_object=include_object, include_symbol=include_symbol, **kwargs ) return template_args
python
def produce_upgrade_operations( ctx=None, metadata=None, include_symbol=None, include_object=None, **kwargs): """Produce a list of upgrade statements.""" if metadata is None: # Note, all SQLAlchemy models must have been loaded to produce # accurate results. metadata = db.metadata if ctx is None: ctx = create_migration_ctx(target_metadata=metadata, **kwargs) template_args = {} imports = set() _produce_migration_diffs( ctx, template_args, imports, include_object=include_object, include_symbol=include_symbol, **kwargs ) return template_args
[ "def", "produce_upgrade_operations", "(", "ctx", "=", "None", ",", "metadata", "=", "None", ",", "include_symbol", "=", "None", ",", "include_object", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "metadata", "is", "None", ":", "# Note, all SQLAlchemy models must have been loaded to produce", "# accurate results.", "metadata", "=", "db", ".", "metadata", "if", "ctx", "is", "None", ":", "ctx", "=", "create_migration_ctx", "(", "target_metadata", "=", "metadata", ",", "*", "*", "kwargs", ")", "template_args", "=", "{", "}", "imports", "=", "set", "(", ")", "_produce_migration_diffs", "(", "ctx", ",", "template_args", ",", "imports", ",", "include_object", "=", "include_object", ",", "include_symbol", "=", "include_symbol", ",", "*", "*", "kwargs", ")", "return", "template_args" ]
Produce a list of upgrade statements.
[ "Produce", "a", "list", "of", "upgrade", "statements", "." ]
cee4bcb118515463ecf6de1421642007f79a9fcd
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/operations.py#L64-L85
243,674
bretth/nose2django
nose2django/config.py
DjangoConfig.handleArgs
def handleArgs(self, event): """Nose2 hook for the handling the command line args""" # settings resolution order: # command line > cfg file > environ if self.djsettings: os.environ['DJANGO_SETTINGS_MODULE'] = self.djsettings if self.djconfig: os.environ['DJANGO_CONFIGURATION'] = self.djconfig # test for django-configurations package try: from configurations import importer importer.install() except ImportError: pass from django.conf import settings try: from south.management.commands import patch_for_test_db_setup patch_for_test_db_setup() except ImportError: pass
python
def handleArgs(self, event): """Nose2 hook for the handling the command line args""" # settings resolution order: # command line > cfg file > environ if self.djsettings: os.environ['DJANGO_SETTINGS_MODULE'] = self.djsettings if self.djconfig: os.environ['DJANGO_CONFIGURATION'] = self.djconfig # test for django-configurations package try: from configurations import importer importer.install() except ImportError: pass from django.conf import settings try: from south.management.commands import patch_for_test_db_setup patch_for_test_db_setup() except ImportError: pass
[ "def", "handleArgs", "(", "self", ",", "event", ")", ":", "# settings resolution order:", "# command line > cfg file > environ", "if", "self", ".", "djsettings", ":", "os", ".", "environ", "[", "'DJANGO_SETTINGS_MODULE'", "]", "=", "self", ".", "djsettings", "if", "self", ".", "djconfig", ":", "os", ".", "environ", "[", "'DJANGO_CONFIGURATION'", "]", "=", "self", ".", "djconfig", "# test for django-configurations package", "try", ":", "from", "configurations", "import", "importer", "importer", ".", "install", "(", ")", "except", "ImportError", ":", "pass", "from", "django", ".", "conf", "import", "settings", "try", ":", "from", "south", ".", "management", ".", "commands", "import", "patch_for_test_db_setup", "patch_for_test_db_setup", "(", ")", "except", "ImportError", ":", "pass" ]
Nose2 hook for the handling the command line args
[ "Nose2", "hook", "for", "the", "handling", "the", "command", "line", "args" ]
5df80e34c68714bdaa907eb7f818bfcc6f749f1d
https://github.com/bretth/nose2django/blob/5df80e34c68714bdaa907eb7f818bfcc6f749f1d/nose2django/config.py#L24-L44
243,675
imtapps/generic-request-signer
generic_request_signer/check_signature.py
check_signature
def check_signature(signature, private_key, full_path, payload): """ Checks signature received and verifies that we are able to re-create it from the private key, path, and payload given. :param signature: Signature received from request. :param private_key: Base 64, url encoded private key. :full_path: Full path of request, including GET query string (excluding host) :payload: The request.POST data if present. None if not. :returns: Boolean of whether signature matched or not. """ if isinstance(private_key, bytes): private_key = private_key.decode("ascii") if isinstance(payload, bytes): payload = payload.decode() url_to_check = _strip_signature_from_url(signature, full_path) computed_signature = apysigner.get_signature(private_key, url_to_check, payload) return constant_time_compare(signature, computed_signature)
python
def check_signature(signature, private_key, full_path, payload): """ Checks signature received and verifies that we are able to re-create it from the private key, path, and payload given. :param signature: Signature received from request. :param private_key: Base 64, url encoded private key. :full_path: Full path of request, including GET query string (excluding host) :payload: The request.POST data if present. None if not. :returns: Boolean of whether signature matched or not. """ if isinstance(private_key, bytes): private_key = private_key.decode("ascii") if isinstance(payload, bytes): payload = payload.decode() url_to_check = _strip_signature_from_url(signature, full_path) computed_signature = apysigner.get_signature(private_key, url_to_check, payload) return constant_time_compare(signature, computed_signature)
[ "def", "check_signature", "(", "signature", ",", "private_key", ",", "full_path", ",", "payload", ")", ":", "if", "isinstance", "(", "private_key", ",", "bytes", ")", ":", "private_key", "=", "private_key", ".", "decode", "(", "\"ascii\"", ")", "if", "isinstance", "(", "payload", ",", "bytes", ")", ":", "payload", "=", "payload", ".", "decode", "(", ")", "url_to_check", "=", "_strip_signature_from_url", "(", "signature", ",", "full_path", ")", "computed_signature", "=", "apysigner", ".", "get_signature", "(", "private_key", ",", "url_to_check", ",", "payload", ")", "return", "constant_time_compare", "(", "signature", ",", "computed_signature", ")" ]
Checks signature received and verifies that we are able to re-create it from the private key, path, and payload given. :param signature: Signature received from request. :param private_key: Base 64, url encoded private key. :full_path: Full path of request, including GET query string (excluding host) :payload: The request.POST data if present. None if not. :returns: Boolean of whether signature matched or not.
[ "Checks", "signature", "received", "and", "verifies", "that", "we", "are", "able", "to", "re", "-", "create", "it", "from", "the", "private", "key", "path", "and", "payload", "given", "." ]
34c18856ffda6305bd4cd931bd20365bf161d1de
https://github.com/imtapps/generic-request-signer/blob/34c18856ffda6305bd4cd931bd20365bf161d1de/generic_request_signer/check_signature.py#L17-L42
243,676
sassoo/goldman
goldman/queryparams/filter.py
_parse_param
def _parse_param(key): """ Parse the query param looking for filters Determine the field to filter on & the operator to be used when filtering. :param key: The query parameter to the left of the equal sign :return: tuple of string field name & string operator """ regex = re.compile(r'filter\[([A-Za-z0-9_./]+)\]') match = regex.match(key) if match: field_and_oper = match.groups()[0].split('__') if len(field_and_oper) == 1: return field_and_oper[0], 'eq' elif len(field_and_oper) == 2: return tuple(field_and_oper) else: raise InvalidQueryParams(**{ 'detail': 'The filter query param of "%s" is not ' 'supported. Multiple filter operators are ' 'not allowed in a single expression.' % key, 'links': LINK, 'parameter': PARAM, })
python
def _parse_param(key): """ Parse the query param looking for filters Determine the field to filter on & the operator to be used when filtering. :param key: The query parameter to the left of the equal sign :return: tuple of string field name & string operator """ regex = re.compile(r'filter\[([A-Za-z0-9_./]+)\]') match = regex.match(key) if match: field_and_oper = match.groups()[0].split('__') if len(field_and_oper) == 1: return field_and_oper[0], 'eq' elif len(field_and_oper) == 2: return tuple(field_and_oper) else: raise InvalidQueryParams(**{ 'detail': 'The filter query param of "%s" is not ' 'supported. Multiple filter operators are ' 'not allowed in a single expression.' % key, 'links': LINK, 'parameter': PARAM, })
[ "def", "_parse_param", "(", "key", ")", ":", "regex", "=", "re", ".", "compile", "(", "r'filter\\[([A-Za-z0-9_./]+)\\]'", ")", "match", "=", "regex", ".", "match", "(", "key", ")", "if", "match", ":", "field_and_oper", "=", "match", ".", "groups", "(", ")", "[", "0", "]", ".", "split", "(", "'__'", ")", "if", "len", "(", "field_and_oper", ")", "==", "1", ":", "return", "field_and_oper", "[", "0", "]", ",", "'eq'", "elif", "len", "(", "field_and_oper", ")", "==", "2", ":", "return", "tuple", "(", "field_and_oper", ")", "else", ":", "raise", "InvalidQueryParams", "(", "*", "*", "{", "'detail'", ":", "'The filter query param of \"%s\" is not '", "'supported. Multiple filter operators are '", "'not allowed in a single expression.'", "%", "key", ",", "'links'", ":", "LINK", ",", "'parameter'", ":", "PARAM", ",", "}", ")" ]
Parse the query param looking for filters Determine the field to filter on & the operator to be used when filtering. :param key: The query parameter to the left of the equal sign :return: tuple of string field name & string operator
[ "Parse", "the", "query", "param", "looking", "for", "filters" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/queryparams/filter.py#L140-L169
243,677
sassoo/goldman
goldman/queryparams/filter.py
_validate_field
def _validate_field(param, fields): """ Ensure the field exists on the model """ if '/' not in param.field and param.field not in fields: raise InvalidQueryParams(**{ 'detail': 'The filter query param of "%s" is not possible. The ' 'resource requested does not have a "%s" field. Please ' 'modify your request & retry.' % (param, param.field), 'links': LINK, 'parameter': PARAM, })
python
def _validate_field(param, fields): """ Ensure the field exists on the model """ if '/' not in param.field and param.field not in fields: raise InvalidQueryParams(**{ 'detail': 'The filter query param of "%s" is not possible. The ' 'resource requested does not have a "%s" field. Please ' 'modify your request & retry.' % (param, param.field), 'links': LINK, 'parameter': PARAM, })
[ "def", "_validate_field", "(", "param", ",", "fields", ")", ":", "if", "'/'", "not", "in", "param", ".", "field", "and", "param", ".", "field", "not", "in", "fields", ":", "raise", "InvalidQueryParams", "(", "*", "*", "{", "'detail'", ":", "'The filter query param of \"%s\" is not possible. The '", "'resource requested does not have a \"%s\" field. Please '", "'modify your request & retry.'", "%", "(", "param", ",", "param", ".", "field", ")", ",", "'links'", ":", "LINK", ",", "'parameter'", ":", "PARAM", ",", "}", ")" ]
Ensure the field exists on the model
[ "Ensure", "the", "field", "exists", "on", "the", "model" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/queryparams/filter.py#L172-L182
243,678
sassoo/goldman
goldman/queryparams/filter.py
_validate_rel
def _validate_rel(param, rels): """ Validate relationship based filters We don't support nested filters currently. FIX: Ensure the relationship filter field exists on the relationships model! """ if param.field.count('/') > 1: raise InvalidQueryParams(**{ 'detail': 'The filter query param of "%s" is attempting to ' 'filter on a nested relationship which is not ' 'currently supported.' % param, 'links': LINK, 'parameter': PARAM, }) elif '/' in param.field: model_field = param.field.split('/')[0] if model_field not in rels: raise InvalidQueryParams(**{ 'detail': 'The filter query param of "%s" is attempting to ' 'filter on a relationship but the "%s" field is ' 'NOT a relationship field.' % (param, model_field), 'links': LINK, 'parameter': PARAM, })
python
def _validate_rel(param, rels): """ Validate relationship based filters We don't support nested filters currently. FIX: Ensure the relationship filter field exists on the relationships model! """ if param.field.count('/') > 1: raise InvalidQueryParams(**{ 'detail': 'The filter query param of "%s" is attempting to ' 'filter on a nested relationship which is not ' 'currently supported.' % param, 'links': LINK, 'parameter': PARAM, }) elif '/' in param.field: model_field = param.field.split('/')[0] if model_field not in rels: raise InvalidQueryParams(**{ 'detail': 'The filter query param of "%s" is attempting to ' 'filter on a relationship but the "%s" field is ' 'NOT a relationship field.' % (param, model_field), 'links': LINK, 'parameter': PARAM, })
[ "def", "_validate_rel", "(", "param", ",", "rels", ")", ":", "if", "param", ".", "field", ".", "count", "(", "'/'", ")", ">", "1", ":", "raise", "InvalidQueryParams", "(", "*", "*", "{", "'detail'", ":", "'The filter query param of \"%s\" is attempting to '", "'filter on a nested relationship which is not '", "'currently supported.'", "%", "param", ",", "'links'", ":", "LINK", ",", "'parameter'", ":", "PARAM", ",", "}", ")", "elif", "'/'", "in", "param", ".", "field", ":", "model_field", "=", "param", ".", "field", ".", "split", "(", "'/'", ")", "[", "0", "]", "if", "model_field", "not", "in", "rels", ":", "raise", "InvalidQueryParams", "(", "*", "*", "{", "'detail'", ":", "'The filter query param of \"%s\" is attempting to '", "'filter on a relationship but the \"%s\" field is '", "'NOT a relationship field.'", "%", "(", "param", ",", "model_field", ")", ",", "'links'", ":", "LINK", ",", "'parameter'", ":", "PARAM", ",", "}", ")" ]
Validate relationship based filters We don't support nested filters currently. FIX: Ensure the relationship filter field exists on the relationships model!
[ "Validate", "relationship", "based", "filters" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/queryparams/filter.py#L185-L212
243,679
sassoo/goldman
goldman/queryparams/filter.py
_validate_param
def _validate_param(param): # pylint: disable=too-many-branches """ Ensure the filter cast properly according to the operator """ detail = None if param.oper not in goldman.config.QUERY_FILTERS: detail = 'The query filter {} is not a supported ' \ 'operator. Please change {} & retry your ' \ 'request'.format(param.oper, param) elif param.oper in goldman.config.GEO_FILTERS: try: if not isinstance(param.val, list) or len(param.val) <= 2: raise ValueError else: param.val = [float(i) for i in param.val] except ValueError: detail = 'The query filter {} requires a list ' \ 'of floats for geo evaluation. Please ' \ 'modify your request & retry'.format(param) elif param.oper in goldman.config.ENUM_FILTERS: if not isinstance(param.val, list): param.val = [param.val] param.val = tuple(param.val) elif isinstance(param.val, list): detail = 'The query filter {} should not be specified more ' \ 'than once or have multiple values. Please modify ' \ 'your request & retry'.format(param) elif param.oper in goldman.config.BOOL_FILTERS: try: param.val = str_to_bool(param.val) except ValueError: detail = 'The query filter {} requires a boolean ' \ 'for evaluation. Please modify your ' \ 'request & retry'.format(param) elif param.oper in goldman.config.DATE_FILTERS: try: param.val = str_to_dt(param.val) except ValueError: detail = 'The query filter {} supports only an ' \ 'epoch or ISO 8601 timestamp. Please ' \ 'modify your request & retry'.format(param) elif param.oper in goldman.config.NUM_FILTERS: try: param.val = int(param.val) except ValueError: detail = 'The query filter {} requires a number ' \ 'for evaluation. Please modify your ' \ 'request & retry'.format(param) if detail: raise InvalidQueryParams(**{ 'detail': detail, 'links': LINK, 'parameter': PARAM, })
python
def _validate_param(param): # pylint: disable=too-many-branches """ Ensure the filter cast properly according to the operator """ detail = None if param.oper not in goldman.config.QUERY_FILTERS: detail = 'The query filter {} is not a supported ' \ 'operator. Please change {} & retry your ' \ 'request'.format(param.oper, param) elif param.oper in goldman.config.GEO_FILTERS: try: if not isinstance(param.val, list) or len(param.val) <= 2: raise ValueError else: param.val = [float(i) for i in param.val] except ValueError: detail = 'The query filter {} requires a list ' \ 'of floats for geo evaluation. Please ' \ 'modify your request & retry'.format(param) elif param.oper in goldman.config.ENUM_FILTERS: if not isinstance(param.val, list): param.val = [param.val] param.val = tuple(param.val) elif isinstance(param.val, list): detail = 'The query filter {} should not be specified more ' \ 'than once or have multiple values. Please modify ' \ 'your request & retry'.format(param) elif param.oper in goldman.config.BOOL_FILTERS: try: param.val = str_to_bool(param.val) except ValueError: detail = 'The query filter {} requires a boolean ' \ 'for evaluation. Please modify your ' \ 'request & retry'.format(param) elif param.oper in goldman.config.DATE_FILTERS: try: param.val = str_to_dt(param.val) except ValueError: detail = 'The query filter {} supports only an ' \ 'epoch or ISO 8601 timestamp. Please ' \ 'modify your request & retry'.format(param) elif param.oper in goldman.config.NUM_FILTERS: try: param.val = int(param.val) except ValueError: detail = 'The query filter {} requires a number ' \ 'for evaluation. Please modify your ' \ 'request & retry'.format(param) if detail: raise InvalidQueryParams(**{ 'detail': detail, 'links': LINK, 'parameter': PARAM, })
[ "def", "_validate_param", "(", "param", ")", ":", "# pylint: disable=too-many-branches", "detail", "=", "None", "if", "param", ".", "oper", "not", "in", "goldman", ".", "config", ".", "QUERY_FILTERS", ":", "detail", "=", "'The query filter {} is not a supported '", "'operator. Please change {} & retry your '", "'request'", ".", "format", "(", "param", ".", "oper", ",", "param", ")", "elif", "param", ".", "oper", "in", "goldman", ".", "config", ".", "GEO_FILTERS", ":", "try", ":", "if", "not", "isinstance", "(", "param", ".", "val", ",", "list", ")", "or", "len", "(", "param", ".", "val", ")", "<=", "2", ":", "raise", "ValueError", "else", ":", "param", ".", "val", "=", "[", "float", "(", "i", ")", "for", "i", "in", "param", ".", "val", "]", "except", "ValueError", ":", "detail", "=", "'The query filter {} requires a list '", "'of floats for geo evaluation. Please '", "'modify your request & retry'", ".", "format", "(", "param", ")", "elif", "param", ".", "oper", "in", "goldman", ".", "config", ".", "ENUM_FILTERS", ":", "if", "not", "isinstance", "(", "param", ".", "val", ",", "list", ")", ":", "param", ".", "val", "=", "[", "param", ".", "val", "]", "param", ".", "val", "=", "tuple", "(", "param", ".", "val", ")", "elif", "isinstance", "(", "param", ".", "val", ",", "list", ")", ":", "detail", "=", "'The query filter {} should not be specified more '", "'than once or have multiple values. Please modify '", "'your request & retry'", ".", "format", "(", "param", ")", "elif", "param", ".", "oper", "in", "goldman", ".", "config", ".", "BOOL_FILTERS", ":", "try", ":", "param", ".", "val", "=", "str_to_bool", "(", "param", ".", "val", ")", "except", "ValueError", ":", "detail", "=", "'The query filter {} requires a boolean '", "'for evaluation. 
Please modify your '", "'request & retry'", ".", "format", "(", "param", ")", "elif", "param", ".", "oper", "in", "goldman", ".", "config", ".", "DATE_FILTERS", ":", "try", ":", "param", ".", "val", "=", "str_to_dt", "(", "param", ".", "val", ")", "except", "ValueError", ":", "detail", "=", "'The query filter {} supports only an '", "'epoch or ISO 8601 timestamp. Please '", "'modify your request & retry'", ".", "format", "(", "param", ")", "elif", "param", ".", "oper", "in", "goldman", ".", "config", ".", "NUM_FILTERS", ":", "try", ":", "param", ".", "val", "=", "int", "(", "param", ".", "val", ")", "except", "ValueError", ":", "detail", "=", "'The query filter {} requires a number '", "'for evaluation. Please modify your '", "'request & retry'", ".", "format", "(", "param", ")", "if", "detail", ":", "raise", "InvalidQueryParams", "(", "*", "*", "{", "'detail'", ":", "detail", ",", "'links'", ":", "LINK", ",", "'parameter'", ":", "PARAM", ",", "}", ")" ]
Ensure the filter cast properly according to the operator
[ "Ensure", "the", "filter", "cast", "properly", "according", "to", "the", "operator" ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/queryparams/filter.py#L215-L276
243,680
sassoo/goldman
goldman/queryparams/filter.py
init
def init(req, model): """ Return an array of Filter objects. """ fields = model.all_fields rels = model.relationships params = [] for key, val in req.params.items(): try: field, oper = _parse_param(key) except (TypeError, ValueError): continue try: local_field, foreign_filter = field.split('/') field_type = getattr(model, local_field) foreign_field = field_type.field foreign_rtype = field_type.rtype if hasattr(field_type, 'local_field'): local_field = field_type.local_field param = FilterRel(foreign_field, foreign_filter, foreign_rtype, local_field, field, oper, val) except AttributeError: raise InvalidQueryParams(**{ 'detail': 'The filter query param "%s" specified a filter ' 'containing a "." indicating a relationship filter ' 'but a relationship by that name does not exist ' 'on the requested resource.' % key, 'links': LINK, 'parameter': PARAM, }) except ValueError: param = Filter(field, oper, val) _validate_param(param) _validate_rel(param, rels) _validate_field(param, fields) params.append(param) return params
python
def init(req, model): """ Return an array of Filter objects. """ fields = model.all_fields rels = model.relationships params = [] for key, val in req.params.items(): try: field, oper = _parse_param(key) except (TypeError, ValueError): continue try: local_field, foreign_filter = field.split('/') field_type = getattr(model, local_field) foreign_field = field_type.field foreign_rtype = field_type.rtype if hasattr(field_type, 'local_field'): local_field = field_type.local_field param = FilterRel(foreign_field, foreign_filter, foreign_rtype, local_field, field, oper, val) except AttributeError: raise InvalidQueryParams(**{ 'detail': 'The filter query param "%s" specified a filter ' 'containing a "." indicating a relationship filter ' 'but a relationship by that name does not exist ' 'on the requested resource.' % key, 'links': LINK, 'parameter': PARAM, }) except ValueError: param = Filter(field, oper, val) _validate_param(param) _validate_rel(param, rels) _validate_field(param, fields) params.append(param) return params
[ "def", "init", "(", "req", ",", "model", ")", ":", "fields", "=", "model", ".", "all_fields", "rels", "=", "model", ".", "relationships", "params", "=", "[", "]", "for", "key", ",", "val", "in", "req", ".", "params", ".", "items", "(", ")", ":", "try", ":", "field", ",", "oper", "=", "_parse_param", "(", "key", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "continue", "try", ":", "local_field", ",", "foreign_filter", "=", "field", ".", "split", "(", "'/'", ")", "field_type", "=", "getattr", "(", "model", ",", "local_field", ")", "foreign_field", "=", "field_type", ".", "field", "foreign_rtype", "=", "field_type", ".", "rtype", "if", "hasattr", "(", "field_type", ",", "'local_field'", ")", ":", "local_field", "=", "field_type", ".", "local_field", "param", "=", "FilterRel", "(", "foreign_field", ",", "foreign_filter", ",", "foreign_rtype", ",", "local_field", ",", "field", ",", "oper", ",", "val", ")", "except", "AttributeError", ":", "raise", "InvalidQueryParams", "(", "*", "*", "{", "'detail'", ":", "'The filter query param \"%s\" specified a filter '", "'containing a \".\" indicating a relationship filter '", "'but a relationship by that name does not exist '", "'on the requested resource.'", "%", "key", ",", "'links'", ":", "LINK", ",", "'parameter'", ":", "PARAM", ",", "}", ")", "except", "ValueError", ":", "param", "=", "Filter", "(", "field", ",", "oper", ",", "val", ")", "_validate_param", "(", "param", ")", "_validate_rel", "(", "param", ",", "rels", ")", "_validate_field", "(", "param", ",", "fields", ")", "params", ".", "append", "(", "param", ")", "return", "params" ]
Return an array of Filter objects.
[ "Return", "an", "array", "of", "Filter", "objects", "." ]
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/queryparams/filter.py#L279-L321
243,681
MinchinWeb/colourettu
make_release.py
git_check
def git_check(): """Check for uncomitted changes""" git_status = subprocess.check_output(['git', 'status', '--porcelain']) if len(git_status) is 0: print(Fore.GREEN + 'All changes committed' + Style.RESET_ALL) else: exit(Fore.RED + 'Please commit all files to continue')
python
def git_check(): """Check for uncomitted changes""" git_status = subprocess.check_output(['git', 'status', '--porcelain']) if len(git_status) is 0: print(Fore.GREEN + 'All changes committed' + Style.RESET_ALL) else: exit(Fore.RED + 'Please commit all files to continue')
[ "def", "git_check", "(", ")", ":", "git_status", "=", "subprocess", ".", "check_output", "(", "[", "'git'", ",", "'status'", ",", "'--porcelain'", "]", ")", "if", "len", "(", "git_status", ")", "is", "0", ":", "print", "(", "Fore", ".", "GREEN", "+", "'All changes committed'", "+", "Style", ".", "RESET_ALL", ")", "else", ":", "exit", "(", "Fore", ".", "RED", "+", "'Please commit all files to continue'", ")" ]
Check for uncomitted changes
[ "Check", "for", "uncomitted", "changes" ]
f0b2f6b1d44055f3ccee62ac2759829f1e16a252
https://github.com/MinchinWeb/colourettu/blob/f0b2f6b1d44055f3ccee62ac2759829f1e16a252/make_release.py#L100-L106
243,682
MinchinWeb/colourettu
make_release.py
update_version_number
def update_version_number(update_level='patch'): """Update version number Returns a semantic_version object""" """Find current version""" temp_file = version_file().parent / ("~" + version_file().name) with open(str(temp_file), 'w') as g: with open(str(version_file()), 'r') as f: for line in f: version_matches = bare_version_re.match(line) if version_matches: bare_version_str = version_matches.groups(0)[0] if semantic_version.validate(bare_version_str): current_version = Version(bare_version_str) print("{}Current version is {}".format(" "*4, current_version)) else: current_version = Version.coerce(bare_version_str) if not text.query_yes_quit("{}I think the version is {}. Use it?".format(" "*4, current_version), default="yes"): exit(Fore.RED + 'Please set an initial version number to continue') """Determine new version number""" if update_level is 'major': current_version = current_version.next_major() elif update_level is 'minor': current_version = current_version.next_minor() elif update_level is 'patch': current_version = current_version.next_patch() elif update_level is 'prerelease': if not current_version.prerelease: current_version = current_version.next_patch() current_version.prerelease = ('dev', ) else: exit(Fore.RED + 'Cannot update version in {} mode'.format(update_level)) print("{}New version is {}".format(" "*4, current_version)) """Update version number""" line = '__version__ = "{}"'.format(current_version) print(line, file=g, end="") print('', file=g) # add a blank line at the end of the file shutil.copyfile(str(temp_file), str(version_file())) os.remove(str(temp_file)) return(current_version)
python
def update_version_number(update_level='patch'): """Update version number Returns a semantic_version object""" """Find current version""" temp_file = version_file().parent / ("~" + version_file().name) with open(str(temp_file), 'w') as g: with open(str(version_file()), 'r') as f: for line in f: version_matches = bare_version_re.match(line) if version_matches: bare_version_str = version_matches.groups(0)[0] if semantic_version.validate(bare_version_str): current_version = Version(bare_version_str) print("{}Current version is {}".format(" "*4, current_version)) else: current_version = Version.coerce(bare_version_str) if not text.query_yes_quit("{}I think the version is {}. Use it?".format(" "*4, current_version), default="yes"): exit(Fore.RED + 'Please set an initial version number to continue') """Determine new version number""" if update_level is 'major': current_version = current_version.next_major() elif update_level is 'minor': current_version = current_version.next_minor() elif update_level is 'patch': current_version = current_version.next_patch() elif update_level is 'prerelease': if not current_version.prerelease: current_version = current_version.next_patch() current_version.prerelease = ('dev', ) else: exit(Fore.RED + 'Cannot update version in {} mode'.format(update_level)) print("{}New version is {}".format(" "*4, current_version)) """Update version number""" line = '__version__ = "{}"'.format(current_version) print(line, file=g, end="") print('', file=g) # add a blank line at the end of the file shutil.copyfile(str(temp_file), str(version_file())) os.remove(str(temp_file)) return(current_version)
[ "def", "update_version_number", "(", "update_level", "=", "'patch'", ")", ":", "\"\"\"Find current version\"\"\"", "temp_file", "=", "version_file", "(", ")", ".", "parent", "/", "(", "\"~\"", "+", "version_file", "(", ")", ".", "name", ")", "with", "open", "(", "str", "(", "temp_file", ")", ",", "'w'", ")", "as", "g", ":", "with", "open", "(", "str", "(", "version_file", "(", ")", ")", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "version_matches", "=", "bare_version_re", ".", "match", "(", "line", ")", "if", "version_matches", ":", "bare_version_str", "=", "version_matches", ".", "groups", "(", "0", ")", "[", "0", "]", "if", "semantic_version", ".", "validate", "(", "bare_version_str", ")", ":", "current_version", "=", "Version", "(", "bare_version_str", ")", "print", "(", "\"{}Current version is {}\"", ".", "format", "(", "\" \"", "*", "4", ",", "current_version", ")", ")", "else", ":", "current_version", "=", "Version", ".", "coerce", "(", "bare_version_str", ")", "if", "not", "text", ".", "query_yes_quit", "(", "\"{}I think the version is {}. 
Use it?\"", ".", "format", "(", "\" \"", "*", "4", ",", "current_version", ")", ",", "default", "=", "\"yes\"", ")", ":", "exit", "(", "Fore", ".", "RED", "+", "'Please set an initial version number to continue'", ")", "\"\"\"Determine new version number\"\"\"", "if", "update_level", "is", "'major'", ":", "current_version", "=", "current_version", ".", "next_major", "(", ")", "elif", "update_level", "is", "'minor'", ":", "current_version", "=", "current_version", ".", "next_minor", "(", ")", "elif", "update_level", "is", "'patch'", ":", "current_version", "=", "current_version", ".", "next_patch", "(", ")", "elif", "update_level", "is", "'prerelease'", ":", "if", "not", "current_version", ".", "prerelease", ":", "current_version", "=", "current_version", ".", "next_patch", "(", ")", "current_version", ".", "prerelease", "=", "(", "'dev'", ",", ")", "else", ":", "exit", "(", "Fore", ".", "RED", "+", "'Cannot update version in {} mode'", ".", "format", "(", "update_level", ")", ")", "print", "(", "\"{}New version is {}\"", ".", "format", "(", "\" \"", "*", "4", ",", "current_version", ")", ")", "\"\"\"Update version number\"\"\"", "line", "=", "'__version__ = \"{}\"'", ".", "format", "(", "current_version", ")", "print", "(", "line", ",", "file", "=", "g", ",", "end", "=", "\"\"", ")", "print", "(", "''", ",", "file", "=", "g", ")", "# add a blank line at the end of the file", "shutil", ".", "copyfile", "(", "str", "(", "temp_file", ")", ",", "str", "(", "version_file", "(", ")", ")", ")", "os", ".", "remove", "(", "str", "(", "temp_file", ")", ")", "return", "(", "current_version", ")" ]
Update version number Returns a semantic_version object
[ "Update", "version", "number" ]
f0b2f6b1d44055f3ccee62ac2759829f1e16a252
https://github.com/MinchinWeb/colourettu/blob/f0b2f6b1d44055f3ccee62ac2759829f1e16a252/make_release.py#L125-L168
243,683
MinchinWeb/colourettu
make_release.py
add_release_to_changelog
def add_release_to_changelog(version): """Add release line at the top of the first list it finds Assumes your changelog in managed with `releases`""" temp_file = changelog_file().parent / ("~" + changelog_file().name) now = datetime.today() release_added = False with open(str(temp_file), 'w') as g: with open(str(changelog_file()), 'r') as f: for line in f: list_match = list_match_re.match(line) if list_match and not release_added: release_line = "{}{} :release:`{} <{}-{:02}-{:02}>`".format( list_match.group("leading"), list_match.group("mark"), version, now.year, now.month, now.day) print(release_line, file=g) release_added = True print(line, file=g, end="") if not release_added: release_line = "{}{} :release:`{} <{}-{:02}-{:02}>`".format( " ", "-", version, now.year, now.month, now.day) print(release_line, file=g) print('', file=g) # add a blank line at the end of the file shutil.copyfile(str(temp_file), str(changelog_file())) os.remove(str(temp_file))
python
def add_release_to_changelog(version): """Add release line at the top of the first list it finds Assumes your changelog in managed with `releases`""" temp_file = changelog_file().parent / ("~" + changelog_file().name) now = datetime.today() release_added = False with open(str(temp_file), 'w') as g: with open(str(changelog_file()), 'r') as f: for line in f: list_match = list_match_re.match(line) if list_match and not release_added: release_line = "{}{} :release:`{} <{}-{:02}-{:02}>`".format( list_match.group("leading"), list_match.group("mark"), version, now.year, now.month, now.day) print(release_line, file=g) release_added = True print(line, file=g, end="") if not release_added: release_line = "{}{} :release:`{} <{}-{:02}-{:02}>`".format( " ", "-", version, now.year, now.month, now.day) print(release_line, file=g) print('', file=g) # add a blank line at the end of the file shutil.copyfile(str(temp_file), str(changelog_file())) os.remove(str(temp_file))
[ "def", "add_release_to_changelog", "(", "version", ")", ":", "temp_file", "=", "changelog_file", "(", ")", ".", "parent", "/", "(", "\"~\"", "+", "changelog_file", "(", ")", ".", "name", ")", "now", "=", "datetime", ".", "today", "(", ")", "release_added", "=", "False", "with", "open", "(", "str", "(", "temp_file", ")", ",", "'w'", ")", "as", "g", ":", "with", "open", "(", "str", "(", "changelog_file", "(", ")", ")", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "list_match", "=", "list_match_re", ".", "match", "(", "line", ")", "if", "list_match", "and", "not", "release_added", ":", "release_line", "=", "\"{}{} :release:`{} <{}-{:02}-{:02}>`\"", ".", "format", "(", "list_match", ".", "group", "(", "\"leading\"", ")", ",", "list_match", ".", "group", "(", "\"mark\"", ")", ",", "version", ",", "now", ".", "year", ",", "now", ".", "month", ",", "now", ".", "day", ")", "print", "(", "release_line", ",", "file", "=", "g", ")", "release_added", "=", "True", "print", "(", "line", ",", "file", "=", "g", ",", "end", "=", "\"\"", ")", "if", "not", "release_added", ":", "release_line", "=", "\"{}{} :release:`{} <{}-{:02}-{:02}>`\"", ".", "format", "(", "\" \"", ",", "\"-\"", ",", "version", ",", "now", ".", "year", ",", "now", ".", "month", ",", "now", ".", "day", ")", "print", "(", "release_line", ",", "file", "=", "g", ")", "print", "(", "''", ",", "file", "=", "g", ")", "# add a blank line at the end of the file", "shutil", ".", "copyfile", "(", "str", "(", "temp_file", ")", ",", "str", "(", "changelog_file", "(", ")", ")", ")", "os", ".", "remove", "(", "str", "(", "temp_file", ")", ")" ]
Add release line at the top of the first list it finds Assumes your changelog in managed with `releases`
[ "Add", "release", "line", "at", "the", "top", "of", "the", "first", "list", "it", "finds" ]
f0b2f6b1d44055f3ccee62ac2759829f1e16a252
https://github.com/MinchinWeb/colourettu/blob/f0b2f6b1d44055f3ccee62ac2759829f1e16a252/make_release.py#L171-L196
243,684
MinchinWeb/colourettu
make_release.py
run_sphinx
def run_sphinx(): """Runs Sphinx via it's `make html` command""" old_dir = here_directory() os.chdir(str(doc_directory())) doc_status = subprocess.check_call(['make', 'html'], shell=True) os.chdir(str(old_dir)) # go back to former working directory if doc_status is not 0: exit(Fore.RED + 'Something broke generating your documentation...')
python
def run_sphinx(): """Runs Sphinx via it's `make html` command""" old_dir = here_directory() os.chdir(str(doc_directory())) doc_status = subprocess.check_call(['make', 'html'], shell=True) os.chdir(str(old_dir)) # go back to former working directory if doc_status is not 0: exit(Fore.RED + 'Something broke generating your documentation...')
[ "def", "run_sphinx", "(", ")", ":", "old_dir", "=", "here_directory", "(", ")", "os", ".", "chdir", "(", "str", "(", "doc_directory", "(", ")", ")", ")", "doc_status", "=", "subprocess", ".", "check_call", "(", "[", "'make'", ",", "'html'", "]", ",", "shell", "=", "True", ")", "os", ".", "chdir", "(", "str", "(", "old_dir", ")", ")", "# go back to former working directory", "if", "doc_status", "is", "not", "0", ":", "exit", "(", "Fore", ".", "RED", "+", "'Something broke generating your documentation...'", ")" ]
Runs Sphinx via it's `make html` command
[ "Runs", "Sphinx", "via", "it", "s", "make", "html", "command" ]
f0b2f6b1d44055f3ccee62ac2759829f1e16a252
https://github.com/MinchinWeb/colourettu/blob/f0b2f6b1d44055f3ccee62ac2759829f1e16a252/make_release.py#L199-L206
243,685
rosenbrockc/acorn
acorn/logging/database.py
list_tasks
def list_tasks(target=None): """Returns a list of all the projects and tasks available in the `acorn` database directory. Args: target (str): directory to list the projects for. Defaults to the configured database directory. Returns: dict: keys are project names; values are lists of tasks associated with the project. """ from os import getcwd, chdir from glob import glob original = getcwd() if target is None:# pragma: no cover target = _dbdir() chdir(target) result = {} for filename in glob("*.*.json"): project, task = filename.split('.')[0:2] if project not in result: result[project] = [] result[project].append(task) #Set the working directory back to what it was. chdir(original) return result
python
def list_tasks(target=None): """Returns a list of all the projects and tasks available in the `acorn` database directory. Args: target (str): directory to list the projects for. Defaults to the configured database directory. Returns: dict: keys are project names; values are lists of tasks associated with the project. """ from os import getcwd, chdir from glob import glob original = getcwd() if target is None:# pragma: no cover target = _dbdir() chdir(target) result = {} for filename in glob("*.*.json"): project, task = filename.split('.')[0:2] if project not in result: result[project] = [] result[project].append(task) #Set the working directory back to what it was. chdir(original) return result
[ "def", "list_tasks", "(", "target", "=", "None", ")", ":", "from", "os", "import", "getcwd", ",", "chdir", "from", "glob", "import", "glob", "original", "=", "getcwd", "(", ")", "if", "target", "is", "None", ":", "# pragma: no cover", "target", "=", "_dbdir", "(", ")", "chdir", "(", "target", ")", "result", "=", "{", "}", "for", "filename", "in", "glob", "(", "\"*.*.json\"", ")", ":", "project", ",", "task", "=", "filename", ".", "split", "(", "'.'", ")", "[", "0", ":", "2", "]", "if", "project", "not", "in", "result", ":", "result", "[", "project", "]", "=", "[", "]", "result", "[", "project", "]", ".", "append", "(", "task", ")", "#Set the working directory back to what it was.", "chdir", "(", "original", ")", "return", "result" ]
Returns a list of all the projects and tasks available in the `acorn` database directory. Args: target (str): directory to list the projects for. Defaults to the configured database directory. Returns: dict: keys are project names; values are lists of tasks associated with the project.
[ "Returns", "a", "list", "of", "all", "the", "projects", "and", "tasks", "available", "in", "the", "acorn", "database", "directory", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/database.py#L48-L77
243,686
rosenbrockc/acorn
acorn/logging/database.py
set_task
def set_task(project_, task_): """Sets the active project and task. All subsequent logging will be saved to the database with that project and task. Args: project_ (str): active project name; a project can have multiple tasks. task_ (str): active task name. Logging is separated at the project and task level. """ global project, task project = project_ task = task_ msg.okay("Set project name to {}.{}".format(project, task), 2)
python
def set_task(project_, task_): """Sets the active project and task. All subsequent logging will be saved to the database with that project and task. Args: project_ (str): active project name; a project can have multiple tasks. task_ (str): active task name. Logging is separated at the project and task level. """ global project, task project = project_ task = task_ msg.okay("Set project name to {}.{}".format(project, task), 2)
[ "def", "set_task", "(", "project_", ",", "task_", ")", ":", "global", "project", ",", "task", "project", "=", "project_", "task", "=", "task_", "msg", ".", "okay", "(", "\"Set project name to {}.{}\"", ".", "format", "(", "project", ",", "task", ")", ",", "2", ")" ]
Sets the active project and task. All subsequent logging will be saved to the database with that project and task. Args: project_ (str): active project name; a project can have multiple tasks. task_ (str): active task name. Logging is separated at the project and task level.
[ "Sets", "the", "active", "project", "and", "task", ".", "All", "subsequent", "logging", "will", "be", "saved", "to", "the", "database", "with", "that", "project", "and", "task", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/database.py#L79-L91
243,687
rosenbrockc/acorn
acorn/logging/database.py
cleanup
def cleanup(): """Saves all the open databases to JSON so that the kernel can be shut down without losing in-memory collections. """ failed = {} success = [] for dbname, db in dbs.items(): try: #Force the database save, even if the time hasn't elapsed yet. db.save(True) success.append(dbname) except: # pragma: no cover import sys, traceback xcls, xerr = sys.exc_info()[0:2] failed[dbname] = traceback.format_tb(sys.exc_info()[2]) for sdb in success: if writeable: msg.okay("Project {0}.{1} saved successfully.".format(*sdb), 0) for fdb, tb in failed.items(): # pragma: no cover msg.err("Project {1}.{2} save failed:\n{0}".format(tb, *fdb), prefix=False)
python
def cleanup(): """Saves all the open databases to JSON so that the kernel can be shut down without losing in-memory collections. """ failed = {} success = [] for dbname, db in dbs.items(): try: #Force the database save, even if the time hasn't elapsed yet. db.save(True) success.append(dbname) except: # pragma: no cover import sys, traceback xcls, xerr = sys.exc_info()[0:2] failed[dbname] = traceback.format_tb(sys.exc_info()[2]) for sdb in success: if writeable: msg.okay("Project {0}.{1} saved successfully.".format(*sdb), 0) for fdb, tb in failed.items(): # pragma: no cover msg.err("Project {1}.{2} save failed:\n{0}".format(tb, *fdb), prefix=False)
[ "def", "cleanup", "(", ")", ":", "failed", "=", "{", "}", "success", "=", "[", "]", "for", "dbname", ",", "db", "in", "dbs", ".", "items", "(", ")", ":", "try", ":", "#Force the database save, even if the time hasn't elapsed yet.", "db", ".", "save", "(", "True", ")", "success", ".", "append", "(", "dbname", ")", "except", ":", "# pragma: no cover", "import", "sys", ",", "traceback", "xcls", ",", "xerr", "=", "sys", ".", "exc_info", "(", ")", "[", "0", ":", "2", "]", "failed", "[", "dbname", "]", "=", "traceback", ".", "format_tb", "(", "sys", ".", "exc_info", "(", ")", "[", "2", "]", ")", "for", "sdb", "in", "success", ":", "if", "writeable", ":", "msg", ".", "okay", "(", "\"Project {0}.{1} saved successfully.\"", ".", "format", "(", "*", "sdb", ")", ",", "0", ")", "for", "fdb", ",", "tb", "in", "failed", ".", "items", "(", ")", ":", "# pragma: no cover", "msg", ".", "err", "(", "\"Project {1}.{2} save failed:\\n{0}\"", ".", "format", "(", "tb", ",", "*", "fdb", ")", ",", "prefix", "=", "False", ")" ]
Saves all the open databases to JSON so that the kernel can be shut down without losing in-memory collections.
[ "Saves", "all", "the", "open", "databases", "to", "JSON", "so", "that", "the", "kernel", "can", "be", "shut", "down", "without", "losing", "in", "-", "memory", "collections", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/database.py#L106-L127
243,688
rosenbrockc/acorn
acorn/logging/database.py
_dbdir
def _dbdir(): """Returns the path to the directory where acorn DBs are stored. """ global dbdir from os import mkdir, path, getcwd, chdir if dbdir is None: from acorn.config import settings config = settings("acorn") if (config.has_section("database") and config.has_option("database", "folder")): dbdir = config.get("database", "folder") else: # pragma: no cover raise ValueError("The folder to save DBs in must be configured" " in 'acorn.cfg'") #It is possible to specify the database path relative to the repository #root. path.abspath will map it correctly if we are in the root directory. from acorn.utility import abspath if not path.isabs(dbdir): #We want absolute paths to make it easier to port this to other OS. dbdir = abspath(dbdir) if not path.isdir(dbdir): # pragma: no cover mkdir(dbdir) return dbdir
python
def _dbdir(): """Returns the path to the directory where acorn DBs are stored. """ global dbdir from os import mkdir, path, getcwd, chdir if dbdir is None: from acorn.config import settings config = settings("acorn") if (config.has_section("database") and config.has_option("database", "folder")): dbdir = config.get("database", "folder") else: # pragma: no cover raise ValueError("The folder to save DBs in must be configured" " in 'acorn.cfg'") #It is possible to specify the database path relative to the repository #root. path.abspath will map it correctly if we are in the root directory. from acorn.utility import abspath if not path.isabs(dbdir): #We want absolute paths to make it easier to port this to other OS. dbdir = abspath(dbdir) if not path.isdir(dbdir): # pragma: no cover mkdir(dbdir) return dbdir
[ "def", "_dbdir", "(", ")", ":", "global", "dbdir", "from", "os", "import", "mkdir", ",", "path", ",", "getcwd", ",", "chdir", "if", "dbdir", "is", "None", ":", "from", "acorn", ".", "config", "import", "settings", "config", "=", "settings", "(", "\"acorn\"", ")", "if", "(", "config", ".", "has_section", "(", "\"database\"", ")", "and", "config", ".", "has_option", "(", "\"database\"", ",", "\"folder\"", ")", ")", ":", "dbdir", "=", "config", ".", "get", "(", "\"database\"", ",", "\"folder\"", ")", "else", ":", "# pragma: no cover", "raise", "ValueError", "(", "\"The folder to save DBs in must be configured\"", "\" in 'acorn.cfg'\"", ")", "#It is possible to specify the database path relative to the repository", "#root. path.abspath will map it correctly if we are in the root directory.", "from", "acorn", ".", "utility", "import", "abspath", "if", "not", "path", ".", "isabs", "(", "dbdir", ")", ":", "#We want absolute paths to make it easier to port this to other OS.", "dbdir", "=", "abspath", "(", "dbdir", ")", "if", "not", "path", ".", "isdir", "(", "dbdir", ")", ":", "# pragma: no cover", "mkdir", "(", "dbdir", ")", "return", "dbdir" ]
Returns the path to the directory where acorn DBs are stored.
[ "Returns", "the", "path", "to", "the", "directory", "where", "acorn", "DBs", "are", "stored", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/database.py#L212-L238
243,689
rosenbrockc/acorn
acorn/logging/database.py
_json_clean
def _json_clean(d): """Cleans the specified python `dict` by converting any tuple keys to strings so that they can be serialized by JSON. Args: d (dict): python dictionary to clean up. Returns: dict: cleaned-up dictionary. """ result = {} compkeys = {} for k, v in d.items(): if not isinstance(k, tuple): result[k] = v else: #v is a list of entries for instance methods/constructors on the #UUID of the key. Instead of using the composite tuple keys, we #switch them for a string using the key = "c.{}".format(id(k)) result[key] = v compkeys[key] = k return (result, compkeys)
python
def _json_clean(d): """Cleans the specified python `dict` by converting any tuple keys to strings so that they can be serialized by JSON. Args: d (dict): python dictionary to clean up. Returns: dict: cleaned-up dictionary. """ result = {} compkeys = {} for k, v in d.items(): if not isinstance(k, tuple): result[k] = v else: #v is a list of entries for instance methods/constructors on the #UUID of the key. Instead of using the composite tuple keys, we #switch them for a string using the key = "c.{}".format(id(k)) result[key] = v compkeys[key] = k return (result, compkeys)
[ "def", "_json_clean", "(", "d", ")", ":", "result", "=", "{", "}", "compkeys", "=", "{", "}", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ":", "if", "not", "isinstance", "(", "k", ",", "tuple", ")", ":", "result", "[", "k", "]", "=", "v", "else", ":", "#v is a list of entries for instance methods/constructors on the", "#UUID of the key. Instead of using the composite tuple keys, we", "#switch them for a string using the ", "key", "=", "\"c.{}\"", ".", "format", "(", "id", "(", "k", ")", ")", "result", "[", "key", "]", "=", "v", "compkeys", "[", "key", "]", "=", "k", "return", "(", "result", ",", "compkeys", ")" ]
Cleans the specified python `dict` by converting any tuple keys to strings so that they can be serialized by JSON. Args: d (dict): python dictionary to clean up. Returns: dict: cleaned-up dictionary.
[ "Cleans", "the", "specified", "python", "dict", "by", "converting", "any", "tuple", "keys", "to", "strings", "so", "that", "they", "can", "be", "serialized", "by", "JSON", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/database.py#L240-L263
243,690
rosenbrockc/acorn
acorn/logging/database.py
save_image
def save_image(byteio, imgfmt): """Saves the specified image to disk. Args: byteio (bytes): image bytes to save to disk. imgfmt (str): used as the extension of the saved file. Returns: str: a uuid for the saved image that can be added to the database entry. """ from os import path, mkdir ptdir = "{}.{}".format(project, task) uuid = str(uuid4()) #Save the image within the project/task specific folder. idir = path.join(dbdir, ptdir) if not path.isdir(idir): mkdir(idir) ipath = path.join(idir, "{}.{}".format(uuid, imgfmt)) with open(ipath, 'wb') as f: f.write(byteio) return uuid
python
def save_image(byteio, imgfmt): """Saves the specified image to disk. Args: byteio (bytes): image bytes to save to disk. imgfmt (str): used as the extension of the saved file. Returns: str: a uuid for the saved image that can be added to the database entry. """ from os import path, mkdir ptdir = "{}.{}".format(project, task) uuid = str(uuid4()) #Save the image within the project/task specific folder. idir = path.join(dbdir, ptdir) if not path.isdir(idir): mkdir(idir) ipath = path.join(idir, "{}.{}".format(uuid, imgfmt)) with open(ipath, 'wb') as f: f.write(byteio) return uuid
[ "def", "save_image", "(", "byteio", ",", "imgfmt", ")", ":", "from", "os", "import", "path", ",", "mkdir", "ptdir", "=", "\"{}.{}\"", ".", "format", "(", "project", ",", "task", ")", "uuid", "=", "str", "(", "uuid4", "(", ")", ")", "#Save the image within the project/task specific folder.", "idir", "=", "path", ".", "join", "(", "dbdir", ",", "ptdir", ")", "if", "not", "path", ".", "isdir", "(", "idir", ")", ":", "mkdir", "(", "idir", ")", "ipath", "=", "path", ".", "join", "(", "idir", ",", "\"{}.{}\"", ".", "format", "(", "uuid", ",", "imgfmt", ")", ")", "with", "open", "(", "ipath", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "byteio", ")", "return", "uuid" ]
Saves the specified image to disk. Args: byteio (bytes): image bytes to save to disk. imgfmt (str): used as the extension of the saved file. Returns: str: a uuid for the saved image that can be added to the database entry.
[ "Saves", "the", "specified", "image", "to", "disk", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/database.py#L265-L288
243,691
rosenbrockc/acorn
acorn/logging/database.py
TaskDB.log_uuid
def log_uuid(self, uuid): """Logs the object with the specified `uuid` to `self.uuids` if possible. Args: uuid (str): string value of :meth:`uuid.uuid4` value for the object. """ #We only need to try and describe an object once; if it is already in #our database, then just move along. if uuid not in self.uuids and uuid in uuids: self.uuids[uuid] = uuids[uuid].describe()
python
def log_uuid(self, uuid): """Logs the object with the specified `uuid` to `self.uuids` if possible. Args: uuid (str): string value of :meth:`uuid.uuid4` value for the object. """ #We only need to try and describe an object once; if it is already in #our database, then just move along. if uuid not in self.uuids and uuid in uuids: self.uuids[uuid] = uuids[uuid].describe()
[ "def", "log_uuid", "(", "self", ",", "uuid", ")", ":", "#We only need to try and describe an object once; if it is already in", "#our database, then just move along.", "if", "uuid", "not", "in", "self", ".", "uuids", "and", "uuid", "in", "uuids", ":", "self", ".", "uuids", "[", "uuid", "]", "=", "uuids", "[", "uuid", "]", ".", "describe", "(", ")" ]
Logs the object with the specified `uuid` to `self.uuids` if possible. Args: uuid (str): string value of :meth:`uuid.uuid4` value for the object.
[ "Logs", "the", "object", "with", "the", "specified", "uuid", "to", "self", ".", "uuids", "if", "possible", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/database.py#L350-L361
243,692
rosenbrockc/acorn
acorn/logging/database.py
TaskDB.get_option
def get_option(option, default=None, cast=None): """Returns the option value for the specified acorn database option. """ from acorn.config import settings config = settings("acorn") if (config.has_section("database") and config.has_option("database", option)): result = config.get("database", option) if cast is not None: result = cast(result) else: result = default return result
python
def get_option(option, default=None, cast=None): """Returns the option value for the specified acorn database option. """ from acorn.config import settings config = settings("acorn") if (config.has_section("database") and config.has_option("database", option)): result = config.get("database", option) if cast is not None: result = cast(result) else: result = default return result
[ "def", "get_option", "(", "option", ",", "default", "=", "None", ",", "cast", "=", "None", ")", ":", "from", "acorn", ".", "config", "import", "settings", "config", "=", "settings", "(", "\"acorn\"", ")", "if", "(", "config", ".", "has_section", "(", "\"database\"", ")", "and", "config", ".", "has_option", "(", "\"database\"", ",", "option", ")", ")", ":", "result", "=", "config", ".", "get", "(", "\"database\"", ",", "option", ")", "if", "cast", "is", "not", "None", ":", "result", "=", "cast", "(", "result", ")", "else", ":", "result", "=", "default", "return", "result" ]
Returns the option value for the specified acorn database option.
[ "Returns", "the", "option", "value", "for", "the", "specified", "acorn", "database", "option", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/database.py#L439-L452
243,693
rosenbrockc/acorn
acorn/logging/database.py
TaskDB.load
def load(self): """Deserializes the database from disk. """ #We load the database even when it is not configured to be #writable. After all, the user may decide part-way through a session to #begin writing again, and then we would want a history up to that point #to be valid. from os import path if path.isfile(self.dbpath): import json with open(self.dbpath) as f: jdb = json.load(f) self.entities = jdb["entities"] self.uuids = jdb["uuids"]
python
def load(self): """Deserializes the database from disk. """ #We load the database even when it is not configured to be #writable. After all, the user may decide part-way through a session to #begin writing again, and then we would want a history up to that point #to be valid. from os import path if path.isfile(self.dbpath): import json with open(self.dbpath) as f: jdb = json.load(f) self.entities = jdb["entities"] self.uuids = jdb["uuids"]
[ "def", "load", "(", "self", ")", ":", "#We load the database even when it is not configured to be", "#writable. After all, the user may decide part-way through a session to", "#begin writing again, and then we would want a history up to that point", "#to be valid.", "from", "os", "import", "path", "if", "path", ".", "isfile", "(", "self", ".", "dbpath", ")", ":", "import", "json", "with", "open", "(", "self", ".", "dbpath", ")", "as", "f", ":", "jdb", "=", "json", ".", "load", "(", "f", ")", "self", ".", "entities", "=", "jdb", "[", "\"entities\"", "]", "self", ".", "uuids", "=", "jdb", "[", "\"uuids\"", "]" ]
Deserializes the database from disk.
[ "Deserializes", "the", "database", "from", "disk", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/database.py#L454-L467
243,694
rosenbrockc/acorn
acorn/logging/database.py
TaskDB.save
def save(self, force=False): """Serializes the database file to disk. Args: force (bool): when True, the elapsed time since last save is ignored and the database is saved anyway (subject to global :data:`writeable` setting). """ from time import time # Since the DBs can get rather large, we don't want to save them every # single time a method is called. Instead, we only save them at the # frequency specified in the global settings file. from datetime import datetime savefreq = TaskDB.get_option("savefreq", 2, int) if self.lastsave is not None: delta = (datetime.fromtimestamp(time()) - datetime.fromtimestamp(self.lastsave)) elapsed = int(delta.total_seconds()/60) else: elapsed = savefreq + 1 if elapsed > savefreq or force: if not writeable: #We still overwrite the lastsave value so that this message doesn't #keep getting output for every :meth:`record` call. self.lastsave = time() msg.std("Skipping database write to disk by setting.", 2) return import json try: entities, compkeys = _json_clean(self.entities) jdb = {"entities": entities, "compkeys": compkeys, "uuids": self.uuids} with open(self.dbpath, 'w') as f: json.dump(jdb, f) except: # pragma: no cover from acorn.msg import err import sys raise err("{}: {}".format(*sys.exc_info()[0:2])) self.lastsave = time()
python
def save(self, force=False): """Serializes the database file to disk. Args: force (bool): when True, the elapsed time since last save is ignored and the database is saved anyway (subject to global :data:`writeable` setting). """ from time import time # Since the DBs can get rather large, we don't want to save them every # single time a method is called. Instead, we only save them at the # frequency specified in the global settings file. from datetime import datetime savefreq = TaskDB.get_option("savefreq", 2, int) if self.lastsave is not None: delta = (datetime.fromtimestamp(time()) - datetime.fromtimestamp(self.lastsave)) elapsed = int(delta.total_seconds()/60) else: elapsed = savefreq + 1 if elapsed > savefreq or force: if not writeable: #We still overwrite the lastsave value so that this message doesn't #keep getting output for every :meth:`record` call. self.lastsave = time() msg.std("Skipping database write to disk by setting.", 2) return import json try: entities, compkeys = _json_clean(self.entities) jdb = {"entities": entities, "compkeys": compkeys, "uuids": self.uuids} with open(self.dbpath, 'w') as f: json.dump(jdb, f) except: # pragma: no cover from acorn.msg import err import sys raise err("{}: {}".format(*sys.exc_info()[0:2])) self.lastsave = time()
[ "def", "save", "(", "self", ",", "force", "=", "False", ")", ":", "from", "time", "import", "time", "# Since the DBs can get rather large, we don't want to save them every", "# single time a method is called. Instead, we only save them at the", "# frequency specified in the global settings file.", "from", "datetime", "import", "datetime", "savefreq", "=", "TaskDB", ".", "get_option", "(", "\"savefreq\"", ",", "2", ",", "int", ")", "if", "self", ".", "lastsave", "is", "not", "None", ":", "delta", "=", "(", "datetime", ".", "fromtimestamp", "(", "time", "(", ")", ")", "-", "datetime", ".", "fromtimestamp", "(", "self", ".", "lastsave", ")", ")", "elapsed", "=", "int", "(", "delta", ".", "total_seconds", "(", ")", "/", "60", ")", "else", ":", "elapsed", "=", "savefreq", "+", "1", "if", "elapsed", ">", "savefreq", "or", "force", ":", "if", "not", "writeable", ":", "#We still overwrite the lastsave value so that this message doesn't", "#keep getting output for every :meth:`record` call.", "self", ".", "lastsave", "=", "time", "(", ")", "msg", ".", "std", "(", "\"Skipping database write to disk by setting.\"", ",", "2", ")", "return", "import", "json", "try", ":", "entities", ",", "compkeys", "=", "_json_clean", "(", "self", ".", "entities", ")", "jdb", "=", "{", "\"entities\"", ":", "entities", ",", "\"compkeys\"", ":", "compkeys", ",", "\"uuids\"", ":", "self", ".", "uuids", "}", "with", "open", "(", "self", ".", "dbpath", ",", "'w'", ")", "as", "f", ":", "json", ".", "dump", "(", "jdb", ",", "f", ")", "except", ":", "# pragma: no cover", "from", "acorn", ".", "msg", "import", "err", "import", "sys", "raise", "err", "(", "\"{}: {}\"", ".", "format", "(", "*", "sys", ".", "exc_info", "(", ")", "[", "0", ":", "2", "]", ")", ")", "self", ".", "lastsave", "=", "time", "(", ")" ]
Serializes the database file to disk. Args: force (bool): when True, the elapsed time since last save is ignored and the database is saved anyway (subject to global :data:`writeable` setting).
[ "Serializes", "the", "database", "file", "to", "disk", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/database.py#L469-L514
243,695
rosenbrockc/acorn
acorn/logging/database.py
Instance.describe
def describe(self): """Returns a dictionary describing the object based on its type. """ result = {} #Because we created an Instance object, we already know that this object #is not one of the regular built-in types (except, perhaps, for list, #dict and set objects that can have their tracking turned on). #For objects that are instantiated by the user in __main__, we will #already have a paper trail that shows exactly how it was done; but for #these, we have to rely on human-specified descriptions. from acorn.logging.descriptors import describe return describe(self.obj)
python
def describe(self): """Returns a dictionary describing the object based on its type. """ result = {} #Because we created an Instance object, we already know that this object #is not one of the regular built-in types (except, perhaps, for list, #dict and set objects that can have their tracking turned on). #For objects that are instantiated by the user in __main__, we will #already have a paper trail that shows exactly how it was done; but for #these, we have to rely on human-specified descriptions. from acorn.logging.descriptors import describe return describe(self.obj)
[ "def", "describe", "(", "self", ")", ":", "result", "=", "{", "}", "#Because we created an Instance object, we already know that this object", "#is not one of the regular built-in types (except, perhaps, for list,", "#dict and set objects that can have their tracking turned on).", "#For objects that are instantiated by the user in __main__, we will", "#already have a paper trail that shows exactly how it was done; but for", "#these, we have to rely on human-specified descriptions.", "from", "acorn", ".", "logging", ".", "descriptors", "import", "describe", "return", "describe", "(", "self", ".", "obj", ")" ]
Returns a dictionary describing the object based on its type.
[ "Returns", "a", "dictionary", "describing", "the", "object", "based", "on", "its", "type", "." ]
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/database.py#L533-L545
243,696
noirbizarre/minibench
minibench/_compat.py
load_module
def load_module(name, filename): '''Load a module into name given its filename''' if sys.version_info < (3, 5): import imp import warnings with warnings.catch_warnings(): # Required for Python 2.7 warnings.simplefilter("ignore", RuntimeWarning) return imp.load_source(name, filename) else: from importlib.machinery import SourceFileLoader loader = SourceFileLoader(name, filename) return loader.load_module()
python
def load_module(name, filename): '''Load a module into name given its filename''' if sys.version_info < (3, 5): import imp import warnings with warnings.catch_warnings(): # Required for Python 2.7 warnings.simplefilter("ignore", RuntimeWarning) return imp.load_source(name, filename) else: from importlib.machinery import SourceFileLoader loader = SourceFileLoader(name, filename) return loader.load_module()
[ "def", "load_module", "(", "name", ",", "filename", ")", ":", "if", "sys", ".", "version_info", "<", "(", "3", ",", "5", ")", ":", "import", "imp", "import", "warnings", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "# Required for Python 2.7", "warnings", ".", "simplefilter", "(", "\"ignore\"", ",", "RuntimeWarning", ")", "return", "imp", ".", "load_source", "(", "name", ",", "filename", ")", "else", ":", "from", "importlib", ".", "machinery", "import", "SourceFileLoader", "loader", "=", "SourceFileLoader", "(", "name", ",", "filename", ")", "return", "loader", ".", "load_module", "(", ")" ]
Load a module into name given its filename
[ "Load", "a", "module", "into", "name", "given", "its", "filename" ]
a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab
https://github.com/noirbizarre/minibench/blob/a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab/minibench/_compat.py#L11-L22
243,697
SeabornGames/Meta
seaborn_meta/calling_function.py
file_code
def file_code(function_index=1, function_name=None): """ This will return the code of the calling function function_index of 2 will give the parent of the caller function_name should not be used with function_index :param function_index: int of how many frames back the program should look :param function_name: str of what function to look for :return: str of the code from the target function """ info = function_info(function_index + 1, function_name) with open(info['file'], 'r') as fn: return fn.read()
python
def file_code(function_index=1, function_name=None): """ This will return the code of the calling function function_index of 2 will give the parent of the caller function_name should not be used with function_index :param function_index: int of how many frames back the program should look :param function_name: str of what function to look for :return: str of the code from the target function """ info = function_info(function_index + 1, function_name) with open(info['file'], 'r') as fn: return fn.read()
[ "def", "file_code", "(", "function_index", "=", "1", ",", "function_name", "=", "None", ")", ":", "info", "=", "function_info", "(", "function_index", "+", "1", ",", "function_name", ")", "with", "open", "(", "info", "[", "'file'", "]", ",", "'r'", ")", "as", "fn", ":", "return", "fn", ".", "read", "(", ")" ]
This will return the code of the calling function function_index of 2 will give the parent of the caller function_name should not be used with function_index :param function_index: int of how many frames back the program should look :param function_name: str of what function to look for :return: str of the code from the target function
[ "This", "will", "return", "the", "code", "of", "the", "calling", "function", "function_index", "of", "2", "will", "give", "the", "parent", "of", "the", "caller", "function_name", "should", "not", "be", "used", "with", "function_index" ]
f2a38ad8bcc5ac177e537645853593225895df46
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/calling_function.py#L66-L78
243,698
SeabornGames/Meta
seaborn_meta/calling_function.py
relevant_kwargs
def relevant_kwargs(function, exclude_keys='self', exclude_values=None, extra_values=None): """ This will return a dictionary of local variables that are parameters to the function provided in the arg. Example: function(**relevant_kwargs(function)) :param function: function to select parameters for :param exclude_keys: str,list,func if not a function it will be converted into a funciton, defaults to excluding None :param exclude_values: obj,list,func if not a function it will be convereted into one, defaults to excluding 'self' :param extra_values: dict of other values to include with local :return: dict of local variables for the function """ args = function_args(function) locals_values = function_kwargs(function_index=2, exclude_keys=exclude_keys) if extra_values: locals_values.update(extra_values) return {k: v for k, v in locals_values.items() if k in args}
python
def relevant_kwargs(function, exclude_keys='self', exclude_values=None, extra_values=None): """ This will return a dictionary of local variables that are parameters to the function provided in the arg. Example: function(**relevant_kwargs(function)) :param function: function to select parameters for :param exclude_keys: str,list,func if not a function it will be converted into a funciton, defaults to excluding None :param exclude_values: obj,list,func if not a function it will be convereted into one, defaults to excluding 'self' :param extra_values: dict of other values to include with local :return: dict of local variables for the function """ args = function_args(function) locals_values = function_kwargs(function_index=2, exclude_keys=exclude_keys) if extra_values: locals_values.update(extra_values) return {k: v for k, v in locals_values.items() if k in args}
[ "def", "relevant_kwargs", "(", "function", ",", "exclude_keys", "=", "'self'", ",", "exclude_values", "=", "None", ",", "extra_values", "=", "None", ")", ":", "args", "=", "function_args", "(", "function", ")", "locals_values", "=", "function_kwargs", "(", "function_index", "=", "2", ",", "exclude_keys", "=", "exclude_keys", ")", "if", "extra_values", ":", "locals_values", ".", "update", "(", "extra_values", ")", "return", "{", "k", ":", "v", "for", "k", ",", "v", "in", "locals_values", ".", "items", "(", ")", "if", "k", "in", "args", "}" ]
This will return a dictionary of local variables that are parameters to the function provided in the arg. Example: function(**relevant_kwargs(function)) :param function: function to select parameters for :param exclude_keys: str,list,func if not a function it will be converted into a funciton, defaults to excluding None :param exclude_values: obj,list,func if not a function it will be convereted into one, defaults to excluding 'self' :param extra_values: dict of other values to include with local :return: dict of local variables for the function
[ "This", "will", "return", "a", "dictionary", "of", "local", "variables", "that", "are", "parameters", "to", "the", "function", "provided", "in", "the", "arg", "." ]
f2a38ad8bcc5ac177e537645853593225895df46
https://github.com/SeabornGames/Meta/blob/f2a38ad8bcc5ac177e537645853593225895df46/seaborn_meta/calling_function.py#L81-L102
243,699
dossier/dossier.models
dossier/models/dragnet.py
make_feature
def make_feature(fc): '''Builds a new `StringCounter` from the many `StringCounters` in the input `fc`. This StringCounter will define one of the targets for the `MultinomialNB` classifier. This crucial function decides the relative importance of features extracted by the ETL pipeline. This is essentially a form of domain fitting that allows us to tune the extraction to the fields that are important to a domain. However, if the NER for a domain is inadequate, then the primary purpose of these relative weightings is to remove bogus NER extractions. ''' feat = StringCounter() rejects = set() keepers = set() #keepers_keys = ['GPE', 'PERSON', 'ORGANIZATION', 'usernames'] keepers_keys = ['phone', 'email'] #['usernames', 'phone', 'email', 'ORGANIZATION', 'PERSON'] rejects_keys = ['keywords', 'usernames', 'ORGANIZATION', 'PERSON'] # The features used to pull the keys for the classifier for f, strength in [('keywords', 10**4), ('GPE', 1), ('bow', 1), ('bowNP_sip', 10**8), ('phone', 10**12), ('email', 10**12), ('bowNP', 10**3), ('PERSON', 10**8), ('ORGANIZATION', 10**6), ('usernames', 10**12)]: if strength == 1: feat += fc[f] else: feat += StringCounter({key: strength * count for key, count in fc[f].items()}) if f in rejects_keys: map(rejects.add, fc[f]) if f in keepers_keys: map(keepers.add, fc[f]) if u'' in feat: feat.pop(u'') return feat, rejects, keepers
python
def make_feature(fc): '''Builds a new `StringCounter` from the many `StringCounters` in the input `fc`. This StringCounter will define one of the targets for the `MultinomialNB` classifier. This crucial function decides the relative importance of features extracted by the ETL pipeline. This is essentially a form of domain fitting that allows us to tune the extraction to the fields that are important to a domain. However, if the NER for a domain is inadequate, then the primary purpose of these relative weightings is to remove bogus NER extractions. ''' feat = StringCounter() rejects = set() keepers = set() #keepers_keys = ['GPE', 'PERSON', 'ORGANIZATION', 'usernames'] keepers_keys = ['phone', 'email'] #['usernames', 'phone', 'email', 'ORGANIZATION', 'PERSON'] rejects_keys = ['keywords', 'usernames', 'ORGANIZATION', 'PERSON'] # The features used to pull the keys for the classifier for f, strength in [('keywords', 10**4), ('GPE', 1), ('bow', 1), ('bowNP_sip', 10**8), ('phone', 10**12), ('email', 10**12), ('bowNP', 10**3), ('PERSON', 10**8), ('ORGANIZATION', 10**6), ('usernames', 10**12)]: if strength == 1: feat += fc[f] else: feat += StringCounter({key: strength * count for key, count in fc[f].items()}) if f in rejects_keys: map(rejects.add, fc[f]) if f in keepers_keys: map(keepers.add, fc[f]) if u'' in feat: feat.pop(u'') return feat, rejects, keepers
[ "def", "make_feature", "(", "fc", ")", ":", "feat", "=", "StringCounter", "(", ")", "rejects", "=", "set", "(", ")", "keepers", "=", "set", "(", ")", "#keepers_keys = ['GPE', 'PERSON', 'ORGANIZATION', 'usernames']", "keepers_keys", "=", "[", "'phone'", ",", "'email'", "]", "#['usernames', 'phone', 'email', 'ORGANIZATION', 'PERSON']", "rejects_keys", "=", "[", "'keywords'", ",", "'usernames'", ",", "'ORGANIZATION'", ",", "'PERSON'", "]", "# The features used to pull the keys for the classifier", "for", "f", ",", "strength", "in", "[", "(", "'keywords'", ",", "10", "**", "4", ")", ",", "(", "'GPE'", ",", "1", ")", ",", "(", "'bow'", ",", "1", ")", ",", "(", "'bowNP_sip'", ",", "10", "**", "8", ")", ",", "(", "'phone'", ",", "10", "**", "12", ")", ",", "(", "'email'", ",", "10", "**", "12", ")", ",", "(", "'bowNP'", ",", "10", "**", "3", ")", ",", "(", "'PERSON'", ",", "10", "**", "8", ")", ",", "(", "'ORGANIZATION'", ",", "10", "**", "6", ")", ",", "(", "'usernames'", ",", "10", "**", "12", ")", "]", ":", "if", "strength", "==", "1", ":", "feat", "+=", "fc", "[", "f", "]", "else", ":", "feat", "+=", "StringCounter", "(", "{", "key", ":", "strength", "*", "count", "for", "key", ",", "count", "in", "fc", "[", "f", "]", ".", "items", "(", ")", "}", ")", "if", "f", "in", "rejects_keys", ":", "map", "(", "rejects", ".", "add", ",", "fc", "[", "f", "]", ")", "if", "f", "in", "keepers_keys", ":", "map", "(", "keepers", ".", "add", ",", "fc", "[", "f", "]", ")", "if", "u''", "in", "feat", ":", "feat", ".", "pop", "(", "u''", ")", "return", "feat", ",", "rejects", ",", "keepers" ]
Builds a new `StringCounter` from the many `StringCounters` in the input `fc`. This StringCounter will define one of the targets for the `MultinomialNB` classifier. This crucial function decides the relative importance of features extracted by the ETL pipeline. This is essentially a form of domain fitting that allows us to tune the extraction to the fields that are important to a domain. However, if the NER for a domain is inadequate, then the primary purpose of these relative weightings is to remove bogus NER extractions.
[ "Builds", "a", "new", "StringCounter", "from", "the", "many", "StringCounters", "in", "the", "input", "fc", ".", "This", "StringCounter", "will", "define", "one", "of", "the", "targets", "for", "the", "MultinomialNB", "classifier", "." ]
c9e282f690eab72963926329efe1600709e48b13
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/dragnet.py#L46-L79