Dataset schema (column: type, value range):

id: int32 (0 to 252k)
repo: string (lengths 7 to 55)
path: string (lengths 4 to 127)
func_name: string (lengths 1 to 88)
original_string: string (lengths 75 to 19.8k)
language: string (1 distinct value)
code: string (lengths 75 to 19.8k)
code_tokens: list
docstring: string (lengths 3 to 17.3k)
docstring_tokens: list
sha: string (lengths 40 to 40)
url: string (lengths 87 to 242)
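Every record in the listing below carries these twelve fields; in the records shown here, code holds the same text as original_string, and docstring_tokens is the tokenized form of docstring. As a minimal sketch of how such records can be consumed (assuming they have been exported to a JSON Lines file named records.jsonl, a hypothetical name that is not part of this listing), the fields can be read and inspected like this:

import json

# Minimal sketch: read dataset records stored one per line as JSON Lines.
# "records.jsonl" is a hypothetical file name; each line is assumed to hold
# one record with the columns listed above (id, repo, path, func_name,
# original_string, language, code, code_tokens, docstring, docstring_tokens,
# sha, url).
with open("records.jsonl", "r", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        # "url" is a GitHub permalink built from repo, sha, path and the
        # function's line range, so it can be printed or followed directly.
        print(record["id"], record["language"], record["func_name"])
        print("  ", record["url"])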
238,300
humilis/humilis-lambdautils
lambdautils/state.py
get_state_batch
def get_state_batch(keys, namespace=None, consistent=True): """Get a batch of items from the state store.""" ukeys = set(keys) if namespace: ns_keys = ["{}:{}".format(namespace, key) for key in ukeys] uvalues = {k: v for k, v in zip(ukeys, get_item_batch(ns_keys, consistent=consistent))} return list(zip(keys, (uvalues[k] for k in keys)))
python
def get_state_batch(keys, namespace=None, consistent=True): """Get a batch of items from the state store.""" ukeys = set(keys) if namespace: ns_keys = ["{}:{}".format(namespace, key) for key in ukeys] uvalues = {k: v for k, v in zip(ukeys, get_item_batch(ns_keys, consistent=consistent))} return list(zip(keys, (uvalues[k] for k in keys)))
[ "def", "get_state_batch", "(", "keys", ",", "namespace", "=", "None", ",", "consistent", "=", "True", ")", ":", "ukeys", "=", "set", "(", "keys", ")", "if", "namespace", ":", "ns_keys", "=", "[", "\"{}:{}\"", ".", "format", "(", "namespace", ",", "key", ")", "for", "key", "in", "ukeys", "]", "uvalues", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "zip", "(", "ukeys", ",", "get_item_batch", "(", "ns_keys", ",", "consistent", "=", "consistent", ")", ")", "}", "return", "list", "(", "zip", "(", "keys", ",", "(", "uvalues", "[", "k", "]", "for", "k", "in", "keys", ")", ")", ")" ]
Get a batch of items from the state store.
[ "Get", "a", "batch", "of", "items", "from", "the", "state", "store", "." ]
58f75eb5ace23523c283708d56a9193181ea7e8e
https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/state.py#L227-L237
238,301
humilis/humilis-lambdautils
lambdautils/state.py
set_state_batch
def set_state_batch(keys, values, namespace=None, ttl=3600*24*365): """Set a batch of items in the state store.""" keys, values = zip(*{k: v for k, v in zip(keys, values)}.items()) if namespace: keys = ["{}:{}".format(namespace, key) for key in keys] return set_item_batch(keys, values, ttl)
python
def set_state_batch(keys, values, namespace=None, ttl=3600*24*365): """Set a batch of items in the state store.""" keys, values = zip(*{k: v for k, v in zip(keys, values)}.items()) if namespace: keys = ["{}:{}".format(namespace, key) for key in keys] return set_item_batch(keys, values, ttl)
[ "def", "set_state_batch", "(", "keys", ",", "values", ",", "namespace", "=", "None", ",", "ttl", "=", "3600", "*", "24", "*", "365", ")", ":", "keys", ",", "values", "=", "zip", "(", "*", "{", "k", ":", "v", "for", "k", ",", "v", "in", "zip", "(", "keys", ",", "values", ")", "}", ".", "items", "(", ")", ")", "if", "namespace", ":", "keys", "=", "[", "\"{}:{}\"", ".", "format", "(", "namespace", ",", "key", ")", "for", "key", "in", "keys", "]", "return", "set_item_batch", "(", "keys", ",", "values", ",", "ttl", ")" ]
Set a batch of items in the state store.
[ "Set", "a", "batch", "of", "items", "in", "the", "state", "store", "." ]
58f75eb5ace23523c283708d56a9193181ea7e8e
https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/state.py#L259-L267
238,302
humilis/humilis-lambdautils
lambdautils/state.py
set_state
def set_state(key, value, namespace=None, table_name=None, environment=None, layer=None, stage=None, shard_id=None, consistent=True, serializer=json.dumps, wait_exponential_multiplier=500, wait_exponential_max=5000, stop_max_delay=10000, ttl=None): """Set Lambda state value.""" if table_name is None: table_name = _state_table_name(environment=environment, layer=layer, stage=stage) if not table_name: msg = ("Can't produce state table name: unable to set state " "item '{}'".format(key)) logger.error(msg) raise StateTableError(msg) return dynamodb = boto3.resource("dynamodb") table = dynamodb.Table(table_name) logger.info("Putting {} -> {} in DynamoDB table {}".format(key, value, table_name)) if serializer: try: value = serializer(value) except TypeError: logger.error( "Value for state key '{}' is not json-serializable".format( key)) raise if namespace: key = "{}:{}".format(namespace, key) if shard_id: key = "{}:{}".format(shard_id, key) item = {"id": key, "value": value} if ttl: item["ttl"] = {"N": str(int(time.time() + ttl))} @retry(retry_on_exception=_is_critical_exception, wait_exponential_multiplier=500, wait_exponential_max=5000, stop_max_delay=10000) def put_item(): try: return table.put_item(Item=item) except Exception as err: if _is_dynamodb_critical_exception(err): raise CriticalError(err) else: raise resp = put_item() logger.info("Response from DynamoDB: '{}'".format(resp)) return resp
python
def set_state(key, value, namespace=None, table_name=None, environment=None, layer=None, stage=None, shard_id=None, consistent=True, serializer=json.dumps, wait_exponential_multiplier=500, wait_exponential_max=5000, stop_max_delay=10000, ttl=None): """Set Lambda state value.""" if table_name is None: table_name = _state_table_name(environment=environment, layer=layer, stage=stage) if not table_name: msg = ("Can't produce state table name: unable to set state " "item '{}'".format(key)) logger.error(msg) raise StateTableError(msg) return dynamodb = boto3.resource("dynamodb") table = dynamodb.Table(table_name) logger.info("Putting {} -> {} in DynamoDB table {}".format(key, value, table_name)) if serializer: try: value = serializer(value) except TypeError: logger.error( "Value for state key '{}' is not json-serializable".format( key)) raise if namespace: key = "{}:{}".format(namespace, key) if shard_id: key = "{}:{}".format(shard_id, key) item = {"id": key, "value": value} if ttl: item["ttl"] = {"N": str(int(time.time() + ttl))} @retry(retry_on_exception=_is_critical_exception, wait_exponential_multiplier=500, wait_exponential_max=5000, stop_max_delay=10000) def put_item(): try: return table.put_item(Item=item) except Exception as err: if _is_dynamodb_critical_exception(err): raise CriticalError(err) else: raise resp = put_item() logger.info("Response from DynamoDB: '{}'".format(resp)) return resp
[ "def", "set_state", "(", "key", ",", "value", ",", "namespace", "=", "None", ",", "table_name", "=", "None", ",", "environment", "=", "None", ",", "layer", "=", "None", ",", "stage", "=", "None", ",", "shard_id", "=", "None", ",", "consistent", "=", "True", ",", "serializer", "=", "json", ".", "dumps", ",", "wait_exponential_multiplier", "=", "500", ",", "wait_exponential_max", "=", "5000", ",", "stop_max_delay", "=", "10000", ",", "ttl", "=", "None", ")", ":", "if", "table_name", "is", "None", ":", "table_name", "=", "_state_table_name", "(", "environment", "=", "environment", ",", "layer", "=", "layer", ",", "stage", "=", "stage", ")", "if", "not", "table_name", ":", "msg", "=", "(", "\"Can't produce state table name: unable to set state \"", "\"item '{}'\"", ".", "format", "(", "key", ")", ")", "logger", ".", "error", "(", "msg", ")", "raise", "StateTableError", "(", "msg", ")", "return", "dynamodb", "=", "boto3", ".", "resource", "(", "\"dynamodb\"", ")", "table", "=", "dynamodb", ".", "Table", "(", "table_name", ")", "logger", ".", "info", "(", "\"Putting {} -> {} in DynamoDB table {}\"", ".", "format", "(", "key", ",", "value", ",", "table_name", ")", ")", "if", "serializer", ":", "try", ":", "value", "=", "serializer", "(", "value", ")", "except", "TypeError", ":", "logger", ".", "error", "(", "\"Value for state key '{}' is not json-serializable\"", ".", "format", "(", "key", ")", ")", "raise", "if", "namespace", ":", "key", "=", "\"{}:{}\"", ".", "format", "(", "namespace", ",", "key", ")", "if", "shard_id", ":", "key", "=", "\"{}:{}\"", ".", "format", "(", "shard_id", ",", "key", ")", "item", "=", "{", "\"id\"", ":", "key", ",", "\"value\"", ":", "value", "}", "if", "ttl", ":", "item", "[", "\"ttl\"", "]", "=", "{", "\"N\"", ":", "str", "(", "int", "(", "time", ".", "time", "(", ")", "+", "ttl", ")", ")", "}", "@", "retry", "(", "retry_on_exception", "=", "_is_critical_exception", ",", "wait_exponential_multiplier", "=", "500", ",", "wait_exponential_max", "=", "5000", ",", "stop_max_delay", "=", "10000", ")", "def", "put_item", "(", ")", ":", "try", ":", "return", "table", ".", "put_item", "(", "Item", "=", "item", ")", "except", "Exception", "as", "err", ":", "if", "_is_dynamodb_critical_exception", "(", "err", ")", ":", "raise", "CriticalError", "(", "err", ")", "else", ":", "raise", "resp", "=", "put_item", "(", ")", "logger", ".", "info", "(", "\"Response from DynamoDB: '{}'\"", ".", "format", "(", "resp", ")", ")", "return", "resp" ]
Set Lambda state value.
[ "Set", "Lambda", "state", "value", "." ]
58f75eb5ace23523c283708d56a9193181ea7e8e
https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/state.py#L270-L324
238,303
humilis/humilis-lambdautils
lambdautils/state.py
delete_state
def delete_state(key, namespace=None, table_name=None, environment=None, layer=None, stage=None, shard_id=None, consistent=True, wait_exponential_multiplier=500, wait_exponential_max=5000, stop_max_delay=10000): """Delete Lambda state value.""" if table_name is None: table_name = _state_table_name(environment=environment, layer=layer, stage=stage) if not table_name: msg = ("Can't produce state table name: unable to set state " "item '{}'".format(key)) logger.error(msg) raise StateTableError(msg) return dynamodb = boto3.resource("dynamodb") table = dynamodb.Table(table_name) logger.info("Deleting {} in DynamoDB table {}".format(key, table_name)) if namespace: key = "{}:{}".format(namespace, key) if shard_id: key = "{}:{}".format(shard_id, key) @retry(retry_on_exception=_is_critical_exception, wait_exponential_multiplier=500, wait_exponential_max=5000, stop_max_delay=10000) def delete_item(): try: return table.delete_item(Key={"id": key}) except Exception as err: if _is_dynamodb_critical_exception(err): raise CriticalError(err) else: raise resp = delete_item() logger.info("Response from DynamoDB: '{}'".format(resp)) return resp
python
def delete_state(key, namespace=None, table_name=None, environment=None, layer=None, stage=None, shard_id=None, consistent=True, wait_exponential_multiplier=500, wait_exponential_max=5000, stop_max_delay=10000): """Delete Lambda state value.""" if table_name is None: table_name = _state_table_name(environment=environment, layer=layer, stage=stage) if not table_name: msg = ("Can't produce state table name: unable to set state " "item '{}'".format(key)) logger.error(msg) raise StateTableError(msg) return dynamodb = boto3.resource("dynamodb") table = dynamodb.Table(table_name) logger.info("Deleting {} in DynamoDB table {}".format(key, table_name)) if namespace: key = "{}:{}".format(namespace, key) if shard_id: key = "{}:{}".format(shard_id, key) @retry(retry_on_exception=_is_critical_exception, wait_exponential_multiplier=500, wait_exponential_max=5000, stop_max_delay=10000) def delete_item(): try: return table.delete_item(Key={"id": key}) except Exception as err: if _is_dynamodb_critical_exception(err): raise CriticalError(err) else: raise resp = delete_item() logger.info("Response from DynamoDB: '{}'".format(resp)) return resp
[ "def", "delete_state", "(", "key", ",", "namespace", "=", "None", ",", "table_name", "=", "None", ",", "environment", "=", "None", ",", "layer", "=", "None", ",", "stage", "=", "None", ",", "shard_id", "=", "None", ",", "consistent", "=", "True", ",", "wait_exponential_multiplier", "=", "500", ",", "wait_exponential_max", "=", "5000", ",", "stop_max_delay", "=", "10000", ")", ":", "if", "table_name", "is", "None", ":", "table_name", "=", "_state_table_name", "(", "environment", "=", "environment", ",", "layer", "=", "layer", ",", "stage", "=", "stage", ")", "if", "not", "table_name", ":", "msg", "=", "(", "\"Can't produce state table name: unable to set state \"", "\"item '{}'\"", ".", "format", "(", "key", ")", ")", "logger", ".", "error", "(", "msg", ")", "raise", "StateTableError", "(", "msg", ")", "return", "dynamodb", "=", "boto3", ".", "resource", "(", "\"dynamodb\"", ")", "table", "=", "dynamodb", ".", "Table", "(", "table_name", ")", "logger", ".", "info", "(", "\"Deleting {} in DynamoDB table {}\"", ".", "format", "(", "key", ",", "table_name", ")", ")", "if", "namespace", ":", "key", "=", "\"{}:{}\"", ".", "format", "(", "namespace", ",", "key", ")", "if", "shard_id", ":", "key", "=", "\"{}:{}\"", ".", "format", "(", "shard_id", ",", "key", ")", "@", "retry", "(", "retry_on_exception", "=", "_is_critical_exception", ",", "wait_exponential_multiplier", "=", "500", ",", "wait_exponential_max", "=", "5000", ",", "stop_max_delay", "=", "10000", ")", "def", "delete_item", "(", ")", ":", "try", ":", "return", "table", ".", "delete_item", "(", "Key", "=", "{", "\"id\"", ":", "key", "}", ")", "except", "Exception", "as", "err", ":", "if", "_is_dynamodb_critical_exception", "(", "err", ")", ":", "raise", "CriticalError", "(", "err", ")", "else", ":", "raise", "resp", "=", "delete_item", "(", ")", "logger", ".", "info", "(", "\"Response from DynamoDB: '{}'\"", ".", "format", "(", "resp", ")", ")", "return", "resp" ]
Delete Lambda state value.
[ "Delete", "Lambda", "state", "value", "." ]
58f75eb5ace23523c283708d56a9193181ea7e8e
https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/state.py#L327-L369
238,304
humilis/humilis-lambdautils
lambdautils/state.py
produce_context
def produce_context(namespace, context_id, max_delay=None): """Produce event context.""" try: context_obj = get_context(namespace, context_id) logger.info("Found context '%s:%s'", namespace, context_id) except ContextError: logger.info("Context '%s:%s' not found", namespace, context_id) if max_delay is not None: max_delay = float(max_delay) logger.info("Context error handled with max_delay=%s", max_delay) if not max_delay \ or arrival_delay_greater_than(context_id, max_delay): context_obj = {} logger.info( "Timeout: waited %s seconds for context '%s'", max_delay, context_id) else: msg = "Context '{}' not found: resorting".format(context_id) raise OutOfOrderError(msg) return context_obj
python
def produce_context(namespace, context_id, max_delay=None): """Produce event context.""" try: context_obj = get_context(namespace, context_id) logger.info("Found context '%s:%s'", namespace, context_id) except ContextError: logger.info("Context '%s:%s' not found", namespace, context_id) if max_delay is not None: max_delay = float(max_delay) logger.info("Context error handled with max_delay=%s", max_delay) if not max_delay \ or arrival_delay_greater_than(context_id, max_delay): context_obj = {} logger.info( "Timeout: waited %s seconds for context '%s'", max_delay, context_id) else: msg = "Context '{}' not found: resorting".format(context_id) raise OutOfOrderError(msg) return context_obj
[ "def", "produce_context", "(", "namespace", ",", "context_id", ",", "max_delay", "=", "None", ")", ":", "try", ":", "context_obj", "=", "get_context", "(", "namespace", ",", "context_id", ")", "logger", ".", "info", "(", "\"Found context '%s:%s'\"", ",", "namespace", ",", "context_id", ")", "except", "ContextError", ":", "logger", ".", "info", "(", "\"Context '%s:%s' not found\"", ",", "namespace", ",", "context_id", ")", "if", "max_delay", "is", "not", "None", ":", "max_delay", "=", "float", "(", "max_delay", ")", "logger", ".", "info", "(", "\"Context error handled with max_delay=%s\"", ",", "max_delay", ")", "if", "not", "max_delay", "or", "arrival_delay_greater_than", "(", "context_id", ",", "max_delay", ")", ":", "context_obj", "=", "{", "}", "logger", ".", "info", "(", "\"Timeout: waited %s seconds for context '%s'\"", ",", "max_delay", ",", "context_id", ")", "else", ":", "msg", "=", "\"Context '{}' not found: resorting\"", ".", "format", "(", "context_id", ")", "raise", "OutOfOrderError", "(", "msg", ")", "return", "context_obj" ]
Produce event context.
[ "Produce", "event", "context", "." ]
58f75eb5ace23523c283708d56a9193181ea7e8e
https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/state.py#L372-L392
238,305
humilis/humilis-lambdautils
lambdautils/state.py
get_context
def get_context(namespace, context_id): """Get stored context object.""" context_obj = get_state(context_id, namespace=namespace) if not context_obj: raise ContextError("Context '{}' not found in namespace '{}'".format( context_id, namespace)) return context_obj
python
def get_context(namespace, context_id): """Get stored context object.""" context_obj = get_state(context_id, namespace=namespace) if not context_obj: raise ContextError("Context '{}' not found in namespace '{}'".format( context_id, namespace)) return context_obj
[ "def", "get_context", "(", "namespace", ",", "context_id", ")", ":", "context_obj", "=", "get_state", "(", "context_id", ",", "namespace", "=", "namespace", ")", "if", "not", "context_obj", ":", "raise", "ContextError", "(", "\"Context '{}' not found in namespace '{}'\"", ".", "format", "(", "context_id", ",", "namespace", ")", ")", "return", "context_obj" ]
Get stored context object.
[ "Get", "stored", "context", "object", "." ]
58f75eb5ace23523c283708d56a9193181ea7e8e
https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/state.py#L395-L401
238,306
humilis/humilis-lambdautils
lambdautils/state.py
arrival_delay_greater_than
def arrival_delay_greater_than(item_id, delay, namespace="_expected_arrival"): """Check if an item arrival is delayed more than a given amount.""" expected = get_state(item_id, namespace=namespace) now = time.time() if expected and (now - expected) > delay: logger.error("Timeout: waited %s seconds for parent.", delay) return True elif expected: logger.info("Still out of order but no timeout: %s-%s <= %s.", now, expected, delay) return False elif delay > 0: logger.info("Storing expected arrival time (%s) for context '%s'", datetime.fromtimestamp(now).isoformat(), item_id) set_state(item_id, now, namespace=namespace) return False else: logger.info("Event is out of order but not waiting for parent.") return True
python
def arrival_delay_greater_than(item_id, delay, namespace="_expected_arrival"): """Check if an item arrival is delayed more than a given amount.""" expected = get_state(item_id, namespace=namespace) now = time.time() if expected and (now - expected) > delay: logger.error("Timeout: waited %s seconds for parent.", delay) return True elif expected: logger.info("Still out of order but no timeout: %s-%s <= %s.", now, expected, delay) return False elif delay > 0: logger.info("Storing expected arrival time (%s) for context '%s'", datetime.fromtimestamp(now).isoformat(), item_id) set_state(item_id, now, namespace=namespace) return False else: logger.info("Event is out of order but not waiting for parent.") return True
[ "def", "arrival_delay_greater_than", "(", "item_id", ",", "delay", ",", "namespace", "=", "\"_expected_arrival\"", ")", ":", "expected", "=", "get_state", "(", "item_id", ",", "namespace", "=", "namespace", ")", "now", "=", "time", ".", "time", "(", ")", "if", "expected", "and", "(", "now", "-", "expected", ")", ">", "delay", ":", "logger", ".", "error", "(", "\"Timeout: waited %s seconds for parent.\"", ",", "delay", ")", "return", "True", "elif", "expected", ":", "logger", ".", "info", "(", "\"Still out of order but no timeout: %s-%s <= %s.\"", ",", "now", ",", "expected", ",", "delay", ")", "return", "False", "elif", "delay", ">", "0", ":", "logger", ".", "info", "(", "\"Storing expected arrival time (%s) for context '%s'\"", ",", "datetime", ".", "fromtimestamp", "(", "now", ")", ".", "isoformat", "(", ")", ",", "item_id", ")", "set_state", "(", "item_id", ",", "now", ",", "namespace", "=", "namespace", ")", "return", "False", "else", ":", "logger", ".", "info", "(", "\"Event is out of order but not waiting for parent.\"", ")", "return", "True" ]
Check if an item arrival is delayed more than a given amount.
[ "Check", "if", "an", "item", "arrival", "is", "delayed", "more", "than", "a", "given", "amount", "." ]
58f75eb5ace23523c283708d56a9193181ea7e8e
https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/state.py#L409-L427
238,307
scruffy-t/mpls
mpls/mpls.py
get
def get(name, stype, **kwargs): """Returns the rcParams specified in the style file given by `name` and `stype`. Parameters ---------- name: str The name of the style. stype: str Any of ('context', 'style', 'palette'). kwargs: - stylelib_url: str Overwrite the value in the local config with the specified url. - ignore_cache: bool Ignore files in the cache and force loading from the stylelib. Raises ------ ValueError: If `stype` is not any of ('context', 'style', 'palette') Returns ------- rcParams: dict The parameter dict of the file. """ stype = str(stype) params = {} if stype in MPLS_STYPES: params.update(__get(name, stype, **kwargs)) else: raise ValueError('unexpected stype: {}! Must be any of {!r}'.format(stype, MPLS_STYPES)) # color palette hack if params.get('axes.prop_cycle'): params['axes.prop_cycle'] = mpl.rcsetup.cycler('color', params['axes.prop_cycle']) return params
python
def get(name, stype, **kwargs): """Returns the rcParams specified in the style file given by `name` and `stype`. Parameters ---------- name: str The name of the style. stype: str Any of ('context', 'style', 'palette'). kwargs: - stylelib_url: str Overwrite the value in the local config with the specified url. - ignore_cache: bool Ignore files in the cache and force loading from the stylelib. Raises ------ ValueError: If `stype` is not any of ('context', 'style', 'palette') Returns ------- rcParams: dict The parameter dict of the file. """ stype = str(stype) params = {} if stype in MPLS_STYPES: params.update(__get(name, stype, **kwargs)) else: raise ValueError('unexpected stype: {}! Must be any of {!r}'.format(stype, MPLS_STYPES)) # color palette hack if params.get('axes.prop_cycle'): params['axes.prop_cycle'] = mpl.rcsetup.cycler('color', params['axes.prop_cycle']) return params
[ "def", "get", "(", "name", ",", "stype", ",", "*", "*", "kwargs", ")", ":", "stype", "=", "str", "(", "stype", ")", "params", "=", "{", "}", "if", "stype", "in", "MPLS_STYPES", ":", "params", ".", "update", "(", "__get", "(", "name", ",", "stype", ",", "*", "*", "kwargs", ")", ")", "else", ":", "raise", "ValueError", "(", "'unexpected stype: {}! Must be any of {!r}'", ".", "format", "(", "stype", ",", "MPLS_STYPES", ")", ")", "# color palette hack", "if", "params", ".", "get", "(", "'axes.prop_cycle'", ")", ":", "params", "[", "'axes.prop_cycle'", "]", "=", "mpl", ".", "rcsetup", ".", "cycler", "(", "'color'", ",", "params", "[", "'axes.prop_cycle'", "]", ")", "return", "params" ]
Returns the rcParams specified in the style file given by `name` and `stype`. Parameters ---------- name: str The name of the style. stype: str Any of ('context', 'style', 'palette'). kwargs: - stylelib_url: str Overwrite the value in the local config with the specified url. - ignore_cache: bool Ignore files in the cache and force loading from the stylelib. Raises ------ ValueError: If `stype` is not any of ('context', 'style', 'palette') Returns ------- rcParams: dict The parameter dict of the file.
[ "Returns", "the", "rcParams", "specified", "in", "the", "style", "file", "given", "by", "name", "and", "stype", "." ]
1320d1217cd72404509da49d0ea7b65163a7b40b
https://github.com/scruffy-t/mpls/blob/1320d1217cd72404509da49d0ea7b65163a7b40b/mpls/mpls.py#L83-L120
238,308
scruffy-t/mpls
mpls/mpls.py
collect
def collect(context=None, style=None, palette=None, **kwargs): """Returns the merged rcParams dict of the specified context, style, and palette. Parameters ---------- context: str style: str palette: str kwargs: - Returns ------- rcParams: dict The merged parameter dicts of the specified context, style, and palette. Notes ----- The rcParams dicts are loaded and updated in the order: context, style, palette. That means if a context parameter is also defined in the style or palette dict, it will be overwritten. There is currently no checking being done to avoid this. """ params = {} if context: params.update(get(context, 'context', **kwargs)) if style: params.update(get(style, 'style', **kwargs)) if palette: params.update(get(palette, 'palette', **kwargs)) return params
python
def collect(context=None, style=None, palette=None, **kwargs): """Returns the merged rcParams dict of the specified context, style, and palette. Parameters ---------- context: str style: str palette: str kwargs: - Returns ------- rcParams: dict The merged parameter dicts of the specified context, style, and palette. Notes ----- The rcParams dicts are loaded and updated in the order: context, style, palette. That means if a context parameter is also defined in the style or palette dict, it will be overwritten. There is currently no checking being done to avoid this. """ params = {} if context: params.update(get(context, 'context', **kwargs)) if style: params.update(get(style, 'style', **kwargs)) if palette: params.update(get(palette, 'palette', **kwargs)) return params
[ "def", "collect", "(", "context", "=", "None", ",", "style", "=", "None", ",", "palette", "=", "None", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "}", "if", "context", ":", "params", ".", "update", "(", "get", "(", "context", ",", "'context'", ",", "*", "*", "kwargs", ")", ")", "if", "style", ":", "params", ".", "update", "(", "get", "(", "style", ",", "'style'", ",", "*", "*", "kwargs", ")", ")", "if", "palette", ":", "params", ".", "update", "(", "get", "(", "palette", ",", "'palette'", ",", "*", "*", "kwargs", ")", ")", "return", "params" ]
Returns the merged rcParams dict of the specified context, style, and palette. Parameters ---------- context: str style: str palette: str kwargs: - Returns ------- rcParams: dict The merged parameter dicts of the specified context, style, and palette. Notes ----- The rcParams dicts are loaded and updated in the order: context, style, palette. That means if a context parameter is also defined in the style or palette dict, it will be overwritten. There is currently no checking being done to avoid this.
[ "Returns", "the", "merged", "rcParams", "dict", "of", "the", "specified", "context", "style", "and", "palette", "." ]
1320d1217cd72404509da49d0ea7b65163a7b40b
https://github.com/scruffy-t/mpls/blob/1320d1217cd72404509da49d0ea7b65163a7b40b/mpls/mpls.py#L123-L155
238,309
rtluckie/seria
seria/utils.py
str_to_num
def str_to_num(i, exact_match=True): """ Attempts to convert a str to either an int or float """ # TODO: Cleanup -- this is really ugly if not isinstance(i, str): return i try: if not exact_match: return int(i) elif str(int(i)) == i: return int(i) elif str(float(i)) == i: return float(i) else: pass except ValueError: pass return i
python
def str_to_num(i, exact_match=True): """ Attempts to convert a str to either an int or float """ # TODO: Cleanup -- this is really ugly if not isinstance(i, str): return i try: if not exact_match: return int(i) elif str(int(i)) == i: return int(i) elif str(float(i)) == i: return float(i) else: pass except ValueError: pass return i
[ "def", "str_to_num", "(", "i", ",", "exact_match", "=", "True", ")", ":", "# TODO: Cleanup -- this is really ugly", "if", "not", "isinstance", "(", "i", ",", "str", ")", ":", "return", "i", "try", ":", "if", "not", "exact_match", ":", "return", "int", "(", "i", ")", "elif", "str", "(", "int", "(", "i", ")", ")", "==", "i", ":", "return", "int", "(", "i", ")", "elif", "str", "(", "float", "(", "i", ")", ")", "==", "i", ":", "return", "float", "(", "i", ")", "else", ":", "pass", "except", "ValueError", ":", "pass", "return", "i" ]
Attempts to convert a str to either an int or float
[ "Attempts", "to", "convert", "a", "str", "to", "either", "an", "int", "or", "float" ]
8ae4f71237e69085d8f974a024720f45b34ab963
https://github.com/rtluckie/seria/blob/8ae4f71237e69085d8f974a024720f45b34ab963/seria/utils.py#L3-L21
238,310
anlutro/russell
russell/engine.py
make_link
def make_link(title, url, blank=False): """ Make a HTML link out of an URL. Args: title (str): Text to show for the link. url (str): URL the link will point to. blank (bool): If True, appends target=_blank, noopener and noreferrer to the <a> element. Defaults to False. """ attrs = 'href="%s"' % url if blank: attrs += ' target="_blank" rel="noopener noreferrer"' return '<a %s>%s</a>' % (attrs, title)
python
def make_link(title, url, blank=False): """ Make a HTML link out of an URL. Args: title (str): Text to show for the link. url (str): URL the link will point to. blank (bool): If True, appends target=_blank, noopener and noreferrer to the <a> element. Defaults to False. """ attrs = 'href="%s"' % url if blank: attrs += ' target="_blank" rel="noopener noreferrer"' return '<a %s>%s</a>' % (attrs, title)
[ "def", "make_link", "(", "title", ",", "url", ",", "blank", "=", "False", ")", ":", "attrs", "=", "'href=\"%s\"'", "%", "url", "if", "blank", ":", "attrs", "+=", "' target=\"_blank\" rel=\"noopener noreferrer\"'", "return", "'<a %s>%s</a>'", "%", "(", "attrs", ",", "title", ")" ]
Make a HTML link out of an URL. Args: title (str): Text to show for the link. url (str): URL the link will point to. blank (bool): If True, appends target=_blank, noopener and noreferrer to the <a> element. Defaults to False.
[ "Make", "a", "HTML", "link", "out", "of", "an", "URL", "." ]
6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L27-L40
238,311
anlutro/russell
russell/engine.py
BlogEngine.get_asset_url
def get_asset_url(self, path): """ Get the URL of an asset. If asset hashes are added and one exists for the path, it will be appended as a query string. Args: path (str): Path to the file, relative to your "assets" directory. """ url = self.root_url + '/assets/' + path if path in self.asset_hash: url += '?' + self.asset_hash[path] return url
python
def get_asset_url(self, path): """ Get the URL of an asset. If asset hashes are added and one exists for the path, it will be appended as a query string. Args: path (str): Path to the file, relative to your "assets" directory. """ url = self.root_url + '/assets/' + path if path in self.asset_hash: url += '?' + self.asset_hash[path] return url
[ "def", "get_asset_url", "(", "self", ",", "path", ")", ":", "url", "=", "self", ".", "root_url", "+", "'/assets/'", "+", "path", "if", "path", "in", "self", ".", "asset_hash", ":", "url", "+=", "'?'", "+", "self", ".", "asset_hash", "[", "path", "]", "return", "url" ]
Get the URL of an asset. If asset hashes are added and one exists for the path, it will be appended as a query string. Args: path (str): Path to the file, relative to your "assets" directory.
[ "Get", "the", "URL", "of", "an", "asset", ".", "If", "asset", "hashes", "are", "added", "and", "one", "exists", "for", "the", "path", "it", "will", "be", "appended", "as", "a", "query", "string", "." ]
6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L85-L96
238,312
anlutro/russell
russell/engine.py
BlogEngine.add_pages
def add_pages(self, path='pages'): """ Look through a directory for markdown files and add them as pages. """ pages_path = os.path.join(self.root_path, path) pages = [] for file in _listfiles(pages_path): page_dir = os.path.relpath(os.path.dirname(file), pages_path) if page_dir == '.': page_dir = None pages.append(self.cm.Page.from_file(file, directory=page_dir)) self.cm.add_pages(pages)
python
def add_pages(self, path='pages'): """ Look through a directory for markdown files and add them as pages. """ pages_path = os.path.join(self.root_path, path) pages = [] for file in _listfiles(pages_path): page_dir = os.path.relpath(os.path.dirname(file), pages_path) if page_dir == '.': page_dir = None pages.append(self.cm.Page.from_file(file, directory=page_dir)) self.cm.add_pages(pages)
[ "def", "add_pages", "(", "self", ",", "path", "=", "'pages'", ")", ":", "pages_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "root_path", ",", "path", ")", "pages", "=", "[", "]", "for", "file", "in", "_listfiles", "(", "pages_path", ")", ":", "page_dir", "=", "os", ".", "path", ".", "relpath", "(", "os", ".", "path", ".", "dirname", "(", "file", ")", ",", "pages_path", ")", "if", "page_dir", "==", "'.'", ":", "page_dir", "=", "None", "pages", ".", "append", "(", "self", ".", "cm", ".", "Page", ".", "from_file", "(", "file", ",", "directory", "=", "page_dir", ")", ")", "self", ".", "cm", ".", "add_pages", "(", "pages", ")" ]
Look through a directory for markdown files and add them as pages.
[ "Look", "through", "a", "directory", "for", "markdown", "files", "and", "add", "them", "as", "pages", "." ]
6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L98-L109
238,313
anlutro/russell
russell/engine.py
BlogEngine.add_posts
def add_posts(self, path='posts'): """ Look through a directory for markdown files and add them as posts. """ path = os.path.join(self.root_path, path) self.cm.add_posts([ self.cm.Post.from_file(file) for file in _listfiles(path) ])
python
def add_posts(self, path='posts'): """ Look through a directory for markdown files and add them as posts. """ path = os.path.join(self.root_path, path) self.cm.add_posts([ self.cm.Post.from_file(file) for file in _listfiles(path) ])
[ "def", "add_posts", "(", "self", ",", "path", "=", "'posts'", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "root_path", ",", "path", ")", "self", ".", "cm", ".", "add_posts", "(", "[", "self", ".", "cm", ".", "Post", ".", "from_file", "(", "file", ")", "for", "file", "in", "_listfiles", "(", "path", ")", "]", ")" ]
Look through a directory for markdown files and add them as posts.
[ "Look", "through", "a", "directory", "for", "markdown", "files", "and", "add", "them", "as", "posts", "." ]
6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L111-L119
238,314
anlutro/russell
russell/engine.py
BlogEngine.copy_assets
def copy_assets(self, path='assets'): """ Copy assets into the destination directory. """ path = os.path.join(self.root_path, path) for root, _, files in os.walk(path): for file in files: fullpath = os.path.join(root, file) relpath = os.path.relpath(fullpath, path) copy_to = os.path.join(self._get_dist_path(relpath, directory='assets')) LOG.debug('copying %r to %r', fullpath, copy_to) shutil.copyfile(fullpath, copy_to)
python
def copy_assets(self, path='assets'): """ Copy assets into the destination directory. """ path = os.path.join(self.root_path, path) for root, _, files in os.walk(path): for file in files: fullpath = os.path.join(root, file) relpath = os.path.relpath(fullpath, path) copy_to = os.path.join(self._get_dist_path(relpath, directory='assets')) LOG.debug('copying %r to %r', fullpath, copy_to) shutil.copyfile(fullpath, copy_to)
[ "def", "copy_assets", "(", "self", ",", "path", "=", "'assets'", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "root_path", ",", "path", ")", "for", "root", ",", "_", ",", "files", "in", "os", ".", "walk", "(", "path", ")", ":", "for", "file", "in", "files", ":", "fullpath", "=", "os", ".", "path", ".", "join", "(", "root", ",", "file", ")", "relpath", "=", "os", ".", "path", ".", "relpath", "(", "fullpath", ",", "path", ")", "copy_to", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_get_dist_path", "(", "relpath", ",", "directory", "=", "'assets'", ")", ")", "LOG", ".", "debug", "(", "'copying %r to %r'", ",", "fullpath", ",", "copy_to", ")", "shutil", ".", "copyfile", "(", "fullpath", ",", "copy_to", ")" ]
Copy assets into the destination directory.
[ "Copy", "assets", "into", "the", "destination", "directory", "." ]
6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L121-L132
238,315
anlutro/russell
russell/engine.py
BlogEngine.add_asset_hashes
def add_asset_hashes(self, path='dist/assets'): """ Scan through a directory and add hashes for each file found. """ for fullpath in _listfiles(os.path.join(self.root_path, path)): relpath = fullpath.replace(self.root_path + '/' + path + '/', '') md5sum = hashlib.md5(open(fullpath, 'rb').read()).hexdigest() LOG.debug('MD5 of %s (%s): %s', fullpath, relpath, md5sum) self.asset_hash[relpath] = md5sum
python
def add_asset_hashes(self, path='dist/assets'): """ Scan through a directory and add hashes for each file found. """ for fullpath in _listfiles(os.path.join(self.root_path, path)): relpath = fullpath.replace(self.root_path + '/' + path + '/', '') md5sum = hashlib.md5(open(fullpath, 'rb').read()).hexdigest() LOG.debug('MD5 of %s (%s): %s', fullpath, relpath, md5sum) self.asset_hash[relpath] = md5sum
[ "def", "add_asset_hashes", "(", "self", ",", "path", "=", "'dist/assets'", ")", ":", "for", "fullpath", "in", "_listfiles", "(", "os", ".", "path", ".", "join", "(", "self", ".", "root_path", ",", "path", ")", ")", ":", "relpath", "=", "fullpath", ".", "replace", "(", "self", ".", "root_path", "+", "'/'", "+", "path", "+", "'/'", ",", "''", ")", "md5sum", "=", "hashlib", ".", "md5", "(", "open", "(", "fullpath", ",", "'rb'", ")", ".", "read", "(", ")", ")", ".", "hexdigest", "(", ")", "LOG", ".", "debug", "(", "'MD5 of %s (%s): %s'", ",", "fullpath", ",", "relpath", ",", "md5sum", ")", "self", ".", "asset_hash", "[", "relpath", "]", "=", "md5sum" ]
Scan through a directory and add hashes for each file found.
[ "Scan", "through", "a", "directory", "and", "add", "hashes", "for", "each", "file", "found", "." ]
6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L134-L142
238,316
anlutro/russell
russell/engine.py
BlogEngine.get_posts
def get_posts(self, num=None, tag=None, private=False): """ Get all the posts added to the blog. Args: num (int): Optional. If provided, only return N posts (sorted by date, most recent first). tag (Tag): Optional. If provided, only return posts that have a specific tag. private (bool): By default (if False), private posts are not included. If set to True, private posts will also be included. """ posts = self.posts if not private: posts = [post for post in posts if post.public] if tag: posts = [post for post in posts if tag in post.tags] if num: return posts[:num] return posts
python
def get_posts(self, num=None, tag=None, private=False): """ Get all the posts added to the blog. Args: num (int): Optional. If provided, only return N posts (sorted by date, most recent first). tag (Tag): Optional. If provided, only return posts that have a specific tag. private (bool): By default (if False), private posts are not included. If set to True, private posts will also be included. """ posts = self.posts if not private: posts = [post for post in posts if post.public] if tag: posts = [post for post in posts if tag in post.tags] if num: return posts[:num] return posts
[ "def", "get_posts", "(", "self", ",", "num", "=", "None", ",", "tag", "=", "None", ",", "private", "=", "False", ")", ":", "posts", "=", "self", ".", "posts", "if", "not", "private", ":", "posts", "=", "[", "post", "for", "post", "in", "posts", "if", "post", ".", "public", "]", "if", "tag", ":", "posts", "=", "[", "post", "for", "post", "in", "posts", "if", "tag", "in", "post", ".", "tags", "]", "if", "num", ":", "return", "posts", "[", ":", "num", "]", "return", "posts" ]
Get all the posts added to the blog. Args: num (int): Optional. If provided, only return N posts (sorted by date, most recent first). tag (Tag): Optional. If provided, only return posts that have a specific tag. private (bool): By default (if False), private posts are not included. If set to True, private posts will also be included.
[ "Get", "all", "the", "posts", "added", "to", "the", "blog", "." ]
6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L144-L166
238,317
anlutro/russell
russell/engine.py
BlogEngine.generate_pages
def generate_pages(self): """ Generate HTML out of the pages added to the blog. """ for page in self.pages: self.generate_page(page.slug, template='page.html.jinja', page=page)
python
def generate_pages(self): """ Generate HTML out of the pages added to the blog. """ for page in self.pages: self.generate_page(page.slug, template='page.html.jinja', page=page)
[ "def", "generate_pages", "(", "self", ")", ":", "for", "page", "in", "self", ".", "pages", ":", "self", ".", "generate_page", "(", "page", ".", "slug", ",", "template", "=", "'page.html.jinja'", ",", "page", "=", "page", ")" ]
Generate HTML out of the pages added to the blog.
[ "Generate", "HTML", "out", "of", "the", "pages", "added", "to", "the", "blog", "." ]
6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L180-L185
238,318
anlutro/russell
russell/engine.py
BlogEngine.generate_posts
def generate_posts(self): """ Generate single-post HTML files out of posts added to the blog. Will not generate front page, archives or tag files - those have to be generated separately. """ for post in self.posts: self.generate_page( ['posts', post.slug], template='post.html.jinja', post=post, )
python
def generate_posts(self): """ Generate single-post HTML files out of posts added to the blog. Will not generate front page, archives or tag files - those have to be generated separately. """ for post in self.posts: self.generate_page( ['posts', post.slug], template='post.html.jinja', post=post, )
[ "def", "generate_posts", "(", "self", ")", ":", "for", "post", "in", "self", ".", "posts", ":", "self", ".", "generate_page", "(", "[", "'posts'", ",", "post", ".", "slug", "]", ",", "template", "=", "'post.html.jinja'", ",", "post", "=", "post", ",", ")" ]
Generate single-post HTML files out of posts added to the blog. Will not generate front page, archives or tag files - those have to be generated separately.
[ "Generate", "single", "-", "post", "HTML", "files", "out", "of", "posts", "added", "to", "the", "blog", ".", "Will", "not", "generate", "front", "page", "archives", "or", "tag", "files", "-", "those", "have", "to", "be", "generated", "separately", "." ]
6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L187-L198
238,319
anlutro/russell
russell/engine.py
BlogEngine.generate_tags
def generate_tags(self): """ Generate one HTML page for each tag, each containing all posts that match that tag. """ for tag in self.tags: posts = self.get_posts(tag=tag, private=True) self.generate_page(['tags', tag.slug], template='archive.html.jinja', posts=posts)
python
def generate_tags(self): """ Generate one HTML page for each tag, each containing all posts that match that tag. """ for tag in self.tags: posts = self.get_posts(tag=tag, private=True) self.generate_page(['tags', tag.slug], template='archive.html.jinja', posts=posts)
[ "def", "generate_tags", "(", "self", ")", ":", "for", "tag", "in", "self", ".", "tags", ":", "posts", "=", "self", ".", "get_posts", "(", "tag", "=", "tag", ",", "private", "=", "True", ")", "self", ".", "generate_page", "(", "[", "'tags'", ",", "tag", ".", "slug", "]", ",", "template", "=", "'archive.html.jinja'", ",", "posts", "=", "posts", ")" ]
Generate one HTML page for each tag, each containing all posts that match that tag.
[ "Generate", "one", "HTML", "page", "for", "each", "tag", "each", "containing", "all", "posts", "that", "match", "that", "tag", "." ]
6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L200-L208
238,320
anlutro/russell
russell/engine.py
BlogEngine.generate_page
def generate_page(self, path, template, **kwargs): """ Generate the HTML for a single page. You usually don't need to call this method manually, it is used by a lot of other, more end-user friendly methods. Args: path (str): Where to place the page relative to the root URL. Usually something like "index", "about-me", "projects/example", etc. template (str): Which jinja template to use to render the page. **kwargs: Kwargs will be passed on to the jinja template. Also, if the `page` kwarg is passed, its directory attribute will be prepended to the path. """ directory = None if kwargs.get('page'): directory = kwargs['page'].dir path = self._get_dist_path(path, directory=directory) if not path.endswith('.html'): path = path + '.html' if not os.path.isdir(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) html = self._get_template(template).render(**kwargs) with open(path, 'w+') as file: file.write(html)
python
def generate_page(self, path, template, **kwargs): """ Generate the HTML for a single page. You usually don't need to call this method manually, it is used by a lot of other, more end-user friendly methods. Args: path (str): Where to place the page relative to the root URL. Usually something like "index", "about-me", "projects/example", etc. template (str): Which jinja template to use to render the page. **kwargs: Kwargs will be passed on to the jinja template. Also, if the `page` kwarg is passed, its directory attribute will be prepended to the path. """ directory = None if kwargs.get('page'): directory = kwargs['page'].dir path = self._get_dist_path(path, directory=directory) if not path.endswith('.html'): path = path + '.html' if not os.path.isdir(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) html = self._get_template(template).render(**kwargs) with open(path, 'w+') as file: file.write(html)
[ "def", "generate_page", "(", "self", ",", "path", ",", "template", ",", "*", "*", "kwargs", ")", ":", "directory", "=", "None", "if", "kwargs", ".", "get", "(", "'page'", ")", ":", "directory", "=", "kwargs", "[", "'page'", "]", ".", "dir", "path", "=", "self", ".", "_get_dist_path", "(", "path", ",", "directory", "=", "directory", ")", "if", "not", "path", ".", "endswith", "(", "'.html'", ")", ":", "path", "=", "path", "+", "'.html'", "if", "not", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "dirname", "(", "path", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "path", ")", ")", "html", "=", "self", ".", "_get_template", "(", "template", ")", ".", "render", "(", "*", "*", "kwargs", ")", "with", "open", "(", "path", ",", "'w+'", ")", "as", "file", ":", "file", ".", "write", "(", "html", ")" ]
Generate the HTML for a single page. You usually don't need to call this method manually, it is used by a lot of other, more end-user friendly methods. Args: path (str): Where to place the page relative to the root URL. Usually something like "index", "about-me", "projects/example", etc. template (str): Which jinja template to use to render the page. **kwargs: Kwargs will be passed on to the jinja template. Also, if the `page` kwarg is passed, its directory attribute will be prepended to the path.
[ "Generate", "the", "HTML", "for", "a", "single", "page", ".", "You", "usually", "don", "t", "need", "to", "call", "this", "method", "manually", "it", "is", "used", "by", "a", "lot", "of", "other", "more", "end", "-", "user", "friendly", "methods", "." ]
6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L210-L238
238,321
anlutro/russell
russell/engine.py
BlogEngine.generate_index
def generate_index(self, num_posts=5): """ Generate the front page, aka index.html. """ posts = self.get_posts(num=num_posts) self.generate_page('index', template='index.html.jinja', posts=posts)
python
def generate_index(self, num_posts=5): """ Generate the front page, aka index.html. """ posts = self.get_posts(num=num_posts) self.generate_page('index', template='index.html.jinja', posts=posts)
[ "def", "generate_index", "(", "self", ",", "num_posts", "=", "5", ")", ":", "posts", "=", "self", ".", "get_posts", "(", "num", "=", "num_posts", ")", "self", ".", "generate_page", "(", "'index'", ",", "template", "=", "'index.html.jinja'", ",", "posts", "=", "posts", ")" ]
Generate the front page, aka index.html.
[ "Generate", "the", "front", "page", "aka", "index", ".", "html", "." ]
6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L240-L245
238,322
anlutro/russell
russell/engine.py
BlogEngine.generate_rss
def generate_rss(self, path='rss.xml', only_excerpt=True, https=False): """ Generate the RSS feed. Args: path (str): Where to save the RSS file. Make sure that your jinja templates refer to the same path using <link>. only_excerpt (bool): If True (the default), don't include the full body of posts in the RSS. Instead, include the first paragraph and a "read more" link to your website. https (bool): If True, links inside the RSS with relative scheme (e.g. //example.com/something) will be set to HTTPS. If False (the default), they will be set to plain HTTP. """ feed = russell.feed.get_rss_feed(self, only_excerpt=only_excerpt, https=https) feed.rss_file(self._get_dist_path(path))
python
def generate_rss(self, path='rss.xml', only_excerpt=True, https=False): """ Generate the RSS feed. Args: path (str): Where to save the RSS file. Make sure that your jinja templates refer to the same path using <link>. only_excerpt (bool): If True (the default), don't include the full body of posts in the RSS. Instead, include the first paragraph and a "read more" link to your website. https (bool): If True, links inside the RSS with relative scheme (e.g. //example.com/something) will be set to HTTPS. If False (the default), they will be set to plain HTTP. """ feed = russell.feed.get_rss_feed(self, only_excerpt=only_excerpt, https=https) feed.rss_file(self._get_dist_path(path))
[ "def", "generate_rss", "(", "self", ",", "path", "=", "'rss.xml'", ",", "only_excerpt", "=", "True", ",", "https", "=", "False", ")", ":", "feed", "=", "russell", ".", "feed", ".", "get_rss_feed", "(", "self", ",", "only_excerpt", "=", "only_excerpt", ",", "https", "=", "https", ")", "feed", ".", "rss_file", "(", "self", ".", "_get_dist_path", "(", "path", ")", ")" ]
Generate the RSS feed. Args: path (str): Where to save the RSS file. Make sure that your jinja templates refer to the same path using <link>. only_excerpt (bool): If True (the default), don't include the full body of posts in the RSS. Instead, include the first paragraph and a "read more" link to your website. https (bool): If True, links inside the RSS with relative scheme (e.g. //example.com/something) will be set to HTTPS. If False (the default), they will be set to plain HTTP.
[ "Generate", "the", "RSS", "feed", "." ]
6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L253-L268
238,323
anlutro/russell
russell/engine.py
BlogEngine.generate_sitemap
def generate_sitemap(self, path='sitemap.xml', https=False): """ Generate an XML sitemap. Args: path (str): The name of the file to write to. https (bool): If True, links inside the sitemap with relative scheme (e.g. example.com/something) will be set to HTTPS. If False (the default), they will be set to plain HTTP. """ sitemap = russell.sitemap.generate_sitemap(self, https=https) self.write_file(path, sitemap)
python
def generate_sitemap(self, path='sitemap.xml', https=False): """ Generate an XML sitemap. Args: path (str): The name of the file to write to. https (bool): If True, links inside the sitemap with relative scheme (e.g. example.com/something) will be set to HTTPS. If False (the default), they will be set to plain HTTP. """ sitemap = russell.sitemap.generate_sitemap(self, https=https) self.write_file(path, sitemap)
[ "def", "generate_sitemap", "(", "self", ",", "path", "=", "'sitemap.xml'", ",", "https", "=", "False", ")", ":", "sitemap", "=", "russell", ".", "sitemap", ".", "generate_sitemap", "(", "self", ",", "https", "=", "https", ")", "self", ".", "write_file", "(", "path", ",", "sitemap", ")" ]
Generate an XML sitemap. Args: path (str): The name of the file to write to. https (bool): If True, links inside the sitemap with relative scheme (e.g. example.com/something) will be set to HTTPS. If False (the default), they will be set to plain HTTP.
[ "Generate", "an", "XML", "sitemap", "." ]
6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L270-L281
238,324
anlutro/russell
russell/engine.py
BlogEngine.write_file
def write_file(self, path, contents): """ Write a file of any type to the destination path. Useful for files like robots.txt, manifest.json, and so on. Args: path (str): The name of the file to write to. contents (str or bytes): The contents to write. """ path = self._get_dist_path(path) if not os.path.isdir(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) if isinstance(contents, bytes): mode = 'wb+' else: mode = 'w' with open(path, mode) as file: file.write(contents)
python
def write_file(self, path, contents): """ Write a file of any type to the destination path. Useful for files like robots.txt, manifest.json, and so on. Args: path (str): The name of the file to write to. contents (str or bytes): The contents to write. """ path = self._get_dist_path(path) if not os.path.isdir(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) if isinstance(contents, bytes): mode = 'wb+' else: mode = 'w' with open(path, mode) as file: file.write(contents)
[ "def", "write_file", "(", "self", ",", "path", ",", "contents", ")", ":", "path", "=", "self", ".", "_get_dist_path", "(", "path", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "dirname", "(", "path", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "path", ")", ")", "if", "isinstance", "(", "contents", ",", "bytes", ")", ":", "mode", "=", "'wb+'", "else", ":", "mode", "=", "'w'", "with", "open", "(", "path", ",", "mode", ")", "as", "file", ":", "file", ".", "write", "(", "contents", ")" ]
Write a file of any type to the destination path. Useful for files like robots.txt, manifest.json, and so on. Args: path (str): The name of the file to write to. contents (str or bytes): The contents to write.
[ "Write", "a", "file", "of", "any", "type", "to", "the", "destination", "path", ".", "Useful", "for", "files", "like", "robots", ".", "txt", "manifest", ".", "json", "and", "so", "on", "." ]
6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5
https://github.com/anlutro/russell/blob/6e4a95929f031926d3acd5d9e6c9ca7bb896b1b5/russell/engine.py#L283-L300
238,325
Carreau/Love
love/love.py
enable_travis
def enable_travis(token, slug, log): """ Enable Travis automatically for the given repo. this need to have access to the GitHub token. """ # Done with github directly. Login to travis travis = TravisPy.github_auth(token, uri='https://api.travis-ci.org') user = travis.user() log.info('Travis user: %s', user.name) # Ask travis to sync with github, try to fetch created repo with exponentially decaying time. last_sync = user.synced_at log.info('syncing Travis with Github, this can take a while...') repo = travis._session.post(travis._session.uri+'/users/sync') for i in range(10): try: sleep((1.5)**i) repo = travis.repo(slug) if travis.user().synced_at == last_sync: raise ValueError('synced not really done, travis.repo() can be a duplicate') log.info('\nsyncing done') break # TODO: find the right exception here except Exception: pass ## todo , warn if not found # Enable travis hook for this repository log.info('Enabling Travis-CI hook for this repository') resp = travis._session.put(travis._session.uri+"/hooks/", json={ "hook": { "id": repo.id , "active": True } }, ) if resp.json()['result'] is True: log.info('Travis hook for this repository is now enabled.') log.info('Continuous integration test should be triggered every time you push code to github') else: log.info("I was not able to set up Travis hooks... something went wrong.") return user
python
def enable_travis(token, slug, log): """ Enable Travis automatically for the given repo. this need to have access to the GitHub token. """ # Done with github directly. Login to travis travis = TravisPy.github_auth(token, uri='https://api.travis-ci.org') user = travis.user() log.info('Travis user: %s', user.name) # Ask travis to sync with github, try to fetch created repo with exponentially decaying time. last_sync = user.synced_at log.info('syncing Travis with Github, this can take a while...') repo = travis._session.post(travis._session.uri+'/users/sync') for i in range(10): try: sleep((1.5)**i) repo = travis.repo(slug) if travis.user().synced_at == last_sync: raise ValueError('synced not really done, travis.repo() can be a duplicate') log.info('\nsyncing done') break # TODO: find the right exception here except Exception: pass ## todo , warn if not found # Enable travis hook for this repository log.info('Enabling Travis-CI hook for this repository') resp = travis._session.put(travis._session.uri+"/hooks/", json={ "hook": { "id": repo.id , "active": True } }, ) if resp.json()['result'] is True: log.info('Travis hook for this repository is now enabled.') log.info('Continuous integration test should be triggered every time you push code to github') else: log.info("I was not able to set up Travis hooks... something went wrong.") return user
[ "def", "enable_travis", "(", "token", ",", "slug", ",", "log", ")", ":", "# Done with github directly. Login to travis", "travis", "=", "TravisPy", ".", "github_auth", "(", "token", ",", "uri", "=", "'https://api.travis-ci.org'", ")", "user", "=", "travis", ".", "user", "(", ")", "log", ".", "info", "(", "'Travis user: %s'", ",", "user", ".", "name", ")", "# Ask travis to sync with github, try to fetch created repo with exponentially decaying time.", "last_sync", "=", "user", ".", "synced_at", "log", ".", "info", "(", "'syncing Travis with Github, this can take a while...'", ")", "repo", "=", "travis", ".", "_session", ".", "post", "(", "travis", ".", "_session", ".", "uri", "+", "'/users/sync'", ")", "for", "i", "in", "range", "(", "10", ")", ":", "try", ":", "sleep", "(", "(", "1.5", ")", "**", "i", ")", "repo", "=", "travis", ".", "repo", "(", "slug", ")", "if", "travis", ".", "user", "(", ")", ".", "synced_at", "==", "last_sync", ":", "raise", "ValueError", "(", "'synced not really done, travis.repo() can be a duplicate'", ")", "log", ".", "info", "(", "'\\nsyncing done'", ")", "break", "# TODO: find the right exception here", "except", "Exception", ":", "pass", "## todo , warn if not found", "# Enable travis hook for this repository", "log", ".", "info", "(", "'Enabling Travis-CI hook for this repository'", ")", "resp", "=", "travis", ".", "_session", ".", "put", "(", "travis", ".", "_session", ".", "uri", "+", "\"/hooks/\"", ",", "json", "=", "{", "\"hook\"", ":", "{", "\"id\"", ":", "repo", ".", "id", ",", "\"active\"", ":", "True", "}", "}", ",", ")", "if", "resp", ".", "json", "(", ")", "[", "'result'", "]", "is", "True", ":", "log", ".", "info", "(", "'Travis hook for this repository is now enabled.'", ")", "log", ".", "info", "(", "'Continuous integration test should be triggered every time you push code to github'", ")", "else", ":", "log", ".", "info", "(", "\"I was not able to set up Travis hooks... something went wrong.\"", ")", "return", "user" ]
Enable Travis automatically for the given repo. This needs access to the GitHub token.
[ "Enable", "Travis", "automatically", "for", "the", "given", "repo", "." ]
a85d1139b32ee926b3bee73447e32e89b86983ba
https://github.com/Carreau/Love/blob/a85d1139b32ee926b3bee73447e32e89b86983ba/love/love.py#L280-L329
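A hedged usage sketch for enable_travis: the import path follows this record's module path, while the token, slug, and logger name are placeholders (a real GitHub token with hook permissions and the TravisPy dependency are required):

import logging
from love.love import enable_travis

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("love")
# "<github-token>" and "octocat/hello-world" are placeholders, not real values.
user = enable_travis(token="<github-token>", slug="octocat/hello-world", log=log)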
238,326
Carreau/Love
love/love.py
project_layout
def project_layout(proposal, user=None, repo=None, log=None): """ generate the project template proposal is the name of the project, user is an object containing some information about the user. - full name, - github username - email """ proposal = proposal.lower() #context_file = os.path.expanduser('~/.cookiecutters/cookiecutter-pypackage/cookiecutter.json') #context = generate_context(context_file) # os.chdir('..') # context['cookiecutter']['full_name'] = user.name # context['cookiecutter']['email'] = user.email # context['cookiecutter']['github_username'] = user.login # context['cookiecutter']['project_name'] = proposal # context['cookiecutter']['repo_name'] = proposal.lower() try: os.mkdir(proposal) except FileExistsError: log.info('Skip directory structure, as project seem to already exists') with open('.gitignore', 'w') as f: f.write(''' *.pyc __pycache__ /build/ /dist/ ''') with open( '/'.join([proposal, '__init__.py']), 'w') as f: f.write(''' """ a simple package """ __version__ = '0.0.1' ''') travis_yml() #generate_files( # repo_dir=os.path.expanduser('~/.cookiecutters/cookiecutter-pypackage/'), # context=context # ) log.info('Workig in %s', os.getcwd()) os.listdir('.') subprocess.call(['git','add','.'], ) subprocess.call(['git','commit',"-am'initial commit of %s'" % proposal]) subprocess.call(['git', "push", "origin", "master:master"])
python
def project_layout(proposal, user=None, repo=None, log=None): """ generate the project template proposal is the name of the project, user is an object containing some information about the user. - full name, - github username - email """ proposal = proposal.lower() #context_file = os.path.expanduser('~/.cookiecutters/cookiecutter-pypackage/cookiecutter.json') #context = generate_context(context_file) # os.chdir('..') # context['cookiecutter']['full_name'] = user.name # context['cookiecutter']['email'] = user.email # context['cookiecutter']['github_username'] = user.login # context['cookiecutter']['project_name'] = proposal # context['cookiecutter']['repo_name'] = proposal.lower() try: os.mkdir(proposal) except FileExistsError: log.info('Skip directory structure, as project seem to already exists') with open('.gitignore', 'w') as f: f.write(''' *.pyc __pycache__ /build/ /dist/ ''') with open( '/'.join([proposal, '__init__.py']), 'w') as f: f.write(''' """ a simple package """ __version__ = '0.0.1' ''') travis_yml() #generate_files( # repo_dir=os.path.expanduser('~/.cookiecutters/cookiecutter-pypackage/'), # context=context # ) log.info('Workig in %s', os.getcwd()) os.listdir('.') subprocess.call(['git','add','.'], ) subprocess.call(['git','commit',"-am'initial commit of %s'" % proposal]) subprocess.call(['git', "push", "origin", "master:master"])
[ "def", "project_layout", "(", "proposal", ",", "user", "=", "None", ",", "repo", "=", "None", ",", "log", "=", "None", ")", ":", "proposal", "=", "proposal", ".", "lower", "(", ")", "#context_file = os.path.expanduser('~/.cookiecutters/cookiecutter-pypackage/cookiecutter.json')", "#context = generate_context(context_file)", "# os.chdir('..')", "# context['cookiecutter']['full_name'] = user.name", "# context['cookiecutter']['email'] = user.email", "# context['cookiecutter']['github_username'] = user.login", "# context['cookiecutter']['project_name'] = proposal", "# context['cookiecutter']['repo_name'] = proposal.lower()", "try", ":", "os", ".", "mkdir", "(", "proposal", ")", "except", "FileExistsError", ":", "log", ".", "info", "(", "'Skip directory structure, as project seem to already exists'", ")", "with", "open", "(", "'.gitignore'", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "'''\n*.pyc\n__pycache__\n/build/\n/dist/\n'''", ")", "with", "open", "(", "'/'", ".", "join", "(", "[", "proposal", ",", "'__init__.py'", "]", ")", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "'''\n\"\"\"\na simple package\n\"\"\"\n\n\n__version__ = '0.0.1'\n\n '''", ")", "travis_yml", "(", ")", "#generate_files(", "# repo_dir=os.path.expanduser('~/.cookiecutters/cookiecutter-pypackage/'),", "# context=context", "# )", "log", ".", "info", "(", "'Workig in %s'", ",", "os", ".", "getcwd", "(", ")", ")", "os", ".", "listdir", "(", "'.'", ")", "subprocess", ".", "call", "(", "[", "'git'", ",", "'add'", ",", "'.'", "]", ",", ")", "subprocess", ".", "call", "(", "[", "'git'", ",", "'commit'", ",", "\"-am'initial commit of %s'\"", "%", "proposal", "]", ")", "subprocess", ".", "call", "(", "[", "'git'", ",", "\"push\"", ",", "\"origin\"", ",", "\"master:master\"", "]", ")" ]
generate the project template. proposal is the name of the project; user is an object containing some information about the user: full name, github username, email
[ "generate", "the", "project", "template" ]
a85d1139b32ee926b3bee73447e32e89b86983ba
https://github.com/Carreau/Love/blob/a85d1139b32ee926b3bee73447e32e89b86983ba/love/love.py#L353-L418
238,327
mlavin/argyle
argyle/postgres.py
create_db_user
def create_db_user(username, password=None, flags=None): """Create a databse user.""" flags = flags or u'-D -A -R' sudo(u'createuser %s %s' % (flags, username), user=u'postgres') if password: change_db_user_password(username, password)
python
def create_db_user(username, password=None, flags=None): """Create a databse user.""" flags = flags or u'-D -A -R' sudo(u'createuser %s %s' % (flags, username), user=u'postgres') if password: change_db_user_password(username, password)
[ "def", "create_db_user", "(", "username", ",", "password", "=", "None", ",", "flags", "=", "None", ")", ":", "flags", "=", "flags", "or", "u'-D -A -R'", "sudo", "(", "u'createuser %s %s'", "%", "(", "flags", ",", "username", ")", ",", "user", "=", "u'postgres'", ")", "if", "password", ":", "change_db_user_password", "(", "username", ",", "password", ")" ]
Create a database user.
[ "Create", "a", "database", "user", "." ]
92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72
https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/postgres.py#L11-L17
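A sketch of how create_db_user might be combined with create_db (defined later in this module) inside a Fabric 1.x task; the role name, password, and database name are invented for illustration:

from fabric.api import task
from argyle.postgres import create_db_user, create_db

@task
def bootstrap_database():
    # createuser -D -A -R myapp, then the password is set via ALTER USER
    create_db_user(u"myapp", password=u"change-me")
    # createdb -E UTF-8 -O myapp myapp_db
    create_db(u"myapp_db", owner=u"myapp")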
238,328
mlavin/argyle
argyle/postgres.py
excute_query
def excute_query(query, db=None, flags=None, use_sudo=False, **kwargs): """Execute remote psql query.""" flags = flags or u'' if db: flags = u"%s -d %s" % (flags, db) command = u'psql %s -c "%s"' % (flags, query) if use_sudo: sudo(command, user='postgres', **kwargs) else: run(command, **kwargs)
python
def excute_query(query, db=None, flags=None, use_sudo=False, **kwargs): """Execute remote psql query.""" flags = flags or u'' if db: flags = u"%s -d %s" % (flags, db) command = u'psql %s -c "%s"' % (flags, query) if use_sudo: sudo(command, user='postgres', **kwargs) else: run(command, **kwargs)
[ "def", "excute_query", "(", "query", ",", "db", "=", "None", ",", "flags", "=", "None", ",", "use_sudo", "=", "False", ",", "*", "*", "kwargs", ")", ":", "flags", "=", "flags", "or", "u''", "if", "db", ":", "flags", "=", "u\"%s -d %s\"", "%", "(", "flags", ",", "db", ")", "command", "=", "u'psql %s -c \"%s\"'", "%", "(", "flags", ",", "query", ")", "if", "use_sudo", ":", "sudo", "(", "command", ",", "user", "=", "'postgres'", ",", "*", "*", "kwargs", ")", "else", ":", "run", "(", "command", ",", "*", "*", "kwargs", ")" ]
Execute remote psql query.
[ "Execute", "remote", "psql", "query", "." ]
92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72
https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/postgres.py#L21-L31
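Quick usage note: the helper is spelled excute_query in this module, so it must be imported under that name. A hedged call sketch with a placeholder database name:

from argyle.postgres import excute_query

# Runs: psql -d myapp_db -c "SELECT count(*) FROM pg_stat_activity;" as the postgres user
excute_query('SELECT count(*) FROM pg_stat_activity;', db='myapp_db', use_sudo=True)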
238,329
mlavin/argyle
argyle/postgres.py
db_user_exists
def db_user_exists(username): """Return True if the DB user already exists. """ qry = u"""SELECT COUNT(*) FROM pg_roles where rolname = \'{username}\';""" output = StringIO() excute_query( qry.format(username=username), flags="-Aqt", use_sudo=True, stdout=output ) # FIXME: is there a way to get fabric to not clutter the output # with "[127.0.0.1] out:" on each line? lines = output.getvalue().splitlines() return lines and lines[0].endswith('out: 1')
python
def db_user_exists(username): """Return True if the DB user already exists. """ qry = u"""SELECT COUNT(*) FROM pg_roles where rolname = \'{username}\';""" output = StringIO() excute_query( qry.format(username=username), flags="-Aqt", use_sudo=True, stdout=output ) # FIXME: is there a way to get fabric to not clutter the output # with "[127.0.0.1] out:" on each line? lines = output.getvalue().splitlines() return lines and lines[0].endswith('out: 1')
[ "def", "db_user_exists", "(", "username", ")", ":", "qry", "=", "u\"\"\"SELECT COUNT(*) FROM pg_roles where rolname = \\'{username}\\';\"\"\"", "output", "=", "StringIO", "(", ")", "excute_query", "(", "qry", ".", "format", "(", "username", "=", "username", ")", ",", "flags", "=", "\"-Aqt\"", ",", "use_sudo", "=", "True", ",", "stdout", "=", "output", ")", "# FIXME: is there a way to get fabric to not clutter the output", "# with \"[127.0.0.1] out:\" on each line?", "lines", "=", "output", ".", "getvalue", "(", ")", ".", "splitlines", "(", ")", "return", "lines", "and", "lines", "[", "0", "]", ".", "endswith", "(", "'out: 1'", ")" ]
Return True if the DB user already exists.
[ "Return", "True", "if", "the", "DB", "user", "already", "exists", "." ]
92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72
https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/postgres.py#L34-L48
238,330
mlavin/argyle
argyle/postgres.py
change_db_user_password
def change_db_user_password(username, password): """Change a db user's password.""" sql = "ALTER USER %s WITH PASSWORD '%s'" % (username, password) excute_query(sql, use_sudo=True)
python
def change_db_user_password(username, password): """Change a db user's password.""" sql = "ALTER USER %s WITH PASSWORD '%s'" % (username, password) excute_query(sql, use_sudo=True)
[ "def", "change_db_user_password", "(", "username", ",", "password", ")", ":", "sql", "=", "\"ALTER USER %s WITH PASSWORD '%s'\"", "%", "(", "username", ",", "password", ")", "excute_query", "(", "sql", ",", "use_sudo", "=", "True", ")" ]
Change a db user's password.
[ "Change", "a", "db", "user", "s", "password", "." ]
92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72
https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/postgres.py#L66-L70
238,331
mlavin/argyle
argyle/postgres.py
create_db
def create_db(name, owner=None, encoding=u'UTF-8', template='template1', **kwargs): """Create a Postgres database.""" flags = u'' if encoding: flags = u'-E %s' % encoding if owner: flags = u'%s -O %s' % (flags, owner) if template and template != 'template1': flags = u'%s --template=%s' % (flags, template) sudo('createdb %s %s' % (flags, name), user='postgres', **kwargs)
python
def create_db(name, owner=None, encoding=u'UTF-8', template='template1', **kwargs): """Create a Postgres database.""" flags = u'' if encoding: flags = u'-E %s' % encoding if owner: flags = u'%s -O %s' % (flags, owner) if template and template != 'template1': flags = u'%s --template=%s' % (flags, template) sudo('createdb %s %s' % (flags, name), user='postgres', **kwargs)
[ "def", "create_db", "(", "name", ",", "owner", "=", "None", ",", "encoding", "=", "u'UTF-8'", ",", "template", "=", "'template1'", ",", "*", "*", "kwargs", ")", ":", "flags", "=", "u''", "if", "encoding", ":", "flags", "=", "u'-E %s'", "%", "encoding", "if", "owner", ":", "flags", "=", "u'%s -O %s'", "%", "(", "flags", ",", "owner", ")", "if", "template", "and", "template", "!=", "'template1'", ":", "flags", "=", "u'%s --template=%s'", "%", "(", "flags", ",", "template", ")", "sudo", "(", "'createdb %s %s'", "%", "(", "flags", ",", "name", ")", ",", "user", "=", "'postgres'", ",", "*", "*", "kwargs", ")" ]
Create a Postgres database.
[ "Create", "a", "Postgres", "database", "." ]
92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72
https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/postgres.py#L74-L85
238,332
mlavin/argyle
argyle/postgres.py
upload_pg_hba_conf
def upload_pg_hba_conf(template_name=None, pg_version=None, pg_cluster='main', restart=True): """ Upload configuration for pg_hba.conf If the version is not given it will be guessed. """ template_name = template_name or u'postgres/pg_hba.conf' version = pg_version or detect_version() config = {'version': version, 'cluster': pg_cluster} destination = u'/etc/postgresql/%(version)s/%(cluster)s/pg_hba.conf' % config upload_template(template_name, destination, use_sudo=True) if restart: restart_service(u'postgresql')
python
def upload_pg_hba_conf(template_name=None, pg_version=None, pg_cluster='main', restart=True): """ Upload configuration for pg_hba.conf If the version is not given it will be guessed. """ template_name = template_name or u'postgres/pg_hba.conf' version = pg_version or detect_version() config = {'version': version, 'cluster': pg_cluster} destination = u'/etc/postgresql/%(version)s/%(cluster)s/pg_hba.conf' % config upload_template(template_name, destination, use_sudo=True) if restart: restart_service(u'postgresql')
[ "def", "upload_pg_hba_conf", "(", "template_name", "=", "None", ",", "pg_version", "=", "None", ",", "pg_cluster", "=", "'main'", ",", "restart", "=", "True", ")", ":", "template_name", "=", "template_name", "or", "u'postgres/pg_hba.conf'", "version", "=", "pg_version", "or", "detect_version", "(", ")", "config", "=", "{", "'version'", ":", "version", ",", "'cluster'", ":", "pg_cluster", "}", "destination", "=", "u'/etc/postgresql/%(version)s/%(cluster)s/pg_hba.conf'", "%", "config", "upload_template", "(", "template_name", ",", "destination", ",", "use_sudo", "=", "True", ")", "if", "restart", ":", "restart_service", "(", "u'postgresql'", ")" ]
Upload configuration for pg_hba.conf If the version is not given it will be guessed.
[ "Upload", "configuration", "for", "pg_hba", ".", "conf", "If", "the", "version", "is", "not", "given", "it", "will", "be", "guessed", "." ]
92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72
https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/postgres.py#L89-L101
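A minimal call sketch: it relies on the default postgres/pg_hba.conf template lookup and on the version guessing shown in detect_version below, with the service restart deferred:

from argyle.postgres import upload_pg_hba_conf

# Uses the default template name and a guessed server version; set restart=True to bounce postgresql.
upload_pg_hba_conf(pg_cluster='main', restart=False)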
238,333
mlavin/argyle
argyle/postgres.py
detect_version
def detect_version(): """Parse the output of psql to detect Postgres version.""" version_regex = re.compile(r'\(PostgreSQL\) (?P<major>\d)\.(?P<minor>\d)\.(?P<bugfix>\d)') pg_version = None with hide('running', 'stdout', 'stderr'): output = run('psql --version') match = version_regex.search(output) if match: result = match.groupdict() if 'major' in result and 'minor' in result: pg_version = u'%(major)s.%(minor)s' % result if not pg_version: abort(u"Error: Could not determine Postgres version of the server.") return pg_version
python
def detect_version(): """Parse the output of psql to detect Postgres version.""" version_regex = re.compile(r'\(PostgreSQL\) (?P<major>\d)\.(?P<minor>\d)\.(?P<bugfix>\d)') pg_version = None with hide('running', 'stdout', 'stderr'): output = run('psql --version') match = version_regex.search(output) if match: result = match.groupdict() if 'major' in result and 'minor' in result: pg_version = u'%(major)s.%(minor)s' % result if not pg_version: abort(u"Error: Could not determine Postgres version of the server.") return pg_version
[ "def", "detect_version", "(", ")", ":", "version_regex", "=", "re", ".", "compile", "(", "r'\\(PostgreSQL\\) (?P<major>\\d)\\.(?P<minor>\\d)\\.(?P<bugfix>\\d)'", ")", "pg_version", "=", "None", "with", "hide", "(", "'running'", ",", "'stdout'", ",", "'stderr'", ")", ":", "output", "=", "run", "(", "'psql --version'", ")", "match", "=", "version_regex", ".", "search", "(", "output", ")", "if", "match", ":", "result", "=", "match", ".", "groupdict", "(", ")", "if", "'major'", "in", "result", "and", "'minor'", "in", "result", ":", "pg_version", "=", "u'%(major)s.%(minor)s'", "%", "result", "if", "not", "pg_version", ":", "abort", "(", "u\"Error: Could not determine Postgres version of the server.\"", ")", "return", "pg_version" ]
Parse the output of psql to detect Postgres version.
[ "Parse", "the", "output", "of", "psql", "to", "detect", "Postgres", "version", "." ]
92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72
https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/postgres.py#L104-L117
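The parsing above hinges on a single regex; here it is exercised standalone so the expected psql banner format is visible. Note that each component is a single \d, so a two-digit release string such as 'psql (PostgreSQL) 10.4' would not match:

import re

version_regex = re.compile(r'\(PostgreSQL\) (?P<major>\d)\.(?P<minor>\d)\.(?P<bugfix>\d)')
match = version_regex.search('psql (PostgreSQL) 9.6.2')
print(u'%(major)s.%(minor)s' % match.groupdict())   # -> 9.6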
238,334
mlavin/argyle
argyle/postgres.py
reset_cluster
def reset_cluster(pg_cluster='main', pg_version=None, encoding=u'UTF-8', locale=u'en_US.UTF-8'): """Drop and restore a given cluster.""" warning = u'You are about to drop the %s cluster. This cannot be undone.' \ u' Are you sure you want to continue?' % pg_cluster if confirm(warning, default=False): version = pg_version or detect_version() config = {'version': version, 'cluster': pg_cluster, 'encoding': encoding, 'locale': locale} sudo(u'pg_dropcluster --stop %(version)s %(cluster)s' % config, user='postgres', warn_only=True) sudo(u'pg_createcluster --start -e %(encoding)s --locale %(locale)s' u' %(version)s %(cluster)s' % config, user='postgres') else: abort(u"Dropping %s cluster aborted by user input." % pg_cluster)
python
def reset_cluster(pg_cluster='main', pg_version=None, encoding=u'UTF-8', locale=u'en_US.UTF-8'): """Drop and restore a given cluster.""" warning = u'You are about to drop the %s cluster. This cannot be undone.' \ u' Are you sure you want to continue?' % pg_cluster if confirm(warning, default=False): version = pg_version or detect_version() config = {'version': version, 'cluster': pg_cluster, 'encoding': encoding, 'locale': locale} sudo(u'pg_dropcluster --stop %(version)s %(cluster)s' % config, user='postgres', warn_only=True) sudo(u'pg_createcluster --start -e %(encoding)s --locale %(locale)s' u' %(version)s %(cluster)s' % config, user='postgres') else: abort(u"Dropping %s cluster aborted by user input." % pg_cluster)
[ "def", "reset_cluster", "(", "pg_cluster", "=", "'main'", ",", "pg_version", "=", "None", ",", "encoding", "=", "u'UTF-8'", ",", "locale", "=", "u'en_US.UTF-8'", ")", ":", "warning", "=", "u'You are about to drop the %s cluster. This cannot be undone.'", "u' Are you sure you want to continue?'", "%", "pg_cluster", "if", "confirm", "(", "warning", ",", "default", "=", "False", ")", ":", "version", "=", "pg_version", "or", "detect_version", "(", ")", "config", "=", "{", "'version'", ":", "version", ",", "'cluster'", ":", "pg_cluster", ",", "'encoding'", ":", "encoding", ",", "'locale'", ":", "locale", "}", "sudo", "(", "u'pg_dropcluster --stop %(version)s %(cluster)s'", "%", "config", ",", "user", "=", "'postgres'", ",", "warn_only", "=", "True", ")", "sudo", "(", "u'pg_createcluster --start -e %(encoding)s --locale %(locale)s'", "u' %(version)s %(cluster)s'", "%", "config", ",", "user", "=", "'postgres'", ")", "else", ":", "abort", "(", "u\"Dropping %s cluster aborted by user input.\"", "%", "pg_cluster", ")" ]
Drop and restore a given cluster.
[ "Drop", "and", "restore", "a", "given", "cluster", "." ]
92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72
https://github.com/mlavin/argyle/blob/92cc6e1dd9b8e7cb41c5098a79d05e14b8243d72/argyle/postgres.py#L121-L135
238,335
brinkframework/brink
brink/decorators.py
require_request_model
def require_request_model(cls, validate=True): """ Makes a handler require that a request body that map towards the given model is provided. Unless the ``validate`` option is set to ``False`` the data will be validated against the model's fields. The model will be passed to the handler as the last positional argument. :: @require_request_model(Model) async def handle_model(request, model): return 200, model """ def decorator(handler): async def new_handler(request, *args, **kwargs): body = await request.json() model = cls(**body) if validate: model.validate() return await handler(request, model, *args, **kwargs) return new_handler return decorator
python
def require_request_model(cls, validate=True): """ Makes a handler require that a request body that map towards the given model is provided. Unless the ``validate`` option is set to ``False`` the data will be validated against the model's fields. The model will be passed to the handler as the last positional argument. :: @require_request_model(Model) async def handle_model(request, model): return 200, model """ def decorator(handler): async def new_handler(request, *args, **kwargs): body = await request.json() model = cls(**body) if validate: model.validate() return await handler(request, model, *args, **kwargs) return new_handler return decorator
[ "def", "require_request_model", "(", "cls", ",", "validate", "=", "True", ")", ":", "def", "decorator", "(", "handler", ")", ":", "async", "def", "new_handler", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "body", "=", "await", "request", ".", "json", "(", ")", "model", "=", "cls", "(", "*", "*", "body", ")", "if", "validate", ":", "model", ".", "validate", "(", ")", "return", "await", "handler", "(", "request", ",", "model", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "new_handler", "return", "decorator" ]
Makes a handler require that a request body that map towards the given model is provided. Unless the ``validate`` option is set to ``False`` the data will be validated against the model's fields. The model will be passed to the handler as the last positional argument. :: @require_request_model(Model) async def handle_model(request, model): return 200, model
[ "Makes", "a", "handler", "require", "that", "a", "request", "body", "that", "map", "towards", "the", "given", "model", "is", "provided", ".", "Unless", "the", "validate", "option", "is", "set", "to", "False", "the", "data", "will", "be", "validated", "against", "the", "model", "s", "fields", "." ]
e837ee35a57140994b4e761cc756af172e5d5aa1
https://github.com/brinkframework/brink/blob/e837ee35a57140994b4e761cc756af172e5d5aa1/brink/decorators.py#L4-L26
238,336
rvswift/EB
EB/builder/utilities/csv_interface.py
read_csv
def read_csv(csv_file, options, ensemble_list=None): """ Read csv and return molList, otherwise print error and exit. """ name, ext = os.path.splitext(csv_file) try: if ext == '.gz': f = gzip.open(csv_file, 'rb') else: f = open(csv_file, 'rU') except IOError: print(" \n '{f}' could not be opened\n".format(f=os.path.basename(csv_file))) sys.exit(1) csv_reader = csv.reader(f) molList = [] line_number = 1 for line in csv_reader: if line_number == 1: if ensemble_list: prop_indices = read_header(line, options, ensemble_list) else: prop_indices = read_header(line, options) else: mol = Molecule() if ensemble_list: mol = read_line(line, options, prop_indices, mol, ensemble_list) else: mol = read_line(line, options, prop_indices, mol) if mol == 1: print(" skipping molecule {m}\n".format(m=(line_number - 1))) else: molList.append(mol) line_number += 1 return molList
python
def read_csv(csv_file, options, ensemble_list=None): """ Read csv and return molList, otherwise print error and exit. """ name, ext = os.path.splitext(csv_file) try: if ext == '.gz': f = gzip.open(csv_file, 'rb') else: f = open(csv_file, 'rU') except IOError: print(" \n '{f}' could not be opened\n".format(f=os.path.basename(csv_file))) sys.exit(1) csv_reader = csv.reader(f) molList = [] line_number = 1 for line in csv_reader: if line_number == 1: if ensemble_list: prop_indices = read_header(line, options, ensemble_list) else: prop_indices = read_header(line, options) else: mol = Molecule() if ensemble_list: mol = read_line(line, options, prop_indices, mol, ensemble_list) else: mol = read_line(line, options, prop_indices, mol) if mol == 1: print(" skipping molecule {m}\n".format(m=(line_number - 1))) else: molList.append(mol) line_number += 1 return molList
[ "def", "read_csv", "(", "csv_file", ",", "options", ",", "ensemble_list", "=", "None", ")", ":", "name", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "csv_file", ")", "try", ":", "if", "ext", "==", "'.gz'", ":", "f", "=", "gzip", ".", "open", "(", "csv_file", ",", "'rb'", ")", "else", ":", "f", "=", "open", "(", "csv_file", ",", "'rU'", ")", "except", "IOError", ":", "print", "(", "\" \\n '{f}' could not be opened\\n\"", ".", "format", "(", "f", "=", "os", ".", "path", ".", "basename", "(", "csv_file", ")", ")", ")", "sys", ".", "exit", "(", "1", ")", "csv_reader", "=", "csv", ".", "reader", "(", "f", ")", "molList", "=", "[", "]", "line_number", "=", "1", "for", "line", "in", "csv_reader", ":", "if", "line_number", "==", "1", ":", "if", "ensemble_list", ":", "prop_indices", "=", "read_header", "(", "line", ",", "options", ",", "ensemble_list", ")", "else", ":", "prop_indices", "=", "read_header", "(", "line", ",", "options", ")", "else", ":", "mol", "=", "Molecule", "(", ")", "if", "ensemble_list", ":", "mol", "=", "read_line", "(", "line", ",", "options", ",", "prop_indices", ",", "mol", ",", "ensemble_list", ")", "else", ":", "mol", "=", "read_line", "(", "line", ",", "options", ",", "prop_indices", ",", "mol", ")", "if", "mol", "==", "1", ":", "print", "(", "\" skipping molecule {m}\\n\"", ".", "format", "(", "m", "=", "(", "line_number", "-", "1", ")", ")", ")", "else", ":", "molList", ".", "append", "(", "mol", ")", "line_number", "+=", "1", "return", "molList" ]
Read csv and return molList, otherwise print error and exit.
[ "Read", "csv", "and", "return", "molList", "otherwise", "print", "error", "and", "exit", "." ]
341880b79faf8147dc9fa6e90438531cd09fabcc
https://github.com/rvswift/EB/blob/341880b79faf8147dc9fa6e90438531cd09fabcc/EB/builder/utilities/csv_interface.py#L11-L49
238,337
jespino/anillo
anillo/handlers/routing.py
_build_rules
def _build_rules(specs): """Adapts the list of anillo urlmapping specs into a list of werkzeug rules or rules subclasses. :param list specs: A list of anillo url mapping specs. :return: generator """ for spec in specs: if "context" in spec: yield Context(spec["context"], list(_build_rules(spec.get("routes", [])))) else: rulespec = spec.copy() match = rulespec.pop("match") name = rulespec.pop("name") yield Rule(match, endpoint=name, **rulespec)
python
def _build_rules(specs): """Adapts the list of anillo urlmapping specs into a list of werkzeug rules or rules subclasses. :param list specs: A list of anillo url mapping specs. :return: generator """ for spec in specs: if "context" in spec: yield Context(spec["context"], list(_build_rules(spec.get("routes", [])))) else: rulespec = spec.copy() match = rulespec.pop("match") name = rulespec.pop("name") yield Rule(match, endpoint=name, **rulespec)
[ "def", "_build_rules", "(", "specs", ")", ":", "for", "spec", "in", "specs", ":", "if", "\"context\"", "in", "spec", ":", "yield", "Context", "(", "spec", "[", "\"context\"", "]", ",", "list", "(", "_build_rules", "(", "spec", ".", "get", "(", "\"routes\"", ",", "[", "]", ")", ")", ")", ")", "else", ":", "rulespec", "=", "spec", ".", "copy", "(", ")", "match", "=", "rulespec", ".", "pop", "(", "\"match\"", ")", "name", "=", "rulespec", ".", "pop", "(", "\"name\"", ")", "yield", "Rule", "(", "match", ",", "endpoint", "=", "name", ",", "*", "*", "rulespec", ")" ]
Adapts the list of anillo urlmapping specs into a list of werkzeug rules or rules subclasses. :param list specs: A list of anillo url mapping specs. :return: generator
[ "Adapts", "the", "list", "of", "anillo", "urlmapping", "specs", "into", "a", "list", "of", "werkzeug", "rules", "or", "rules", "subclasses", "." ]
901a84fd2b4fa909bc06e8bd76090457990576a7
https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/handlers/routing.py#L158-L172
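The spec shape this helper consumes can be read off the code above; a small illustrative mapping follows (endpoint names and paths are invented, and any extra keys such as methods are forwarded to werkzeug's Rule):

# Flat rules use "match" and "name"; nested groups use "context" with "routes".
urls = [
    {"match": "/", "name": "index"},
    {"context": "/api", "routes": [
        {"match": "/users", "name": "users-list", "methods": ["GET"]},
        {"match": "/users/<int:id>", "name": "users-detail"},
    ]},
]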
238,338
jespino/anillo
anillo/handlers/routing.py
_build_urlmapping
def _build_urlmapping(urls, strict_slashes=False, **kwargs): """Convers the anillo urlmappings list into werkzeug Map instance. :return: a werkzeug Map instance :rtype: Map """ rules = _build_rules(urls) return Map(rules=list(rules), strict_slashes=strict_slashes, **kwargs)
python
def _build_urlmapping(urls, strict_slashes=False, **kwargs): """Convers the anillo urlmappings list into werkzeug Map instance. :return: a werkzeug Map instance :rtype: Map """ rules = _build_rules(urls) return Map(rules=list(rules), strict_slashes=strict_slashes, **kwargs)
[ "def", "_build_urlmapping", "(", "urls", ",", "strict_slashes", "=", "False", ",", "*", "*", "kwargs", ")", ":", "rules", "=", "_build_rules", "(", "urls", ")", "return", "Map", "(", "rules", "=", "list", "(", "rules", ")", ",", "strict_slashes", "=", "strict_slashes", ",", "*", "*", "kwargs", ")" ]
Converts the anillo urlmappings list into werkzeug Map instance. :return: a werkzeug Map instance :rtype: Map
[ "Converts", "the", "anillo", "urlmappings", "list", "into", "werkzeug", "Map", "instance", "." ]
901a84fd2b4fa909bc06e8bd76090457990576a7
https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/handlers/routing.py#L175-L184
238,339
jespino/anillo
anillo/handlers/routing.py
default_match_error_handler
def default_match_error_handler(exc): """ Default implementation for match error handling. """ if isinstance(exc, NotFound): return http.NotFound() elif isinstance(exc, MethodNotAllowed): return http.MethodNotAllowed() elif isinstance(exc, RequestRedirect): return redirect(exc.new_url) else: raise exc
python
def default_match_error_handler(exc): """ Default implementation for match error handling. """ if isinstance(exc, NotFound): return http.NotFound() elif isinstance(exc, MethodNotAllowed): return http.MethodNotAllowed() elif isinstance(exc, RequestRedirect): return redirect(exc.new_url) else: raise exc
[ "def", "default_match_error_handler", "(", "exc", ")", ":", "if", "isinstance", "(", "exc", ",", "NotFound", ")", ":", "return", "http", ".", "NotFound", "(", ")", "elif", "isinstance", "(", "exc", ",", "MethodNotAllowed", ")", ":", "return", "http", ".", "MethodNotAllowed", "(", ")", "elif", "isinstance", "(", "exc", ",", "RequestRedirect", ")", ":", "return", "redirect", "(", "exc", ".", "new_url", ")", "else", ":", "raise", "exc" ]
Default implementation for match error handling.
[ "Default", "implementation", "for", "match", "error", "handling", "." ]
901a84fd2b4fa909bc06e8bd76090457990576a7
https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/handlers/routing.py#L187-L198
238,340
Capitains/nemo-oauth-plugin
nemo_oauth_plugin/__init__.py
NemoOauthPlugin.r_oauth_login
def r_oauth_login(self): """ Route for OAuth2 Login :param next next url :type str :return: Redirects to OAuth Provider Login URL """ session['next'] = request.args.get('next','') callback_url = self.authcallback if callback_url is None: callback_url = url_for('.r_oauth_authorized', _external=True) return self.authobj.authorize(callback=callback_url)
python
def r_oauth_login(self): """ Route for OAuth2 Login :param next next url :type str :return: Redirects to OAuth Provider Login URL """ session['next'] = request.args.get('next','') callback_url = self.authcallback if callback_url is None: callback_url = url_for('.r_oauth_authorized', _external=True) return self.authobj.authorize(callback=callback_url)
[ "def", "r_oauth_login", "(", "self", ")", ":", "session", "[", "'next'", "]", "=", "request", ".", "args", ".", "get", "(", "'next'", ",", "''", ")", "callback_url", "=", "self", ".", "authcallback", "if", "callback_url", "is", "None", ":", "callback_url", "=", "url_for", "(", "'.r_oauth_authorized'", ",", "_external", "=", "True", ")", "return", "self", ".", "authobj", ".", "authorize", "(", "callback", "=", "callback_url", ")" ]
Route for OAuth2 Login :param next next url :type str :return: Redirects to OAuth Provider Login URL
[ "Route", "for", "OAuth2", "Login" ]
55dd1dc9648040c4e632c4f424e85ef19111ffb5
https://github.com/Capitains/nemo-oauth-plugin/blob/55dd1dc9648040c4e632c4f424e85ef19111ffb5/nemo_oauth_plugin/__init__.py#L72-L85
238,341
jespino/anillo
anillo/middlewares/session.py
wrap_session
def wrap_session(func=None, *, storage=MemoryStorage): """ A middleware that adds the session management to the request. This middleware optionally accepts a `storage` keyword only parameter for provide own session storage implementation. If it is not provided, the in memory session storage will be used. :param storage: A storage factory/constructor. :type storage: callable or class """ if func is None: return functools.partial(wrap_session, storage=storage) # Initialize the storage storage = storage() def wrapper(request, *args, **kwargs): session_key = storage.get_session_key(request) request.session = storage.retrieve(request, session_key) response = func(request, *args, **kwargs) storage.store(request, response, session_key, request.session) storage.persist_session_key(request, response, session_key) return response return wrapper
python
def wrap_session(func=None, *, storage=MemoryStorage): """ A middleware that adds the session management to the request. This middleware optionally accepts a `storage` keyword only parameter for provide own session storage implementation. If it is not provided, the in memory session storage will be used. :param storage: A storage factory/constructor. :type storage: callable or class """ if func is None: return functools.partial(wrap_session, storage=storage) # Initialize the storage storage = storage() def wrapper(request, *args, **kwargs): session_key = storage.get_session_key(request) request.session = storage.retrieve(request, session_key) response = func(request, *args, **kwargs) storage.store(request, response, session_key, request.session) storage.persist_session_key(request, response, session_key) return response return wrapper
[ "def", "wrap_session", "(", "func", "=", "None", ",", "*", ",", "storage", "=", "MemoryStorage", ")", ":", "if", "func", "is", "None", ":", "return", "functools", ".", "partial", "(", "wrap_session", ",", "storage", "=", "storage", ")", "# Initialize the storage", "storage", "=", "storage", "(", ")", "def", "wrapper", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "session_key", "=", "storage", ".", "get_session_key", "(", "request", ")", "request", ".", "session", "=", "storage", ".", "retrieve", "(", "request", ",", "session_key", ")", "response", "=", "func", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "storage", ".", "store", "(", "request", ",", "response", ",", "session_key", ",", "request", ".", "session", ")", "storage", ".", "persist_session_key", "(", "request", ",", "response", ",", "session_key", ")", "return", "response", "return", "wrapper" ]
A middleware that adds the session management to the request. This middleware optionally accepts a `storage` keyword only parameter for provide own session storage implementation. If it is not provided, the in memory session storage will be used. :param storage: A storage factory/constructor. :type storage: callable or class
[ "A", "middleware", "that", "adds", "the", "session", "management", "to", "the", "request", "." ]
901a84fd2b4fa909bc06e8bd76090457990576a7
https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/middlewares/session.py#L30-L59
238,342
NeilGirdhar/rectangle
rectangle/rectangle.py
Rect.with_added_dimensions
def with_added_dimensions(self, n): """ Adds n dimensions and returns the Rect. If n < 0, removes dimensions. """ if n > 0: return Rect(np.pad(self.data, ((0, 0), (0, n)), 'constant')) return Rect(self.data[:, :self.dimensions + n])
python
def with_added_dimensions(self, n): """ Adds n dimensions and returns the Rect. If n < 0, removes dimensions. """ if n > 0: return Rect(np.pad(self.data, ((0, 0), (0, n)), 'constant')) return Rect(self.data[:, :self.dimensions + n])
[ "def", "with_added_dimensions", "(", "self", ",", "n", ")", ":", "if", "n", ">", "0", ":", "return", "Rect", "(", "np", ".", "pad", "(", "self", ".", "data", ",", "(", "(", "0", ",", "0", ")", ",", "(", "0", ",", "n", ")", ")", ",", "'constant'", ")", ")", "return", "Rect", "(", "self", ".", "data", "[", ":", ",", ":", "self", ".", "dimensions", "+", "n", "]", ")" ]
Adds n dimensions and returns the Rect. If n < 0, removes dimensions.
[ "Adds", "n", "dimensions", "and", "returns", "the", "Rect", ".", "If", "n", "<", "0", "removes", "dimensions", "." ]
b0ca25e199cf6e331aef7fd99bda5ba10ae98753
https://github.com/NeilGirdhar/rectangle/blob/b0ca25e199cf6e331aef7fd99bda5ba10ae98753/rectangle/rectangle.py#L41-L48
238,343
NeilGirdhar/rectangle
rectangle/rectangle.py
Rect.clamped
def clamped(self, point_or_rect): """ Returns the point or rectangle clamped to this rectangle. """ if isinstance(point_or_rect, Rect): return Rect(np.minimum(self.mins, point_or_rect.mins), np.maximum(self.maxes, point_or_rect.maxes)) return np.clip(point_or_rect, self.mins, self.maxes)
python
def clamped(self, point_or_rect): """ Returns the point or rectangle clamped to this rectangle. """ if isinstance(point_or_rect, Rect): return Rect(np.minimum(self.mins, point_or_rect.mins), np.maximum(self.maxes, point_or_rect.maxes)) return np.clip(point_or_rect, self.mins, self.maxes)
[ "def", "clamped", "(", "self", ",", "point_or_rect", ")", ":", "if", "isinstance", "(", "point_or_rect", ",", "Rect", ")", ":", "return", "Rect", "(", "np", ".", "minimum", "(", "self", ".", "mins", ",", "point_or_rect", ".", "mins", ")", ",", "np", ".", "maximum", "(", "self", ".", "maxes", ",", "point_or_rect", ".", "maxes", ")", ")", "return", "np", ".", "clip", "(", "point_or_rect", ",", "self", ".", "mins", ",", "self", ".", "maxes", ")" ]
Returns the point or rectangle clamped to this rectangle.
[ "Returns", "the", "point", "or", "rectangle", "clamped", "to", "this", "rectangle", "." ]
b0ca25e199cf6e331aef7fd99bda5ba10ae98753
https://github.com/NeilGirdhar/rectangle/blob/b0ca25e199cf6e331aef7fd99bda5ba10ae98753/rectangle/rectangle.py#L78-L85
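For a plain point the method reduces to np.clip against the rectangle's bounds; a standalone illustration with made-up coordinates:

import numpy as np

mins, maxes = np.array([0.0, 0.0]), np.array([4.0, 3.0])
point = np.array([5.0, -1.0])
print(np.clip(point, mins, maxes))   # [4. 0.]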
238,344
NeilGirdhar/rectangle
rectangle/rectangle.py
Rect.rectified
def rectified(self): """ Fixes swaped min-max pairs. """ return Rect(np.minimum(self.mins, self.maxes), np.maximum(self.maxes, self.mins))
python
def rectified(self): """ Fixes swaped min-max pairs. """ return Rect(np.minimum(self.mins, self.maxes), np.maximum(self.maxes, self.mins))
[ "def", "rectified", "(", "self", ")", ":", "return", "Rect", "(", "np", ".", "minimum", "(", "self", ".", "mins", ",", "self", ".", "maxes", ")", ",", "np", ".", "maximum", "(", "self", ".", "maxes", ",", "self", ".", "mins", ")", ")" ]
Fixes swapped min-max pairs.
[ "Fixes", "swapped", "min", "-", "max", "pairs", "." ]
b0ca25e199cf6e331aef7fd99bda5ba10ae98753
https://github.com/NeilGirdhar/rectangle/blob/b0ca25e199cf6e331aef7fd99bda5ba10ae98753/rectangle/rectangle.py#L87-L92
238,345
baguette-io/baguette-messaging
farine/stream/sse.py
SSEConsumer.start
def start(self, *args, **kwargs):#pylint:disable=unused-argument """ | Launch the SSE consumer. | It can listen forever for messages or just wait for one. :param limit: If set, the consumer listens for a limited number of events. :type limit: int :param timeout: If set, the consumer listens for an event for a limited time. :type timeout: int :rtype: None """ limit = kwargs.get('limit', None) timeout = kwargs.get('timeout', None) self.run(limit=limit, timeout=timeout)
python
def start(self, *args, **kwargs):#pylint:disable=unused-argument """ | Launch the SSE consumer. | It can listen forever for messages or just wait for one. :param limit: If set, the consumer listens for a limited number of events. :type limit: int :param timeout: If set, the consumer listens for an event for a limited time. :type timeout: int :rtype: None """ limit = kwargs.get('limit', None) timeout = kwargs.get('timeout', None) self.run(limit=limit, timeout=timeout)
[ "def", "start", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "#pylint:disable=unused-argument", "limit", "=", "kwargs", ".", "get", "(", "'limit'", ",", "None", ")", "timeout", "=", "kwargs", ".", "get", "(", "'timeout'", ",", "None", ")", "self", ".", "run", "(", "limit", "=", "limit", ",", "timeout", "=", "timeout", ")" ]
| Launch the SSE consumer. | It can listen forever for messages or just wait for one. :param limit: If set, the consumer listens for a limited number of events. :type limit: int :param timeout: If set, the consumer listens for an event for a limited time. :type timeout: int :rtype: None
[ "|", "Launch", "the", "SSE", "consumer", ".", "|", "It", "can", "listen", "forever", "for", "messages", "or", "just", "wait", "for", "one", "." ]
8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1
https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/stream/sse.py#L65-L78
238,346
pip-services3-python/pip-services3-commons-python
pip_services3_commons/random/RandomText.py
RandomText.phrase
def phrase(min_size, max_size = None): """ Generates a random phrase which consists of few words separated by spaces. The first word is capitalized, others are not. :param min_size: (optional) minimum string length. :param max_size: maximum string length. :return: a random phrase. """ max_size = max_size if max_size != None else min_size size = RandomInteger.next_integer(min_size, max_size) if size <= 0: return "" result = "" result += random.choice(_all_words) while len(result) < size: result += " " + random.choice(_all_words).lower() return result
python
def phrase(min_size, max_size = None): """ Generates a random phrase which consists of few words separated by spaces. The first word is capitalized, others are not. :param min_size: (optional) minimum string length. :param max_size: maximum string length. :return: a random phrase. """ max_size = max_size if max_size != None else min_size size = RandomInteger.next_integer(min_size, max_size) if size <= 0: return "" result = "" result += random.choice(_all_words) while len(result) < size: result += " " + random.choice(_all_words).lower() return result
[ "def", "phrase", "(", "min_size", ",", "max_size", "=", "None", ")", ":", "max_size", "=", "max_size", "if", "max_size", "!=", "None", "else", "min_size", "size", "=", "RandomInteger", ".", "next_integer", "(", "min_size", ",", "max_size", ")", "if", "size", "<=", "0", ":", "return", "\"\"", "result", "=", "\"\"", "result", "+=", "random", ".", "choice", "(", "_all_words", ")", "while", "len", "(", "result", ")", "<", "size", ":", "result", "+=", "\" \"", "+", "random", ".", "choice", "(", "_all_words", ")", ".", "lower", "(", ")", "return", "result" ]
Generates a random phrase which consists of few words separated by spaces. The first word is capitalized, others are not. :param min_size: (optional) minimum string length. :param max_size: maximum string length. :return: a random phrase.
[ "Generates", "a", "random", "phrase", "which", "consists", "of", "few", "words", "separated", "by", "spaces", ".", "The", "first", "word", "is", "capitalized", "others", "are", "not", "." ]
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/random/RandomText.py#L124-L145
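A hedged usage sketch; the import path mirrors this record's file path (adjust it if the package re-exports the class elsewhere), and the output is random by design:

from pip_services3_commons.random.RandomText import RandomText

print(RandomText.phrase(10, 20))   # a short capitalized phrase, roughly 10-20 characters long
print(RandomText.words(2, 4))      # between two and four random words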
238,347
pip-services3-python/pip-services3-commons-python
pip_services3_commons/random/RandomText.py
RandomText.words
def words(min_size, max_size = None): """ Generates a random text that consists of random number of random words separated by spaces. :param min_size: (optional) a minimum number of words. :param max_size: a maximum number of words. :return: a random text. """ max_size = max_size if max_size != None else min_size result = "" count = RandomInteger.next_integer(min_size, max_size) for i in range(count): result += random.choice(_all_words) return result
python
def words(min_size, max_size = None): """ Generates a random text that consists of random number of random words separated by spaces. :param min_size: (optional) a minimum number of words. :param max_size: a maximum number of words. :return: a random text. """ max_size = max_size if max_size != None else min_size result = "" count = RandomInteger.next_integer(min_size, max_size) for i in range(count): result += random.choice(_all_words) return result
[ "def", "words", "(", "min_size", ",", "max_size", "=", "None", ")", ":", "max_size", "=", "max_size", "if", "max_size", "!=", "None", "else", "min_size", "result", "=", "\"\"", "count", "=", "RandomInteger", ".", "next_integer", "(", "min_size", ",", "max_size", ")", "for", "i", "in", "range", "(", "count", ")", ":", "result", "+=", "random", ".", "choice", "(", "_all_words", ")", "return", "result" ]
Generates a random text that consists of random number of random words separated by spaces. :param min_size: (optional) a minimum number of words. :param max_size: a maximum number of words. :return: a random text.
[ "Generates", "a", "random", "text", "that", "consists", "of", "random", "number", "of", "random", "words", "separated", "by", "spaces", "." ]
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/random/RandomText.py#L177-L194
238,348
pip-services3-python/pip-services3-commons-python
pip_services3_commons/random/RandomText.py
RandomText.text
def text(min_size, max_size): """ Generates a random text, consisting of first names, last names, colors, stuffs, adjectives, verbs, and punctuation marks. :param min_size: minimum amount of words to generate. Text will contain 'minSize' words if 'maxSize' is omitted. :param max_size: (optional) maximum amount of words to generate. :return: a random text. """ max_size = max_size if max_size != None else min_size size = RandomInteger.next_integer(min_size, max_size) result = "" result += random.choice(_all_words) while len(result) < size: next = random.choice(_all_words) if RandomBoolean.chance(4, 6): next = " " + next.lower() elif RandomBoolean.chance(2, 5): next = random.choice(":,-") + next.lower() elif RandomBoolean.chance(3, 5): next = random.choice(":,-") + " " + next.lower() else: next = random.choice(".!?") + " " + next result += next return result
python
def text(min_size, max_size): """ Generates a random text, consisting of first names, last names, colors, stuffs, adjectives, verbs, and punctuation marks. :param min_size: minimum amount of words to generate. Text will contain 'minSize' words if 'maxSize' is omitted. :param max_size: (optional) maximum amount of words to generate. :return: a random text. """ max_size = max_size if max_size != None else min_size size = RandomInteger.next_integer(min_size, max_size) result = "" result += random.choice(_all_words) while len(result) < size: next = random.choice(_all_words) if RandomBoolean.chance(4, 6): next = " " + next.lower() elif RandomBoolean.chance(2, 5): next = random.choice(":,-") + next.lower() elif RandomBoolean.chance(3, 5): next = random.choice(":,-") + " " + next.lower() else: next = random.choice(".!?") + " " + next result += next return result
[ "def", "text", "(", "min_size", ",", "max_size", ")", ":", "max_size", "=", "max_size", "if", "max_size", "!=", "None", "else", "min_size", "size", "=", "RandomInteger", ".", "next_integer", "(", "min_size", ",", "max_size", ")", "result", "=", "\"\"", "result", "+=", "random", ".", "choice", "(", "_all_words", ")", "while", "len", "(", "result", ")", "<", "size", ":", "next", "=", "random", ".", "choice", "(", "_all_words", ")", "if", "RandomBoolean", ".", "chance", "(", "4", ",", "6", ")", ":", "next", "=", "\" \"", "+", "next", ".", "lower", "(", ")", "elif", "RandomBoolean", ".", "chance", "(", "2", ",", "5", ")", ":", "next", "=", "random", ".", "choice", "(", "\":,-\"", ")", "+", "next", ".", "lower", "(", ")", "elif", "RandomBoolean", ".", "chance", "(", "3", ",", "5", ")", ":", "next", "=", "random", ".", "choice", "(", "\":,-\"", ")", "+", "\" \"", "+", "next", ".", "lower", "(", ")", "else", ":", "next", "=", "random", ".", "choice", "(", "\".!?\"", ")", "+", "\" \"", "+", "next", "result", "+=", "next", "return", "result" ]
Generates a random text, consisting of first names, last names, colors, stuffs, adjectives, verbs, and punctuation marks. :param min_size: minimum amount of words to generate. Text will contain 'minSize' words if 'maxSize' is omitted. :param max_size: (optional) maximum amount of words to generate. :return: a random text.
[ "Generates", "a", "random", "text", "consisting", "of", "first", "names", "last", "names", "colors", "stuffs", "adjectives", "verbs", "and", "punctuation", "marks", "." ]
22cbbb3e91e49717f65c083d36147fdb07ba9e3b
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/random/RandomText.py#L219-L249
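Same import assumption as above; text() keeps appending pseudo-sentences until the requested character budget is reached:

from pip_services3_commons.random.RandomText import RandomText

print(RandomText.text(40, 80))   # roughly 40-80 characters of generated text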
238,349
jespino/anillo
anillo/middlewares/params.py
wrap_form_params
def wrap_form_params(func): """ A middleware that parses the url-encoded body and attach the result to the request `form_params` attribute. This middleware also merges the parsed value with the existing `params` attribute in same way as `wrap_query_params` is doing. """ @functools.wraps(func) def wrapper(request, *args, **kwargs): ctype, pdict = parse_header(request.headers.get('Content-Type', '')) if ctype == "application/x-www-form-urlencoded": params = {} for key, value in parse_qs(request.body.decode("utf-8")).items(): if len(value) == 1: params[key] = value[0] else: params[key] = value request.params = merge_dicts(getattr(request, "params", None), params) request.form_params = params return func(request, *args, **kwargs) return wrapper
python
def wrap_form_params(func): """ A middleware that parses the url-encoded body and attach the result to the request `form_params` attribute. This middleware also merges the parsed value with the existing `params` attribute in same way as `wrap_query_params` is doing. """ @functools.wraps(func) def wrapper(request, *args, **kwargs): ctype, pdict = parse_header(request.headers.get('Content-Type', '')) if ctype == "application/x-www-form-urlencoded": params = {} for key, value in parse_qs(request.body.decode("utf-8")).items(): if len(value) == 1: params[key] = value[0] else: params[key] = value request.params = merge_dicts(getattr(request, "params", None), params) request.form_params = params return func(request, *args, **kwargs) return wrapper
[ "def", "wrap_form_params", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "ctype", ",", "pdict", "=", "parse_header", "(", "request", ".", "headers", ".", "get", "(", "'Content-Type'", ",", "''", ")", ")", "if", "ctype", "==", "\"application/x-www-form-urlencoded\"", ":", "params", "=", "{", "}", "for", "key", ",", "value", "in", "parse_qs", "(", "request", ".", "body", ".", "decode", "(", "\"utf-8\"", ")", ")", ".", "items", "(", ")", ":", "if", "len", "(", "value", ")", "==", "1", ":", "params", "[", "key", "]", "=", "value", "[", "0", "]", "else", ":", "params", "[", "key", "]", "=", "value", "request", ".", "params", "=", "merge_dicts", "(", "getattr", "(", "request", ",", "\"params\"", ",", "None", ")", ",", "params", ")", "request", ".", "form_params", "=", "params", "return", "func", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
A middleware that parses the url-encoded body and attach the result to the request `form_params` attribute. This middleware also merges the parsed value with the existing `params` attribute in same way as `wrap_query_params` is doing.
[ "A", "middleware", "that", "parses", "the", "url", "-", "encoded", "body", "and", "attach", "the", "result", "to", "the", "request", "form_params", "attribute", "." ]
901a84fd2b4fa909bc06e8bd76090457990576a7
https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/middlewares/params.py#L8-L31
238,350
jespino/anillo
anillo/middlewares/params.py
wrap_query_params
def wrap_query_params(func): """ A middleware that parses the urlencoded params from the querystring and attach it to the request `query_params` attribute. This middleware also merges the parsed value with the existing `params` attribute in same way as `wrap_form_params` is doing. """ @functools.wraps(func) def wrapper(request, *args, **kwargs): params = {} for key, value in parse_qs(request.query_string.decode("utf-8")).items(): if len(value) == 1: params[key] = value[0] else: params[key] = value request.params = merge_dicts(getattr(request, "params", None), params) request.query_params = params return func(request, *args, **kwargs) return wrapper
python
def wrap_query_params(func): """ A middleware that parses the urlencoded params from the querystring and attach it to the request `query_params` attribute. This middleware also merges the parsed value with the existing `params` attribute in same way as `wrap_form_params` is doing. """ @functools.wraps(func) def wrapper(request, *args, **kwargs): params = {} for key, value in parse_qs(request.query_string.decode("utf-8")).items(): if len(value) == 1: params[key] = value[0] else: params[key] = value request.params = merge_dicts(getattr(request, "params", None), params) request.query_params = params return func(request, *args, **kwargs) return wrapper
[ "def", "wrap_query_params", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "}", "for", "key", ",", "value", "in", "parse_qs", "(", "request", ".", "query_string", ".", "decode", "(", "\"utf-8\"", ")", ")", ".", "items", "(", ")", ":", "if", "len", "(", "value", ")", "==", "1", ":", "params", "[", "key", "]", "=", "value", "[", "0", "]", "else", ":", "params", "[", "key", "]", "=", "value", "request", ".", "params", "=", "merge_dicts", "(", "getattr", "(", "request", ",", "\"params\"", ",", "None", ")", ",", "params", ")", "request", ".", "query_params", "=", "params", "return", "func", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
A middleware that parses the urlencoded params from the querystring and attach it to the request `query_params` attribute. This middleware also merges the parsed value with the existing `params` attribute in same way as `wrap_form_params` is doing.
[ "A", "middleware", "that", "parses", "the", "urlencoded", "params", "from", "the", "querystring", "and", "attach", "it", "to", "the", "request", "query_params", "attribute", "." ]
901a84fd2b4fa909bc06e8bd76090457990576a7
https://github.com/jespino/anillo/blob/901a84fd2b4fa909bc06e8bd76090457990576a7/anillo/middlewares/params.py#L34-L55
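Both wrap_form_params and wrap_query_params flatten single-valued keys the same way; the core of that flattening can be shown standalone with the stdlib parser (the query string is invented for the demo):

from urllib.parse import parse_qs

params = {}
for key, value in parse_qs("page=2&tag=a&tag=b").items():
    params[key] = value[0] if len(value) == 1 else value
print(params)   # {'page': '2', 'tag': ['a', 'b']}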
238,351
cyrus-/cypy
cypy/astx.py
init_node
def init_node(cls, *args, **kwargs): """Initializes an ast node with the provided attributes. Python 2.6+ supports this in the node class initializers, but Python 2.5 does not, so this is intended to be an equivalent. """ node = cls() for name, value in zip(cls._fields, args): setattr(node, name, value) for name, value in kwargs: setattr(node, name, value) return node
python
def init_node(cls, *args, **kwargs): """Initializes an ast node with the provided attributes. Python 2.6+ supports this in the node class initializers, but Python 2.5 does not, so this is intended to be an equivalent. """ node = cls() for name, value in zip(cls._fields, args): setattr(node, name, value) for name, value in kwargs: setattr(node, name, value) return node
[ "def", "init_node", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "node", "=", "cls", "(", ")", "for", "name", ",", "value", "in", "zip", "(", "cls", ".", "_fields", ",", "args", ")", ":", "setattr", "(", "node", ",", "name", ",", "value", ")", "for", "name", ",", "value", "in", "kwargs", ":", "setattr", "(", "node", ",", "name", ",", "value", ")", "return", "node" ]
Initializes an ast node with the provided attributes. Python 2.6+ supports this in the node class initializers, but Python 2.5 does not, so this is intended to be an equivalent.
[ "Initializes", "an", "ast", "node", "with", "the", "provided", "attributes", ".", "Python", "2", ".", "6", "+", "supports", "this", "in", "the", "node", "class", "initializers", "but", "Python", "2", ".", "5", "does", "not", "so", "this", "is", "intended", "to", "be", "an", "equivalent", "." ]
04bb59e91fa314e8cf987743189c77a9b6bc371d
https://github.com/cyrus-/cypy/blob/04bb59e91fa314e8cf987743189c77a9b6bc371d/cypy/astx.py#L7-L18
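A small usage sketch for init_node above: positional arguments fill cls._fields in order. The sketch sticks to positional arguments, since the keyword branch in the recorded code iterates kwargs directly rather than kwargs.items():

import ast
from cypy.astx import init_node

# ast.Name declares _fields == ('id', 'ctx'), so positional args map onto them
name_node = init_node(ast.Name, "x", ast.Load())
print(name_node.id)                         # x
print(isinstance(name_node.ctx, ast.Load))  # True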
238,352
cyrus-/cypy
cypy/astx.py
extract_the
def extract_the(node, node_type): """Extracts the node of type node_type from the provided node. - If the node is itself of node_type, returns node. - If the node is a suite, it must contain exactly one node of the provided type in its body. """ if isinstance(node, node_type): return node try: body = node.body except AttributeError: raise cypy.Error( "Expecting suite containing a single %s, or a %s, but got %s." % (node_type.__name__, node_type.__name__, type(node).__name__)) if len(body) != 1 or not isinstance(body[0], node_type): raise cypy.Error( "The body must contain exactly one node of type %s." % node_type.__name__) return body[0]
python
def extract_the(node, node_type): """Extracts the node of type node_type from the provided node. - If the node is itself of node_type, returns node. - If the node is a suite, it must contain exactly one node of the provided type in its body. """ if isinstance(node, node_type): return node try: body = node.body except AttributeError: raise cypy.Error( "Expecting suite containing a single %s, or a %s, but got %s." % (node_type.__name__, node_type.__name__, type(node).__name__)) if len(body) != 1 or not isinstance(body[0], node_type): raise cypy.Error( "The body must contain exactly one node of type %s." % node_type.__name__) return body[0]
[ "def", "extract_the", "(", "node", ",", "node_type", ")", ":", "if", "isinstance", "(", "node", ",", "node_type", ")", ":", "return", "node", "try", ":", "body", "=", "node", ".", "body", "except", "AttributeError", ":", "raise", "cypy", ".", "Error", "(", "\"Expecting suite containing a single %s, or a %s, but got %s.\"", "%", "(", "node_type", ".", "__name__", ",", "node_type", ".", "__name__", ",", "type", "(", "node", ")", ".", "__name__", ")", ")", "if", "len", "(", "body", ")", "!=", "1", "or", "not", "isinstance", "(", "body", "[", "0", "]", ",", "node_type", ")", ":", "raise", "cypy", ".", "Error", "(", "\"The body must contain exactly one node of type %s.\"", "%", "node_type", ".", "__name__", ")", "return", "body", "[", "0", "]" ]
Extracts the node of type node_type from the provided node. - If the node is itself of node_type, returns node. - If the node is a suite, it must contain exactly one node of the provided type in its body.
[ "Extracts", "the", "node", "of", "type", "node_type", "from", "the", "provided", "node", ".", "-", "If", "the", "node", "is", "itself", "of", "node_type", "returns", "node", ".", "-", "If", "the", "node", "is", "a", "suite", "it", "must", "contain", "exactly", "one", "node", "of", "the", "provided", "type", "in", "its", "body", "." ]
04bb59e91fa314e8cf987743189c77a9b6bc371d
https://github.com/cyrus-/cypy/blob/04bb59e91fa314e8cf987743189c77a9b6bc371d/cypy/astx.py#L85-L106
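A short sketch of extract_the above, pulling a single FunctionDef out of a parsed module (ast.Module carries a .body list, which is the suite the helper inspects):

import ast
from cypy.astx import extract_the

module = ast.parse("def f(x):\n    return x + 1\n")
# the module body holds exactly one FunctionDef, so extraction succeeds
func_def = extract_the(module, ast.FunctionDef)
print(func_def.name)  # f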
238,353
JoaoFelipe/pyposast
setup.py
get_version
def get_version(): """Use git describe to get version from tag""" proc = subprocess.Popen( ("git", "describe", "--tag", "--always"), stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) output, _ = proc.communicate() result = output.decode("utf-8").strip() if proc.returncode != 0: sys.stderr.write( ">>> Git Describe Error:\n " + result ) return "1+unknown" split = result.split("-", 1) version = "+".join(split).replace("-", ".") if len(split) > 1: sys.stderr.write( ">>> Please verify the commit tag:\n " + version + "\n" ) return version
python
def get_version(): """Use git describe to get version from tag""" proc = subprocess.Popen( ("git", "describe", "--tag", "--always"), stdout=subprocess.PIPE, stderr=subprocess.STDOUT ) output, _ = proc.communicate() result = output.decode("utf-8").strip() if proc.returncode != 0: sys.stderr.write( ">>> Git Describe Error:\n " + result ) return "1+unknown" split = result.split("-", 1) version = "+".join(split).replace("-", ".") if len(split) > 1: sys.stderr.write( ">>> Please verify the commit tag:\n " + version + "\n" ) return version
[ "def", "get_version", "(", ")", ":", "proc", "=", "subprocess", ".", "Popen", "(", "(", "\"git\"", ",", "\"describe\"", ",", "\"--tag\"", ",", "\"--always\"", ")", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "output", ",", "_", "=", "proc", ".", "communicate", "(", ")", "result", "=", "output", ".", "decode", "(", "\"utf-8\"", ")", ".", "strip", "(", ")", "if", "proc", ".", "returncode", "!=", "0", ":", "sys", ".", "stderr", ".", "write", "(", "\">>> Git Describe Error:\\n \"", "+", "result", ")", "return", "\"1+unknown\"", "split", "=", "result", ".", "split", "(", "\"-\"", ",", "1", ")", "version", "=", "\"+\"", ".", "join", "(", "split", ")", ".", "replace", "(", "\"-\"", ",", "\".\"", ")", "if", "len", "(", "split", ")", ">", "1", ":", "sys", ".", "stderr", ".", "write", "(", "\">>> Please verify the commit tag:\\n \"", "+", "version", "+", "\"\\n\"", ")", "return", "version" ]
Use git describe to get version from tag
[ "Use", "git", "describe", "to", "get", "version", "from", "tag" ]
497c88c66b451ff2cd7354be1af070c92e119f41
https://github.com/JoaoFelipe/pyposast/blob/497c88c66b451ff2cd7354be1af070c92e119f41/setup.py#L6-L28
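To make the string handling in get_version above concrete, here is the split/join/replace applied to two made-up git describe outputs:

# a bare tag passes through unchanged
print("+".join("1.0.1".split("-", 1)).replace("-", "."))              # 1.0.1
# describe output with extra commits becomes a local-version style label
print("+".join("1.0.1-3-gabc1234".split("-", 1)).replace("-", "."))   # 1.0.1+3.gabc1234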
238,354
willkg/django-eadred
eadred/helpers.py
get_file
def get_file(fn): """Returns file contents in unicode as list.""" fn = os.path.join(os.path.dirname(__file__), 'data', fn) f = open(fn, 'rb') lines = [line.decode('utf-8').strip() for line in f.readlines()] return lines
python
def get_file(fn): """Returns file contents in unicode as list.""" fn = os.path.join(os.path.dirname(__file__), 'data', fn) f = open(fn, 'rb') lines = [line.decode('utf-8').strip() for line in f.readlines()] return lines
[ "def", "get_file", "(", "fn", ")", ":", "fn", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "'data'", ",", "fn", ")", "f", "=", "open", "(", "fn", ",", "'rb'", ")", "lines", "=", "[", "line", ".", "decode", "(", "'utf-8'", ")", ".", "strip", "(", ")", "for", "line", "in", "f", ".", "readlines", "(", ")", "]", "return", "lines" ]
Returns file contents in unicode as a list.
[ "Returns", "file", "contents", "in", "unicode", "as", "list", "." ]
6e103e098a44ab79b794159453a8c54f73de331c
https://github.com/willkg/django-eadred/blob/6e103e098a44ab79b794159453a8c54f73de331c/eadred/helpers.py#L12-L17
238,355
willkg/django-eadred
eadred/helpers.py
name_generator
def name_generator(names=None): """Creates a generator for generating names. :arg names: list or tuple of names you want to use; defaults to ENGLISH_MONARCHS :returns: generator for names Example:: from eadred.helpers import name_generator gen = name_generator() for i in range(50): mymodel = SomeModel(name=gen.next()) mymodel.save() Example 2: >>> gen = name_generator() >>> gen.next() u'James II' >>> gen.next() u'Stephen of Blois' >>> gen.next() u'James I' .. Note:: This gives full names for a "name" field. It's probably not useful for broken down name fields like "firstname", "lastname", etc. """ if names is None: names = ENGLISH_MONARCHS while True: yield text_type(random.choice(names))
python
def name_generator(names=None): """Creates a generator for generating names. :arg names: list or tuple of names you want to use; defaults to ENGLISH_MONARCHS :returns: generator for names Example:: from eadred.helpers import name_generator gen = name_generator() for i in range(50): mymodel = SomeModel(name=gen.next()) mymodel.save() Example 2: >>> gen = name_generator() >>> gen.next() u'James II' >>> gen.next() u'Stephen of Blois' >>> gen.next() u'James I' .. Note:: This gives full names for a "name" field. It's probably not useful for broken down name fields like "firstname", "lastname", etc. """ if names is None: names = ENGLISH_MONARCHS while True: yield text_type(random.choice(names))
[ "def", "name_generator", "(", "names", "=", "None", ")", ":", "if", "names", "is", "None", ":", "names", "=", "ENGLISH_MONARCHS", "while", "True", ":", "yield", "text_type", "(", "random", ".", "choice", "(", "names", ")", ")" ]
Creates a generator for generating names. :arg names: list or tuple of names you want to use; defaults to ENGLISH_MONARCHS :returns: generator for names Example:: from eadred.helpers import name_generator gen = name_generator() for i in range(50): mymodel = SomeModel(name=gen.next()) mymodel.save() Example 2: >>> gen = name_generator() >>> gen.next() u'James II' >>> gen.next() u'Stephen of Blois' >>> gen.next() u'James I' .. Note:: This gives full names for a "name" field. It's probably not useful for broken down name fields like "firstname", "lastname", etc.
[ "Creates", "a", "generator", "for", "generating", "names", "." ]
6e103e098a44ab79b794159453a8c54f73de331c
https://github.com/willkg/django-eadred/blob/6e103e098a44ab79b794159453a8c54f73de331c/eadred/helpers.py#L63-L103
238,356
willkg/django-eadred
eadred/helpers.py
email_generator
def email_generator(names=None, domains=None, unique=False): """Creates a generator for generating email addresses. :arg names: list of names to use; defaults to ENGLISH_MONARCHS lowercased, ascii-fied, and stripped of whitespace :arg domains: list of domains to use; defaults to DOMAINS :arg unique: True if you want the username part of the email addresses to be unique :returns: generator Example:: from eadred.helpers import email_generator gen = email_generator() for i in range(50): mymodel = SomeModel(email=gen.next()) mymodel.save() Example 2: >>> gen = email_generator() >>> gen.next() 'eadwig@example.net' >>> gen.next() 'henrybeauclerc@mail1.example.org' >>> gen.next() 'williamrufus@example.com' """ if names is None: names = [name.encode('ascii', 'ignore').lower().replace(b' ', b'') for name in ENGLISH_MONARCHS] if domains is None: domains = DOMAINS if unique: uniquifyer = lambda: str(next(_unique_counter)) else: uniquifyer = lambda: '' while True: yield '{0}{1}@{2}'.format( random.choice(names), uniquifyer(), random.choice(domains))
python
def email_generator(names=None, domains=None, unique=False): """Creates a generator for generating email addresses. :arg names: list of names to use; defaults to ENGLISH_MONARCHS lowercased, ascii-fied, and stripped of whitespace :arg domains: list of domains to use; defaults to DOMAINS :arg unique: True if you want the username part of the email addresses to be unique :returns: generator Example:: from eadred.helpers import email_generator gen = email_generator() for i in range(50): mymodel = SomeModel(email=gen.next()) mymodel.save() Example 2: >>> gen = email_generator() >>> gen.next() 'eadwig@example.net' >>> gen.next() 'henrybeauclerc@mail1.example.org' >>> gen.next() 'williamrufus@example.com' """ if names is None: names = [name.encode('ascii', 'ignore').lower().replace(b' ', b'') for name in ENGLISH_MONARCHS] if domains is None: domains = DOMAINS if unique: uniquifyer = lambda: str(next(_unique_counter)) else: uniquifyer = lambda: '' while True: yield '{0}{1}@{2}'.format( random.choice(names), uniquifyer(), random.choice(domains))
[ "def", "email_generator", "(", "names", "=", "None", ",", "domains", "=", "None", ",", "unique", "=", "False", ")", ":", "if", "names", "is", "None", ":", "names", "=", "[", "name", ".", "encode", "(", "'ascii'", ",", "'ignore'", ")", ".", "lower", "(", ")", ".", "replace", "(", "b' '", ",", "b''", ")", "for", "name", "in", "ENGLISH_MONARCHS", "]", "if", "domains", "is", "None", ":", "domains", "=", "DOMAINS", "if", "unique", ":", "uniquifyer", "=", "lambda", ":", "str", "(", "next", "(", "_unique_counter", ")", ")", "else", ":", "uniquifyer", "=", "lambda", ":", "''", "while", "True", ":", "yield", "'{0}{1}@{2}'", ".", "format", "(", "random", ".", "choice", "(", "names", ")", ",", "uniquifyer", "(", ")", ",", "random", ".", "choice", "(", "domains", ")", ")" ]
Creates a generator for generating email addresses. :arg names: list of names to use; defaults to ENGLISH_MONARCHS lowercased, ascii-fied, and stripped of whitespace :arg domains: list of domains to use; defaults to DOMAINS :arg unique: True if you want the username part of the email addresses to be unique :returns: generator Example:: from eadred.helpers import email_generator gen = email_generator() for i in range(50): mymodel = SomeModel(email=gen.next()) mymodel.save() Example 2: >>> gen = email_generator() >>> gen.next() 'eadwig@example.net' >>> gen.next() 'henrybeauclerc@mail1.example.org' >>> gen.next() 'williamrufus@example.com'
[ "Creates", "a", "generator", "for", "generating", "email", "addresses", "." ]
6e103e098a44ab79b794159453a8c54f73de331c
https://github.com/willkg/django-eadred/blob/6e103e098a44ab79b794159453a8c54f73de331c/eadred/helpers.py#L106-L153
238,357
willkg/django-eadred
eadred/helpers.py
paragraph_generator
def paragraph_generator(sentences=None): """Creates a generator for generating paragraphs. :arg sentences: list or tuple of sentences you want to use; defaults to LOREM :returns: generator Example:: from eadred.helpers import paragraph_generator gen = paragraph_generator() for i in range(50): mymodel = SomeModel(description=gen.next()) mymodel.save() """ if sentences is None: sentences = LOREM while True: # Paragraph consists of 1-7 sentences. paragraph = [random.choice(sentences) for num in range(random.randint(1, 7))] yield u' '.join(paragraph)
python
def paragraph_generator(sentences=None): """Creates a generator for generating paragraphs. :arg sentences: list or tuple of sentences you want to use; defaults to LOREM :returns: generator Example:: from eadred.helpers import paragraph_generator gen = paragraph_generator() for i in range(50): mymodel = SomeModel(description=gen.next()) mymodel.save() """ if sentences is None: sentences = LOREM while True: # Paragraph consists of 1-7 sentences. paragraph = [random.choice(sentences) for num in range(random.randint(1, 7))] yield u' '.join(paragraph)
[ "def", "paragraph_generator", "(", "sentences", "=", "None", ")", ":", "if", "sentences", "is", "None", ":", "sentences", "=", "LOREM", "while", "True", ":", "# Paragraph consists of 1-7 sentences.", "paragraph", "=", "[", "random", ".", "choice", "(", "sentences", ")", "for", "num", "in", "range", "(", "random", ".", "randint", "(", "1", ",", "7", ")", ")", "]", "yield", "u' '", ".", "join", "(", "paragraph", ")" ]
Creates a generator for generating paragraphs. :arg sentences: list or tuple of sentences you want to use; defaults to LOREM :returns: generator Example:: from eadred.helpers import paragraph_generator gen = paragraph_generator() for i in range(50): mymodel = SomeModel(description=gen.next()) mymodel.save()
[ "Creates", "a", "generator", "for", "generating", "paragraphs", "." ]
6e103e098a44ab79b794159453a8c54f73de331c
https://github.com/willkg/django-eadred/blob/6e103e098a44ab79b794159453a8c54f73de331c/eadred/helpers.py#L180-L205
238,358
eht16/django-axes-login-actions
axes_login_actions/signals.py
import_dotted_path
def import_dotted_path(path): """ Takes a dotted path to a member name in a module, and returns the member after importing it. """ # stolen from Mezzanine (mezzanine.utils.importing.import_dotted_path) try: module_path, member_name = path.rsplit(".", 1) module = import_module(module_path) return getattr(module, member_name) except (ValueError, ImportError, AttributeError) as e: raise ImportError('Could not import the name: {}: {}'.format(path, e))
python
def import_dotted_path(path): """ Takes a dotted path to a member name in a module, and returns the member after importing it. """ # stolen from Mezzanine (mezzanine.utils.importing.import_dotted_path) try: module_path, member_name = path.rsplit(".", 1) module = import_module(module_path) return getattr(module, member_name) except (ValueError, ImportError, AttributeError) as e: raise ImportError('Could not import the name: {}: {}'.format(path, e))
[ "def", "import_dotted_path", "(", "path", ")", ":", "# stolen from Mezzanine (mezzanine.utils.importing.import_dotted_path)", "try", ":", "module_path", ",", "member_name", "=", "path", ".", "rsplit", "(", "\".\"", ",", "1", ")", "module", "=", "import_module", "(", "module_path", ")", "return", "getattr", "(", "module", ",", "member_name", ")", "except", "(", "ValueError", ",", "ImportError", ",", "AttributeError", ")", "as", "e", ":", "raise", "ImportError", "(", "'Could not import the name: {}: {}'", ".", "format", "(", "path", ",", "e", ")", ")" ]
Takes a dotted path to a member name in a module, and returns the member after importing it.
[ "Takes", "a", "dotted", "path", "to", "a", "member", "name", "in", "a", "module", "and", "returns", "the", "member", "after", "importing", "it", "." ]
1478e85831583eef8b4cb628a9744e5a16f9ef5a
https://github.com/eht16/django-axes-login-actions/blob/1478e85831583eef8b4cb628a9744e5a16f9ef5a/axes_login_actions/signals.py#L15-L26
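A quick usage sketch of import_dotted_path above, resolving a member of the standard library and showing the error path:

from axes_login_actions.signals import import_dotted_path

join = import_dotted_path("os.path.join")
print(join("a", "b"))  # a/b on POSIX

# unresolvable paths are re-raised as ImportError with the offending name
try:
    import_dotted_path("os.path.no_such_member")
except ImportError as err:
    print(err)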
238,359
bear/ninka
ninka/indieauth.py
discoverAuthEndpoints
def discoverAuthEndpoints(authDomain, content=None, look_in={'name': 'link'}, test_urls=True, validateCerts=True, headers={}): """Find the authorization or redirect_uri endpoints for the given authDomain. Only scan html element matching all criteria in look_in. optionally the content to be scanned can be given as an argument. :param authDomain: the URL of the domain to handle :param content: the content to be scanned for the authorization endpoint :param look_in: dictionary with name, id and class_. only element matching all of these will be scanned :param test_urls: optional flag to test URLs for validation :param validateCerts: optional flag to enforce HTTPS certificates if present :param headers: optional headers to send with any request :rtype: list of authorization endpoints """ if test_urls: ronkyuu.URLValidator(message='invalid domain URL')(authDomain) if content: result = {'status': requests.codes.ok, 'headers': None, 'content': content } else: r = requests.get(authDomain, verify=validateCerts, headers=headers) result = {'status': r.status_code, 'headers': r.headers } # check for character encodings and use 'correct' data if 'charset' in r.headers.get('content-type', ''): result['content'] = r.text else: result['content'] = r.content result.update({'authorization_endpoint': set(), 'redirect_uri': set(), 'authDomain': authDomain}) if result['status'] == requests.codes.ok: if 'link' in r.headers: all_links = r.headers['link'].split(',', 1) for link in all_links: if ';' in link: link_parts = link.split(';') for s in link_parts[1:]: if 'rel=' in s: href = link_parts[0].strip() rel = s.strip().replace('rel=', '').replace('"', '') break url = urlparse(href[1:-1]) if url.scheme in ('http', 'https') and rel in ('authorization_endpoint', 'redirect_uri'): result[rel].add(url) all_links = BeautifulSoup(result['content'], _html_parser, parse_only=SoupStrainer(**look_in)).find_all('link') for link in all_links: rel = link.get('rel', None)[0] if rel in ('authorization_endpoint', 'redirect_uri'): href = link.get('href', None) if href: url = urlparse(href) if url.scheme in ('http', 'https'): result[rel].add(url) return result
python
def discoverAuthEndpoints(authDomain, content=None, look_in={'name': 'link'}, test_urls=True, validateCerts=True, headers={}): """Find the authorization or redirect_uri endpoints for the given authDomain. Only scan html element matching all criteria in look_in. optionally the content to be scanned can be given as an argument. :param authDomain: the URL of the domain to handle :param content: the content to be scanned for the authorization endpoint :param look_in: dictionary with name, id and class_. only element matching all of these will be scanned :param test_urls: optional flag to test URLs for validation :param validateCerts: optional flag to enforce HTTPS certificates if present :param headers: optional headers to send with any request :rtype: list of authorization endpoints """ if test_urls: ronkyuu.URLValidator(message='invalid domain URL')(authDomain) if content: result = {'status': requests.codes.ok, 'headers': None, 'content': content } else: r = requests.get(authDomain, verify=validateCerts, headers=headers) result = {'status': r.status_code, 'headers': r.headers } # check for character encodings and use 'correct' data if 'charset' in r.headers.get('content-type', ''): result['content'] = r.text else: result['content'] = r.content result.update({'authorization_endpoint': set(), 'redirect_uri': set(), 'authDomain': authDomain}) if result['status'] == requests.codes.ok: if 'link' in r.headers: all_links = r.headers['link'].split(',', 1) for link in all_links: if ';' in link: link_parts = link.split(';') for s in link_parts[1:]: if 'rel=' in s: href = link_parts[0].strip() rel = s.strip().replace('rel=', '').replace('"', '') break url = urlparse(href[1:-1]) if url.scheme in ('http', 'https') and rel in ('authorization_endpoint', 'redirect_uri'): result[rel].add(url) all_links = BeautifulSoup(result['content'], _html_parser, parse_only=SoupStrainer(**look_in)).find_all('link') for link in all_links: rel = link.get('rel', None)[0] if rel in ('authorization_endpoint', 'redirect_uri'): href = link.get('href', None) if href: url = urlparse(href) if url.scheme in ('http', 'https'): result[rel].add(url) return result
[ "def", "discoverAuthEndpoints", "(", "authDomain", ",", "content", "=", "None", ",", "look_in", "=", "{", "'name'", ":", "'link'", "}", ",", "test_urls", "=", "True", ",", "validateCerts", "=", "True", ",", "headers", "=", "{", "}", ")", ":", "if", "test_urls", ":", "ronkyuu", ".", "URLValidator", "(", "message", "=", "'invalid domain URL'", ")", "(", "authDomain", ")", "if", "content", ":", "result", "=", "{", "'status'", ":", "requests", ".", "codes", ".", "ok", ",", "'headers'", ":", "None", ",", "'content'", ":", "content", "}", "else", ":", "r", "=", "requests", ".", "get", "(", "authDomain", ",", "verify", "=", "validateCerts", ",", "headers", "=", "headers", ")", "result", "=", "{", "'status'", ":", "r", ".", "status_code", ",", "'headers'", ":", "r", ".", "headers", "}", "# check for character encodings and use 'correct' data", "if", "'charset'", "in", "r", ".", "headers", ".", "get", "(", "'content-type'", ",", "''", ")", ":", "result", "[", "'content'", "]", "=", "r", ".", "text", "else", ":", "result", "[", "'content'", "]", "=", "r", ".", "content", "result", ".", "update", "(", "{", "'authorization_endpoint'", ":", "set", "(", ")", ",", "'redirect_uri'", ":", "set", "(", ")", ",", "'authDomain'", ":", "authDomain", "}", ")", "if", "result", "[", "'status'", "]", "==", "requests", ".", "codes", ".", "ok", ":", "if", "'link'", "in", "r", ".", "headers", ":", "all_links", "=", "r", ".", "headers", "[", "'link'", "]", ".", "split", "(", "','", ",", "1", ")", "for", "link", "in", "all_links", ":", "if", "';'", "in", "link", ":", "link_parts", "=", "link", ".", "split", "(", "';'", ")", "for", "s", "in", "link_parts", "[", "1", ":", "]", ":", "if", "'rel='", "in", "s", ":", "href", "=", "link_parts", "[", "0", "]", ".", "strip", "(", ")", "rel", "=", "s", ".", "strip", "(", ")", ".", "replace", "(", "'rel='", ",", "''", ")", ".", "replace", "(", "'\"'", ",", "''", ")", "break", "url", "=", "urlparse", "(", "href", "[", "1", ":", "-", "1", "]", ")", "if", "url", ".", "scheme", "in", "(", "'http'", ",", "'https'", ")", "and", "rel", "in", "(", "'authorization_endpoint'", ",", "'redirect_uri'", ")", ":", "result", "[", "rel", "]", ".", "add", "(", "url", ")", "all_links", "=", "BeautifulSoup", "(", "result", "[", "'content'", "]", ",", "_html_parser", ",", "parse_only", "=", "SoupStrainer", "(", "*", "*", "look_in", ")", ")", ".", "find_all", "(", "'link'", ")", "for", "link", "in", "all_links", ":", "rel", "=", "link", ".", "get", "(", "'rel'", ",", "None", ")", "[", "0", "]", "if", "rel", "in", "(", "'authorization_endpoint'", ",", "'redirect_uri'", ")", ":", "href", "=", "link", ".", "get", "(", "'href'", ",", "None", ")", "if", "href", ":", "url", "=", "urlparse", "(", "href", ")", "if", "url", ".", "scheme", "in", "(", "'http'", ",", "'https'", ")", ":", "result", "[", "rel", "]", ".", "add", "(", "url", ")", "return", "result" ]
Find the authorization or redirect_uri endpoints for the given authDomain. Only scan html elements matching all criteria in look_in. Optionally, the content to be scanned can be given as an argument. :param authDomain: the URL of the domain to handle :param content: the content to be scanned for the authorization endpoint :param look_in: dictionary with name, id and class_; only elements matching all of these will be scanned :param test_urls: optional flag to test URLs for validation :param validateCerts: optional flag to enforce HTTPS certificates if present :param headers: optional headers to send with any request :rtype: dict with the response status, headers and content plus sets of discovered authorization_endpoint and redirect_uri urls
[ "Find", "the", "authorization", "or", "redirect_uri", "endpoints", "for", "the", "given", "authDomain", ".", "Only", "scan", "html", "element", "matching", "all", "criteria", "in", "look_in", "." ]
4d13a48d2b8857496f7fc470b0c379486351c89b
https://github.com/bear/ninka/blob/4d13a48d2b8857496f7fc470b0c379486351c89b/ninka/indieauth.py#L38-L101
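A hedged sketch of calling discoverAuthEndpoints above; the domain is illustrative and the call performs a live HTTP GET. Per the recorded code, each collected endpoint is a urlparse() result:

from ninka.indieauth import discoverAuthEndpoints

result = discoverAuthEndpoints("https://example.com/")
for url in result["authorization_endpoint"]:
    # rebuild the text form of each discovered endpoint
    print(url.geturl())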
238,360
bear/ninka
ninka/indieauth.py
validateAuthCode
def validateAuthCode(code, redirect_uri, client_id, state=None, validationEndpoint='https://indieauth.com/auth', headers={}): """Call authorization endpoint to validate given auth code. :param code: the auth code to validate :param redirect_uri: redirect_uri for the given auth code :param client_id: where to find the auth endpoint for the given auth code :param state: state for the given auth code :param validationEndpoint: URL to make the validation request at :param headers: optional headers to send with any request :rtype: True if auth code is valid """ payload = {'code': code, 'redirect_uri': redirect_uri, 'client_id': client_id, } if state is not None: payload['state'] = state authURL = None authEndpoints = discoverAuthEndpoints(client_id, headers=headers) for url in authEndpoints['authorization_endpoint']: authURL = url break if authURL is not None: validationEndpoint = ParseResult(authURL.scheme, authURL.netloc, authURL.path, '', '', '').geturl() r = requests.post(validationEndpoint, verify=True, data=payload, headers=headers) result = { 'status': r.status_code, 'headers': r.headers } if 'charset' in r.headers.get('content-type', ''): result['content'] = r.text else: result['content'] = r.content if r.status_code == requests.codes.ok: result['response'] = parse_qs(result['content']) return result
python
def validateAuthCode(code, redirect_uri, client_id, state=None, validationEndpoint='https://indieauth.com/auth', headers={}): """Call authorization endpoint to validate given auth code. :param code: the auth code to validate :param redirect_uri: redirect_uri for the given auth code :param client_id: where to find the auth endpoint for the given auth code :param state: state for the given auth code :param validationEndpoint: URL to make the validation request at :param headers: optional headers to send with any request :rtype: True if auth code is valid """ payload = {'code': code, 'redirect_uri': redirect_uri, 'client_id': client_id, } if state is not None: payload['state'] = state authURL = None authEndpoints = discoverAuthEndpoints(client_id, headers=headers) for url in authEndpoints['authorization_endpoint']: authURL = url break if authURL is not None: validationEndpoint = ParseResult(authURL.scheme, authURL.netloc, authURL.path, '', '', '').geturl() r = requests.post(validationEndpoint, verify=True, data=payload, headers=headers) result = { 'status': r.status_code, 'headers': r.headers } if 'charset' in r.headers.get('content-type', ''): result['content'] = r.text else: result['content'] = r.content if r.status_code == requests.codes.ok: result['response'] = parse_qs(result['content']) return result
[ "def", "validateAuthCode", "(", "code", ",", "redirect_uri", ",", "client_id", ",", "state", "=", "None", ",", "validationEndpoint", "=", "'https://indieauth.com/auth'", ",", "headers", "=", "{", "}", ")", ":", "payload", "=", "{", "'code'", ":", "code", ",", "'redirect_uri'", ":", "redirect_uri", ",", "'client_id'", ":", "client_id", ",", "}", "if", "state", "is", "not", "None", ":", "payload", "[", "'state'", "]", "=", "state", "authURL", "=", "None", "authEndpoints", "=", "discoverAuthEndpoints", "(", "client_id", ",", "headers", "=", "headers", ")", "for", "url", "in", "authEndpoints", "[", "'authorization_endpoint'", "]", ":", "authURL", "=", "url", "break", "if", "authURL", "is", "not", "None", ":", "validationEndpoint", "=", "ParseResult", "(", "authURL", ".", "scheme", ",", "authURL", ".", "netloc", ",", "authURL", ".", "path", ",", "''", ",", "''", ",", "''", ")", ".", "geturl", "(", ")", "r", "=", "requests", ".", "post", "(", "validationEndpoint", ",", "verify", "=", "True", ",", "data", "=", "payload", ",", "headers", "=", "headers", ")", "result", "=", "{", "'status'", ":", "r", ".", "status_code", ",", "'headers'", ":", "r", ".", "headers", "}", "if", "'charset'", "in", "r", ".", "headers", ".", "get", "(", "'content-type'", ",", "''", ")", ":", "result", "[", "'content'", "]", "=", "r", ".", "text", "else", ":", "result", "[", "'content'", "]", "=", "r", ".", "content", "if", "r", ".", "status_code", "==", "requests", ".", "codes", ".", "ok", ":", "result", "[", "'response'", "]", "=", "parse_qs", "(", "result", "[", "'content'", "]", ")", "return", "result" ]
Call the authorization endpoint to validate the given auth code. :param code: the auth code to validate :param redirect_uri: redirect_uri for the given auth code :param client_id: where to find the auth endpoint for the given auth code :param state: state for the given auth code :param validationEndpoint: URL to make the validation request at :param headers: optional headers to send with any request :rtype: dict with the response status, headers, content and, when the request succeeds, the parsed response
[ "Call", "authorization", "endpoint", "to", "validate", "given", "auth", "code", "." ]
4d13a48d2b8857496f7fc470b0c379486351c89b
https://github.com/bear/ninka/blob/4d13a48d2b8857496f7fc470b0c379486351c89b/ninka/indieauth.py#L103-L140
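And a matching sketch for validateAuthCode above. Every value is a placeholder; the point is only the call shape and where the parsed response lands (the call also performs live requests, including endpoint discovery against client_id):

from ninka.indieauth import validateAuthCode

result = validateAuthCode(
    code="placeholder-code",
    redirect_uri="https://client.example.com/callback",
    client_id="https://client.example.com/",
)
if result["status"] == 200:
    # parse_qs output, e.g. {'me': ['https://user.example.com/']}
    print(result["response"])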
238,361
syndbg/demonoid-api
demonoid/parser.py
Parser.get_params
def get_params(url, ignore_empty=False): """ Static method that parses a given `url` and retrieves `url`'s parameters. Could also ignore empty value parameters. Handles parameters-only urls as `q=banana&peel=false`. :param str url: url to parse :param bool ignore_empty: ignore empty value parameter or not :return: dictionary of params and their values :rtype: dict """ try: params_start_index = url.index('?') except ValueError: params_start_index = 0 params_string = url[params_start_index + 1:] params_dict = {} for pair in params_string.split('&'): if not pair: continue splitted = pair.split('=') param, value = splitted if not value and ignore_empty: continue value = int(value) if value.isdigit() else value params_dict[param] = value return params_dict
python
def get_params(url, ignore_empty=False): """ Static method that parses a given `url` and retrieves `url`'s parameters. Could also ignore empty value parameters. Handles parameters-only urls as `q=banana&peel=false`. :param str url: url to parse :param bool ignore_empty: ignore empty value parameter or not :return: dictionary of params and their values :rtype: dict """ try: params_start_index = url.index('?') except ValueError: params_start_index = 0 params_string = url[params_start_index + 1:] params_dict = {} for pair in params_string.split('&'): if not pair: continue splitted = pair.split('=') param, value = splitted if not value and ignore_empty: continue value = int(value) if value.isdigit() else value params_dict[param] = value return params_dict
[ "def", "get_params", "(", "url", ",", "ignore_empty", "=", "False", ")", ":", "try", ":", "params_start_index", "=", "url", ".", "index", "(", "'?'", ")", "except", "ValueError", ":", "params_start_index", "=", "0", "params_string", "=", "url", "[", "params_start_index", "+", "1", ":", "]", "params_dict", "=", "{", "}", "for", "pair", "in", "params_string", ".", "split", "(", "'&'", ")", ":", "if", "not", "pair", ":", "continue", "splitted", "=", "pair", ".", "split", "(", "'='", ")", "param", ",", "value", "=", "splitted", "if", "not", "value", "and", "ignore_empty", ":", "continue", "value", "=", "int", "(", "value", ")", "if", "value", ".", "isdigit", "(", ")", "else", "value", "params_dict", "[", "param", "]", "=", "value", "return", "params_dict" ]
Static method that parses a given `url` and retrieves `url`'s parameters. Could also ignore empty value parameters. Handles parameters-only urls such as `q=banana&peel=false`. :param str url: url to parse :param bool ignore_empty: whether to ignore parameters with empty values :return: dictionary of params and their values :rtype: dict
[ "Static", "method", "that", "parses", "a", "given", "url", "and", "retrieves", "url", "s", "parameters", ".", "Could", "also", "ignore", "empty", "value", "parameters", ".", "Handles", "parameters", "-", "only", "urls", "as", "q", "=", "banana&peel", "=", "false", "." ]
518aa389ac91b5243b92fc19923103f31041a61e
https://github.com/syndbg/demonoid-api/blob/518aa389ac91b5243b92fc19923103f31041a61e/demonoid/parser.py#L49-L75
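A worked example for Parser.get_params above; the URL is made up for illustration. Per the recorded code, digit-only values are coerced to int and ignore_empty drops parameters with empty values:

from demonoid.parser import Parser

url = "https://example.com/files/?category=1&query="
print(Parser.get_params(url))                     # {'category': 1, 'query': ''}
print(Parser.get_params(url, ignore_empty=True))  # {'category': 1}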
238,362
syndbg/demonoid-api
demonoid/parser.py
Parser.parse_date
def parse_date(table_data): """ Static method that parses a given table data element with `Url.DATE_STRPTIME_FORMAT` and creates a `date` object from td's text contnet. :param lxml.HtmlElement table_data: table_data tag to parse :return: date object from td's text date :rtype: datetime.date """ text = table_data.text.split('Added on ') # Then it's 'Added today'. Hacky if len(text) < 2: return date.today() # Looks like ['', 'Thursday, Mar 05, 2015'] return datetime.strptime(text[1], Parser.DATE_STRPTIME_FORMAT).date()
python
def parse_date(table_data): """ Static method that parses a given table data element with `Url.DATE_STRPTIME_FORMAT` and creates a `date` object from td's text contnet. :param lxml.HtmlElement table_data: table_data tag to parse :return: date object from td's text date :rtype: datetime.date """ text = table_data.text.split('Added on ') # Then it's 'Added today'. Hacky if len(text) < 2: return date.today() # Looks like ['', 'Thursday, Mar 05, 2015'] return datetime.strptime(text[1], Parser.DATE_STRPTIME_FORMAT).date()
[ "def", "parse_date", "(", "table_data", ")", ":", "text", "=", "table_data", ".", "text", ".", "split", "(", "'Added on '", ")", "# Then it's 'Added today'. Hacky", "if", "len", "(", "text", ")", "<", "2", ":", "return", "date", ".", "today", "(", ")", "# Looks like ['', 'Thursday, Mar 05, 2015']", "return", "datetime", ".", "strptime", "(", "text", "[", "1", "]", ",", "Parser", ".", "DATE_STRPTIME_FORMAT", ")", ".", "date", "(", ")" ]
Static method that parses a given table data element with `Parser.DATE_STRPTIME_FORMAT` and creates a `date` object from td's text content. :param lxml.HtmlElement table_data: table_data tag to parse :return: date object parsed from td's text :rtype: datetime.date
[ "Static", "method", "that", "parses", "a", "given", "table", "data", "element", "with", "Url", ".", "DATE_STRPTIME_FORMAT", "and", "creates", "a", "date", "object", "from", "td", "s", "text", "contnet", "." ]
518aa389ac91b5243b92fc19923103f31041a61e
https://github.com/syndbg/demonoid-api/blob/518aa389ac91b5243b92fc19923103f31041a61e/demonoid/parser.py#L78-L91
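A sketch for Parser.parse_date above. The td elements are built with lxml.etree purely for illustration, and the first example assumes the class's DATE_STRPTIME_FORMAT (not shown in this record) matches the "Thursday, Mar 05, 2015" layout:

from datetime import date
from lxml import etree
from demonoid.parser import Parser

td = etree.fromstring("<td>Added on Thursday, Mar 05, 2015</td>")
# expected to give date(2015, 3, 5) if the format assumption holds
print(Parser.parse_date(td))

td_today = etree.fromstring("<td>Added today</td>")
print(Parser.parse_date(td_today) == date.today())  # True, the fallback branch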
238,363
syndbg/demonoid-api
demonoid/parser.py
Parser.parse_first_row
def parse_first_row(row, url_instance): """ Static method that parses a given table row element by executing `Parser.FIRST_ROW_XPATH` and scrapping torrent's id, title, tracked by status, category url and torrent url. Used specifically with a torrent's first table row. :param lxml.HtmlElement row: row to parse :param urls.Url url_instance: Url used to combine base url's with scrapped links from tr :return: scrapped id, title, tracked by status, category url and torrent url :rtype: list """ tags = row.xpath(Parser.FIRST_ROW_XPATH) category_url = url_instance.combine(tags[0].get('href')) title = unicode(tags[1].text) # work with the incomplete URL to get str_id torrent_url = tags[1].get('href') str_id = torrent_url.split('details/')[1] str_id = str_id[:-1] if str_id.endswith('/') else str_id # complete the torrent URL with BASE_URL torrent_url = url_instance.combine(torrent_url) # means that torrent has external property if len(tags) == 3: # monkey patch the missing external query param category_url += '&external=1' tracked_by = '(external)' else: tracked_by = 'Demonoid' return [str_id, title, tracked_by, category_url, torrent_url]
python
def parse_first_row(row, url_instance): """ Static method that parses a given table row element by executing `Parser.FIRST_ROW_XPATH` and scrapping torrent's id, title, tracked by status, category url and torrent url. Used specifically with a torrent's first table row. :param lxml.HtmlElement row: row to parse :param urls.Url url_instance: Url used to combine base url's with scrapped links from tr :return: scrapped id, title, tracked by status, category url and torrent url :rtype: list """ tags = row.xpath(Parser.FIRST_ROW_XPATH) category_url = url_instance.combine(tags[0].get('href')) title = unicode(tags[1].text) # work with the incomplete URL to get str_id torrent_url = tags[1].get('href') str_id = torrent_url.split('details/')[1] str_id = str_id[:-1] if str_id.endswith('/') else str_id # complete the torrent URL with BASE_URL torrent_url = url_instance.combine(torrent_url) # means that torrent has external property if len(tags) == 3: # monkey patch the missing external query param category_url += '&external=1' tracked_by = '(external)' else: tracked_by = 'Demonoid' return [str_id, title, tracked_by, category_url, torrent_url]
[ "def", "parse_first_row", "(", "row", ",", "url_instance", ")", ":", "tags", "=", "row", ".", "xpath", "(", "Parser", ".", "FIRST_ROW_XPATH", ")", "category_url", "=", "url_instance", ".", "combine", "(", "tags", "[", "0", "]", ".", "get", "(", "'href'", ")", ")", "title", "=", "unicode", "(", "tags", "[", "1", "]", ".", "text", ")", "# work with the incomplete URL to get str_id", "torrent_url", "=", "tags", "[", "1", "]", ".", "get", "(", "'href'", ")", "str_id", "=", "torrent_url", ".", "split", "(", "'details/'", ")", "[", "1", "]", "str_id", "=", "str_id", "[", ":", "-", "1", "]", "if", "str_id", ".", "endswith", "(", "'/'", ")", "else", "str_id", "# complete the torrent URL with BASE_URL", "torrent_url", "=", "url_instance", ".", "combine", "(", "torrent_url", ")", "# means that torrent has external property", "if", "len", "(", "tags", ")", "==", "3", ":", "# monkey patch the missing external query param", "category_url", "+=", "'&external=1'", "tracked_by", "=", "'(external)'", "else", ":", "tracked_by", "=", "'Demonoid'", "return", "[", "str_id", ",", "title", ",", "tracked_by", ",", "category_url", ",", "torrent_url", "]" ]
Static method that parses a given table row element by executing `Parser.FIRST_ROW_XPATH` and scraping the torrent's id, title, tracked by status, category url and torrent url. Used specifically with a torrent's first table row. :param lxml.HtmlElement row: row to parse :param urls.Url url_instance: Url used to combine base urls with scraped links from tr :return: scraped id, title, tracked by status, category url and torrent url :rtype: list
[ "Static", "method", "that", "parses", "a", "given", "table", "row", "element", "by", "executing", "Parser", ".", "FIRST_ROW_XPATH", "and", "scrapping", "torrent", "s", "id", "title", "tracked", "by", "status", "category", "url", "and", "torrent", "url", ".", "Used", "specifically", "with", "a", "torrent", "s", "first", "table", "row", "." ]
518aa389ac91b5243b92fc19923103f31041a61e
https://github.com/syndbg/demonoid-api/blob/518aa389ac91b5243b92fc19923103f31041a61e/demonoid/parser.py#L94-L121
238,364
syndbg/demonoid-api
demonoid/parser.py
Parser.parse_second_row
def parse_second_row(row, url): """ Static method that parses a given table row element by using helper methods `Parser.parse_category_subcategory_and_or_quality`, `Parser.parse_torrent_link` and scrapping torrent's category, subcategory, quality, language, user, user url, torrent link, size, comments, times completed, seeders and leechers. Used specifically with a torrent's second table row. :param lxml.HtmlElement row: row to parse :param urls.Url url_instance: Url used to combine base url's with scrapped links from tr :return: scrapped category, subcategory, quality, language, user, user url, torrent link, size, comments, times completed, seeders and leechers :rtype: list """ tags = row.findall('./td') category, subcategory, quality, language = Parser.parse_torrent_properties(tags[0]) user_info = tags[1].find('./a') user = user_info.text_content() user_url = url.combine(user_info.get('href')) # Two urls - one is spam, second is torrent url. # Don't combine it with BASE_URL, since it's an absolute url. torrent_link = Parser.parse_torrent_link(tags[2]) size = tags[3].text # as 10.5 GB comments = tags[4].text times_completed = tags[5].text seeders = tags[6].text leechers = tags[7].text return [category, subcategory, quality, language, user, user_url, torrent_link, size, comments, times_completed, seeders, leechers]
python
def parse_second_row(row, url): """ Static method that parses a given table row element by using helper methods `Parser.parse_category_subcategory_and_or_quality`, `Parser.parse_torrent_link` and scrapping torrent's category, subcategory, quality, language, user, user url, torrent link, size, comments, times completed, seeders and leechers. Used specifically with a torrent's second table row. :param lxml.HtmlElement row: row to parse :param urls.Url url_instance: Url used to combine base url's with scrapped links from tr :return: scrapped category, subcategory, quality, language, user, user url, torrent link, size, comments, times completed, seeders and leechers :rtype: list """ tags = row.findall('./td') category, subcategory, quality, language = Parser.parse_torrent_properties(tags[0]) user_info = tags[1].find('./a') user = user_info.text_content() user_url = url.combine(user_info.get('href')) # Two urls - one is spam, second is torrent url. # Don't combine it with BASE_URL, since it's an absolute url. torrent_link = Parser.parse_torrent_link(tags[2]) size = tags[3].text # as 10.5 GB comments = tags[4].text times_completed = tags[5].text seeders = tags[6].text leechers = tags[7].text return [category, subcategory, quality, language, user, user_url, torrent_link, size, comments, times_completed, seeders, leechers]
[ "def", "parse_second_row", "(", "row", ",", "url", ")", ":", "tags", "=", "row", ".", "findall", "(", "'./td'", ")", "category", ",", "subcategory", ",", "quality", ",", "language", "=", "Parser", ".", "parse_torrent_properties", "(", "tags", "[", "0", "]", ")", "user_info", "=", "tags", "[", "1", "]", ".", "find", "(", "'./a'", ")", "user", "=", "user_info", ".", "text_content", "(", ")", "user_url", "=", "url", ".", "combine", "(", "user_info", ".", "get", "(", "'href'", ")", ")", "# Two urls - one is spam, second is torrent url.", "# Don't combine it with BASE_URL, since it's an absolute url.", "torrent_link", "=", "Parser", ".", "parse_torrent_link", "(", "tags", "[", "2", "]", ")", "size", "=", "tags", "[", "3", "]", ".", "text", "# as 10.5 GB", "comments", "=", "tags", "[", "4", "]", ".", "text", "times_completed", "=", "tags", "[", "5", "]", ".", "text", "seeders", "=", "tags", "[", "6", "]", ".", "text", "leechers", "=", "tags", "[", "7", "]", ".", "text", "return", "[", "category", ",", "subcategory", ",", "quality", ",", "language", ",", "user", ",", "user_url", ",", "torrent_link", ",", "size", ",", "comments", ",", "times_completed", ",", "seeders", ",", "leechers", "]" ]
Static method that parses a given table row element by using the helper methods `Parser.parse_category_subcategory_and_or_quality` and `Parser.parse_torrent_link`, scraping the torrent's category, subcategory, quality, language, user, user url, torrent link, size, comments, times completed, seeders and leechers. Used specifically with a torrent's second table row. :param lxml.HtmlElement row: row to parse :param urls.Url url_instance: Url used to combine base urls with scraped links from tr :return: scraped category, subcategory, quality, language, user, user url, torrent link, size, comments, times completed, seeders and leechers :rtype: list
[ "Static", "method", "that", "parses", "a", "given", "table", "row", "element", "by", "using", "helper", "methods", "Parser", ".", "parse_category_subcategory_and_or_quality", "Parser", ".", "parse_torrent_link", "and", "scrapping", "torrent", "s", "category", "subcategory", "quality", "language", "user", "user", "url", "torrent", "link", "size", "comments", "times", "completed", "seeders", "and", "leechers", ".", "Used", "specifically", "with", "a", "torrent", "s", "second", "table", "row", "." ]
518aa389ac91b5243b92fc19923103f31041a61e
https://github.com/syndbg/demonoid-api/blob/518aa389ac91b5243b92fc19923103f31041a61e/demonoid/parser.py#L124-L151
238,365
syndbg/demonoid-api
demonoid/parser.py
Parser.parse_torrent_properties
def parse_torrent_properties(table_datas): """ Static method that parses a given list of table data elements and using helper methods `Parser.is_subcategory`, `Parser.is_quality`, `Parser.is_language`, collects torrent properties. :param list lxml.HtmlElement table_datas: table_datas to parse :return: identified category, subcategory, quality and languages. :rtype: dict """ output = {'category': table_datas[0].text, 'subcategory': None, 'quality': None, 'language': None} for i in range(1, len(table_datas)): td = table_datas[i] url = td.get('href') params = Parser.get_params(url) if Parser.is_subcategory(params) and not output['subcategory']: output['subcategory'] = td.text elif Parser.is_quality(params) and not output['quality']: output['quality'] = td.text elif Parser.is_language(params) and not output['language']: output['language'] = td.text return output
python
def parse_torrent_properties(table_datas): """ Static method that parses a given list of table data elements and using helper methods `Parser.is_subcategory`, `Parser.is_quality`, `Parser.is_language`, collects torrent properties. :param list lxml.HtmlElement table_datas: table_datas to parse :return: identified category, subcategory, quality and languages. :rtype: dict """ output = {'category': table_datas[0].text, 'subcategory': None, 'quality': None, 'language': None} for i in range(1, len(table_datas)): td = table_datas[i] url = td.get('href') params = Parser.get_params(url) if Parser.is_subcategory(params) and not output['subcategory']: output['subcategory'] = td.text elif Parser.is_quality(params) and not output['quality']: output['quality'] = td.text elif Parser.is_language(params) and not output['language']: output['language'] = td.text return output
[ "def", "parse_torrent_properties", "(", "table_datas", ")", ":", "output", "=", "{", "'category'", ":", "table_datas", "[", "0", "]", ".", "text", ",", "'subcategory'", ":", "None", ",", "'quality'", ":", "None", ",", "'language'", ":", "None", "}", "for", "i", "in", "range", "(", "1", ",", "len", "(", "table_datas", ")", ")", ":", "td", "=", "table_datas", "[", "i", "]", "url", "=", "td", ".", "get", "(", "'href'", ")", "params", "=", "Parser", ".", "get_params", "(", "url", ")", "if", "Parser", ".", "is_subcategory", "(", "params", ")", "and", "not", "output", "[", "'subcategory'", "]", ":", "output", "[", "'subcategory'", "]", "=", "td", ".", "text", "elif", "Parser", ".", "is_quality", "(", "params", ")", "and", "not", "output", "[", "'quality'", "]", ":", "output", "[", "'quality'", "]", "=", "td", ".", "text", "elif", "Parser", ".", "is_language", "(", "params", ")", "and", "not", "output", "[", "'language'", "]", ":", "output", "[", "'language'", "]", "=", "td", ".", "text", "return", "output" ]
Static method that parses a given list of table data elements and, using the helper methods `Parser.is_subcategory`, `Parser.is_quality` and `Parser.is_language`, collects torrent properties. :param list lxml.HtmlElement table_datas: table_datas to parse :return: identified category, subcategory, quality and language. :rtype: dict
[ "Static", "method", "that", "parses", "a", "given", "list", "of", "table", "data", "elements", "and", "using", "helper", "methods", "Parser", ".", "is_subcategory", "Parser", ".", "is_quality", "Parser", ".", "is_language", "collects", "torrent", "properties", "." ]
518aa389ac91b5243b92fc19923103f31041a61e
https://github.com/syndbg/demonoid-api/blob/518aa389ac91b5243b92fc19923103f31041a61e/demonoid/parser.py#L154-L174
238,366
syndbg/demonoid-api
demonoid/parser.py
Parser.parse_torrent_link
def parse_torrent_link(table_data): """ Static method that parses list of table data, finds all anchor elements and gets the torrent url. However the torrent url is usually hidden behind a fake spam ad url, this is handled. :param list lxml.HtmlElement table_data: table_data tag to parse :return: torrent url from anchor (link) element :rtype: str """ anchors = table_data.findall('./a') link_tag = anchors[0] if len(anchors) < 2 else anchors[1] return link_tag.get('href')
python
def parse_torrent_link(table_data): """ Static method that parses list of table data, finds all anchor elements and gets the torrent url. However the torrent url is usually hidden behind a fake spam ad url, this is handled. :param list lxml.HtmlElement table_data: table_data tag to parse :return: torrent url from anchor (link) element :rtype: str """ anchors = table_data.findall('./a') link_tag = anchors[0] if len(anchors) < 2 else anchors[1] return link_tag.get('href')
[ "def", "parse_torrent_link", "(", "table_data", ")", ":", "anchors", "=", "table_data", ".", "findall", "(", "'./a'", ")", "link_tag", "=", "anchors", "[", "0", "]", "if", "len", "(", "anchors", ")", "<", "2", "else", "anchors", "[", "1", "]", "return", "link_tag", ".", "get", "(", "'href'", ")" ]
Static method that parses a list of table data, finds all anchor elements and gets the torrent url. The torrent url is usually hidden behind a fake spam ad url; this is handled. :param list lxml.HtmlElement table_data: table_data tag to parse :return: torrent url from anchor (link) element :rtype: str
[ "Static", "method", "that", "parses", "list", "of", "table", "data", "finds", "all", "anchor", "elements", "and", "gets", "the", "torrent", "url", ".", "However", "the", "torrent", "url", "is", "usually", "hidden", "behind", "a", "fake", "spam", "ad", "url", "this", "is", "handled", "." ]
518aa389ac91b5243b92fc19923103f31041a61e
https://github.com/syndbg/demonoid-api/blob/518aa389ac91b5243b92fc19923103f31041a61e/demonoid/parser.py#L177-L189
238,367
jfilter/get-retries
get_retries/get.py
get
def get(url, max_backoff=32, verbose=False, **kwargs): """Adding retries to requests.get with exponential backoff. Args: url (str): The URL to fetch max_backoff (int): The number of seconds to sleep at maximums verbose (bool): Whether to print exceptions. Returns: Response: For successful requests return requests' response. `None` otherwise. """ sleep_seconds = 1 while sleep_seconds <= max_backoff: try: # you may overwrite `timeout` via `kwargs` response = requests.get(url, **{**{'timeout': 30}, **kwargs}) # for 4xx, return instantly, no hope of success if 400 <= response.status_code < 500: return None # successfully return 2XX and 3xx if 200 <= response.status_code < 400: return response # for 1xx and 5xx, retry except RequestException as e: if verbose: print(str(e)) time.sleep(sleep_seconds) sleep_seconds *= 2 return None
python
def get(url, max_backoff=32, verbose=False, **kwargs): """Adding retries to requests.get with exponential backoff. Args: url (str): The URL to fetch max_backoff (int): The number of seconds to sleep at maximums verbose (bool): Whether to print exceptions. Returns: Response: For successful requests return requests' response. `None` otherwise. """ sleep_seconds = 1 while sleep_seconds <= max_backoff: try: # you may overwrite `timeout` via `kwargs` response = requests.get(url, **{**{'timeout': 30}, **kwargs}) # for 4xx, return instantly, no hope of success if 400 <= response.status_code < 500: return None # successfully return 2XX and 3xx if 200 <= response.status_code < 400: return response # for 1xx and 5xx, retry except RequestException as e: if verbose: print(str(e)) time.sleep(sleep_seconds) sleep_seconds *= 2 return None
[ "def", "get", "(", "url", ",", "max_backoff", "=", "32", ",", "verbose", "=", "False", ",", "*", "*", "kwargs", ")", ":", "sleep_seconds", "=", "1", "while", "sleep_seconds", "<=", "max_backoff", ":", "try", ":", "# you may overwrite `timeout` via `kwargs`", "response", "=", "requests", ".", "get", "(", "url", ",", "*", "*", "{", "*", "*", "{", "'timeout'", ":", "30", "}", ",", "*", "*", "kwargs", "}", ")", "# for 4xx, return instantly, no hope of success", "if", "400", "<=", "response", ".", "status_code", "<", "500", ":", "return", "None", "# successfully return 2XX and 3xx", "if", "200", "<=", "response", ".", "status_code", "<", "400", ":", "return", "response", "# for 1xx and 5xx, retry", "except", "RequestException", "as", "e", ":", "if", "verbose", ":", "print", "(", "str", "(", "e", ")", ")", "time", ".", "sleep", "(", "sleep_seconds", ")", "sleep_seconds", "*=", "2", "return", "None" ]
Adding retries to requests.get with exponential backoff. Args: url (str): The URL to fetch max_backoff (int): The maximum number of seconds to sleep between retries verbose (bool): Whether to print exceptions. Returns: Response: requests' response for successful requests, `None` otherwise.
[ "Adding", "retries", "to", "requests", ".", "get", "with", "exponential", "backoff", "." ]
b6c65f05c1dd24aedce5ff4ea7cbc329efb2e6ed
https://github.com/jfilter/get-retries/blob/b6c65f05c1dd24aedce5ff4ea7cbc329efb2e6ed/get_retries/get.py#L10-L39
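A usage sketch for the retrying get above; the URL is illustrative. Extra kwargs pass straight through to requests.get, so headers or a custom timeout ride along (timeout defaults to 30 seconds unless overridden):

from get_retries.get import get

response = get(
    "https://example.com/resource",
    max_backoff=8,                          # stop retrying once sleeps would exceed 8s
    verbose=True,                           # print request exceptions between attempts
    headers={"Accept": "application/json"},
)
if response is not None:
    print(response.status_code)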
238,368
praekelt/jmbo-music
music/models.py
Track.set_image
def set_image(self): """This code must be in its own method since the fetch functions need credits to be set. m2m fields are not yet set at the end of either the save method or post_save signal.""" if not self.image: scrape_image(self) # If still no image then use first contributor image if not self.image: contributors = self.get_primary_contributors() if contributors: self.image = contributors[0].image self.save(set_image=False) # If still not image then default if not self.image: filename = settings.STATIC_ROOT + 'music/images/default.png' if os.path.exists(filename): image = File( open(filename, 'rb') ) image.name = 'default.png' self.image = image self.save(set_image=False)
python
def set_image(self): """This code must be in its own method since the fetch functions need credits to be set. m2m fields are not yet set at the end of either the save method or post_save signal.""" if not self.image: scrape_image(self) # If still no image then use first contributor image if not self.image: contributors = self.get_primary_contributors() if contributors: self.image = contributors[0].image self.save(set_image=False) # If still not image then default if not self.image: filename = settings.STATIC_ROOT + 'music/images/default.png' if os.path.exists(filename): image = File( open(filename, 'rb') ) image.name = 'default.png' self.image = image self.save(set_image=False)
[ "def", "set_image", "(", "self", ")", ":", "if", "not", "self", ".", "image", ":", "scrape_image", "(", "self", ")", "# If still no image then use first contributor image", "if", "not", "self", ".", "image", ":", "contributors", "=", "self", ".", "get_primary_contributors", "(", ")", "if", "contributors", ":", "self", ".", "image", "=", "contributors", "[", "0", "]", ".", "image", "self", ".", "save", "(", "set_image", "=", "False", ")", "# If still not image then default", "if", "not", "self", ".", "image", ":", "filename", "=", "settings", ".", "STATIC_ROOT", "+", "'music/images/default.png'", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "image", "=", "File", "(", "open", "(", "filename", ",", "'rb'", ")", ")", "image", ".", "name", "=", "'default.png'", "self", ".", "image", "=", "image", "self", ".", "save", "(", "set_image", "=", "False", ")" ]
This code must be in its own method since the fetch functions need credits to be set. m2m fields are not yet set at the end of either the save method or post_save signal.
[ "This", "code", "must", "be", "in", "its", "own", "method", "since", "the", "fetch", "functions", "need", "credits", "to", "be", "set", ".", "m2m", "fields", "are", "not", "yet", "set", "at", "the", "end", "of", "either", "the", "save", "method", "or", "post_save", "signal", "." ]
baeacaa1971b9110ff952fc4eca938c6b426f33e
https://github.com/praekelt/jmbo-music/blob/baeacaa1971b9110ff952fc4eca938c6b426f33e/music/models.py#L120-L144
238,369
gisce/heman
heman/config.py
configure_api
def configure_api(app): """Configure API Endpoints. """ from heman.api.empowering import resources as empowering_resources from heman.api.cch import resources as cch_resources from heman.api.form import resources as form_resources from heman.api import ApiCatchall # Add Empowering resources for resource in empowering_resources: api.add_resource(*resource) # Add CCHFact resources for resource in cch_resources: api.add_resource(*resource) # Add Form resources for resource in form_resources: api.add_resource(*resource) api.add_resource(ApiCatchall, '/<path:path>') api.init_app(app)
python
def configure_api(app): """Configure API Endpoints. """ from heman.api.empowering import resources as empowering_resources from heman.api.cch import resources as cch_resources from heman.api.form import resources as form_resources from heman.api import ApiCatchall # Add Empowering resources for resource in empowering_resources: api.add_resource(*resource) # Add CCHFact resources for resource in cch_resources: api.add_resource(*resource) # Add Form resources for resource in form_resources: api.add_resource(*resource) api.add_resource(ApiCatchall, '/<path:path>') api.init_app(app)
[ "def", "configure_api", "(", "app", ")", ":", "from", "heman", ".", "api", ".", "empowering", "import", "resources", "as", "empowering_resources", "from", "heman", ".", "api", ".", "cch", "import", "resources", "as", "cch_resources", "from", "heman", ".", "api", ".", "form", "import", "resources", "as", "form_resources", "from", "heman", ".", "api", "import", "ApiCatchall", "# Add Empowering resources", "for", "resource", "in", "empowering_resources", ":", "api", ".", "add_resource", "(", "*", "resource", ")", "# Add CCHFact resources", "for", "resource", "in", "cch_resources", ":", "api", ".", "add_resource", "(", "*", "resource", ")", "# Add Form resources", "for", "resource", "in", "form_resources", ":", "api", ".", "add_resource", "(", "*", "resource", ")", "api", ".", "add_resource", "(", "ApiCatchall", ",", "'/<path:path>'", ")", "api", ".", "init_app", "(", "app", ")" ]
Configure API Endpoints.
[ "Configure", "API", "Endpoints", "." ]
cf09fca09953f12454b2910ddfa9d7586709657b
https://github.com/gisce/heman/blob/cf09fca09953f12454b2910ddfa9d7586709657b/heman/config.py#L63-L84
238,370
gisce/heman
heman/config.py
configure_login
def configure_login(app): """Configure login authentification Uses `Flask-Login <https://flask-login.readthedocs.org>`_ """ from heman.auth import login_manager, login login_manager.init_app(app) @app.teardown_request def force_logout(*args, **kwargs): login.logout_user()
python
def configure_login(app): """Configure login authentification Uses `Flask-Login <https://flask-login.readthedocs.org>`_ """ from heman.auth import login_manager, login login_manager.init_app(app) @app.teardown_request def force_logout(*args, **kwargs): login.logout_user()
[ "def", "configure_login", "(", "app", ")", ":", "from", "heman", ".", "auth", "import", "login_manager", ",", "login", "login_manager", ".", "init_app", "(", "app", ")", "@", "app", ".", "teardown_request", "def", "force_logout", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "login", ".", "logout_user", "(", ")" ]
Configure login authentification Uses `Flask-Login <https://flask-login.readthedocs.org>`_
[ "Configure", "login", "authentification" ]
cf09fca09953f12454b2910ddfa9d7586709657b
https://github.com/gisce/heman/blob/cf09fca09953f12454b2910ddfa9d7586709657b/heman/config.py#L112-L122
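A short sketch of wiring both configuration helpers above into a Flask application factory; the import path follows the record's heman/config.py, and the factory itself is illustrative, not from the record.

from flask import Flask
from heman.config import configure_api, configure_login  # path from the record; export assumed

def create_app():
    app = Flask(__name__)
    configure_login(app)   # installs the Flask-Login manager and the per-request logout
    configure_api(app)     # registers the Empowering, CCH, Form and catch-all resources
    return app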
238,371
6809/dragonlib
dragonlib/core/binary_files.py
BinaryFile.load_DragonDosBinary
def load_DragonDosBinary(self, data, strip_padding=True): """ Dragon DOS Binary Format http://dragon32.info/info/binformt.html Offset: Type: Value: 0 byte $55 Constant 1 byte Filetype 2:3 word Load Address 4:5 word Length 6:7 word Exec Address 8 byte $AA Constant 9-xxx byte[] Data """ data = bytearray(data) log.debug("Load Dragon DOS Binary Format.") meta_data = struct.unpack(">BBHHHB", data[:9]) machine_type = meta_data[0] if machine_type != 0x55: log.error("ERROR: Machine type wrong: is $%02X but should be $55!", machine_type) self.file_type = meta_data[1] self.load_address = meta_data[2] self.length = meta_data[3] self.exec_address = meta_data[4] terminator = meta_data[5] if terminator != 0xAA: log.error("ERROR: Terminator byte is $%02X but should be $AA!", terminator) # print("before strip:") # print("\n".join(bin2hexline(data, width=16))) if strip_padding: self.data = data[9:self.length + 9] else: self.data = data[9:] # print("after strip:") # print("\n".join(bin2hexline(self.data, width=16))) log.debug( "File type: $%02X Load Address: $%04X Exec Address: $%04X Length: %iBytes", self.file_type, self.load_address, self.exec_address, self.length ) if self.length != len(self.data): log.error("ERROR: Wrong data size: should be: %i Bytes but is %i Bytes!", self.length, len(self.data)) # log_bytes(self.data, "data in hex: %s", level=logging.DEBUG) self.debug2log(level=logging.DEBUG)
python
def load_DragonDosBinary(self, data, strip_padding=True): """ Dragon DOS Binary Format http://dragon32.info/info/binformt.html Offset: Type: Value: 0 byte $55 Constant 1 byte Filetype 2:3 word Load Address 4:5 word Length 6:7 word Exec Address 8 byte $AA Constant 9-xxx byte[] Data """ data = bytearray(data) log.debug("Load Dragon DOS Binary Format.") meta_data = struct.unpack(">BBHHHB", data[:9]) machine_type = meta_data[0] if machine_type != 0x55: log.error("ERROR: Machine type wrong: is $%02X but should be $55!", machine_type) self.file_type = meta_data[1] self.load_address = meta_data[2] self.length = meta_data[3] self.exec_address = meta_data[4] terminator = meta_data[5] if terminator != 0xAA: log.error("ERROR: Terminator byte is $%02X but should be $AA!", terminator) # print("before strip:") # print("\n".join(bin2hexline(data, width=16))) if strip_padding: self.data = data[9:self.length + 9] else: self.data = data[9:] # print("after strip:") # print("\n".join(bin2hexline(self.data, width=16))) log.debug( "File type: $%02X Load Address: $%04X Exec Address: $%04X Length: %iBytes", self.file_type, self.load_address, self.exec_address, self.length ) if self.length != len(self.data): log.error("ERROR: Wrong data size: should be: %i Bytes but is %i Bytes!", self.length, len(self.data)) # log_bytes(self.data, "data in hex: %s", level=logging.DEBUG) self.debug2log(level=logging.DEBUG)
[ "def", "load_DragonDosBinary", "(", "self", ",", "data", ",", "strip_padding", "=", "True", ")", ":", "data", "=", "bytearray", "(", "data", ")", "log", ".", "debug", "(", "\"Load Dragon DOS Binary Format.\"", ")", "meta_data", "=", "struct", ".", "unpack", "(", "\">BBHHHB\"", ",", "data", "[", ":", "9", "]", ")", "machine_type", "=", "meta_data", "[", "0", "]", "if", "machine_type", "!=", "0x55", ":", "log", ".", "error", "(", "\"ERROR: Machine type wrong: is $%02X but should be $55!\"", ",", "machine_type", ")", "self", ".", "file_type", "=", "meta_data", "[", "1", "]", "self", ".", "load_address", "=", "meta_data", "[", "2", "]", "self", ".", "length", "=", "meta_data", "[", "3", "]", "self", ".", "exec_address", "=", "meta_data", "[", "4", "]", "terminator", "=", "meta_data", "[", "5", "]", "if", "terminator", "!=", "0xAA", ":", "log", ".", "error", "(", "\"ERROR: Terminator byte is $%02X but should be $AA!\"", ",", "terminator", ")", "# print(\"before strip:\")", "# print(\"\\n\".join(bin2hexline(data, width=16)))", "if", "strip_padding", ":", "self", ".", "data", "=", "data", "[", "9", ":", "self", ".", "length", "+", "9", "]", "else", ":", "self", ".", "data", "=", "data", "[", "9", ":", "]", "# print(\"after strip:\")", "# print(\"\\n\".join(bin2hexline(self.data, width=16)))", "log", ".", "debug", "(", "\"File type: $%02X Load Address: $%04X Exec Address: $%04X Length: %iBytes\"", ",", "self", ".", "file_type", ",", "self", ".", "load_address", ",", "self", ".", "exec_address", ",", "self", ".", "length", ")", "if", "self", ".", "length", "!=", "len", "(", "self", ".", "data", ")", ":", "log", ".", "error", "(", "\"ERROR: Wrong data size: should be: %i Bytes but is %i Bytes!\"", ",", "self", ".", "length", ",", "len", "(", "self", ".", "data", ")", ")", "# log_bytes(self.data, \"data in hex: %s\", level=logging.DEBUG)", "self", ".", "debug2log", "(", "level", "=", "logging", ".", "DEBUG", ")" ]
Dragon DOS Binary Format http://dragon32.info/info/binformt.html Offset: Type: Value: 0 byte $55 Constant 1 byte Filetype 2:3 word Load Address 4:5 word Length 6:7 word Exec Address 8 byte $AA Constant 9-xxx byte[] Data
[ "Dragon", "DOS", "Binary", "Format" ]
faa4011e76c5857db96efdb4199e2fd49711e999
https://github.com/6809/dragonlib/blob/faa4011e76c5857db96efdb4199e2fd49711e999/dragonlib/core/binary_files.py#L80-L130
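A self-contained sketch of the header layout documented in load_DragonDosBinary above, using struct to build and re-read the nine-byte header; the payload bytes and the file-type value are arbitrary examples.

import struct

payload = b"\x12\x34\x56\x78"          # arbitrary example data bytes
header = struct.pack(
    ">BBHHHB",
    0x55,                              # constant marker byte
    0x02,                              # file type (example value)
    0x0C00,                            # load address
    len(payload),                      # length
    0x0C00,                            # exec address
    0xAA,                              # constant terminator byte
)
blob = header + payload

# Mirror the unpacking done by load_DragonDosBinary().
marker, ftype, load_addr, length, exec_addr, terminator = struct.unpack(">BBHHHB", blob[:9])
assert marker == 0x55 and terminator == 0xAA
assert blob[9:9 + length] == payload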
238,372
asweigart/pybresenham
pybresenham/__init__.py
rotatePoint
def rotatePoint(x, y, rotationDegrees, pivotx=0, pivoty=0): """ Rotates the point at `x` and `y` by `rotationDegrees`. The point is rotated around the origin by default, but can be rotated around another pivot point by specifying `pivotx` and `pivoty`. The points are rotated counterclockwise. Returns an x and y tuple. Since the final result will be integers, there is a large amount of rounding error that can take place. >>> rotatePoint(10, 0, 90) (0, 10) >>> rotatePoint(10, 0, 180) (-10, 0) >>> rotatePoint(10, 0, 45) (7, 7) """ # Reuse the code in rotatePoints() return list(rotatePoints([(x, y)], rotationDegrees, pivotx, pivoty))[0]
python
def rotatePoint(x, y, rotationDegrees, pivotx=0, pivoty=0): """ Rotates the point at `x` and `y` by `rotationDegrees`. The point is rotated around the origin by default, but can be rotated around another pivot point by specifying `pivotx` and `pivoty`. The points are rotated counterclockwise. Returns an x and y tuple. Since the final result will be integers, there is a large amount of rounding error that can take place. >>> rotatePoint(10, 0, 90) (0, 10) >>> rotatePoint(10, 0, 180) (-10, 0) >>> rotatePoint(10, 0, 45) (7, 7) """ # Reuse the code in rotatePoints() return list(rotatePoints([(x, y)], rotationDegrees, pivotx, pivoty))[0]
[ "def", "rotatePoint", "(", "x", ",", "y", ",", "rotationDegrees", ",", "pivotx", "=", "0", ",", "pivoty", "=", "0", ")", ":", "# Reuse the code in rotatePoints()", "return", "list", "(", "rotatePoints", "(", "[", "(", "x", ",", "y", ")", "]", ",", "rotationDegrees", ",", "pivotx", ",", "pivoty", ")", ")", "[", "0", "]" ]
Rotates the point at `x` and `y` by `rotationDegrees`. The point is rotated around the origin by default, but can be rotated around another pivot point by specifying `pivotx` and `pivoty`. The points are rotated counterclockwise. Returns an x and y tuple. Since the final result will be integers, there is a large amount of rounding error that can take place. >>> rotatePoint(10, 0, 90) (0, 10) >>> rotatePoint(10, 0, 180) (-10, 0) >>> rotatePoint(10, 0, 45) (7, 7)
[ "Rotates", "the", "point", "at", "x", "and", "y", "by", "rotationDegrees", ".", "The", "point", "is", "rotated", "around", "the", "origin", "by", "default", "but", "can", "be", "rotated", "around", "another", "pivot", "point", "by", "specifying", "pivotx", "and", "pivoty", "." ]
5183f39af58d899cf736075d2b27c892824bb563
https://github.com/asweigart/pybresenham/blob/5183f39af58d899cf736075d2b27c892824bb563/pybresenham/__init__.py#L49-L71
238,373
asweigart/pybresenham
pybresenham/__init__.py
rotatePoints
def rotatePoints(points, rotationDegrees, pivotx=0, pivoty=0): """ Rotates each x and y tuple in `points`` by `rotationDegrees`. The points are rotated around the origin by default, but can be rotated around another pivot point by specifying `pivotx` and `pivoty`. The points are rotated counterclockwise. Returns a generator that produces an x and y tuple for each point in `points`. >>> list(rotatePoints([(10, 0), (7, 7)], 45)) [(7, 7), (0, 9)] """ rotationRadians = math.radians(rotationDegrees % 360) for x, y in points: _checkForIntOrFloat(x) _checkForIntOrFloat(y) x -= pivotx y -= pivoty x, y = x * math.cos(rotationRadians) - y * math.sin(rotationRadians), x * math.sin(rotationRadians) + y * math.cos(rotationRadians) x += pivotx y += pivoty yield int(x), int(y)
python
def rotatePoints(points, rotationDegrees, pivotx=0, pivoty=0): """ Rotates each x and y tuple in `points`` by `rotationDegrees`. The points are rotated around the origin by default, but can be rotated around another pivot point by specifying `pivotx` and `pivoty`. The points are rotated counterclockwise. Returns a generator that produces an x and y tuple for each point in `points`. >>> list(rotatePoints([(10, 0), (7, 7)], 45)) [(7, 7), (0, 9)] """ rotationRadians = math.radians(rotationDegrees % 360) for x, y in points: _checkForIntOrFloat(x) _checkForIntOrFloat(y) x -= pivotx y -= pivoty x, y = x * math.cos(rotationRadians) - y * math.sin(rotationRadians), x * math.sin(rotationRadians) + y * math.cos(rotationRadians) x += pivotx y += pivoty yield int(x), int(y)
[ "def", "rotatePoints", "(", "points", ",", "rotationDegrees", ",", "pivotx", "=", "0", ",", "pivoty", "=", "0", ")", ":", "rotationRadians", "=", "math", ".", "radians", "(", "rotationDegrees", "%", "360", ")", "for", "x", ",", "y", "in", "points", ":", "_checkForIntOrFloat", "(", "x", ")", "_checkForIntOrFloat", "(", "y", ")", "x", "-=", "pivotx", "y", "-=", "pivoty", "x", ",", "y", "=", "x", "*", "math", ".", "cos", "(", "rotationRadians", ")", "-", "y", "*", "math", ".", "sin", "(", "rotationRadians", ")", ",", "x", "*", "math", ".", "sin", "(", "rotationRadians", ")", "+", "y", "*", "math", ".", "cos", "(", "rotationRadians", ")", "x", "+=", "pivotx", "y", "+=", "pivoty", "yield", "int", "(", "x", ")", ",", "int", "(", "y", ")" ]
Rotates each x and y tuple in `points` by `rotationDegrees`. The points are rotated around the origin by default, but can be rotated around another pivot point by specifying `pivotx` and `pivoty`. The points are rotated counterclockwise. Returns a generator that produces an x and y tuple for each point in `points`. >>> list(rotatePoints([(10, 0), (7, 7)], 45)) [(7, 7), (0, 9)]
[ "Rotates", "each", "x", "and", "y", "tuple", "in", "points", "by", "rotationDegrees", ".", "The", "points", "are", "rotated", "around", "the", "origin", "by", "default", "but", "can", "be", "rotated", "around", "another", "pivot", "point", "by", "specifying", "pivotx", "and", "pivoty", "." ]
5183f39af58d899cf736075d2b27c892824bb563
https://github.com/asweigart/pybresenham/blob/5183f39af58d899cf736075d2b27c892824bb563/pybresenham/__init__.py#L74-L99
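A usage sketch for the two rotation helpers above; since both live in pybresenham/__init__.py they are assumed to be available at package level.

import pybresenham

# Quarter turn about the origin, matching the doctest above.
print(pybresenham.rotatePoint(10, 0, 90))          # (0, 10)

# Rotate a square's corners 180 degrees about the pivot (5, 5).
square = [(0, 0), (10, 0), (10, 10), (0, 10)]
rotated = list(pybresenham.rotatePoints(square, 180, pivotx=5, pivoty=5))
print(rotated)   # corners swapped across the pivot, subject to the integer truncation noted above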
238,374
asweigart/pybresenham
pybresenham/__init__.py
line
def line(x1, y1, x2, y2, thickness=1, endcap=None, _skipFirst=False): """ Returns a generator that produces all of the points in a line between `x1`, `y1` and `x2`, `y2`. (Note: The `thickness` and `endcap` parameters are not yet implemented.) >>> list(line(0, 0, 10, 3)) [(0, 0), (1, 0), (2, 1), (3, 1), (4, 1), (5, 1), (6, 2), (7, 2), (8, 2), (9, 3), (10, 3)] >>> drawPoints(line(0, 0, 20, 3)) OOOO,,,,,,,,,,,,,,,,, ,,,,OOOOOOO,,,,,,,,,, ,,,,,,,,,,,OOOOOO,,,, ,,,,,,,,,,,,,,,,,OOOO """ if (thickness != 1) or (endcap is not None): raise NotImplementedError('The pybresenham module is under development and the filled and thickness parameters are not implemented. You can contribute at https://github.com/asweigart/pybresenham') _checkForIntOrFloat(x1) _checkForIntOrFloat(y1) _checkForIntOrFloat(x2) _checkForIntOrFloat(y2) x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2) # TODO - Do we want this line? if not isinstance(_skipFirst, bool): raise PyBresenhamException('_skipFirst argument must be a bool') isSteep = abs(y2-y1) > abs(x2-x1) if isSteep: x1, y1 = y1, x1 x2, y2 = y2, x2 isReversed = x1 > x2 if isReversed: x1, x2 = x2, x1 y1, y2 = y2, y1 deltax = x2 - x1 deltay = abs(y2-y1) error = int(deltax / 2) y = y2 ystep = None if y1 < y2: ystep = 1 else: ystep = -1 for x in range(x2, x1 - 1, -1): if isSteep: if not (_skipFirst and (x, y) == (x2, y2)): yield (y, x) else: if not (_skipFirst and (x, y) == (x2, y2)): yield (x, y) error -= deltay if error <= 0: y -= ystep error += deltax else: deltax = x2 - x1 deltay = abs(y2-y1) error = int(deltax / 2) y = y1 ystep = None if y1 < y2: ystep = 1 else: ystep = -1 for x in range(x1, x2 + 1): if isSteep: if not (_skipFirst and (x, y) == (x1, y1)): yield (y, x) else: if not (_skipFirst and (x, y) == (x1, y1)): yield (x, y) error -= deltay if error < 0: y += ystep error += deltax
python
def line(x1, y1, x2, y2, thickness=1, endcap=None, _skipFirst=False): """ Returns a generator that produces all of the points in a line between `x1`, `y1` and `x2`, `y2`. (Note: The `thickness` and `endcap` parameters are not yet implemented.) >>> list(line(0, 0, 10, 3)) [(0, 0), (1, 0), (2, 1), (3, 1), (4, 1), (5, 1), (6, 2), (7, 2), (8, 2), (9, 3), (10, 3)] >>> drawPoints(line(0, 0, 20, 3)) OOOO,,,,,,,,,,,,,,,,, ,,,,OOOOOOO,,,,,,,,,, ,,,,,,,,,,,OOOOOO,,,, ,,,,,,,,,,,,,,,,,OOOO """ if (thickness != 1) or (endcap is not None): raise NotImplementedError('The pybresenham module is under development and the filled and thickness parameters are not implemented. You can contribute at https://github.com/asweigart/pybresenham') _checkForIntOrFloat(x1) _checkForIntOrFloat(y1) _checkForIntOrFloat(x2) _checkForIntOrFloat(y2) x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2) # TODO - Do we want this line? if not isinstance(_skipFirst, bool): raise PyBresenhamException('_skipFirst argument must be a bool') isSteep = abs(y2-y1) > abs(x2-x1) if isSteep: x1, y1 = y1, x1 x2, y2 = y2, x2 isReversed = x1 > x2 if isReversed: x1, x2 = x2, x1 y1, y2 = y2, y1 deltax = x2 - x1 deltay = abs(y2-y1) error = int(deltax / 2) y = y2 ystep = None if y1 < y2: ystep = 1 else: ystep = -1 for x in range(x2, x1 - 1, -1): if isSteep: if not (_skipFirst and (x, y) == (x2, y2)): yield (y, x) else: if not (_skipFirst and (x, y) == (x2, y2)): yield (x, y) error -= deltay if error <= 0: y -= ystep error += deltax else: deltax = x2 - x1 deltay = abs(y2-y1) error = int(deltax / 2) y = y1 ystep = None if y1 < y2: ystep = 1 else: ystep = -1 for x in range(x1, x2 + 1): if isSteep: if not (_skipFirst and (x, y) == (x1, y1)): yield (y, x) else: if not (_skipFirst and (x, y) == (x1, y1)): yield (x, y) error -= deltay if error < 0: y += ystep error += deltax
[ "def", "line", "(", "x1", ",", "y1", ",", "x2", ",", "y2", ",", "thickness", "=", "1", ",", "endcap", "=", "None", ",", "_skipFirst", "=", "False", ")", ":", "if", "(", "thickness", "!=", "1", ")", "or", "(", "endcap", "is", "not", "None", ")", ":", "raise", "NotImplementedError", "(", "'The pybresenham module is under development and the filled and thickness parameters are not implemented. You can contribute at https://github.com/asweigart/pybresenham'", ")", "_checkForIntOrFloat", "(", "x1", ")", "_checkForIntOrFloat", "(", "y1", ")", "_checkForIntOrFloat", "(", "x2", ")", "_checkForIntOrFloat", "(", "y2", ")", "x1", ",", "y1", ",", "x2", ",", "y2", "=", "int", "(", "x1", ")", ",", "int", "(", "y1", ")", ",", "int", "(", "x2", ")", ",", "int", "(", "y2", ")", "# TODO - Do we want this line?", "if", "not", "isinstance", "(", "_skipFirst", ",", "bool", ")", ":", "raise", "PyBresenhamException", "(", "'_skipFirst argument must be a bool'", ")", "isSteep", "=", "abs", "(", "y2", "-", "y1", ")", ">", "abs", "(", "x2", "-", "x1", ")", "if", "isSteep", ":", "x1", ",", "y1", "=", "y1", ",", "x1", "x2", ",", "y2", "=", "y2", ",", "x2", "isReversed", "=", "x1", ">", "x2", "if", "isReversed", ":", "x1", ",", "x2", "=", "x2", ",", "x1", "y1", ",", "y2", "=", "y2", ",", "y1", "deltax", "=", "x2", "-", "x1", "deltay", "=", "abs", "(", "y2", "-", "y1", ")", "error", "=", "int", "(", "deltax", "/", "2", ")", "y", "=", "y2", "ystep", "=", "None", "if", "y1", "<", "y2", ":", "ystep", "=", "1", "else", ":", "ystep", "=", "-", "1", "for", "x", "in", "range", "(", "x2", ",", "x1", "-", "1", ",", "-", "1", ")", ":", "if", "isSteep", ":", "if", "not", "(", "_skipFirst", "and", "(", "x", ",", "y", ")", "==", "(", "x2", ",", "y2", ")", ")", ":", "yield", "(", "y", ",", "x", ")", "else", ":", "if", "not", "(", "_skipFirst", "and", "(", "x", ",", "y", ")", "==", "(", "x2", ",", "y2", ")", ")", ":", "yield", "(", "x", ",", "y", ")", "error", "-=", "deltay", "if", "error", "<=", "0", ":", "y", "-=", "ystep", "error", "+=", "deltax", "else", ":", "deltax", "=", "x2", "-", "x1", "deltay", "=", "abs", "(", "y2", "-", "y1", ")", "error", "=", "int", "(", "deltax", "/", "2", ")", "y", "=", "y1", "ystep", "=", "None", "if", "y1", "<", "y2", ":", "ystep", "=", "1", "else", ":", "ystep", "=", "-", "1", "for", "x", "in", "range", "(", "x1", ",", "x2", "+", "1", ")", ":", "if", "isSteep", ":", "if", "not", "(", "_skipFirst", "and", "(", "x", ",", "y", ")", "==", "(", "x1", ",", "y1", ")", ")", ":", "yield", "(", "y", ",", "x", ")", "else", ":", "if", "not", "(", "_skipFirst", "and", "(", "x", ",", "y", ")", "==", "(", "x1", ",", "y1", ")", ")", ":", "yield", "(", "x", ",", "y", ")", "error", "-=", "deltay", "if", "error", "<", "0", ":", "y", "+=", "ystep", "error", "+=", "deltax" ]
Returns a generator that produces all of the points in a line between `x1`, `y1` and `x2`, `y2`. (Note: The `thickness` and `endcap` parameters are not yet implemented.) >>> list(line(0, 0, 10, 3)) [(0, 0), (1, 0), (2, 1), (3, 1), (4, 1), (5, 1), (6, 2), (7, 2), (8, 2), (9, 3), (10, 3)] >>> drawPoints(line(0, 0, 20, 3)) OOOO,,,,,,,,,,,,,,,,, ,,,,OOOOOOO,,,,,,,,,, ,,,,,,,,,,,OOOOOO,,,, ,,,,,,,,,,,,,,,,,OOOO
[ "Returns", "a", "generator", "that", "produces", "all", "of", "the", "points", "in", "a", "line", "between", "x1", "y1", "and", "x2", "y2", "." ]
5183f39af58d899cf736075d2b27c892824bb563
https://github.com/asweigart/pybresenham/blob/5183f39af58d899cf736075d2b27c892824bb563/pybresenham/__init__.py#L123-L200
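A usage sketch for line(), reproducing the first doctest and checking that both endpoints of a steep line are produced; the package-level import is assumed as above.

import pybresenham

# Shallow line from the doctest above.
print(list(pybresenham.line(0, 0, 10, 3)))

# Steep line: the isSteep branch swaps axes internally, but both endpoints still appear.
steep = set(pybresenham.line(0, 0, 3, 10))
assert (0, 0) in steep and (3, 10) in steep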
238,375
facundobatista/logassert
logassert/logassert.py
SetupLogChecker.emit
def emit(self, record): """Store the message, not only the record.""" self.records.append(Record(levelno=record.levelno, levelname=record.levelname, message=self.format(record))) return super(SetupLogChecker, self).emit(record)
python
def emit(self, record): """Store the message, not only the record.""" self.records.append(Record(levelno=record.levelno, levelname=record.levelname, message=self.format(record))) return super(SetupLogChecker, self).emit(record)
[ "def", "emit", "(", "self", ",", "record", ")", ":", "self", ".", "records", ".", "append", "(", "Record", "(", "levelno", "=", "record", ".", "levelno", ",", "levelname", "=", "record", ".", "levelname", ",", "message", "=", "self", ".", "format", "(", "record", ")", ")", ")", "return", "super", "(", "SetupLogChecker", ",", "self", ")", ".", "emit", "(", "record", ")" ]
Store the message, not only the record.
[ "Store", "the", "message", "not", "only", "the", "record", "." ]
79dc3d22a402fa0fb91cf3b046c63f039aa71890
https://github.com/facundobatista/logassert/blob/79dc3d22a402fa0fb91cf3b046c63f039aa71890/logassert/logassert.py#L53-L57
238,376
facundobatista/logassert
logassert/logassert.py
SetupLogChecker._check_generic_pos
def _check_generic_pos(self, *tokens): """Check if the different tokens were logged in one record, any level.""" for record in self.records: if all(token in record.message for token in tokens): return # didn't exit, all tokens are not present in the same record msgs = ["Tokens {} not found, all was logged is...".format(tokens)] for record in self.records: msgs.append(" {:9s} {!r}".format(record.levelname, record.message)) self.test_instance.fail("\n".join(msgs))
python
def _check_generic_pos(self, *tokens): """Check if the different tokens were logged in one record, any level.""" for record in self.records: if all(token in record.message for token in tokens): return # didn't exit, all tokens are not present in the same record msgs = ["Tokens {} not found, all was logged is...".format(tokens)] for record in self.records: msgs.append(" {:9s} {!r}".format(record.levelname, record.message)) self.test_instance.fail("\n".join(msgs))
[ "def", "_check_generic_pos", "(", "self", ",", "*", "tokens", ")", ":", "for", "record", "in", "self", ".", "records", ":", "if", "all", "(", "token", "in", "record", ".", "message", "for", "token", "in", "tokens", ")", ":", "return", "# didn't exit, all tokens are not present in the same record", "msgs", "=", "[", "\"Tokens {} not found, all was logged is...\"", ".", "format", "(", "tokens", ")", "]", "for", "record", "in", "self", ".", "records", ":", "msgs", ".", "append", "(", "\" {:9s} {!r}\"", ".", "format", "(", "record", ".", "levelname", ",", "record", ".", "message", ")", ")", "self", ".", "test_instance", ".", "fail", "(", "\"\\n\"", ".", "join", "(", "msgs", ")", ")" ]
Check if the different tokens were logged in one record, any level.
[ "Check", "if", "the", "different", "tokens", "were", "logged", "in", "one", "record", "any", "level", "." ]
79dc3d22a402fa0fb91cf3b046c63f039aa71890
https://github.com/facundobatista/logassert/blob/79dc3d22a402fa0fb91cf3b046c63f039aa71890/logassert/logassert.py#L59-L69
238,377
facundobatista/logassert
logassert/logassert.py
SetupLogChecker._check_pos
def _check_pos(self, level, *tokens): """Check if the different tokens were logged in one record, assert by level.""" for record in self.records: if all(record.levelno == level and token in record.message for token in tokens): return # didn't exit, all tokens are not present in the same record level_name = logging.getLevelName(level) msgs = ["Tokens {} not found in {}, all was logged is...".format(tokens, level_name)] for record in self.records: msgs.append(" {:9s} {!r}".format(record.levelname, record.message)) self.test_instance.fail("\n".join(msgs))
python
def _check_pos(self, level, *tokens): """Check if the different tokens were logged in one record, assert by level.""" for record in self.records: if all(record.levelno == level and token in record.message for token in tokens): return # didn't exit, all tokens are not present in the same record level_name = logging.getLevelName(level) msgs = ["Tokens {} not found in {}, all was logged is...".format(tokens, level_name)] for record in self.records: msgs.append(" {:9s} {!r}".format(record.levelname, record.message)) self.test_instance.fail("\n".join(msgs))
[ "def", "_check_pos", "(", "self", ",", "level", ",", "*", "tokens", ")", ":", "for", "record", "in", "self", ".", "records", ":", "if", "all", "(", "record", ".", "levelno", "==", "level", "and", "token", "in", "record", ".", "message", "for", "token", "in", "tokens", ")", ":", "return", "# didn't exit, all tokens are not present in the same record", "level_name", "=", "logging", ".", "getLevelName", "(", "level", ")", "msgs", "=", "[", "\"Tokens {} not found in {}, all was logged is...\"", ".", "format", "(", "tokens", ",", "level_name", ")", "]", "for", "record", "in", "self", ".", "records", ":", "msgs", ".", "append", "(", "\" {:9s} {!r}\"", ".", "format", "(", "record", ".", "levelname", ",", "record", ".", "message", ")", ")", "self", ".", "test_instance", ".", "fail", "(", "\"\\n\"", ".", "join", "(", "msgs", ")", ")" ]
Check if the different tokens were logged in one record, assert by level.
[ "Check", "if", "the", "different", "tokens", "were", "logged", "in", "one", "record", "assert", "by", "level", "." ]
79dc3d22a402fa0fb91cf3b046c63f039aa71890
https://github.com/facundobatista/logassert/blob/79dc3d22a402fa0fb91cf3b046c63f039aa71890/logassert/logassert.py#L71-L82
238,378
facundobatista/logassert
logassert/logassert.py
SetupLogChecker._check_neg
def _check_neg(self, level, *tokens): """Check that the different tokens were NOT logged in one record, assert by level.""" for record in self.records: if level is not None and record.levelno != level: continue if all(token in record.message for token in tokens): break else: return # didn't exit, all tokens found in the same record msg = "Tokens {} found in the following record: {} {!r}".format( tokens, record.levelname, record.message) self.test_instance.fail(msg)
python
def _check_neg(self, level, *tokens): """Check that the different tokens were NOT logged in one record, assert by level.""" for record in self.records: if level is not None and record.levelno != level: continue if all(token in record.message for token in tokens): break else: return # didn't exit, all tokens found in the same record msg = "Tokens {} found in the following record: {} {!r}".format( tokens, record.levelname, record.message) self.test_instance.fail(msg)
[ "def", "_check_neg", "(", "self", ",", "level", ",", "*", "tokens", ")", ":", "for", "record", "in", "self", ".", "records", ":", "if", "level", "is", "not", "None", "and", "record", ".", "levelno", "!=", "level", ":", "continue", "if", "all", "(", "token", "in", "record", ".", "message", "for", "token", "in", "tokens", ")", ":", "break", "else", ":", "return", "# didn't exit, all tokens found in the same record", "msg", "=", "\"Tokens {} found in the following record: {} {!r}\"", ".", "format", "(", "tokens", ",", "record", ".", "levelname", ",", "record", ".", "message", ")", "self", ".", "test_instance", ".", "fail", "(", "msg", ")" ]
Check that the different tokens were NOT logged in one record, assert by level.
[ "Check", "that", "the", "different", "tokens", "were", "NOT", "logged", "in", "one", "record", "assert", "by", "level", "." ]
79dc3d22a402fa0fb91cf3b046c63f039aa71890
https://github.com/facundobatista/logassert/blob/79dc3d22a402fa0fb91cf3b046c63f039aa71890/logassert/logassert.py#L84-L97
238,379
baguette-io/baguette-messaging
farine/amqp/consumer.py
Consumer.get_consumers
def get_consumers(self, _Consumer, channel): """ | ConsumerMixin requirement. | Get the consumers list. :returns: All the consumers. :rtype: list. """ return [_Consumer(queues=[self.queue(channel)], callbacks=[self.main_callback], prefetch_count=self.prefetch_count)]
python
def get_consumers(self, _Consumer, channel): """ | ConsumerMixin requirement. | Get the consumers list. :returns: All the consumers. :rtype: list. """ return [_Consumer(queues=[self.queue(channel)], callbacks=[self.main_callback], prefetch_count=self.prefetch_count)]
[ "def", "get_consumers", "(", "self", ",", "_Consumer", ",", "channel", ")", ":", "return", "[", "_Consumer", "(", "queues", "=", "[", "self", ".", "queue", "(", "channel", ")", "]", ",", "callbacks", "=", "[", "self", ".", "main_callback", "]", ",", "prefetch_count", "=", "self", ".", "prefetch_count", ")", "]" ]
| ConsumerMixin requirement. | Get the consumers list. :returns: All the consumers. :rtype: list.
[ "|", "ConsumerMixin", "requirement", ".", "|", "Get", "the", "consumers", "list", "." ]
8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1
https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/amqp/consumer.py#L71-L79
238,380
baguette-io/baguette-messaging
farine/amqp/consumer.py
Consumer.start
def start(self, *args, **kwargs):#pylint:disable=unused-argument """ | Launch the consumer. | It can listen forever for messages or just wait for one. :param forever: If set, the consumer listens forever. Default to `True`. :type forever: bool :param timeout: If set, the consumer waits the specified seconds before quitting. :type timeout: None, int :rtype: None :raises socket.timeout: when no message has been received since `timeout`. """ forever = kwargs.get('forever', True) timeout = kwargs.get('timeout', None) if forever: return self.run(timeout=timeout) elif timeout: next((self.consume(timeout=timeout)), None) else: next((self.consume(limit=1, timeout=timeout)), None)
python
def start(self, *args, **kwargs):#pylint:disable=unused-argument """ | Launch the consumer. | It can listen forever for messages or just wait for one. :param forever: If set, the consumer listens forever. Default to `True`. :type forever: bool :param timeout: If set, the consumer waits the specified seconds before quitting. :type timeout: None, int :rtype: None :raises socket.timeout: when no message has been received since `timeout`. """ forever = kwargs.get('forever', True) timeout = kwargs.get('timeout', None) if forever: return self.run(timeout=timeout) elif timeout: next((self.consume(timeout=timeout)), None) else: next((self.consume(limit=1, timeout=timeout)), None)
[ "def", "start", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "#pylint:disable=unused-argument", "forever", "=", "kwargs", ".", "get", "(", "'forever'", ",", "True", ")", "timeout", "=", "kwargs", ".", "get", "(", "'timeout'", ",", "None", ")", "if", "forever", ":", "return", "self", ".", "run", "(", "timeout", "=", "timeout", ")", "elif", "timeout", ":", "next", "(", "(", "self", ".", "consume", "(", "timeout", "=", "timeout", ")", ")", ",", "None", ")", "else", ":", "next", "(", "(", "self", ".", "consume", "(", "limit", "=", "1", ",", "timeout", "=", "timeout", ")", ")", ",", "None", ")" ]
| Launch the consumer. | It can listen forever for messages or just wait for one. :param forever: If set, the consumer listens forever. Default to `True`. :type forever: bool :param timeout: If set, the consumer waits the specified seconds before quitting. :type timeout: None, int :rtype: None :raises socket.timeout: when no message has been received since `timeout`.
[ "|", "Launch", "the", "consumer", ".", "|", "It", "can", "listen", "forever", "for", "messages", "or", "just", "wait", "for", "one", "." ]
8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1
https://github.com/baguette-io/baguette-messaging/blob/8d1c4707ea7eace8617fed2d97df2fcc9d0cdee1/farine/amqp/consumer.py#L125-L144
238,381
benzid-wael/djangorestframework-utils
drf_utils/serializers/models.py
modelserializer_factory
def modelserializer_factory(model, serializer=None, fields=None, exclude=None): """ Returns a ModelSerializer containing fields for the given model. :param model: model class. :param fields: is an optional list of field names. If provided, only the named fields will be included in the returned fields. If omitted or '__all__', all fields will be used. :param exclude: is an optional list of field names. If provided, the named fields will be excluded from the returned fields, even if they are listed in the ``fields`` argument. :return: ModelSerializer class """ # default values serializer = serializer or serializers.ModelSerializer attrs = {'model': model} if fields == '__all__': opts = model._meta.concrete_model._meta attrs['fields'] = [field.name for field in opts.fields if field.serialize] elif fields is not None: attrs['fields'] = fields if exclude is not None: attrs['exclude'] = exclude # create meta class parent = (object,) Meta = type('Meta', parent, attrs) # Give this new serializer class a reasonable name. class_name = model.__name__ + 'Serializer' # Class attributes for the new serializer class. serializer_class_attrs = { 'Meta': Meta, } return type(serializer)(class_name, (serializer,), serializer_class_attrs)
python
def modelserializer_factory(model, serializer=None, fields=None, exclude=None): """ Returns a ModelSerializer containing fields for the given model. :param model: model class. :param fields: is an optional list of field names. If provided, only the named fields will be included in the returned fields. If omitted or '__all__', all fields will be used. :param exclude: is an optional list of field names. If provided, the named fields will be excluded from the returned fields, even if they are listed in the ``fields`` argument. :return: ModelSerializer class """ # default values serializer = serializer or serializers.ModelSerializer attrs = {'model': model} if fields == '__all__': opts = model._meta.concrete_model._meta attrs['fields'] = [field.name for field in opts.fields if field.serialize] elif fields is not None: attrs['fields'] = fields if exclude is not None: attrs['exclude'] = exclude # create meta class parent = (object,) Meta = type('Meta', parent, attrs) # Give this new serializer class a reasonable name. class_name = model.__name__ + 'Serializer' # Class attributes for the new serializer class. serializer_class_attrs = { 'Meta': Meta, } return type(serializer)(class_name, (serializer,), serializer_class_attrs)
[ "def", "modelserializer_factory", "(", "model", ",", "serializer", "=", "None", ",", "fields", "=", "None", ",", "exclude", "=", "None", ")", ":", "# default values", "serializer", "=", "serializer", "or", "serializers", ".", "ModelSerializer", "attrs", "=", "{", "'model'", ":", "model", "}", "if", "fields", "==", "'__all__'", ":", "opts", "=", "model", ".", "_meta", ".", "concrete_model", ".", "_meta", "attrs", "[", "'fields'", "]", "=", "[", "field", ".", "name", "for", "field", "in", "opts", ".", "fields", "if", "field", ".", "serialize", "]", "elif", "fields", "is", "not", "None", ":", "attrs", "[", "'fields'", "]", "=", "fields", "if", "exclude", "is", "not", "None", ":", "attrs", "[", "'exclude'", "]", "=", "exclude", "# create meta class", "parent", "=", "(", "object", ",", ")", "Meta", "=", "type", "(", "'Meta'", ",", "parent", ",", "attrs", ")", "# Give this new serializer class a reasonable name.", "class_name", "=", "model", ".", "__name__", "+", "'Serializer'", "# Class attributes for the new serializer class.", "serializer_class_attrs", "=", "{", "'Meta'", ":", "Meta", ",", "}", "return", "type", "(", "serializer", ")", "(", "class_name", ",", "(", "serializer", ",", ")", ",", "serializer_class_attrs", ")" ]
Returns a ModelSerializer containing fields for the given model. :param model: model class. :param fields: is an optional list of field names. If provided, only the named fields will be included in the returned fields. If omitted or '__all__', all fields will be used. :param exclude: is an optional list of field names. If provided, the named fields will be excluded from the returned fields, even if they are listed in the ``fields`` argument. :return: ModelSerializer class
[ "Returns", "a", "ModelSerializer", "containing", "fields", "for", "the", "given", "model", "." ]
9f1296652fbe30cb5044d20b98fbcf8fdfa553be
https://github.com/benzid-wael/djangorestframework-utils/blob/9f1296652fbe30cb5044d20b98fbcf8fdfa553be/drf_utils/serializers/models.py#L9-L46
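A usage sketch for modelserializer_factory; it assumes a configured Django project with rest_framework installed, and the Author model with its app label is hypothetical scaffolding for the call, not part of the record.

from django.db import models
from drf_utils.serializers.models import modelserializer_factory  # path from the record

class Author(models.Model):                     # hypothetical model for illustration
    name = models.CharField(max_length=100)
    email = models.EmailField()

    class Meta:
        app_label = "example"                   # assumed app label

# Build serializers on the fly instead of declaring them by hand.
AuthorSerializer = modelserializer_factory(Author, fields="__all__")
NameOnlySerializer = modelserializer_factory(Author, fields=["name"])

print(AuthorSerializer(Author(name="Ada", email="ada@example.org")).data)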
238,382
Zsailer/kubeconf
kubeconf/kubeconf.py
get_kube_path
def get_kube_path(): """Get the current config path. If the KUBECONFIG environment parameter is set, use it. If multiple paths are listed in KUBECONFIG, use the first path. """ try: path = pathlib.Path(os.environ["KUBECONFIG"].split(':')[0]) except KeyError: path = pathlib.Path.home().joinpath('.kube', 'config') return path
python
def get_kube_path(): """Get the current config path. If the KUBECONFIG environment parameter is set, use it. If multiple paths are listed in KUBECONFIG, use the first path. """ try: path = pathlib.Path(os.environ["KUBECONFIG"].split(':')[0]) except KeyError: path = pathlib.Path.home().joinpath('.kube', 'config') return path
[ "def", "get_kube_path", "(", ")", ":", "try", ":", "path", "=", "pathlib", ".", "Path", "(", "os", ".", "environ", "[", "\"KUBECONFIG\"", "]", ".", "split", "(", "':'", ")", "[", "0", "]", ")", "except", "KeyError", ":", "path", "=", "pathlib", ".", "Path", ".", "home", "(", ")", ".", "joinpath", "(", "'.kube'", ",", "'config'", ")", "return", "path" ]
Get the current config path. If the KUBECONFIG environment parameter is set, use it. If multiple paths are listed in KUBECONFIG, use the first path.
[ "Get", "the", "current", "config", "path", ".", "If", "the", "KUBECONFIG", "environment", "parameter", "is", "set", "use", "it", ".", "If", "multiple", "paths", "are", "listed", "in", "KUBECONFIG", "use", "the", "first", "path", "." ]
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L10-L19
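A small sketch of the path resolution described in get_kube_path above; the import path mirrors the record's kubeconf/kubeconf.py and the KUBECONFIG values are placeholders.

import os
from kubeconf.kubeconf import get_kube_path

# Without KUBECONFIG the default ~/.kube/config is used.
os.environ.pop("KUBECONFIG", None)
print(get_kube_path())

# With a colon-separated KUBECONFIG only the first entry is taken.
os.environ["KUBECONFIG"] = "/tmp/alpha/config:/tmp/beta/config"
print(get_kube_path())   # /tmp/alpha/config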
238,383
Zsailer/kubeconf
kubeconf/kubeconf.py
KubeConf.open
def open(self, create_if_not_found=True): """Open a kube config file. If the file does not exist, it creates a new file. """ try: self.data = self._read() # If the file does except FileNotFoundError as e: if create_if_not_found is True: self.data = {} else: raise e # Enforce the following keys exists in data. if 'clusters' not in self.data: self.data['clusters'] = [] if 'contexts' not in self.data: self.data['clusters'] = [] if 'users' not in self.data: self.data['users'] = [] if 'apiVersion' not in self.data: self.data['apiVersion'] = 'v1' if 'kind' not in self.data: self.data['kind'] = 'Config' if 'preferences' not in self.data: self.data['preferences'] = {} if 'current-context' not in self.data: self.data['current-context'] = '' return self
python
def open(self, create_if_not_found=True): """Open a kube config file. If the file does not exist, it creates a new file. """ try: self.data = self._read() # If the file does except FileNotFoundError as e: if create_if_not_found is True: self.data = {} else: raise e # Enforce the following keys exists in data. if 'clusters' not in self.data: self.data['clusters'] = [] if 'contexts' not in self.data: self.data['clusters'] = [] if 'users' not in self.data: self.data['users'] = [] if 'apiVersion' not in self.data: self.data['apiVersion'] = 'v1' if 'kind' not in self.data: self.data['kind'] = 'Config' if 'preferences' not in self.data: self.data['preferences'] = {} if 'current-context' not in self.data: self.data['current-context'] = '' return self
[ "def", "open", "(", "self", ",", "create_if_not_found", "=", "True", ")", ":", "try", ":", "self", ".", "data", "=", "self", ".", "_read", "(", ")", "# If the file does", "except", "FileNotFoundError", "as", "e", ":", "if", "create_if_not_found", "is", "True", ":", "self", ".", "data", "=", "{", "}", "else", ":", "raise", "e", "# Enforce the following keys exists in data.", "if", "'clusters'", "not", "in", "self", ".", "data", ":", "self", ".", "data", "[", "'clusters'", "]", "=", "[", "]", "if", "'contexts'", "not", "in", "self", ".", "data", ":", "self", ".", "data", "[", "'clusters'", "]", "=", "[", "]", "if", "'users'", "not", "in", "self", ".", "data", ":", "self", ".", "data", "[", "'users'", "]", "=", "[", "]", "if", "'apiVersion'", "not", "in", "self", ".", "data", ":", "self", ".", "data", "[", "'apiVersion'", "]", "=", "'v1'", "if", "'kind'", "not", "in", "self", ".", "data", ":", "self", ".", "data", "[", "'kind'", "]", "=", "'Config'", "if", "'preferences'", "not", "in", "self", ".", "data", ":", "self", ".", "data", "[", "'preferences'", "]", "=", "{", "}", "if", "'current-context'", "not", "in", "self", ".", "data", ":", "self", ".", "data", "[", "'current-context'", "]", "=", "''", "return", "self" ]
Open a kube config file. If the file does not exist, it creates a new file.
[ "Open", "a", "kube", "config", "file", ".", "If", "the", "file", "does", "not", "exist", "it", "creates", "a", "new", "file", "." ]
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L78-L107
238,384
Zsailer/kubeconf
kubeconf/kubeconf.py
KubeConf._read
def _read(self): """Read the kube config file. """ stream = self.path.read_text() data = yaml.load(stream) return data
python
def _read(self): """Read the kube config file. """ stream = self.path.read_text() data = yaml.load(stream) return data
[ "def", "_read", "(", "self", ")", ":", "stream", "=", "self", ".", "path", ".", "read_text", "(", ")", "data", "=", "yaml", ".", "load", "(", "stream", ")", "return", "data" ]
Read the kube config file.
[ "Read", "the", "kube", "config", "file", "." ]
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L119-L124
238,385
Zsailer/kubeconf
kubeconf/kubeconf.py
KubeConf._write
def _write(self, data): """Write data to config file.""" stream = yaml.dump(data, default_flow_style=False) self.path.write_text(stream)
python
def _write(self, data): """Write data to config file.""" stream = yaml.dump(data, default_flow_style=False) self.path.write_text(stream)
[ "def", "_write", "(", "self", ",", "data", ")", ":", "stream", "=", "yaml", ".", "dump", "(", "data", ",", "default_flow_style", "=", "False", ")", "self", ".", "path", ".", "write_text", "(", "stream", ")" ]
Write data to config file.
[ "Write", "data", "to", "config", "file", "." ]
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L126-L129
238,386
Zsailer/kubeconf
kubeconf/kubeconf.py
KubeConf.cluster_exists
def cluster_exists(self, name): """Check if a given cluster exists.""" clusters = self.data['clusters'] for cluster in clusters: if cluster['name'] == name: return True return False
python
def cluster_exists(self, name): """Check if a given cluster exists.""" clusters = self.data['clusters'] for cluster in clusters: if cluster['name'] == name: return True return False
[ "def", "cluster_exists", "(", "self", ",", "name", ")", ":", "clusters", "=", "self", ".", "data", "[", "'clusters'", "]", "for", "cluster", "in", "clusters", ":", "if", "cluster", "[", "'name'", "]", "==", "name", ":", "return", "True", "return", "False" ]
Check if a given cluster exists.
[ "Check", "if", "a", "given", "cluster", "exists", "." ]
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L133-L139
238,387
Zsailer/kubeconf
kubeconf/kubeconf.py
KubeConf.get_cluster
def get_cluster(self, name): """Get cluster from kubeconfig.""" clusters = self.data['clusters'] for cluster in clusters: if cluster['name'] == name: return cluster raise KubeConfError("Cluster name not found.")
python
def get_cluster(self, name): """Get cluster from kubeconfig.""" clusters = self.data['clusters'] for cluster in clusters: if cluster['name'] == name: return cluster raise KubeConfError("Cluster name not found.")
[ "def", "get_cluster", "(", "self", ",", "name", ")", ":", "clusters", "=", "self", ".", "data", "[", "'clusters'", "]", "for", "cluster", "in", "clusters", ":", "if", "cluster", "[", "'name'", "]", "==", "name", ":", "return", "cluster", "raise", "KubeConfError", "(", "\"Cluster name not found.\"", ")" ]
Get cluster from kubeconfig.
[ "Get", "cluster", "from", "kubeconfig", "." ]
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L141-L147
238,388
Zsailer/kubeconf
kubeconf/kubeconf.py
KubeConf.print_clusters
def print_clusters(self, names=False): """Print contexts.""" clusters = self.get_clusters() if names: clusters = [cluster['name'] for cluster in clusters] pprint.pprint(clusters)
python
def print_clusters(self, names=False): """Print contexts.""" clusters = self.get_clusters() if names: clusters = [cluster['name'] for cluster in clusters] pprint.pprint(clusters)
[ "def", "print_clusters", "(", "self", ",", "names", "=", "False", ")", ":", "clusters", "=", "self", ".", "get_clusters", "(", ")", "if", "names", ":", "clusters", "=", "[", "cluster", "[", "'name'", "]", "for", "cluster", "in", "clusters", "]", "pprint", ".", "pprint", "(", "clusters", ")" ]
Print contexts.
[ "Print", "contexts", "." ]
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L149-L154
238,389
Zsailer/kubeconf
kubeconf/kubeconf.py
KubeConf.add_cluster
def add_cluster( self, name, server=None, certificate_authority_data=None, **attrs): """Add a cluster to config.""" if self.cluster_exists(name): raise KubeConfError("Cluster with the given name already exists.") clusters = self.get_clusters() # Add parameters. new_cluster = {'name': name, 'cluster':{}} attrs_ = new_cluster['cluster'] if server is not None: attrs_['server'] = server if certificate_authority_data is not None: attrs_['certificate-authority-data'] = certificate_authority_data attrs_.update(attrs) clusters.append(new_cluster)
python
def add_cluster( self, name, server=None, certificate_authority_data=None, **attrs): """Add a cluster to config.""" if self.cluster_exists(name): raise KubeConfError("Cluster with the given name already exists.") clusters = self.get_clusters() # Add parameters. new_cluster = {'name': name, 'cluster':{}} attrs_ = new_cluster['cluster'] if server is not None: attrs_['server'] = server if certificate_authority_data is not None: attrs_['certificate-authority-data'] = certificate_authority_data attrs_.update(attrs) clusters.append(new_cluster)
[ "def", "add_cluster", "(", "self", ",", "name", ",", "server", "=", "None", ",", "certificate_authority_data", "=", "None", ",", "*", "*", "attrs", ")", ":", "if", "self", ".", "cluster_exists", "(", "name", ")", ":", "raise", "KubeConfError", "(", "\"Cluster with the given name already exists.\"", ")", "clusters", "=", "self", ".", "get_clusters", "(", ")", "# Add parameters.", "new_cluster", "=", "{", "'name'", ":", "name", ",", "'cluster'", ":", "{", "}", "}", "attrs_", "=", "new_cluster", "[", "'cluster'", "]", "if", "server", "is", "not", "None", ":", "attrs_", "[", "'server'", "]", "=", "server", "if", "certificate_authority_data", "is", "not", "None", ":", "attrs_", "[", "'certificate-authority-data'", "]", "=", "certificate_authority_data", "attrs_", ".", "update", "(", "attrs", ")", "clusters", ".", "append", "(", "new_cluster", ")" ]
Add a cluster to config.
[ "Add", "a", "cluster", "to", "config", "." ]
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L160-L181
238,390
Zsailer/kubeconf
kubeconf/kubeconf.py
KubeConf.add_to_cluster
def add_to_cluster(self, name, **attrs): """Add attributes to a cluster. """ cluster = self.get_cluster(name=name) attrs_ = cluster['cluster'] attrs_.update(**attrs)
python
def add_to_cluster(self, name, **attrs): """Add attributes to a cluster. """ cluster = self.get_cluster(name=name) attrs_ = cluster['cluster'] attrs_.update(**attrs)
[ "def", "add_to_cluster", "(", "self", ",", "name", ",", "*", "*", "attrs", ")", ":", "cluster", "=", "self", ".", "get_cluster", "(", "name", "=", "name", ")", "attrs_", "=", "cluster", "[", "'cluster'", "]", "attrs_", ".", "update", "(", "*", "*", "attrs", ")" ]
Add attributes to a cluster.
[ "Add", "attributes", "to", "a", "cluster", "." ]
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L184-L189
238,391
Zsailer/kubeconf
kubeconf/kubeconf.py
KubeConf.remove_from_cluster
def remove_from_cluster(self, name, *args): """Remove attributes from a cluster. """ cluster = self.get_cluster(name=name) attrs_ = cluster['cluster'] for a in args: del attrs_[a]
python
def remove_from_cluster(self, name, *args): """Remove attributes from a cluster. """ cluster = self.get_cluster(name=name) attrs_ = cluster['cluster'] for a in args: del attrs_[a]
[ "def", "remove_from_cluster", "(", "self", ",", "name", ",", "*", "args", ")", ":", "cluster", "=", "self", ".", "get_cluster", "(", "name", "=", "name", ")", "attrs_", "=", "cluster", "[", "'cluster'", "]", "for", "a", "in", "args", ":", "del", "attrs_", "[", "a", "]" ]
Remove attributes from a cluster.
[ "Remove", "attributes", "from", "a", "cluster", "." ]
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L191-L197
238,392
Zsailer/kubeconf
kubeconf/kubeconf.py
KubeConf.remove_cluster
def remove_cluster(self, name): """Remove a cluster from kubeconfig. """ cluster = self.get_cluster(name) clusters = self.get_clusters() clusters.remove(cluster)
python
def remove_cluster(self, name): """Remove a cluster from kubeconfig. """ cluster = self.get_cluster(name) clusters = self.get_clusters() clusters.remove(cluster)
[ "def", "remove_cluster", "(", "self", ",", "name", ")", ":", "cluster", "=", "self", ".", "get_cluster", "(", "name", ")", "clusters", "=", "self", ".", "get_clusters", "(", ")", "clusters", ".", "remove", "(", "cluster", ")" ]
Remove a cluster from kubeconfig.
[ "Remove", "a", "cluster", "from", "kubeconfig", "." ]
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L199-L204
238,393
Zsailer/kubeconf
kubeconf/kubeconf.py
KubeConf.user_exists
def user_exists(self, name): """Check if a given user exists.""" users = self.data['users'] for user in users: if user['name'] == name: return True return False
python
def user_exists(self, name): """Check if a given user exists.""" users = self.data['users'] for user in users: if user['name'] == name: return True return False
[ "def", "user_exists", "(", "self", ",", "name", ")", ":", "users", "=", "self", ".", "data", "[", "'users'", "]", "for", "user", "in", "users", ":", "if", "user", "[", "'name'", "]", "==", "name", ":", "return", "True", "return", "False" ]
Check if a given user exists.
[ "Check", "if", "a", "given", "user", "exists", "." ]
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L208-L214
238,394
Zsailer/kubeconf
kubeconf/kubeconf.py
KubeConf.get_user
def get_user(self, name): """Get user from kubeconfig.""" users = self.data['users'] for user in users: if user['name'] == name: return user raise KubeConfError("user name not found.")
python
def get_user(self, name): """Get user from kubeconfig.""" users = self.data['users'] for user in users: if user['name'] == name: return user raise KubeConfError("user name not found.")
[ "def", "get_user", "(", "self", ",", "name", ")", ":", "users", "=", "self", ".", "data", "[", "'users'", "]", "for", "user", "in", "users", ":", "if", "user", "[", "'name'", "]", "==", "name", ":", "return", "user", "raise", "KubeConfError", "(", "\"user name not found.\"", ")" ]
Get user from kubeconfig.
[ "Get", "user", "from", "kubeconfig", "." ]
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L216-L222
238,395
Zsailer/kubeconf
kubeconf/kubeconf.py
KubeConf.add_user
def add_user( self, name, **attrs ): """Add a user to config.""" if self.user_exists(name): raise KubeConfError("user with the given name already exists.") users = self.get_users() # Add parameters. new_user = {'name': name, 'user':{}} attrs_ = new_user['user'] attrs_.update(attrs) users.append(new_user)
python
def add_user( self, name, **attrs ): """Add a user to config.""" if self.user_exists(name): raise KubeConfError("user with the given name already exists.") users = self.get_users() # Add parameters. new_user = {'name': name, 'user':{}} attrs_ = new_user['user'] attrs_.update(attrs) users.append(new_user)
[ "def", "add_user", "(", "self", ",", "name", ",", "*", "*", "attrs", ")", ":", "if", "self", ".", "user_exists", "(", "name", ")", ":", "raise", "KubeConfError", "(", "\"user with the given name already exists.\"", ")", "users", "=", "self", ".", "get_users", "(", ")", "# Add parameters.", "new_user", "=", "{", "'name'", ":", "name", ",", "'user'", ":", "{", "}", "}", "attrs_", "=", "new_user", "[", "'user'", "]", "attrs_", ".", "update", "(", "attrs", ")", "users", ".", "append", "(", "new_user", ")" ]
Add a user to config.
[ "Add", "a", "user", "to", "config", "." ]
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L235-L250
238,396
Zsailer/kubeconf
kubeconf/kubeconf.py
KubeConf.add_to_user
def add_to_user(self, name, **attrs): """Add attributes to a user. """ user = self.get_user(name=name) attrs_ = user['user'] attrs_.update(**attrs)
python
def add_to_user(self, name, **attrs): """Add attributes to a user. """ user = self.get_user(name=name) attrs_ = user['user'] attrs_.update(**attrs)
[ "def", "add_to_user", "(", "self", ",", "name", ",", "*", "*", "attrs", ")", ":", "user", "=", "self", ".", "get_user", "(", "name", "=", "name", ")", "attrs_", "=", "user", "[", "'user'", "]", "attrs_", ".", "update", "(", "*", "*", "attrs", ")" ]
Add attributes to a user.
[ "Add", "attributes", "to", "a", "user", "." ]
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L252-L257
238,397
Zsailer/kubeconf
kubeconf/kubeconf.py
KubeConf.remove_from_user
def remove_from_user(self, name, *args): """Remove attributes from a user. """ user = self.get_user(name=name) attrs_ = user['user'] for a in args: del attrs_[a]
python
def remove_from_user(self, name, *args): """Remove attributes from a user. """ user = self.get_user(name=name) attrs_ = user['user'] for a in args: del attrs_[a]
[ "def", "remove_from_user", "(", "self", ",", "name", ",", "*", "args", ")", ":", "user", "=", "self", ".", "get_user", "(", "name", "=", "name", ")", "attrs_", "=", "user", "[", "'user'", "]", "for", "a", "in", "args", ":", "del", "attrs_", "[", "a", "]" ]
Remove attributes from a user.
[ "Remove", "attributes", "from", "a", "user", "." ]
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L259-L265
238,398
Zsailer/kubeconf
kubeconf/kubeconf.py
KubeConf.remove_user
def remove_user(self, name): """Remove a user from kubeconfig. """ user = self.get_user(name) users = self.get_users() users.remove(user)
python
def remove_user(self, name): """Remove a user from kubeconfig. """ user = self.get_user(name) users = self.get_users() users.remove(user)
[ "def", "remove_user", "(", "self", ",", "name", ")", ":", "user", "=", "self", ".", "get_user", "(", "name", ")", "users", "=", "self", ".", "get_users", "(", ")", "users", ".", "remove", "(", "user", ")" ]
Remove a user from kubeconfig.
[ "Remove", "a", "user", "from", "kubeconfig", "." ]
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L267-L272
238,399
Zsailer/kubeconf
kubeconf/kubeconf.py
KubeConf.add_exec_to_user
def add_exec_to_user( self, name, env, command, args, **attrs ): """Add an exec option to your user.""" # Add exec option. exec_options = { 'command': command, 'env': env, 'args': args, } exec_options.update(attrs) # Add exec to user. self.add_to_user(name=name, exec=exec_options)
python
def add_exec_to_user( self, name, env, command, args, **attrs ): """Add an exec option to your user.""" # Add exec option. exec_options = { 'command': command, 'env': env, 'args': args, } exec_options.update(attrs) # Add exec to user. self.add_to_user(name=name, exec=exec_options)
[ "def", "add_exec_to_user", "(", "self", ",", "name", ",", "env", ",", "command", ",", "args", ",", "*", "*", "attrs", ")", ":", "# Add exec option.", "exec_options", "=", "{", "'command'", ":", "command", ",", "'env'", ":", "env", ",", "'args'", ":", "args", ",", "}", "exec_options", ".", "update", "(", "attrs", ")", "# Add exec to user.", "self", ".", "add_to_user", "(", "name", "=", "name", ",", "exec", "=", "exec_options", ")" ]
Add an exec option to your user.
[ "Add", "an", "exec", "option", "to", "your", "user", "." ]
b4e81001b5d2fb8d461056f25eb8b03307d57a6b
https://github.com/Zsailer/kubeconf/blob/b4e81001b5d2fb8d461056f25eb8b03307d57a6b/kubeconf/kubeconf.py#L274-L291
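A sketch tying the KubeConf methods above together. Only open(), the accessors, and the add_/remove_ helpers appear in this excerpt, so the package-level import, the constructor's path argument, and the exec command are assumptions.

from kubeconf import KubeConf                  # package-level export assumed

conf = KubeConf(path="/tmp/demo-kubeconfig")   # constructor signature assumed
conf.open(create_if_not_found=True)            # creates empty scaffolding if the file is missing

conf.add_cluster("demo", server="https://127.0.0.1:6443")
conf.add_user("demo-admin")
conf.add_exec_to_user(
    "demo-admin",
    env=None,
    command="aws-iam-authenticator",           # example credential plugin, not from the record
    args=["token", "-i", "demo"],
)

print(conf.get_cluster("demo"))
print(conf.get_user("demo-admin"))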