id
int32 0
252k
| repo
stringlengths 7
55
| path
stringlengths 4
127
| func_name
stringlengths 1
88
| original_string
stringlengths 75
19.8k
| language
stringclasses 1
value | code
stringlengths 75
19.8k
| code_tokens
list | docstring
stringlengths 3
17.3k
| docstring_tokens
list | sha
stringlengths 40
40
| url
stringlengths 87
242
|
|---|---|---|---|---|---|---|---|---|---|---|---|
243,200
|
maxfischer2781/chainlet
|
docs/conf.py
|
wraplet_signature
|
def wraplet_signature(app, what, name, obj, options, signature, return_annotation):
"""have wrapplets use the signature of the slave"""
try:
wrapped = obj._raw_slave
except AttributeError:
return None
else:
slave_argspec = autodoc.getargspec(wrapped)
slave_signature = autodoc.formatargspec(obj, *slave_argspec)
return (slave_signature, return_annotation)
|
python
|
def wraplet_signature(app, what, name, obj, options, signature, return_annotation):
"""have wrapplets use the signature of the slave"""
try:
wrapped = obj._raw_slave
except AttributeError:
return None
else:
slave_argspec = autodoc.getargspec(wrapped)
slave_signature = autodoc.formatargspec(obj, *slave_argspec)
return (slave_signature, return_annotation)
|
[
"def",
"wraplet_signature",
"(",
"app",
",",
"what",
",",
"name",
",",
"obj",
",",
"options",
",",
"signature",
",",
"return_annotation",
")",
":",
"try",
":",
"wrapped",
"=",
"obj",
".",
"_raw_slave",
"except",
"AttributeError",
":",
"return",
"None",
"else",
":",
"slave_argspec",
"=",
"autodoc",
".",
"getargspec",
"(",
"wrapped",
")",
"slave_signature",
"=",
"autodoc",
".",
"formatargspec",
"(",
"obj",
",",
"*",
"slave_argspec",
")",
"return",
"(",
"slave_signature",
",",
"return_annotation",
")"
] |
have wrapplets use the signature of the slave
|
[
"have",
"wrapplets",
"use",
"the",
"signature",
"of",
"the",
"slave"
] |
4e17f9992b4780bd0d9309202e2847df640bffe8
|
https://github.com/maxfischer2781/chainlet/blob/4e17f9992b4780bd0d9309202e2847df640bffe8/docs/conf.py#L177-L186
|
243,201
|
edeposit/edeposit.amqp.ftp
|
src/edeposit/amqp/ftp/passwd_reader.py
|
load_users
|
def load_users(path=settings.LOGIN_FILE):
"""
Read passwd file and return dict with users and all their settings.
Args:
path (str, default settings.LOGIN_FILE): path of the file,
which will be loaded (default :attr:`ftp.settings.LOGIN_FILE`).
Returns:
(dict): username: {pass_hash, uid, gid, full_name, home, shell}
Example of returned data::
{
"xex": {
"pass_hash": "$asd$aiosjdaiosjdásghwasdjo",
"uid": "2000",
"gid": "2000",
"full_name": "ftftf",
"home": "/home/ftp/xex",
"shell": "/bin/false"
}
}
"""
if not os.path.exists(path):
return {}
data = ""
with open(path) as f:
data = f.read().splitlines()
users = {}
cnt = 1
for line in data:
line = line.split(":")
assert len(line) == 7, "Bad number of fields in '%s', at line %d!" % (
path,
cnt
)
users[line[0]] = {
"pass_hash": line[1],
"uid": line[2],
"gid": line[3],
"full_name": line[4],
"home": line[5],
"shell": line[6]
}
cnt += 1
return users
|
python
|
def load_users(path=settings.LOGIN_FILE):
"""
Read passwd file and return dict with users and all their settings.
Args:
path (str, default settings.LOGIN_FILE): path of the file,
which will be loaded (default :attr:`ftp.settings.LOGIN_FILE`).
Returns:
(dict): username: {pass_hash, uid, gid, full_name, home, shell}
Example of returned data::
{
"xex": {
"pass_hash": "$asd$aiosjdaiosjdásghwasdjo",
"uid": "2000",
"gid": "2000",
"full_name": "ftftf",
"home": "/home/ftp/xex",
"shell": "/bin/false"
}
}
"""
if not os.path.exists(path):
return {}
data = ""
with open(path) as f:
data = f.read().splitlines()
users = {}
cnt = 1
for line in data:
line = line.split(":")
assert len(line) == 7, "Bad number of fields in '%s', at line %d!" % (
path,
cnt
)
users[line[0]] = {
"pass_hash": line[1],
"uid": line[2],
"gid": line[3],
"full_name": line[4],
"home": line[5],
"shell": line[6]
}
cnt += 1
return users
|
[
"def",
"load_users",
"(",
"path",
"=",
"settings",
".",
"LOGIN_FILE",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"return",
"{",
"}",
"data",
"=",
"\"\"",
"with",
"open",
"(",
"path",
")",
"as",
"f",
":",
"data",
"=",
"f",
".",
"read",
"(",
")",
".",
"splitlines",
"(",
")",
"users",
"=",
"{",
"}",
"cnt",
"=",
"1",
"for",
"line",
"in",
"data",
":",
"line",
"=",
"line",
".",
"split",
"(",
"\":\"",
")",
"assert",
"len",
"(",
"line",
")",
"==",
"7",
",",
"\"Bad number of fields in '%s', at line %d!\"",
"%",
"(",
"path",
",",
"cnt",
")",
"users",
"[",
"line",
"[",
"0",
"]",
"]",
"=",
"{",
"\"pass_hash\"",
":",
"line",
"[",
"1",
"]",
",",
"\"uid\"",
":",
"line",
"[",
"2",
"]",
",",
"\"gid\"",
":",
"line",
"[",
"3",
"]",
",",
"\"full_name\"",
":",
"line",
"[",
"4",
"]",
",",
"\"home\"",
":",
"line",
"[",
"5",
"]",
",",
"\"shell\"",
":",
"line",
"[",
"6",
"]",
"}",
"cnt",
"+=",
"1",
"return",
"users"
] |
Read passwd file and return dict with users and all their settings.
Args:
path (str, default settings.LOGIN_FILE): path of the file,
which will be loaded (default :attr:`ftp.settings.LOGIN_FILE`).
Returns:
(dict): username: {pass_hash, uid, gid, full_name, home, shell}
Example of returned data::
{
"xex": {
"pass_hash": "$asd$aiosjdaiosjdásghwasdjo",
"uid": "2000",
"gid": "2000",
"full_name": "ftftf",
"home": "/home/ftp/xex",
"shell": "/bin/false"
}
}
|
[
"Read",
"passwd",
"file",
"and",
"return",
"dict",
"with",
"users",
"and",
"all",
"their",
"settings",
"."
] |
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
|
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/passwd_reader.py#L21-L73
|
243,202
|
edeposit/edeposit.amqp.ftp
|
src/edeposit/amqp/ftp/passwd_reader.py
|
set_permissions
|
def set_permissions(filename, uid=None, gid=None, mode=0775):
"""
Set pemissions for given `filename`.
Args:
filename (str): name of the file/directory
uid (int, default proftpd): user ID - if not set, user ID of `proftpd`
is used
gid (int): group ID, if not set, it is not changed
mode (int, default 0775): unix access mode
"""
if uid is None:
uid = get_ftp_uid()
if gid is None:
gid = -1
os.chown(filename, uid, gid)
os.chmod(filename, mode)
|
python
|
def set_permissions(filename, uid=None, gid=None, mode=0775):
"""
Set pemissions for given `filename`.
Args:
filename (str): name of the file/directory
uid (int, default proftpd): user ID - if not set, user ID of `proftpd`
is used
gid (int): group ID, if not set, it is not changed
mode (int, default 0775): unix access mode
"""
if uid is None:
uid = get_ftp_uid()
if gid is None:
gid = -1
os.chown(filename, uid, gid)
os.chmod(filename, mode)
|
[
"def",
"set_permissions",
"(",
"filename",
",",
"uid",
"=",
"None",
",",
"gid",
"=",
"None",
",",
"mode",
"=",
"0775",
")",
":",
"if",
"uid",
"is",
"None",
":",
"uid",
"=",
"get_ftp_uid",
"(",
")",
"if",
"gid",
"is",
"None",
":",
"gid",
"=",
"-",
"1",
"os",
".",
"chown",
"(",
"filename",
",",
"uid",
",",
"gid",
")",
"os",
".",
"chmod",
"(",
"filename",
",",
"mode",
")"
] |
Set pemissions for given `filename`.
Args:
filename (str): name of the file/directory
uid (int, default proftpd): user ID - if not set, user ID of `proftpd`
is used
gid (int): group ID, if not set, it is not changed
mode (int, default 0775): unix access mode
|
[
"Set",
"pemissions",
"for",
"given",
"filename",
"."
] |
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
|
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/passwd_reader.py#L115-L133
|
243,203
|
edeposit/edeposit.amqp.ftp
|
src/edeposit/amqp/ftp/passwd_reader.py
|
_decode_config
|
def _decode_config(conf_str):
"""
Decode string to configuration dict.
Only values defined in settings._ALLOWED_MERGES can be redefined.
"""
conf_str = conf_str.strip()
# convert "tttff" -> [True, True, True, False, False]
conf = map(
lambda x: True if x.upper() == "T" else False,
list(conf_str)
)
return dict(zip(settings._ALLOWED_MERGES, conf))
|
python
|
def _decode_config(conf_str):
"""
Decode string to configuration dict.
Only values defined in settings._ALLOWED_MERGES can be redefined.
"""
conf_str = conf_str.strip()
# convert "tttff" -> [True, True, True, False, False]
conf = map(
lambda x: True if x.upper() == "T" else False,
list(conf_str)
)
return dict(zip(settings._ALLOWED_MERGES, conf))
|
[
"def",
"_decode_config",
"(",
"conf_str",
")",
":",
"conf_str",
"=",
"conf_str",
".",
"strip",
"(",
")",
"# convert \"tttff\" -> [True, True, True, False, False]",
"conf",
"=",
"map",
"(",
"lambda",
"x",
":",
"True",
"if",
"x",
".",
"upper",
"(",
")",
"==",
"\"T\"",
"else",
"False",
",",
"list",
"(",
"conf_str",
")",
")",
"return",
"dict",
"(",
"zip",
"(",
"settings",
".",
"_ALLOWED_MERGES",
",",
"conf",
")",
")"
] |
Decode string to configuration dict.
Only values defined in settings._ALLOWED_MERGES can be redefined.
|
[
"Decode",
"string",
"to",
"configuration",
"dict",
"."
] |
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
|
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/passwd_reader.py#L136-L150
|
243,204
|
edeposit/edeposit.amqp.ftp
|
src/edeposit/amqp/ftp/passwd_reader.py
|
_encode_config
|
def _encode_config(conf_dict):
"""Encode `conf_dict` to string."""
out = []
# get variables in order defined in settings._ALLOWED_MERGES
for var in settings._ALLOWED_MERGES:
out.append(conf_dict[var])
# convert bools to chars
out = map(
lambda x: "t" if x else "f",
out
)
return "".join(out)
|
python
|
def _encode_config(conf_dict):
"""Encode `conf_dict` to string."""
out = []
# get variables in order defined in settings._ALLOWED_MERGES
for var in settings._ALLOWED_MERGES:
out.append(conf_dict[var])
# convert bools to chars
out = map(
lambda x: "t" if x else "f",
out
)
return "".join(out)
|
[
"def",
"_encode_config",
"(",
"conf_dict",
")",
":",
"out",
"=",
"[",
"]",
"# get variables in order defined in settings._ALLOWED_MERGES",
"for",
"var",
"in",
"settings",
".",
"_ALLOWED_MERGES",
":",
"out",
".",
"append",
"(",
"conf_dict",
"[",
"var",
"]",
")",
"# convert bools to chars",
"out",
"=",
"map",
"(",
"lambda",
"x",
":",
"\"t\"",
"if",
"x",
"else",
"\"f\"",
",",
"out",
")",
"return",
"\"\"",
".",
"join",
"(",
"out",
")"
] |
Encode `conf_dict` to string.
|
[
"Encode",
"conf_dict",
"to",
"string",
"."
] |
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
|
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/passwd_reader.py#L153-L167
|
243,205
|
edeposit/edeposit.amqp.ftp
|
src/edeposit/amqp/ftp/passwd_reader.py
|
read_user_config
|
def read_user_config(username, path=settings.LOGIN_FILE):
"""
Read user's configuration from otherwise unused field ``full_name`` in
passwd file.
Configuration is stored in string as list of t/f characters.
"""
return _decode_config(load_users(path=path)[username]["full_name"])
|
python
|
def read_user_config(username, path=settings.LOGIN_FILE):
"""
Read user's configuration from otherwise unused field ``full_name`` in
passwd file.
Configuration is stored in string as list of t/f characters.
"""
return _decode_config(load_users(path=path)[username]["full_name"])
|
[
"def",
"read_user_config",
"(",
"username",
",",
"path",
"=",
"settings",
".",
"LOGIN_FILE",
")",
":",
"return",
"_decode_config",
"(",
"load_users",
"(",
"path",
"=",
"path",
")",
"[",
"username",
"]",
"[",
"\"full_name\"",
"]",
")"
] |
Read user's configuration from otherwise unused field ``full_name`` in
passwd file.
Configuration is stored in string as list of t/f characters.
|
[
"Read",
"user",
"s",
"configuration",
"from",
"otherwise",
"unused",
"field",
"full_name",
"in",
"passwd",
"file",
"."
] |
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
|
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/passwd_reader.py#L170-L177
|
243,206
|
edeposit/edeposit.amqp.ftp
|
src/edeposit/amqp/ftp/passwd_reader.py
|
save_user_config
|
def save_user_config(username, conf_dict, path=settings.LOGIN_FILE):
"""
Save user's configuration to otherwise unused field ``full_name`` in passwd
file.
"""
users = load_users(path=path)
users[username]["full_name"] = _encode_config(conf_dict)
save_users(users, path=path)
|
python
|
def save_user_config(username, conf_dict, path=settings.LOGIN_FILE):
"""
Save user's configuration to otherwise unused field ``full_name`` in passwd
file.
"""
users = load_users(path=path)
users[username]["full_name"] = _encode_config(conf_dict)
save_users(users, path=path)
|
[
"def",
"save_user_config",
"(",
"username",
",",
"conf_dict",
",",
"path",
"=",
"settings",
".",
"LOGIN_FILE",
")",
":",
"users",
"=",
"load_users",
"(",
"path",
"=",
"path",
")",
"users",
"[",
"username",
"]",
"[",
"\"full_name\"",
"]",
"=",
"_encode_config",
"(",
"conf_dict",
")",
"save_users",
"(",
"users",
",",
"path",
"=",
"path",
")"
] |
Save user's configuration to otherwise unused field ``full_name`` in passwd
file.
|
[
"Save",
"user",
"s",
"configuration",
"to",
"otherwise",
"unused",
"field",
"full_name",
"in",
"passwd",
"file",
"."
] |
fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71
|
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/passwd_reader.py#L180-L187
|
243,207
|
xaptum/xtt-python
|
xtt/crypto/ecdsap256.py
|
create_ecdsap256_key_pair
|
def create_ecdsap256_key_pair():
"""
Create a new ECDSAP256 key pair.
:returns: a tuple of the public and private keys
"""
pub = ECDSAP256PublicKey()
priv = ECDSAP256PrivateKey()
rc = _lib.xtt_crypto_create_ecdsap256_key_pair(pub.native, priv.native)
if rc == RC.SUCCESS:
return (pub, priv)
else:
raise error_from_code(rc)
|
python
|
def create_ecdsap256_key_pair():
"""
Create a new ECDSAP256 key pair.
:returns: a tuple of the public and private keys
"""
pub = ECDSAP256PublicKey()
priv = ECDSAP256PrivateKey()
rc = _lib.xtt_crypto_create_ecdsap256_key_pair(pub.native, priv.native)
if rc == RC.SUCCESS:
return (pub, priv)
else:
raise error_from_code(rc)
|
[
"def",
"create_ecdsap256_key_pair",
"(",
")",
":",
"pub",
"=",
"ECDSAP256PublicKey",
"(",
")",
"priv",
"=",
"ECDSAP256PrivateKey",
"(",
")",
"rc",
"=",
"_lib",
".",
"xtt_crypto_create_ecdsap256_key_pair",
"(",
"pub",
".",
"native",
",",
"priv",
".",
"native",
")",
"if",
"rc",
"==",
"RC",
".",
"SUCCESS",
":",
"return",
"(",
"pub",
",",
"priv",
")",
"else",
":",
"raise",
"error_from_code",
"(",
"rc",
")"
] |
Create a new ECDSAP256 key pair.
:returns: a tuple of the public and private keys
|
[
"Create",
"a",
"new",
"ECDSAP256",
"key",
"pair",
"."
] |
23ee469488d710d730314bec1136c4dd7ac2cd5c
|
https://github.com/xaptum/xtt-python/blob/23ee469488d710d730314bec1136c4dd7ac2cd5c/xtt/crypto/ecdsap256.py#L35-L47
|
243,208
|
skibblenybbles/django-commando
|
commando/management/base.py
|
get_option_default
|
def get_option_default(option):
"""
Given an optparse.Option, returns a two-tuple of the option's variable name
and default value.
"""
return (
option.dest,
None if option.default is optparse.NO_DEFAULT else option.default,
)
|
python
|
def get_option_default(option):
"""
Given an optparse.Option, returns a two-tuple of the option's variable name
and default value.
"""
return (
option.dest,
None if option.default is optparse.NO_DEFAULT else option.default,
)
|
[
"def",
"get_option_default",
"(",
"option",
")",
":",
"return",
"(",
"option",
".",
"dest",
",",
"None",
"if",
"option",
".",
"default",
"is",
"optparse",
".",
"NO_DEFAULT",
"else",
"option",
".",
"default",
",",
")"
] |
Given an optparse.Option, returns a two-tuple of the option's variable name
and default value.
|
[
"Given",
"an",
"optparse",
".",
"Option",
"returns",
"a",
"two",
"-",
"tuple",
"of",
"the",
"option",
"s",
"variable",
"name",
"and",
"default",
"value",
"."
] |
dd1dd6969fc0dd8231fc115fee3eeb690809585b
|
https://github.com/skibblenybbles/django-commando/blob/dd1dd6969fc0dd8231fc115fee3eeb690809585b/commando/management/base.py#L12-L21
|
243,209
|
skibblenybbles/django-commando
|
commando/management/base.py
|
get_command_class_from_apps
|
def get_command_class_from_apps(name, apps, exclude_packages=None, exclude_command_class=None):
"""
Searches through the given apps to find the named command class. Skips
over any packages specified by exclude_packages and any command class
specified by exclude_command_class. Returns the last command class found
or None if the command class could not be found.
Django's command searching behavior is backwards with respect to other
features like template and static file loaders. This function follows
that convention.
"""
if exclude_packages is None:
exclude_packages = []
for app in reversed(
[app for app in apps if not issubpackage(app, exclude_packages)]):
try:
command_class = import_module(
"{app:s}.management.commands.{name:s}".format(
app=app, name=name)).Command
except (ImportError, AttributeError):
pass
else:
if exclude_command_class is None or \
not issubclass(command_class, exclude_command_class):
return command_class
return None
|
python
|
def get_command_class_from_apps(name, apps, exclude_packages=None, exclude_command_class=None):
"""
Searches through the given apps to find the named command class. Skips
over any packages specified by exclude_packages and any command class
specified by exclude_command_class. Returns the last command class found
or None if the command class could not be found.
Django's command searching behavior is backwards with respect to other
features like template and static file loaders. This function follows
that convention.
"""
if exclude_packages is None:
exclude_packages = []
for app in reversed(
[app for app in apps if not issubpackage(app, exclude_packages)]):
try:
command_class = import_module(
"{app:s}.management.commands.{name:s}".format(
app=app, name=name)).Command
except (ImportError, AttributeError):
pass
else:
if exclude_command_class is None or \
not issubclass(command_class, exclude_command_class):
return command_class
return None
|
[
"def",
"get_command_class_from_apps",
"(",
"name",
",",
"apps",
",",
"exclude_packages",
"=",
"None",
",",
"exclude_command_class",
"=",
"None",
")",
":",
"if",
"exclude_packages",
"is",
"None",
":",
"exclude_packages",
"=",
"[",
"]",
"for",
"app",
"in",
"reversed",
"(",
"[",
"app",
"for",
"app",
"in",
"apps",
"if",
"not",
"issubpackage",
"(",
"app",
",",
"exclude_packages",
")",
"]",
")",
":",
"try",
":",
"command_class",
"=",
"import_module",
"(",
"\"{app:s}.management.commands.{name:s}\"",
".",
"format",
"(",
"app",
"=",
"app",
",",
"name",
"=",
"name",
")",
")",
".",
"Command",
"except",
"(",
"ImportError",
",",
"AttributeError",
")",
":",
"pass",
"else",
":",
"if",
"exclude_command_class",
"is",
"None",
"or",
"not",
"issubclass",
"(",
"command_class",
",",
"exclude_command_class",
")",
":",
"return",
"command_class",
"return",
"None"
] |
Searches through the given apps to find the named command class. Skips
over any packages specified by exclude_packages and any command class
specified by exclude_command_class. Returns the last command class found
or None if the command class could not be found.
Django's command searching behavior is backwards with respect to other
features like template and static file loaders. This function follows
that convention.
|
[
"Searches",
"through",
"the",
"given",
"apps",
"to",
"find",
"the",
"named",
"command",
"class",
".",
"Skips",
"over",
"any",
"packages",
"specified",
"by",
"exclude_packages",
"and",
"any",
"command",
"class",
"specified",
"by",
"exclude_command_class",
".",
"Returns",
"the",
"last",
"command",
"class",
"found",
"or",
"None",
"if",
"the",
"command",
"class",
"could",
"not",
"be",
"found",
".",
"Django",
"s",
"command",
"searching",
"behavior",
"is",
"backwards",
"with",
"respect",
"to",
"other",
"features",
"like",
"template",
"and",
"static",
"file",
"loaders",
".",
"This",
"function",
"follows",
"that",
"convention",
"."
] |
dd1dd6969fc0dd8231fc115fee3eeb690809585b
|
https://github.com/skibblenybbles/django-commando/blob/dd1dd6969fc0dd8231fc115fee3eeb690809585b/commando/management/base.py#L45-L71
|
243,210
|
skibblenybbles/django-commando
|
commando/management/base.py
|
get_command_class
|
def get_command_class(name, exclude_packages=None, exclude_command_class=None):
"""
Searches "django.core" and the apps in settings.INSTALLED_APPS to find the
named command class, optionally skipping packages or a particular
command class.
"""
from django.conf import settings
return get_command_class_from_apps(
name,
settings.INSTALLED_APPS \
if "django.core" in settings.INSTALLED_APPS \
else ("django.core",) + tuple(settings.INSTALLED_APPS),
exclude_packages=exclude_packages,
exclude_command_class=exclude_command_class)
|
python
|
def get_command_class(name, exclude_packages=None, exclude_command_class=None):
"""
Searches "django.core" and the apps in settings.INSTALLED_APPS to find the
named command class, optionally skipping packages or a particular
command class.
"""
from django.conf import settings
return get_command_class_from_apps(
name,
settings.INSTALLED_APPS \
if "django.core" in settings.INSTALLED_APPS \
else ("django.core",) + tuple(settings.INSTALLED_APPS),
exclude_packages=exclude_packages,
exclude_command_class=exclude_command_class)
|
[
"def",
"get_command_class",
"(",
"name",
",",
"exclude_packages",
"=",
"None",
",",
"exclude_command_class",
"=",
"None",
")",
":",
"from",
"django",
".",
"conf",
"import",
"settings",
"return",
"get_command_class_from_apps",
"(",
"name",
",",
"settings",
".",
"INSTALLED_APPS",
"if",
"\"django.core\"",
"in",
"settings",
".",
"INSTALLED_APPS",
"else",
"(",
"\"django.core\"",
",",
")",
"+",
"tuple",
"(",
"settings",
".",
"INSTALLED_APPS",
")",
",",
"exclude_packages",
"=",
"exclude_packages",
",",
"exclude_command_class",
"=",
"exclude_command_class",
")"
] |
Searches "django.core" and the apps in settings.INSTALLED_APPS to find the
named command class, optionally skipping packages or a particular
command class.
|
[
"Searches",
"django",
".",
"core",
"and",
"the",
"apps",
"in",
"settings",
".",
"INSTALLED_APPS",
"to",
"find",
"the",
"named",
"command",
"class",
"optionally",
"skipping",
"packages",
"or",
"a",
"particular",
"command",
"class",
"."
] |
dd1dd6969fc0dd8231fc115fee3eeb690809585b
|
https://github.com/skibblenybbles/django-commando/blob/dd1dd6969fc0dd8231fc115fee3eeb690809585b/commando/management/base.py#L74-L88
|
243,211
|
skibblenybbles/django-commando
|
commando/management/base.py
|
check_program
|
def check_program(name):
"""
Uses the shell program "which" to determine whether the named program
is available on the shell PATH.
"""
with open(os.devnull, "w") as null:
try:
subprocess.check_call(("which", name), stdout=null, stderr=null)
except subprocess.CalledProcessError as e:
return False
return True
|
python
|
def check_program(name):
"""
Uses the shell program "which" to determine whether the named program
is available on the shell PATH.
"""
with open(os.devnull, "w") as null:
try:
subprocess.check_call(("which", name), stdout=null, stderr=null)
except subprocess.CalledProcessError as e:
return False
return True
|
[
"def",
"check_program",
"(",
"name",
")",
":",
"with",
"open",
"(",
"os",
".",
"devnull",
",",
"\"w\"",
")",
"as",
"null",
":",
"try",
":",
"subprocess",
".",
"check_call",
"(",
"(",
"\"which\"",
",",
"name",
")",
",",
"stdout",
"=",
"null",
",",
"stderr",
"=",
"null",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"e",
":",
"return",
"False",
"return",
"True"
] |
Uses the shell program "which" to determine whether the named program
is available on the shell PATH.
|
[
"Uses",
"the",
"shell",
"program",
"which",
"to",
"determine",
"whether",
"the",
"named",
"program",
"is",
"available",
"on",
"the",
"shell",
"PATH",
"."
] |
dd1dd6969fc0dd8231fc115fee3eeb690809585b
|
https://github.com/skibblenybbles/django-commando/blob/dd1dd6969fc0dd8231fc115fee3eeb690809585b/commando/management/base.py#L123-L134
|
243,212
|
skibblenybbles/django-commando
|
commando/management/base.py
|
CommandOptions.get_option_lists
|
def get_option_lists(self):
"""
A hook to override the option lists used to generate option names
and defaults.
"""
return [self.get_option_list()] + \
[option_list
for name, description, option_list
in self.get_option_groups()]
|
python
|
def get_option_lists(self):
"""
A hook to override the option lists used to generate option names
and defaults.
"""
return [self.get_option_list()] + \
[option_list
for name, description, option_list
in self.get_option_groups()]
|
[
"def",
"get_option_lists",
"(",
"self",
")",
":",
"return",
"[",
"self",
".",
"get_option_list",
"(",
")",
"]",
"+",
"[",
"option_list",
"for",
"name",
",",
"description",
",",
"option_list",
"in",
"self",
".",
"get_option_groups",
"(",
")",
"]"
] |
A hook to override the option lists used to generate option names
and defaults.
|
[
"A",
"hook",
"to",
"override",
"the",
"option",
"lists",
"used",
"to",
"generate",
"option",
"names",
"and",
"defaults",
"."
] |
dd1dd6969fc0dd8231fc115fee3eeb690809585b
|
https://github.com/skibblenybbles/django-commando/blob/dd1dd6969fc0dd8231fc115fee3eeb690809585b/commando/management/base.py#L194-L203
|
243,213
|
skibblenybbles/django-commando
|
commando/management/base.py
|
CommandOptions.get_options
|
def get_options(self):
"""
A hook to override the flattened list of all options used to generate
option names and defaults.
"""
return reduce(
list.__add__,
[list(option_list) for option_list in self.get_option_lists()],
[])
|
python
|
def get_options(self):
"""
A hook to override the flattened list of all options used to generate
option names and defaults.
"""
return reduce(
list.__add__,
[list(option_list) for option_list in self.get_option_lists()],
[])
|
[
"def",
"get_options",
"(",
"self",
")",
":",
"return",
"reduce",
"(",
"list",
".",
"__add__",
",",
"[",
"list",
"(",
"option_list",
")",
"for",
"option_list",
"in",
"self",
".",
"get_option_lists",
"(",
")",
"]",
",",
"[",
"]",
")"
] |
A hook to override the flattened list of all options used to generate
option names and defaults.
|
[
"A",
"hook",
"to",
"override",
"the",
"flattened",
"list",
"of",
"all",
"options",
"used",
"to",
"generate",
"option",
"names",
"and",
"defaults",
"."
] |
dd1dd6969fc0dd8231fc115fee3eeb690809585b
|
https://github.com/skibblenybbles/django-commando/blob/dd1dd6969fc0dd8231fc115fee3eeb690809585b/commando/management/base.py#L205-L214
|
243,214
|
skibblenybbles/django-commando
|
commando/management/base.py
|
Command.create_parser
|
def create_parser(self, prog_name, subcommand):
"""
Customize the parser to include option groups.
"""
parser = optparse.OptionParser(
prog=prog_name,
usage=self.usage(subcommand),
version=self.get_version(),
option_list=self.get_option_list())
for name, description, option_list in self.get_option_groups():
group = optparse.OptionGroup(parser, name, description);
list(map(group.add_option, option_list))
parser.add_option_group(group)
return parser
|
python
|
def create_parser(self, prog_name, subcommand):
"""
Customize the parser to include option groups.
"""
parser = optparse.OptionParser(
prog=prog_name,
usage=self.usage(subcommand),
version=self.get_version(),
option_list=self.get_option_list())
for name, description, option_list in self.get_option_groups():
group = optparse.OptionGroup(parser, name, description);
list(map(group.add_option, option_list))
parser.add_option_group(group)
return parser
|
[
"def",
"create_parser",
"(",
"self",
",",
"prog_name",
",",
"subcommand",
")",
":",
"parser",
"=",
"optparse",
".",
"OptionParser",
"(",
"prog",
"=",
"prog_name",
",",
"usage",
"=",
"self",
".",
"usage",
"(",
"subcommand",
")",
",",
"version",
"=",
"self",
".",
"get_version",
"(",
")",
",",
"option_list",
"=",
"self",
".",
"get_option_list",
"(",
")",
")",
"for",
"name",
",",
"description",
",",
"option_list",
"in",
"self",
".",
"get_option_groups",
"(",
")",
":",
"group",
"=",
"optparse",
".",
"OptionGroup",
"(",
"parser",
",",
"name",
",",
"description",
")",
"list",
"(",
"map",
"(",
"group",
".",
"add_option",
",",
"option_list",
")",
")",
"parser",
".",
"add_option_group",
"(",
"group",
")",
"return",
"parser"
] |
Customize the parser to include option groups.
|
[
"Customize",
"the",
"parser",
"to",
"include",
"option",
"groups",
"."
] |
dd1dd6969fc0dd8231fc115fee3eeb690809585b
|
https://github.com/skibblenybbles/django-commando/blob/dd1dd6969fc0dd8231fc115fee3eeb690809585b/commando/management/base.py#L249-L263
|
243,215
|
skibblenybbles/django-commando
|
commando/management/base.py
|
BaseCommandOptions.check_command
|
def check_command(self, name):
"""
Checks whether the given Django management command exists, excluding
this command from the search.
"""
if not check_command(
name,
exclude_packages=self.get_exclude_packages(),
exclude_command_class=self.__class__):
raise management.CommandError(
"The management command \"{name:s}\" is not available. "
"Please ensure that you've added the application with "
"the \"{name:s}\" command to your INSTALLED_APPS "
"setting".format(
name=name))
|
python
|
def check_command(self, name):
"""
Checks whether the given Django management command exists, excluding
this command from the search.
"""
if not check_command(
name,
exclude_packages=self.get_exclude_packages(),
exclude_command_class=self.__class__):
raise management.CommandError(
"The management command \"{name:s}\" is not available. "
"Please ensure that you've added the application with "
"the \"{name:s}\" command to your INSTALLED_APPS "
"setting".format(
name=name))
|
[
"def",
"check_command",
"(",
"self",
",",
"name",
")",
":",
"if",
"not",
"check_command",
"(",
"name",
",",
"exclude_packages",
"=",
"self",
".",
"get_exclude_packages",
"(",
")",
",",
"exclude_command_class",
"=",
"self",
".",
"__class__",
")",
":",
"raise",
"management",
".",
"CommandError",
"(",
"\"The management command \\\"{name:s}\\\" is not available. \"",
"\"Please ensure that you've added the application with \"",
"\"the \\\"{name:s}\\\" command to your INSTALLED_APPS \"",
"\"setting\"",
".",
"format",
"(",
"name",
"=",
"name",
")",
")"
] |
Checks whether the given Django management command exists, excluding
this command from the search.
|
[
"Checks",
"whether",
"the",
"given",
"Django",
"management",
"command",
"exists",
"excluding",
"this",
"command",
"from",
"the",
"search",
"."
] |
dd1dd6969fc0dd8231fc115fee3eeb690809585b
|
https://github.com/skibblenybbles/django-commando/blob/dd1dd6969fc0dd8231fc115fee3eeb690809585b/commando/management/base.py#L340-L355
|
243,216
|
skibblenybbles/django-commando
|
commando/management/base.py
|
BaseCommandOptions.check_program
|
def check_program(self, name):
"""
Checks whether a program is available on the shell PATH.
"""
if not check_program(name):
raise management.CommandError(
"The program \"{name:s}\" is not available in the shell. "
"Please ensure that \"{name:s}\" is installed and reachable "
"through your PATH environment variable.".format(
name=name))
|
python
|
def check_program(self, name):
"""
Checks whether a program is available on the shell PATH.
"""
if not check_program(name):
raise management.CommandError(
"The program \"{name:s}\" is not available in the shell. "
"Please ensure that \"{name:s}\" is installed and reachable "
"through your PATH environment variable.".format(
name=name))
|
[
"def",
"check_program",
"(",
"self",
",",
"name",
")",
":",
"if",
"not",
"check_program",
"(",
"name",
")",
":",
"raise",
"management",
".",
"CommandError",
"(",
"\"The program \\\"{name:s}\\\" is not available in the shell. \"",
"\"Please ensure that \\\"{name:s}\\\" is installed and reachable \"",
"\"through your PATH environment variable.\"",
".",
"format",
"(",
"name",
"=",
"name",
")",
")"
] |
Checks whether a program is available on the shell PATH.
|
[
"Checks",
"whether",
"a",
"program",
"is",
"available",
"on",
"the",
"shell",
"PATH",
"."
] |
dd1dd6969fc0dd8231fc115fee3eeb690809585b
|
https://github.com/skibblenybbles/django-commando/blob/dd1dd6969fc0dd8231fc115fee3eeb690809585b/commando/management/base.py#L357-L367
|
243,217
|
skibblenybbles/django-commando
|
commando/management/base.py
|
BaseCommandOptions.call_command
|
def call_command(self, name, *arguments, **options):
"""
Finds the given Django management command and default options,
excluding this command, and calls it with the given arguments
and override options.
"""
command, defaults = get_command_and_defaults(
name,
exclude_packages=self.get_exclude_packages(),
exclude_command_class=self.__class__)
if command is None:
raise management.CommandError(
"Unknown command: {name:s}".format(
name=name))
defaults.update(options)
return command.execute(*arguments, **defaults)
|
python
|
def call_command(self, name, *arguments, **options):
"""
Finds the given Django management command and default options,
excluding this command, and calls it with the given arguments
and override options.
"""
command, defaults = get_command_and_defaults(
name,
exclude_packages=self.get_exclude_packages(),
exclude_command_class=self.__class__)
if command is None:
raise management.CommandError(
"Unknown command: {name:s}".format(
name=name))
defaults.update(options)
return command.execute(*arguments, **defaults)
|
[
"def",
"call_command",
"(",
"self",
",",
"name",
",",
"*",
"arguments",
",",
"*",
"*",
"options",
")",
":",
"command",
",",
"defaults",
"=",
"get_command_and_defaults",
"(",
"name",
",",
"exclude_packages",
"=",
"self",
".",
"get_exclude_packages",
"(",
")",
",",
"exclude_command_class",
"=",
"self",
".",
"__class__",
")",
"if",
"command",
"is",
"None",
":",
"raise",
"management",
".",
"CommandError",
"(",
"\"Unknown command: {name:s}\"",
".",
"format",
"(",
"name",
"=",
"name",
")",
")",
"defaults",
".",
"update",
"(",
"options",
")",
"return",
"command",
".",
"execute",
"(",
"*",
"arguments",
",",
"*",
"*",
"defaults",
")"
] |
Finds the given Django management command and default options,
excluding this command, and calls it with the given arguments
and override options.
|
[
"Finds",
"the",
"given",
"Django",
"management",
"command",
"and",
"default",
"options",
"excluding",
"this",
"command",
"and",
"calls",
"it",
"with",
"the",
"given",
"arguments",
"and",
"override",
"options",
"."
] |
dd1dd6969fc0dd8231fc115fee3eeb690809585b
|
https://github.com/skibblenybbles/django-commando/blob/dd1dd6969fc0dd8231fc115fee3eeb690809585b/commando/management/base.py#L369-L385
|
243,218
|
skibblenybbles/django-commando
|
commando/management/base.py
|
BaseCommandOptions.call_program
|
def call_program(self, name, *arguments):
"""
Calls the shell program on the PATH with the given arguments.
"""
verbosity = self.options.get("verbosity", 1)
with self.devnull as null:
try:
subprocess.check_call((name,) + tuple(arguments),
stdout=null if verbosity == 0 else self.stdout,
stderr=null if verbosity == 0 else self.stderr)
except subprocess.CalledProcessError as error:
raise management.CommandError(
"{name:s} failed with exit code {code:d}".format(
name=name, code=error.returncode))
return 0
|
python
|
def call_program(self, name, *arguments):
"""
Calls the shell program on the PATH with the given arguments.
"""
verbosity = self.options.get("verbosity", 1)
with self.devnull as null:
try:
subprocess.check_call((name,) + tuple(arguments),
stdout=null if verbosity == 0 else self.stdout,
stderr=null if verbosity == 0 else self.stderr)
except subprocess.CalledProcessError as error:
raise management.CommandError(
"{name:s} failed with exit code {code:d}".format(
name=name, code=error.returncode))
return 0
|
[
"def",
"call_program",
"(",
"self",
",",
"name",
",",
"*",
"arguments",
")",
":",
"verbosity",
"=",
"self",
".",
"options",
".",
"get",
"(",
"\"verbosity\"",
",",
"1",
")",
"with",
"self",
".",
"devnull",
"as",
"null",
":",
"try",
":",
"subprocess",
".",
"check_call",
"(",
"(",
"name",
",",
")",
"+",
"tuple",
"(",
"arguments",
")",
",",
"stdout",
"=",
"null",
"if",
"verbosity",
"==",
"0",
"else",
"self",
".",
"stdout",
",",
"stderr",
"=",
"null",
"if",
"verbosity",
"==",
"0",
"else",
"self",
".",
"stderr",
")",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"error",
":",
"raise",
"management",
".",
"CommandError",
"(",
"\"{name:s} failed with exit code {code:d}\"",
".",
"format",
"(",
"name",
"=",
"name",
",",
"code",
"=",
"error",
".",
"returncode",
")",
")",
"return",
"0"
] |
Calls the shell program on the PATH with the given arguments.
|
[
"Calls",
"the",
"shell",
"program",
"on",
"the",
"PATH",
"with",
"the",
"given",
"arguments",
"."
] |
dd1dd6969fc0dd8231fc115fee3eeb690809585b
|
https://github.com/skibblenybbles/django-commando/blob/dd1dd6969fc0dd8231fc115fee3eeb690809585b/commando/management/base.py#L387-L402
|
243,219
|
colab/colab-superarchives-plugin
|
src/colab_superarchives/models.py
|
Thread.update_score
|
def update_score(self):
"""Update the relevance score for this thread.
The score is calculated with the following variables:
* vote_weight: 100 - (minus) 1 for each 3 days since
voted with minimum of 5.
* replies_weight: 300 - (minus) 1 for each 3 days since
replied with minimum of 5.
* page_view_weight: 10.
* vote_score: sum(vote_weight)
* replies_score: sum(replies_weight)
* page_view_score: sum(page_view_weight)
* score = (vote_score + replies_score + page_view_score) // 10
with minimum of 0 and maximum of 5000
"""
if not self.subject_token:
return
vote_score = 0
replies_score = 0
for msg in self.message_set.all():
# Calculate replies_score
replies_score += self._get_score(300, msg.received_time)
# Calculate vote_score
for vote in msg.vote_set.all():
vote_score += self._get_score(100, vote.created)
# Calculate page_view_score
page_view_score = self.hits * 10
self.score = (page_view_score + vote_score + replies_score) // 10
self.save()
|
python
|
def update_score(self):
"""Update the relevance score for this thread.
The score is calculated with the following variables:
* vote_weight: 100 - (minus) 1 for each 3 days since
voted with minimum of 5.
* replies_weight: 300 - (minus) 1 for each 3 days since
replied with minimum of 5.
* page_view_weight: 10.
* vote_score: sum(vote_weight)
* replies_score: sum(replies_weight)
* page_view_score: sum(page_view_weight)
* score = (vote_score + replies_score + page_view_score) // 10
with minimum of 0 and maximum of 5000
"""
if not self.subject_token:
return
vote_score = 0
replies_score = 0
for msg in self.message_set.all():
# Calculate replies_score
replies_score += self._get_score(300, msg.received_time)
# Calculate vote_score
for vote in msg.vote_set.all():
vote_score += self._get_score(100, vote.created)
# Calculate page_view_score
page_view_score = self.hits * 10
self.score = (page_view_score + vote_score + replies_score) // 10
self.save()
|
[
"def",
"update_score",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"subject_token",
":",
"return",
"vote_score",
"=",
"0",
"replies_score",
"=",
"0",
"for",
"msg",
"in",
"self",
".",
"message_set",
".",
"all",
"(",
")",
":",
"# Calculate replies_score",
"replies_score",
"+=",
"self",
".",
"_get_score",
"(",
"300",
",",
"msg",
".",
"received_time",
")",
"# Calculate vote_score",
"for",
"vote",
"in",
"msg",
".",
"vote_set",
".",
"all",
"(",
")",
":",
"vote_score",
"+=",
"self",
".",
"_get_score",
"(",
"100",
",",
"vote",
".",
"created",
")",
"# Calculate page_view_score",
"page_view_score",
"=",
"self",
".",
"hits",
"*",
"10",
"self",
".",
"score",
"=",
"(",
"page_view_score",
"+",
"vote_score",
"+",
"replies_score",
")",
"//",
"10",
"self",
".",
"save",
"(",
")"
] |
Update the relevance score for this thread.
The score is calculated with the following variables:
* vote_weight: 100 - (minus) 1 for each 3 days since
voted with minimum of 5.
* replies_weight: 300 - (minus) 1 for each 3 days since
replied with minimum of 5.
* page_view_weight: 10.
* vote_score: sum(vote_weight)
* replies_score: sum(replies_weight)
* page_view_score: sum(page_view_weight)
* score = (vote_score + replies_score + page_view_score) // 10
with minimum of 0 and maximum of 5000
|
[
"Update",
"the",
"relevance",
"score",
"for",
"this",
"thread",
"."
] |
fe588a1d4fac874ccad2063ee19a857028a22721
|
https://github.com/colab/colab-superarchives-plugin/blob/fe588a1d4fac874ccad2063ee19a857028a22721/src/colab_superarchives/models.py#L173-L210
|
243,220
|
colab/colab-superarchives-plugin
|
src/colab_superarchives/models.py
|
Message.url
|
def url(self):
"""Shortcut to get thread url"""
return reverse('archives:thread_view',
args=[self.mailinglist.name,
self.thread.subject_token])
|
python
|
def url(self):
"""Shortcut to get thread url"""
return reverse('archives:thread_view',
args=[self.mailinglist.name,
self.thread.subject_token])
|
[
"def",
"url",
"(",
"self",
")",
":",
"return",
"reverse",
"(",
"'archives:thread_view'",
",",
"args",
"=",
"[",
"self",
".",
"mailinglist",
".",
"name",
",",
"self",
".",
"thread",
".",
"subject_token",
"]",
")"
] |
Shortcut to get thread url
|
[
"Shortcut",
"to",
"get",
"thread",
"url"
] |
fe588a1d4fac874ccad2063ee19a857028a22721
|
https://github.com/colab/colab-superarchives-plugin/blob/fe588a1d4fac874ccad2063ee19a857028a22721/src/colab_superarchives/models.py#L373-L377
|
243,221
|
fizyk/pyramid_yml
|
tzf/pyramid_yml/scripts.py
|
print_config
|
def print_config(): # pragma: no cover
"""Print config entry function."""
description = """\
Print the deployment settings for a Pyramid application. Example:
'psettings deployment.ini'
"""
parser = argparse.ArgumentParser(
description=textwrap.dedent(description)
)
parser.add_argument(
'config_uri', type=str, help='an integer for the accumulator'
)
parser.add_argument(
'-k', '--key',
dest='key',
metavar='PREFIX',
type=str,
action='store',
help=(
"Tells script to print only specified"
" config tree provided by dotted name"
)
)
args = parser.parse_args(sys.argv[1:])
config_uri = args.config_uri
env = bootstrap(config_uri)
config, closer = env['registry']['config'], env['closer']
try:
print(printer(slice_config(config, args.key)))
except KeyError:
print(
'Sorry, but the key path {0}, does not exists in Your config!'
.format(args.key)
)
finally:
closer()
|
python
|
def print_config(): # pragma: no cover
"""Print config entry function."""
description = """\
Print the deployment settings for a Pyramid application. Example:
'psettings deployment.ini'
"""
parser = argparse.ArgumentParser(
description=textwrap.dedent(description)
)
parser.add_argument(
'config_uri', type=str, help='an integer for the accumulator'
)
parser.add_argument(
'-k', '--key',
dest='key',
metavar='PREFIX',
type=str,
action='store',
help=(
"Tells script to print only specified"
" config tree provided by dotted name"
)
)
args = parser.parse_args(sys.argv[1:])
config_uri = args.config_uri
env = bootstrap(config_uri)
config, closer = env['registry']['config'], env['closer']
try:
print(printer(slice_config(config, args.key)))
except KeyError:
print(
'Sorry, but the key path {0}, does not exists in Your config!'
.format(args.key)
)
finally:
closer()
|
[
"def",
"print_config",
"(",
")",
":",
"# pragma: no cover",
"description",
"=",
"\"\"\"\\\n Print the deployment settings for a Pyramid application. Example:\n 'psettings deployment.ini'\n \"\"\"",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"textwrap",
".",
"dedent",
"(",
"description",
")",
")",
"parser",
".",
"add_argument",
"(",
"'config_uri'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'an integer for the accumulator'",
")",
"parser",
".",
"add_argument",
"(",
"'-k'",
",",
"'--key'",
",",
"dest",
"=",
"'key'",
",",
"metavar",
"=",
"'PREFIX'",
",",
"type",
"=",
"str",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"(",
"\"Tells script to print only specified\"",
"\" config tree provided by dotted name\"",
")",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
")",
"config_uri",
"=",
"args",
".",
"config_uri",
"env",
"=",
"bootstrap",
"(",
"config_uri",
")",
"config",
",",
"closer",
"=",
"env",
"[",
"'registry'",
"]",
"[",
"'config'",
"]",
",",
"env",
"[",
"'closer'",
"]",
"try",
":",
"print",
"(",
"printer",
"(",
"slice_config",
"(",
"config",
",",
"args",
".",
"key",
")",
")",
")",
"except",
"KeyError",
":",
"print",
"(",
"'Sorry, but the key path {0}, does not exists in Your config!'",
".",
"format",
"(",
"args",
".",
"key",
")",
")",
"finally",
":",
"closer",
"(",
")"
] |
Print config entry function.
|
[
"Print",
"config",
"entry",
"function",
"."
] |
1b36c4e74194c04d7d69b4d7f86801757e78f0a6
|
https://github.com/fizyk/pyramid_yml/blob/1b36c4e74194c04d7d69b4d7f86801757e78f0a6/tzf/pyramid_yml/scripts.py#L17-L54
|
243,222
|
fizyk/pyramid_yml
|
tzf/pyramid_yml/scripts.py
|
printer
|
def printer(data, depth=0):
"""
Prepare data for printing.
:param data: a data value that will be processed by method
:param int depth: recurrency indicator, to maintain proper indent
:returns: string with formatted config
:rtype: str
"""
indent = _INDENT * depth
config_string = '' if not depth else ':\n'
if isinstance(data, dict):
for key, val in data.items():
line = '{0}{1}'.format(indent, key)
values = printer(val, depth + 1)
if not values.count('\n'):
values = ': {0}'.format(values.lstrip())
line = '{line}{values}'.format(line=line, values=values)
config_string += '{0}\n'.format(line)
elif isinstance(data, list):
for elem in data:
config_string += '{0} - {1}\n'.format(indent, elem)
else:
config_string = '{0}{1} ({2})'.format(
indent, data, data.__class__.__name__
)
return config_string.rstrip('\n')
|
python
|
def printer(data, depth=0):
"""
Prepare data for printing.
:param data: a data value that will be processed by method
:param int depth: recurrency indicator, to maintain proper indent
:returns: string with formatted config
:rtype: str
"""
indent = _INDENT * depth
config_string = '' if not depth else ':\n'
if isinstance(data, dict):
for key, val in data.items():
line = '{0}{1}'.format(indent, key)
values = printer(val, depth + 1)
if not values.count('\n'):
values = ': {0}'.format(values.lstrip())
line = '{line}{values}'.format(line=line, values=values)
config_string += '{0}\n'.format(line)
elif isinstance(data, list):
for elem in data:
config_string += '{0} - {1}\n'.format(indent, elem)
else:
config_string = '{0}{1} ({2})'.format(
indent, data, data.__class__.__name__
)
return config_string.rstrip('\n')
|
[
"def",
"printer",
"(",
"data",
",",
"depth",
"=",
"0",
")",
":",
"indent",
"=",
"_INDENT",
"*",
"depth",
"config_string",
"=",
"''",
"if",
"not",
"depth",
"else",
"':\\n'",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"for",
"key",
",",
"val",
"in",
"data",
".",
"items",
"(",
")",
":",
"line",
"=",
"'{0}{1}'",
".",
"format",
"(",
"indent",
",",
"key",
")",
"values",
"=",
"printer",
"(",
"val",
",",
"depth",
"+",
"1",
")",
"if",
"not",
"values",
".",
"count",
"(",
"'\\n'",
")",
":",
"values",
"=",
"': {0}'",
".",
"format",
"(",
"values",
".",
"lstrip",
"(",
")",
")",
"line",
"=",
"'{line}{values}'",
".",
"format",
"(",
"line",
"=",
"line",
",",
"values",
"=",
"values",
")",
"config_string",
"+=",
"'{0}\\n'",
".",
"format",
"(",
"line",
")",
"elif",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"for",
"elem",
"in",
"data",
":",
"config_string",
"+=",
"'{0} - {1}\\n'",
".",
"format",
"(",
"indent",
",",
"elem",
")",
"else",
":",
"config_string",
"=",
"'{0}{1} ({2})'",
".",
"format",
"(",
"indent",
",",
"data",
",",
"data",
".",
"__class__",
".",
"__name__",
")",
"return",
"config_string",
".",
"rstrip",
"(",
"'\\n'",
")"
] |
Prepare data for printing.
:param data: a data value that will be processed by method
:param int depth: recurrency indicator, to maintain proper indent
:returns: string with formatted config
:rtype: str
|
[
"Prepare",
"data",
"for",
"printing",
"."
] |
1b36c4e74194c04d7d69b4d7f86801757e78f0a6
|
https://github.com/fizyk/pyramid_yml/blob/1b36c4e74194c04d7d69b4d7f86801757e78f0a6/tzf/pyramid_yml/scripts.py#L57-L87
|
243,223
|
fizyk/pyramid_yml
|
tzf/pyramid_yml/scripts.py
|
slice_config
|
def slice_config(config, key):
"""
Slice config for printing as defined in key.
:param ConfigManager config: configuration dictionary
:param str key: dotted key, by which config should be sliced for printing
:returns: sliced config
:rtype: dict
"""
if key:
keys = key.split('.')
for k in keys:
config = config[k]
return config
|
python
|
def slice_config(config, key):
"""
Slice config for printing as defined in key.
:param ConfigManager config: configuration dictionary
:param str key: dotted key, by which config should be sliced for printing
:returns: sliced config
:rtype: dict
"""
if key:
keys = key.split('.')
for k in keys:
config = config[k]
return config
|
[
"def",
"slice_config",
"(",
"config",
",",
"key",
")",
":",
"if",
"key",
":",
"keys",
"=",
"key",
".",
"split",
"(",
"'.'",
")",
"for",
"k",
"in",
"keys",
":",
"config",
"=",
"config",
"[",
"k",
"]",
"return",
"config"
] |
Slice config for printing as defined in key.
:param ConfigManager config: configuration dictionary
:param str key: dotted key, by which config should be sliced for printing
:returns: sliced config
:rtype: dict
|
[
"Slice",
"config",
"for",
"printing",
"as",
"defined",
"in",
"key",
"."
] |
1b36c4e74194c04d7d69b4d7f86801757e78f0a6
|
https://github.com/fizyk/pyramid_yml/blob/1b36c4e74194c04d7d69b4d7f86801757e78f0a6/tzf/pyramid_yml/scripts.py#L90-L105
|
243,224
|
physacco/reverse
|
reverse.py
|
split_size
|
def split_size(size):
'''Split the file size into several chunks.'''
rem = size % CHUNK_SIZE
if rem == 0:
cnt = size // CHUNK_SIZE
else:
cnt = size // CHUNK_SIZE + 1
chunks = []
for i in range(cnt):
pos = i * CHUNK_SIZE
if i == cnt - 1:
disp = size - pos
else:
disp = CHUNK_SIZE
chunks.append((pos, disp))
return chunks
|
python
|
def split_size(size):
'''Split the file size into several chunks.'''
rem = size % CHUNK_SIZE
if rem == 0:
cnt = size // CHUNK_SIZE
else:
cnt = size // CHUNK_SIZE + 1
chunks = []
for i in range(cnt):
pos = i * CHUNK_SIZE
if i == cnt - 1:
disp = size - pos
else:
disp = CHUNK_SIZE
chunks.append((pos, disp))
return chunks
|
[
"def",
"split_size",
"(",
"size",
")",
":",
"rem",
"=",
"size",
"%",
"CHUNK_SIZE",
"if",
"rem",
"==",
"0",
":",
"cnt",
"=",
"size",
"//",
"CHUNK_SIZE",
"else",
":",
"cnt",
"=",
"size",
"//",
"CHUNK_SIZE",
"+",
"1",
"chunks",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"cnt",
")",
":",
"pos",
"=",
"i",
"*",
"CHUNK_SIZE",
"if",
"i",
"==",
"cnt",
"-",
"1",
":",
"disp",
"=",
"size",
"-",
"pos",
"else",
":",
"disp",
"=",
"CHUNK_SIZE",
"chunks",
".",
"append",
"(",
"(",
"pos",
",",
"disp",
")",
")",
"return",
"chunks"
] |
Split the file size into several chunks.
|
[
"Split",
"the",
"file",
"size",
"into",
"several",
"chunks",
"."
] |
bb900e4831a3e33745d15265fbbbb2502397ebfd
|
https://github.com/physacco/reverse/blob/bb900e4831a3e33745d15265fbbbb2502397ebfd/reverse.py#L38-L54
|
243,225
|
physacco/reverse
|
reverse.py
|
reverse_fd
|
def reverse_fd(inf, outf):
'''Reverse the content of inf, write to outf.
Both inf and outf are file objects.
inf must be seekable.
'''
inf.seek(0, 2)
size = inf.tell()
if not size:
return
chunks = split_size(size)
for chunk in reversed(chunks):
inf.seek(chunk[0], 0)
data = inf.read(chunk[1])
if len(data) != chunk[1]:
raise IOError('incomplete I/O operation')
outf.write(data[::-1])
|
python
|
def reverse_fd(inf, outf):
'''Reverse the content of inf, write to outf.
Both inf and outf are file objects.
inf must be seekable.
'''
inf.seek(0, 2)
size = inf.tell()
if not size:
return
chunks = split_size(size)
for chunk in reversed(chunks):
inf.seek(chunk[0], 0)
data = inf.read(chunk[1])
if len(data) != chunk[1]:
raise IOError('incomplete I/O operation')
outf.write(data[::-1])
|
[
"def",
"reverse_fd",
"(",
"inf",
",",
"outf",
")",
":",
"inf",
".",
"seek",
"(",
"0",
",",
"2",
")",
"size",
"=",
"inf",
".",
"tell",
"(",
")",
"if",
"not",
"size",
":",
"return",
"chunks",
"=",
"split_size",
"(",
"size",
")",
"for",
"chunk",
"in",
"reversed",
"(",
"chunks",
")",
":",
"inf",
".",
"seek",
"(",
"chunk",
"[",
"0",
"]",
",",
"0",
")",
"data",
"=",
"inf",
".",
"read",
"(",
"chunk",
"[",
"1",
"]",
")",
"if",
"len",
"(",
"data",
")",
"!=",
"chunk",
"[",
"1",
"]",
":",
"raise",
"IOError",
"(",
"'incomplete I/O operation'",
")",
"outf",
".",
"write",
"(",
"data",
"[",
":",
":",
"-",
"1",
"]",
")"
] |
Reverse the content of inf, write to outf.
Both inf and outf are file objects.
inf must be seekable.
|
[
"Reverse",
"the",
"content",
"of",
"inf",
"write",
"to",
"outf",
".",
"Both",
"inf",
"and",
"outf",
"are",
"file",
"objects",
".",
"inf",
"must",
"be",
"seekable",
"."
] |
bb900e4831a3e33745d15265fbbbb2502397ebfd
|
https://github.com/physacco/reverse/blob/bb900e4831a3e33745d15265fbbbb2502397ebfd/reverse.py#L56-L72
|
243,226
|
physacco/reverse
|
reverse.py
|
reverse_file
|
def reverse_file(infile, outfile):
'''Reverse the content of infile, write to outfile.
Both infile and outfile are filenames or filepaths.
'''
with open(infile, 'rb') as inf:
with open(outfile, 'wb') as outf:
reverse_fd(inf, outf)
|
python
|
def reverse_file(infile, outfile):
'''Reverse the content of infile, write to outfile.
Both infile and outfile are filenames or filepaths.
'''
with open(infile, 'rb') as inf:
with open(outfile, 'wb') as outf:
reverse_fd(inf, outf)
|
[
"def",
"reverse_file",
"(",
"infile",
",",
"outfile",
")",
":",
"with",
"open",
"(",
"infile",
",",
"'rb'",
")",
"as",
"inf",
":",
"with",
"open",
"(",
"outfile",
",",
"'wb'",
")",
"as",
"outf",
":",
"reverse_fd",
"(",
"inf",
",",
"outf",
")"
] |
Reverse the content of infile, write to outfile.
Both infile and outfile are filenames or filepaths.
|
[
"Reverse",
"the",
"content",
"of",
"infile",
"write",
"to",
"outfile",
".",
"Both",
"infile",
"and",
"outfile",
"are",
"filenames",
"or",
"filepaths",
"."
] |
bb900e4831a3e33745d15265fbbbb2502397ebfd
|
https://github.com/physacco/reverse/blob/bb900e4831a3e33745d15265fbbbb2502397ebfd/reverse.py#L74-L80
|
243,227
|
eight04/safeprint
|
safeprint/__init__.py
|
BasePrinter.print
|
def print(self, *objects, **kwargs):
"""Micmic print interface"""
file = kwargs.get("file")
if file is not None and file is not sys.stdout:
PRINT(*objects, **kwargs)
else:
sep = STR(kwargs.get("sep", " "))
end = STR(kwargs.get("end", "\n"))
text = sep.join(STR(o) for o in objects)
self.imp_print(text, end)
for callback in self.listeners:
callback(text)
|
python
|
def print(self, *objects, **kwargs):
"""Micmic print interface"""
file = kwargs.get("file")
if file is not None and file is not sys.stdout:
PRINT(*objects, **kwargs)
else:
sep = STR(kwargs.get("sep", " "))
end = STR(kwargs.get("end", "\n"))
text = sep.join(STR(o) for o in objects)
self.imp_print(text, end)
for callback in self.listeners:
callback(text)
|
[
"def",
"print",
"(",
"self",
",",
"*",
"objects",
",",
"*",
"*",
"kwargs",
")",
":",
"file",
"=",
"kwargs",
".",
"get",
"(",
"\"file\"",
")",
"if",
"file",
"is",
"not",
"None",
"and",
"file",
"is",
"not",
"sys",
".",
"stdout",
":",
"PRINT",
"(",
"*",
"objects",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"sep",
"=",
"STR",
"(",
"kwargs",
".",
"get",
"(",
"\"sep\"",
",",
"\" \"",
")",
")",
"end",
"=",
"STR",
"(",
"kwargs",
".",
"get",
"(",
"\"end\"",
",",
"\"\\n\"",
")",
")",
"text",
"=",
"sep",
".",
"join",
"(",
"STR",
"(",
"o",
")",
"for",
"o",
"in",
"objects",
")",
"self",
".",
"imp_print",
"(",
"text",
",",
"end",
")",
"for",
"callback",
"in",
"self",
".",
"listeners",
":",
"callback",
"(",
"text",
")"
] |
Micmic print interface
|
[
"Micmic",
"print",
"interface"
] |
e27a1653fb8d29e83952ec474931105b7911651a
|
https://github.com/eight04/safeprint/blob/e27a1653fb8d29e83952ec474931105b7911651a/safeprint/__init__.py#L61-L77
|
243,228
|
eight04/safeprint
|
safeprint/__init__.py
|
EncodePrinter.imp_print
|
def imp_print(self, text, end):
"""Directly send utf8 bytes to stdout"""
sys.stdout.write((text + end).encode("utf-8"))
|
python
|
def imp_print(self, text, end):
"""Directly send utf8 bytes to stdout"""
sys.stdout.write((text + end).encode("utf-8"))
|
[
"def",
"imp_print",
"(",
"self",
",",
"text",
",",
"end",
")",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"(",
"text",
"+",
"end",
")",
".",
"encode",
"(",
"\"utf-8\"",
")",
")"
] |
Directly send utf8 bytes to stdout
|
[
"Directly",
"send",
"utf8",
"bytes",
"to",
"stdout"
] |
e27a1653fb8d29e83952ec474931105b7911651a
|
https://github.com/eight04/safeprint/blob/e27a1653fb8d29e83952ec474931105b7911651a/safeprint/__init__.py#L110-L112
|
243,229
|
rosenbrockc/acorn
|
acorn/logging/diff.py
|
restore
|
def restore(cdiff, a):
"""Restores the full text of either the edited text using the
compressed diff.
Args:
cdiff (dict): compressed diff returned by
:func:`~acorn.logging.diff.compress`.
a (str or list): *original* string or list of strings to use as a
reference to restore the edited version.
"""
left = a.splitlines(1) if isinstance(a, string_types) else a
lrest = []
iline = 0
for i, line in enumerate(left):
if iline not in cdiff:
lrest.append(" " + line)
iline += 1
else:
cs = [l[0] for l in cdiff[iline]]
add = cs.count('+') - cs.count('-')
lrest.extend(cdiff[iline])
iline += add + 1
for i in sorted(cdiff.keys()):
if i >= len(left):
lrest.extend(cdiff[i])
from difflib import restore
return list(restore(lrest, 2))
|
python
|
def restore(cdiff, a):
"""Restores the full text of either the edited text using the
compressed diff.
Args:
cdiff (dict): compressed diff returned by
:func:`~acorn.logging.diff.compress`.
a (str or list): *original* string or list of strings to use as a
reference to restore the edited version.
"""
left = a.splitlines(1) if isinstance(a, string_types) else a
lrest = []
iline = 0
for i, line in enumerate(left):
if iline not in cdiff:
lrest.append(" " + line)
iline += 1
else:
cs = [l[0] for l in cdiff[iline]]
add = cs.count('+') - cs.count('-')
lrest.extend(cdiff[iline])
iline += add + 1
for i in sorted(cdiff.keys()):
if i >= len(left):
lrest.extend(cdiff[i])
from difflib import restore
return list(restore(lrest, 2))
|
[
"def",
"restore",
"(",
"cdiff",
",",
"a",
")",
":",
"left",
"=",
"a",
".",
"splitlines",
"(",
"1",
")",
"if",
"isinstance",
"(",
"a",
",",
"string_types",
")",
"else",
"a",
"lrest",
"=",
"[",
"]",
"iline",
"=",
"0",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"left",
")",
":",
"if",
"iline",
"not",
"in",
"cdiff",
":",
"lrest",
".",
"append",
"(",
"\" \"",
"+",
"line",
")",
"iline",
"+=",
"1",
"else",
":",
"cs",
"=",
"[",
"l",
"[",
"0",
"]",
"for",
"l",
"in",
"cdiff",
"[",
"iline",
"]",
"]",
"add",
"=",
"cs",
".",
"count",
"(",
"'+'",
")",
"-",
"cs",
".",
"count",
"(",
"'-'",
")",
"lrest",
".",
"extend",
"(",
"cdiff",
"[",
"iline",
"]",
")",
"iline",
"+=",
"add",
"+",
"1",
"for",
"i",
"in",
"sorted",
"(",
"cdiff",
".",
"keys",
"(",
")",
")",
":",
"if",
"i",
">=",
"len",
"(",
"left",
")",
":",
"lrest",
".",
"extend",
"(",
"cdiff",
"[",
"i",
"]",
")",
"from",
"difflib",
"import",
"restore",
"return",
"list",
"(",
"restore",
"(",
"lrest",
",",
"2",
")",
")"
] |
Restores the full text of either the edited text using the
compressed diff.
Args:
cdiff (dict): compressed diff returned by
:func:`~acorn.logging.diff.compress`.
a (str or list): *original* string or list of strings to use as a
reference to restore the edited version.
|
[
"Restores",
"the",
"full",
"text",
"of",
"either",
"the",
"edited",
"text",
"using",
"the",
"compressed",
"diff",
"."
] |
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
|
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/diff.py#L32-L61
|
243,230
|
datadesk/django-greeking
|
greeking/fillmurray.py
|
get_url
|
def get_url(width, height, color=True):
"""
Craft the URL for a placekitten image.
By default they are in color. To retrieve a grayscale image, set
the color kwarg to False.
"""
d = dict(width=width, height=height)
return URL % d
|
python
|
def get_url(width, height, color=True):
"""
Craft the URL for a placekitten image.
By default they are in color. To retrieve a grayscale image, set
the color kwarg to False.
"""
d = dict(width=width, height=height)
return URL % d
|
[
"def",
"get_url",
"(",
"width",
",",
"height",
",",
"color",
"=",
"True",
")",
":",
"d",
"=",
"dict",
"(",
"width",
"=",
"width",
",",
"height",
"=",
"height",
")",
"return",
"URL",
"%",
"d"
] |
Craft the URL for a placekitten image.
By default they are in color. To retrieve a grayscale image, set
the color kwarg to False.
|
[
"Craft",
"the",
"URL",
"for",
"a",
"placekitten",
"image",
"."
] |
72509c94952279503bbe8d5a710c1fd344da0670
|
https://github.com/datadesk/django-greeking/blob/72509c94952279503bbe8d5a710c1fd344da0670/greeking/fillmurray.py#L8-L16
|
243,231
|
mengzhuo/ioloop
|
ioloop/__init__.py
|
IOLoop.instance
|
def instance():
"""Returns a global `IOLoop` instance.
Most applications have a single, global `IOLoop` running on the
main thread. Use this method to get this instance from
another thread. To get the current thread's `IOLoop`, use `current()`.
"""
if not hasattr(IOLoop, "_instance"):
with IOLoop._instance_lock:
if not hasattr(IOLoop, "_instance"):
# New instance after double check
IOLoop._instance = IOLoop()
return IOLoop._instance
|
python
|
def instance():
"""Returns a global `IOLoop` instance.
Most applications have a single, global `IOLoop` running on the
main thread. Use this method to get this instance from
another thread. To get the current thread's `IOLoop`, use `current()`.
"""
if not hasattr(IOLoop, "_instance"):
with IOLoop._instance_lock:
if not hasattr(IOLoop, "_instance"):
# New instance after double check
IOLoop._instance = IOLoop()
return IOLoop._instance
|
[
"def",
"instance",
"(",
")",
":",
"if",
"not",
"hasattr",
"(",
"IOLoop",
",",
"\"_instance\"",
")",
":",
"with",
"IOLoop",
".",
"_instance_lock",
":",
"if",
"not",
"hasattr",
"(",
"IOLoop",
",",
"\"_instance\"",
")",
":",
"# New instance after double check",
"IOLoop",
".",
"_instance",
"=",
"IOLoop",
"(",
")",
"return",
"IOLoop",
".",
"_instance"
] |
Returns a global `IOLoop` instance.
Most applications have a single, global `IOLoop` running on the
main thread. Use this method to get this instance from
another thread. To get the current thread's `IOLoop`, use `current()`.
|
[
"Returns",
"a",
"global",
"IOLoop",
"instance",
"."
] |
9a3602ab5f1435753253f63906fd4364135b07b5
|
https://github.com/mengzhuo/ioloop/blob/9a3602ab5f1435753253f63906fd4364135b07b5/ioloop/__init__.py#L26-L38
|
243,232
|
dolph/pasteraw-client
|
pasteraw.py
|
Client.create_paste
|
def create_paste(self, content):
"""Create a raw paste of the given content.
Returns a URL to the paste, or raises a ``pasteraw.Error`` if something
tragic happens instead.
"""
r = requests.post(
self.endpoint + '/pastes',
data={'content': content},
allow_redirects=False)
if r.status_code == 302:
return r.headers['Location']
if r.status_code == 413:
raise MaxLengthExceeded('%d bytes' % len(content))
try:
error_message = r.json()['error']
except Exception:
error_message = r.text
raise UnexpectedError(error_message)
|
python
|
def create_paste(self, content):
"""Create a raw paste of the given content.
Returns a URL to the paste, or raises a ``pasteraw.Error`` if something
tragic happens instead.
"""
r = requests.post(
self.endpoint + '/pastes',
data={'content': content},
allow_redirects=False)
if r.status_code == 302:
return r.headers['Location']
if r.status_code == 413:
raise MaxLengthExceeded('%d bytes' % len(content))
try:
error_message = r.json()['error']
except Exception:
error_message = r.text
raise UnexpectedError(error_message)
|
[
"def",
"create_paste",
"(",
"self",
",",
"content",
")",
":",
"r",
"=",
"requests",
".",
"post",
"(",
"self",
".",
"endpoint",
"+",
"'/pastes'",
",",
"data",
"=",
"{",
"'content'",
":",
"content",
"}",
",",
"allow_redirects",
"=",
"False",
")",
"if",
"r",
".",
"status_code",
"==",
"302",
":",
"return",
"r",
".",
"headers",
"[",
"'Location'",
"]",
"if",
"r",
".",
"status_code",
"==",
"413",
":",
"raise",
"MaxLengthExceeded",
"(",
"'%d bytes'",
"%",
"len",
"(",
"content",
")",
")",
"try",
":",
"error_message",
"=",
"r",
".",
"json",
"(",
")",
"[",
"'error'",
"]",
"except",
"Exception",
":",
"error_message",
"=",
"r",
".",
"text",
"raise",
"UnexpectedError",
"(",
"error_message",
")"
] |
Create a raw paste of the given content.
Returns a URL to the paste, or raises a ``pasteraw.Error`` if something
tragic happens instead.
|
[
"Create",
"a",
"raw",
"paste",
"of",
"the",
"given",
"content",
"."
] |
f1d5ec0bc0fb02584ad12f50266ac8f1fd2a3297
|
https://github.com/dolph/pasteraw-client/blob/f1d5ec0bc0fb02584ad12f50266ac8f1fd2a3297/pasteraw.py#L63-L86
|
243,233
|
blha303/DO-runin
|
runin/runin.py
|
match_keys
|
def match_keys(inp, p=False):
"""Takes a comma-separated string of key ids or fingerprints and returns a list of key ids"""
_keys = []
ssh_keys = DO.get_ssh_keys()
for k in inp.split(","):
done = False
if k.isdigit():
for _ in [s for s in ssh_keys if s["id"] == int(k)]:
done = True
_keys.append(_["fingerprint"])
else:
for _ in [s for s in ssh_keys if s["fingerprint"] == k]:
done = True
_keys.append(_["fingerprint"])
if p and not done:
print("Could not find a match for '{}', skipping".format(k), file=sys.stderr)
return _keys
|
python
|
def match_keys(inp, p=False):
"""Takes a comma-separated string of key ids or fingerprints and returns a list of key ids"""
_keys = []
ssh_keys = DO.get_ssh_keys()
for k in inp.split(","):
done = False
if k.isdigit():
for _ in [s for s in ssh_keys if s["id"] == int(k)]:
done = True
_keys.append(_["fingerprint"])
else:
for _ in [s for s in ssh_keys if s["fingerprint"] == k]:
done = True
_keys.append(_["fingerprint"])
if p and not done:
print("Could not find a match for '{}', skipping".format(k), file=sys.stderr)
return _keys
|
[
"def",
"match_keys",
"(",
"inp",
",",
"p",
"=",
"False",
")",
":",
"_keys",
"=",
"[",
"]",
"ssh_keys",
"=",
"DO",
".",
"get_ssh_keys",
"(",
")",
"for",
"k",
"in",
"inp",
".",
"split",
"(",
"\",\"",
")",
":",
"done",
"=",
"False",
"if",
"k",
".",
"isdigit",
"(",
")",
":",
"for",
"_",
"in",
"[",
"s",
"for",
"s",
"in",
"ssh_keys",
"if",
"s",
"[",
"\"id\"",
"]",
"==",
"int",
"(",
"k",
")",
"]",
":",
"done",
"=",
"True",
"_keys",
".",
"append",
"(",
"_",
"[",
"\"fingerprint\"",
"]",
")",
"else",
":",
"for",
"_",
"in",
"[",
"s",
"for",
"s",
"in",
"ssh_keys",
"if",
"s",
"[",
"\"fingerprint\"",
"]",
"==",
"k",
"]",
":",
"done",
"=",
"True",
"_keys",
".",
"append",
"(",
"_",
"[",
"\"fingerprint\"",
"]",
")",
"if",
"p",
"and",
"not",
"done",
":",
"print",
"(",
"\"Could not find a match for '{}', skipping\"",
".",
"format",
"(",
"k",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"return",
"_keys"
] |
Takes a comma-separated string of key ids or fingerprints and returns a list of key ids
|
[
"Takes",
"a",
"comma",
"-",
"separated",
"string",
"of",
"key",
"ids",
"or",
"fingerprints",
"and",
"returns",
"a",
"list",
"of",
"key",
"ids"
] |
4e725165e79f8bc0a2e1cb07a83f414686570e90
|
https://github.com/blha303/DO-runin/blob/4e725165e79f8bc0a2e1cb07a83f414686570e90/runin/runin.py#L13-L29
|
243,234
|
SkyLothar/shcmd
|
shcmd/cmd.py
|
cd
|
def cd(cd_path, create=False):
"""cd to target dir when running in this block
:param cd_path: dir to cd into
:param create: create new dir if destination not there
Usage::
>>> with cd("/tmp"):
... print("we are in /tmp now")
"""
oricwd = os.getcwd()
if create:
mkdir(cd_path)
try:
os.chdir(cd_path)
yield
finally:
os.chdir(oricwd)
|
python
|
def cd(cd_path, create=False):
"""cd to target dir when running in this block
:param cd_path: dir to cd into
:param create: create new dir if destination not there
Usage::
>>> with cd("/tmp"):
... print("we are in /tmp now")
"""
oricwd = os.getcwd()
if create:
mkdir(cd_path)
try:
os.chdir(cd_path)
yield
finally:
os.chdir(oricwd)
|
[
"def",
"cd",
"(",
"cd_path",
",",
"create",
"=",
"False",
")",
":",
"oricwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"if",
"create",
":",
"mkdir",
"(",
"cd_path",
")",
"try",
":",
"os",
".",
"chdir",
"(",
"cd_path",
")",
"yield",
"finally",
":",
"os",
".",
"chdir",
"(",
"oricwd",
")"
] |
cd to target dir when running in this block
:param cd_path: dir to cd into
:param create: create new dir if destination not there
Usage::
>>> with cd("/tmp"):
... print("we are in /tmp now")
|
[
"cd",
"to",
"target",
"dir",
"when",
"running",
"in",
"this",
"block"
] |
d8cad6311a4da7ef09f3419c86b58e30388b7ee3
|
https://github.com/SkyLothar/shcmd/blob/d8cad6311a4da7ef09f3419c86b58e30388b7ee3/shcmd/cmd.py#L10-L28
|
243,235
|
SkyLothar/shcmd
|
shcmd/cmd.py
|
cd_to
|
def cd_to(path, mkdir=False):
"""make a generator like cd, but use it for function
Usage::
>>> @cd_to("/")
... def say_where():
... print(os.getcwd())
...
>>> say_where()
/
"""
def cd_to_decorator(func):
@functools.wraps(func)
def _cd_and_exec(*args, **kwargs):
with cd(path, mkdir):
return func(*args, **kwargs)
return _cd_and_exec
return cd_to_decorator
|
python
|
def cd_to(path, mkdir=False):
"""make a generator like cd, but use it for function
Usage::
>>> @cd_to("/")
... def say_where():
... print(os.getcwd())
...
>>> say_where()
/
"""
def cd_to_decorator(func):
@functools.wraps(func)
def _cd_and_exec(*args, **kwargs):
with cd(path, mkdir):
return func(*args, **kwargs)
return _cd_and_exec
return cd_to_decorator
|
[
"def",
"cd_to",
"(",
"path",
",",
"mkdir",
"=",
"False",
")",
":",
"def",
"cd_to_decorator",
"(",
"func",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"_cd_and_exec",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"cd",
"(",
"path",
",",
"mkdir",
")",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"_cd_and_exec",
"return",
"cd_to_decorator"
] |
make a generator like cd, but use it for function
Usage::
>>> @cd_to("/")
... def say_where():
... print(os.getcwd())
...
>>> say_where()
/
|
[
"make",
"a",
"generator",
"like",
"cd",
"but",
"use",
"it",
"for",
"function"
] |
d8cad6311a4da7ef09f3419c86b58e30388b7ee3
|
https://github.com/SkyLothar/shcmd/blob/d8cad6311a4da7ef09f3419c86b58e30388b7ee3/shcmd/cmd.py#L31-L50
|
243,236
|
matthewdeanmartin/jiggle_version
|
sample_projects/ver_in_weird_file/setup_helpers.py
|
require_python
|
def require_python(minimum):
"""Require at least a minimum Python version.
The version number is expressed in terms of `sys.hexversion`. E.g. to
require a minimum of Python 2.6, use::
>>> require_python(0x206000f0)
:param minimum: Minimum Python version supported.
:type minimum: integer
"""
if sys.hexversion < minimum:
hversion = hex(minimum)[2:]
if len(hversion) % 2 != 0:
hversion = '0' + hversion
split = list(hversion)
parts = []
while split:
parts.append(int(''.join((split.pop(0), split.pop(0))), 16))
major, minor, micro, release = parts
if release == 0xf0:
print('Python {0}.{1}.{2} or better is required'.format(
major, minor, micro))
else:
print('Python {0}.{1}.{2} ({3}) or better is required'.format(
major, minor, micro, hex(release)[2:]))
sys.exit(1)
|
python
|
def require_python(minimum):
"""Require at least a minimum Python version.
The version number is expressed in terms of `sys.hexversion`. E.g. to
require a minimum of Python 2.6, use::
>>> require_python(0x206000f0)
:param minimum: Minimum Python version supported.
:type minimum: integer
"""
if sys.hexversion < minimum:
hversion = hex(minimum)[2:]
if len(hversion) % 2 != 0:
hversion = '0' + hversion
split = list(hversion)
parts = []
while split:
parts.append(int(''.join((split.pop(0), split.pop(0))), 16))
major, minor, micro, release = parts
if release == 0xf0:
print('Python {0}.{1}.{2} or better is required'.format(
major, minor, micro))
else:
print('Python {0}.{1}.{2} ({3}) or better is required'.format(
major, minor, micro, hex(release)[2:]))
sys.exit(1)
|
[
"def",
"require_python",
"(",
"minimum",
")",
":",
"if",
"sys",
".",
"hexversion",
"<",
"minimum",
":",
"hversion",
"=",
"hex",
"(",
"minimum",
")",
"[",
"2",
":",
"]",
"if",
"len",
"(",
"hversion",
")",
"%",
"2",
"!=",
"0",
":",
"hversion",
"=",
"'0'",
"+",
"hversion",
"split",
"=",
"list",
"(",
"hversion",
")",
"parts",
"=",
"[",
"]",
"while",
"split",
":",
"parts",
".",
"append",
"(",
"int",
"(",
"''",
".",
"join",
"(",
"(",
"split",
".",
"pop",
"(",
"0",
")",
",",
"split",
".",
"pop",
"(",
"0",
")",
")",
")",
",",
"16",
")",
")",
"major",
",",
"minor",
",",
"micro",
",",
"release",
"=",
"parts",
"if",
"release",
"==",
"0xf0",
":",
"print",
"(",
"'Python {0}.{1}.{2} or better is required'",
".",
"format",
"(",
"major",
",",
"minor",
",",
"micro",
")",
")",
"else",
":",
"print",
"(",
"'Python {0}.{1}.{2} ({3}) or better is required'",
".",
"format",
"(",
"major",
",",
"minor",
",",
"micro",
",",
"hex",
"(",
"release",
")",
"[",
"2",
":",
"]",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] |
Require at least a minimum Python version.
The version number is expressed in terms of `sys.hexversion`. E.g. to
require a minimum of Python 2.6, use::
>>> require_python(0x206000f0)
:param minimum: Minimum Python version supported.
:type minimum: integer
|
[
"Require",
"at",
"least",
"a",
"minimum",
"Python",
"version",
"."
] |
963656a0a47b7162780a5f6c8f4b8bbbebc148f5
|
https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/sample_projects/ver_in_weird_file/setup_helpers.py#L41-L67
|
243,237
|
matthewdeanmartin/jiggle_version
|
sample_projects/ver_in_weird_file/setup_helpers.py
|
long_description
|
def long_description(*filenames):
"""Provide a long description."""
res = ['']
for filename in filenames:
with open(filename) as fp:
for line in fp:
res.append(' ' + line)
res.append('')
res.append('\n')
return EMPTYSTRING.join(res)
|
python
|
def long_description(*filenames):
"""Provide a long description."""
res = ['']
for filename in filenames:
with open(filename) as fp:
for line in fp:
res.append(' ' + line)
res.append('')
res.append('\n')
return EMPTYSTRING.join(res)
|
[
"def",
"long_description",
"(",
"*",
"filenames",
")",
":",
"res",
"=",
"[",
"''",
"]",
"for",
"filename",
"in",
"filenames",
":",
"with",
"open",
"(",
"filename",
")",
"as",
"fp",
":",
"for",
"line",
"in",
"fp",
":",
"res",
".",
"append",
"(",
"' '",
"+",
"line",
")",
"res",
".",
"append",
"(",
"''",
")",
"res",
".",
"append",
"(",
"'\\n'",
")",
"return",
"EMPTYSTRING",
".",
"join",
"(",
"res",
")"
] |
Provide a long description.
|
[
"Provide",
"a",
"long",
"description",
"."
] |
963656a0a47b7162780a5f6c8f4b8bbbebc148f5
|
https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/sample_projects/ver_in_weird_file/setup_helpers.py#L125-L134
|
243,238
|
matthewdeanmartin/jiggle_version
|
sample_projects/ver_in_weird_file/setup_helpers.py
|
description
|
def description(filename):
"""Provide a short description."""
# This ends up in the Summary header for PKG-INFO and it should be a
# one-liner. It will get rendered on the package page just below the
# package version header but above the long_description, which ironically
# gets stuff into the Description header. It should not include reST, so
# pick out the first single line after the double header.
with open(filename) as fp:
for lineno, line in enumerate(fp):
if lineno < 3:
continue
line = line.strip()
if len(line) > 0:
return line
|
python
|
def description(filename):
"""Provide a short description."""
# This ends up in the Summary header for PKG-INFO and it should be a
# one-liner. It will get rendered on the package page just below the
# package version header but above the long_description, which ironically
# gets stuff into the Description header. It should not include reST, so
# pick out the first single line after the double header.
with open(filename) as fp:
for lineno, line in enumerate(fp):
if lineno < 3:
continue
line = line.strip()
if len(line) > 0:
return line
|
[
"def",
"description",
"(",
"filename",
")",
":",
"# This ends up in the Summary header for PKG-INFO and it should be a",
"# one-liner. It will get rendered on the package page just below the",
"# package version header but above the long_description, which ironically",
"# gets stuff into the Description header. It should not include reST, so",
"# pick out the first single line after the double header.",
"with",
"open",
"(",
"filename",
")",
"as",
"fp",
":",
"for",
"lineno",
",",
"line",
"in",
"enumerate",
"(",
"fp",
")",
":",
"if",
"lineno",
"<",
"3",
":",
"continue",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"len",
"(",
"line",
")",
">",
"0",
":",
"return",
"line"
] |
Provide a short description.
|
[
"Provide",
"a",
"short",
"description",
"."
] |
963656a0a47b7162780a5f6c8f4b8bbbebc148f5
|
https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/sample_projects/ver_in_weird_file/setup_helpers.py#L137-L150
|
243,239
|
heathbar/plum-lightpad-python
|
plumlightpad/plumcloud.py
|
PlumCloud.fetch_house
|
async def fetch_house(self, house_id):
"""Lookup details for a given house id"""
url = "https://production.plum.technology/v2/getHouse"
data = {"hid": house_id}
return await self.__post(url, data)
|
python
|
async def fetch_house(self, house_id):
"""Lookup details for a given house id"""
url = "https://production.plum.technology/v2/getHouse"
data = {"hid": house_id}
return await self.__post(url, data)
|
[
"async",
"def",
"fetch_house",
"(",
"self",
",",
"house_id",
")",
":",
"url",
"=",
"\"https://production.plum.technology/v2/getHouse\"",
"data",
"=",
"{",
"\"hid\"",
":",
"house_id",
"}",
"return",
"await",
"self",
".",
"__post",
"(",
"url",
",",
"data",
")"
] |
Lookup details for a given house id
|
[
"Lookup",
"details",
"for",
"a",
"given",
"house",
"id"
] |
181b3dc26cfe77830565f6f844af5ed0728dccd5
|
https://github.com/heathbar/plum-lightpad-python/blob/181b3dc26cfe77830565f6f844af5ed0728dccd5/plumlightpad/plumcloud.py#L48-L52
|
243,240
|
heathbar/plum-lightpad-python
|
plumlightpad/plumcloud.py
|
PlumCloud.fetch_room
|
async def fetch_room(self, room_id):
"""Lookup details for a given room id"""
url = "https://production.plum.technology/v2/getRoom"
data = {"rid": room_id}
return await self.__post(url, data)
|
python
|
async def fetch_room(self, room_id):
"""Lookup details for a given room id"""
url = "https://production.plum.technology/v2/getRoom"
data = {"rid": room_id}
return await self.__post(url, data)
|
[
"async",
"def",
"fetch_room",
"(",
"self",
",",
"room_id",
")",
":",
"url",
"=",
"\"https://production.plum.technology/v2/getRoom\"",
"data",
"=",
"{",
"\"rid\"",
":",
"room_id",
"}",
"return",
"await",
"self",
".",
"__post",
"(",
"url",
",",
"data",
")"
] |
Lookup details for a given room id
|
[
"Lookup",
"details",
"for",
"a",
"given",
"room",
"id"
] |
181b3dc26cfe77830565f6f844af5ed0728dccd5
|
https://github.com/heathbar/plum-lightpad-python/blob/181b3dc26cfe77830565f6f844af5ed0728dccd5/plumlightpad/plumcloud.py#L54-L58
|
243,241
|
heathbar/plum-lightpad-python
|
plumlightpad/plumcloud.py
|
PlumCloud.fetch_logical_load
|
async def fetch_logical_load(self, llid):
"""Lookup details for a given logical load"""
url = "https://production.plum.technology/v2/getLogicalLoad"
data = {"llid": llid}
return await self.__post(url, data)
|
python
|
async def fetch_logical_load(self, llid):
"""Lookup details for a given logical load"""
url = "https://production.plum.technology/v2/getLogicalLoad"
data = {"llid": llid}
return await self.__post(url, data)
|
[
"async",
"def",
"fetch_logical_load",
"(",
"self",
",",
"llid",
")",
":",
"url",
"=",
"\"https://production.plum.technology/v2/getLogicalLoad\"",
"data",
"=",
"{",
"\"llid\"",
":",
"llid",
"}",
"return",
"await",
"self",
".",
"__post",
"(",
"url",
",",
"data",
")"
] |
Lookup details for a given logical load
|
[
"Lookup",
"details",
"for",
"a",
"given",
"logical",
"load"
] |
181b3dc26cfe77830565f6f844af5ed0728dccd5
|
https://github.com/heathbar/plum-lightpad-python/blob/181b3dc26cfe77830565f6f844af5ed0728dccd5/plumlightpad/plumcloud.py#L60-L64
|
243,242
|
heathbar/plum-lightpad-python
|
plumlightpad/plumcloud.py
|
PlumCloud.fetch_lightpad
|
async def fetch_lightpad(self, lpid):
"""Lookup details for a given lightpad"""
url = "https://production.plum.technology/v2/getLightpad"
data = {"lpid": lpid}
return await self.__post(url, data)
|
python
|
async def fetch_lightpad(self, lpid):
"""Lookup details for a given lightpad"""
url = "https://production.plum.technology/v2/getLightpad"
data = {"lpid": lpid}
return await self.__post(url, data)
|
[
"async",
"def",
"fetch_lightpad",
"(",
"self",
",",
"lpid",
")",
":",
"url",
"=",
"\"https://production.plum.technology/v2/getLightpad\"",
"data",
"=",
"{",
"\"lpid\"",
":",
"lpid",
"}",
"return",
"await",
"self",
".",
"__post",
"(",
"url",
",",
"data",
")"
] |
Lookup details for a given lightpad
|
[
"Lookup",
"details",
"for",
"a",
"given",
"lightpad"
] |
181b3dc26cfe77830565f6f844af5ed0728dccd5
|
https://github.com/heathbar/plum-lightpad-python/blob/181b3dc26cfe77830565f6f844af5ed0728dccd5/plumlightpad/plumcloud.py#L66-L70
|
243,243
|
xaptum/xtt-python
|
xtt/_utils.py
|
to_bytes
|
def to_bytes(s, encoding="utf-8"):
"""
Converts the string to a bytes type, if not already.
:s: the string to convert to bytes
:returns: `str` on Python2 and `bytes` on Python3.
"""
if isinstance(s, six.binary_type):
return s
else:
return six.text_type(s).encode(encoding)
|
python
|
def to_bytes(s, encoding="utf-8"):
"""
Converts the string to a bytes type, if not already.
:s: the string to convert to bytes
:returns: `str` on Python2 and `bytes` on Python3.
"""
if isinstance(s, six.binary_type):
return s
else:
return six.text_type(s).encode(encoding)
|
[
"def",
"to_bytes",
"(",
"s",
",",
"encoding",
"=",
"\"utf-8\"",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"six",
".",
"binary_type",
")",
":",
"return",
"s",
"else",
":",
"return",
"six",
".",
"text_type",
"(",
"s",
")",
".",
"encode",
"(",
"encoding",
")"
] |
Converts the string to a bytes type, if not already.
:s: the string to convert to bytes
:returns: `str` on Python2 and `bytes` on Python3.
|
[
"Converts",
"the",
"string",
"to",
"a",
"bytes",
"type",
"if",
"not",
"already",
"."
] |
23ee469488d710d730314bec1136c4dd7ac2cd5c
|
https://github.com/xaptum/xtt-python/blob/23ee469488d710d730314bec1136c4dd7ac2cd5c/xtt/_utils.py#L21-L31
|
243,244
|
xaptum/xtt-python
|
xtt/_utils.py
|
to_text
|
def to_text(s, encoding="utf-8"):
"""
Converts the bytes to a text type, if not already.
:s: the bytes to convert to text
:returns: `unicode` on Python2 and `str` on Python3.
"""
if isinstance(s, six.text_type):
return s
else:
return six.binary_type(s).decode(encoding)
|
python
|
def to_text(s, encoding="utf-8"):
"""
Converts the bytes to a text type, if not already.
:s: the bytes to convert to text
:returns: `unicode` on Python2 and `str` on Python3.
"""
if isinstance(s, six.text_type):
return s
else:
return six.binary_type(s).decode(encoding)
|
[
"def",
"to_text",
"(",
"s",
",",
"encoding",
"=",
"\"utf-8\"",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"six",
".",
"text_type",
")",
":",
"return",
"s",
"else",
":",
"return",
"six",
".",
"binary_type",
"(",
"s",
")",
".",
"decode",
"(",
"encoding",
")"
] |
Converts the bytes to a text type, if not already.
:s: the bytes to convert to text
:returns: `unicode` on Python2 and `str` on Python3.
|
[
"Converts",
"the",
"bytes",
"to",
"a",
"text",
"type",
"if",
"not",
"already",
"."
] |
23ee469488d710d730314bec1136c4dd7ac2cd5c
|
https://github.com/xaptum/xtt-python/blob/23ee469488d710d730314bec1136c4dd7ac2cd5c/xtt/_utils.py#L33-L43
|
243,245
|
xaptum/xtt-python
|
xtt/_utils.py
|
_check_len
|
def _check_len(a, b):
"""
Raises an exception if the two values do not have the same
length. This is useful for validating preconditions.
:a: the first value
:b: the second value
:raises ValueError: if the sizes do not match
"""
if len(a) != len(b):
msg = "Length must be {}. Got {}".format(len(a), len(b))
raise ValueError(msg)
|
python
|
def _check_len(a, b):
"""
Raises an exception if the two values do not have the same
length. This is useful for validating preconditions.
:a: the first value
:b: the second value
:raises ValueError: if the sizes do not match
"""
if len(a) != len(b):
msg = "Length must be {}. Got {}".format(len(a), len(b))
raise ValueError(msg)
|
[
"def",
"_check_len",
"(",
"a",
",",
"b",
")",
":",
"if",
"len",
"(",
"a",
")",
"!=",
"len",
"(",
"b",
")",
":",
"msg",
"=",
"\"Length must be {}. Got {}\"",
".",
"format",
"(",
"len",
"(",
"a",
")",
",",
"len",
"(",
"b",
")",
")",
"raise",
"ValueError",
"(",
"msg",
")"
] |
Raises an exception if the two values do not have the same
length. This is useful for validating preconditions.
:a: the first value
:b: the second value
:raises ValueError: if the sizes do not match
|
[
"Raises",
"an",
"exception",
"if",
"the",
"two",
"values",
"do",
"not",
"have",
"the",
"same",
"length",
".",
"This",
"is",
"useful",
"for",
"validating",
"preconditions",
"."
] |
23ee469488d710d730314bec1136c4dd7ac2cd5c
|
https://github.com/xaptum/xtt-python/blob/23ee469488d710d730314bec1136c4dd7ac2cd5c/xtt/_utils.py#L45-L56
|
243,246
|
radjkarl/fancyTools
|
fancytools/math/similarity1DdiffShapedArrays.py
|
similarity1DdiffShapedArrays
|
def similarity1DdiffShapedArrays(arr1, arr2, normalize=False):
"""
compare two strictly monotonous increasing 1d arrays
of same or different size
return a similarity index-> 0=identical
"""
# assign longer and shorter here, because jit cannot do it
if len(arr1) < len(arr2):
arr1, arr2 = arr2, arr1
if not len(arr2):
out = sum(arr1)
else:
out = _calc(arr1, arr2)
if normalize:
if not len(arr2):
mn = arr1[0]
mx = arr1[-1]
else:
mn = min(arr1[0], arr2[0])
mx = max(arr1[-1], arr2[-1])
out = out/ (mx - mn)
return out
|
python
|
def similarity1DdiffShapedArrays(arr1, arr2, normalize=False):
"""
compare two strictly monotonous increasing 1d arrays
of same or different size
return a similarity index-> 0=identical
"""
# assign longer and shorter here, because jit cannot do it
if len(arr1) < len(arr2):
arr1, arr2 = arr2, arr1
if not len(arr2):
out = sum(arr1)
else:
out = _calc(arr1, arr2)
if normalize:
if not len(arr2):
mn = arr1[0]
mx = arr1[-1]
else:
mn = min(arr1[0], arr2[0])
mx = max(arr1[-1], arr2[-1])
out = out/ (mx - mn)
return out
|
[
"def",
"similarity1DdiffShapedArrays",
"(",
"arr1",
",",
"arr2",
",",
"normalize",
"=",
"False",
")",
":",
"# assign longer and shorter here, because jit cannot do it",
"if",
"len",
"(",
"arr1",
")",
"<",
"len",
"(",
"arr2",
")",
":",
"arr1",
",",
"arr2",
"=",
"arr2",
",",
"arr1",
"if",
"not",
"len",
"(",
"arr2",
")",
":",
"out",
"=",
"sum",
"(",
"arr1",
")",
"else",
":",
"out",
"=",
"_calc",
"(",
"arr1",
",",
"arr2",
")",
"if",
"normalize",
":",
"if",
"not",
"len",
"(",
"arr2",
")",
":",
"mn",
"=",
"arr1",
"[",
"0",
"]",
"mx",
"=",
"arr1",
"[",
"-",
"1",
"]",
"else",
":",
"mn",
"=",
"min",
"(",
"arr1",
"[",
"0",
"]",
",",
"arr2",
"[",
"0",
"]",
")",
"mx",
"=",
"max",
"(",
"arr1",
"[",
"-",
"1",
"]",
",",
"arr2",
"[",
"-",
"1",
"]",
")",
"out",
"=",
"out",
"/",
"(",
"mx",
"-",
"mn",
")",
"return",
"out"
] |
compare two strictly monotonous increasing 1d arrays
of same or different size
return a similarity index-> 0=identical
|
[
"compare",
"two",
"strictly",
"monotonous",
"increasing",
"1d",
"arrays",
"of",
"same",
"or",
"different",
"size",
"return",
"a",
"similarity",
"index",
"-",
">",
"0",
"=",
"identical"
] |
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
|
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/math/similarity1DdiffShapedArrays.py#L7-L30
|
243,247
|
nefarioustim/parker
|
parker/consumestore.py
|
get_instance
|
def get_instance(
model, method="file",
img_dir=None, data_dir=None,
bucket=None
):
"""Return an instance of ConsumeStore."""
global _instances
if not isinstance(model, ConsumeModel):
raise TypeError(
"get_instance() expects a parker.ConsumeModel derivative."
)
if method == "file":
my_store = store.get_filestore_instance(
img_dir=img_dir,
data_dir=data_dir
)
elif method == "s3":
my_store = store.get_s3store_instance(
bucket=bucket
)
else:
raise ValueError("Unexpected method value, '%s'." % method)
key = "%s:%s" % (repr(model), repr(my_store))
try:
instance = _instances[key]
except KeyError:
instance = ConsumeStore(model, my_store)
_instances[key] = instance
return instance
|
python
|
def get_instance(
model, method="file",
img_dir=None, data_dir=None,
bucket=None
):
"""Return an instance of ConsumeStore."""
global _instances
if not isinstance(model, ConsumeModel):
raise TypeError(
"get_instance() expects a parker.ConsumeModel derivative."
)
if method == "file":
my_store = store.get_filestore_instance(
img_dir=img_dir,
data_dir=data_dir
)
elif method == "s3":
my_store = store.get_s3store_instance(
bucket=bucket
)
else:
raise ValueError("Unexpected method value, '%s'." % method)
key = "%s:%s" % (repr(model), repr(my_store))
try:
instance = _instances[key]
except KeyError:
instance = ConsumeStore(model, my_store)
_instances[key] = instance
return instance
|
[
"def",
"get_instance",
"(",
"model",
",",
"method",
"=",
"\"file\"",
",",
"img_dir",
"=",
"None",
",",
"data_dir",
"=",
"None",
",",
"bucket",
"=",
"None",
")",
":",
"global",
"_instances",
"if",
"not",
"isinstance",
"(",
"model",
",",
"ConsumeModel",
")",
":",
"raise",
"TypeError",
"(",
"\"get_instance() expects a parker.ConsumeModel derivative.\"",
")",
"if",
"method",
"==",
"\"file\"",
":",
"my_store",
"=",
"store",
".",
"get_filestore_instance",
"(",
"img_dir",
"=",
"img_dir",
",",
"data_dir",
"=",
"data_dir",
")",
"elif",
"method",
"==",
"\"s3\"",
":",
"my_store",
"=",
"store",
".",
"get_s3store_instance",
"(",
"bucket",
"=",
"bucket",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unexpected method value, '%s'.\"",
"%",
"method",
")",
"key",
"=",
"\"%s:%s\"",
"%",
"(",
"repr",
"(",
"model",
")",
",",
"repr",
"(",
"my_store",
")",
")",
"try",
":",
"instance",
"=",
"_instances",
"[",
"key",
"]",
"except",
"KeyError",
":",
"instance",
"=",
"ConsumeStore",
"(",
"model",
",",
"my_store",
")",
"_instances",
"[",
"key",
"]",
"=",
"instance",
"return",
"instance"
] |
Return an instance of ConsumeStore.
|
[
"Return",
"an",
"instance",
"of",
"ConsumeStore",
"."
] |
ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6
|
https://github.com/nefarioustim/parker/blob/ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6/parker/consumestore.py#L13-L45
|
243,248
|
nefarioustim/parker
|
parker/consumestore.py
|
ConsumeStore.save_media
|
def save_media(self):
"""Store any media within model.media_list."""
chunk_path = fileops.get_chunk_path_from_string(
self.model.unique_field
)
for i, mediafile in enumerate(self.model.media_list):
filename = os.path.join(
self._get_prefix(),
chunk_path,
"%s_%d" % (self.model.unique_field, i)
)
self.store.store_media(filename, mediafile)
|
python
|
def save_media(self):
"""Store any media within model.media_list."""
chunk_path = fileops.get_chunk_path_from_string(
self.model.unique_field
)
for i, mediafile in enumerate(self.model.media_list):
filename = os.path.join(
self._get_prefix(),
chunk_path,
"%s_%d" % (self.model.unique_field, i)
)
self.store.store_media(filename, mediafile)
|
[
"def",
"save_media",
"(",
"self",
")",
":",
"chunk_path",
"=",
"fileops",
".",
"get_chunk_path_from_string",
"(",
"self",
".",
"model",
".",
"unique_field",
")",
"for",
"i",
",",
"mediafile",
"in",
"enumerate",
"(",
"self",
".",
"model",
".",
"media_list",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_get_prefix",
"(",
")",
",",
"chunk_path",
",",
"\"%s_%d\"",
"%",
"(",
"self",
".",
"model",
".",
"unique_field",
",",
"i",
")",
")",
"self",
".",
"store",
".",
"store_media",
"(",
"filename",
",",
"mediafile",
")"
] |
Store any media within model.media_list.
|
[
"Store",
"any",
"media",
"within",
"model",
".",
"media_list",
"."
] |
ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6
|
https://github.com/nefarioustim/parker/blob/ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6/parker/consumestore.py#L57-L69
|
243,249
|
nefarioustim/parker
|
parker/consumestore.py
|
ConsumeStore.save_data
|
def save_data(self):
"""Store data as a JSON dump."""
filename = os.path.join(
self._get_prefix(),
self.model.site
)
self.store.store_json(
filename,
self.model.get_dict()
)
|
python
|
def save_data(self):
"""Store data as a JSON dump."""
filename = os.path.join(
self._get_prefix(),
self.model.site
)
self.store.store_json(
filename,
self.model.get_dict()
)
|
[
"def",
"save_data",
"(",
"self",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_get_prefix",
"(",
")",
",",
"self",
".",
"model",
".",
"site",
")",
"self",
".",
"store",
".",
"store_json",
"(",
"filename",
",",
"self",
".",
"model",
".",
"get_dict",
"(",
")",
")"
] |
Store data as a JSON dump.
|
[
"Store",
"data",
"as",
"a",
"JSON",
"dump",
"."
] |
ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6
|
https://github.com/nefarioustim/parker/blob/ccc1de1ac6bfb5e0a8cfa4fdebb2f38f2ee027d6/parker/consumestore.py#L71-L80
|
243,250
|
rosenbrockc/acorn
|
acorn/ipython.py
|
findloop
|
def findloop(m):
"""Determines if the specified member of `_ast` contains any for or while loops
in its body definition.
"""
from _ast import For, While, FunctionDef, ClassDef, ListComp
from _ast import DictComp
if isinstance(m, (FunctionDef, ClassDef)):
return False
elif isinstance(m, (For, While, ListComp, DictComp)):
return True
elif hasattr(m, "value"):
return findloop(m.value)
elif hasattr(m, "__iter__"):
for sm in m:
present = findloop(sm)
if present:
break
else:
present = False
return present
elif hasattr(m, "body") or hasattr(m, "orelse"):
body = hasattr(m, "body") and findloop(m.body)
orelse = hasattr(m, "orelse") and findloop(m.orelse)
return body or orelse
else:
return False
|
python
|
def findloop(m):
"""Determines if the specified member of `_ast` contains any for or while loops
in its body definition.
"""
from _ast import For, While, FunctionDef, ClassDef, ListComp
from _ast import DictComp
if isinstance(m, (FunctionDef, ClassDef)):
return False
elif isinstance(m, (For, While, ListComp, DictComp)):
return True
elif hasattr(m, "value"):
return findloop(m.value)
elif hasattr(m, "__iter__"):
for sm in m:
present = findloop(sm)
if present:
break
else:
present = False
return present
elif hasattr(m, "body") or hasattr(m, "orelse"):
body = hasattr(m, "body") and findloop(m.body)
orelse = hasattr(m, "orelse") and findloop(m.orelse)
return body or orelse
else:
return False
|
[
"def",
"findloop",
"(",
"m",
")",
":",
"from",
"_ast",
"import",
"For",
",",
"While",
",",
"FunctionDef",
",",
"ClassDef",
",",
"ListComp",
"from",
"_ast",
"import",
"DictComp",
"if",
"isinstance",
"(",
"m",
",",
"(",
"FunctionDef",
",",
"ClassDef",
")",
")",
":",
"return",
"False",
"elif",
"isinstance",
"(",
"m",
",",
"(",
"For",
",",
"While",
",",
"ListComp",
",",
"DictComp",
")",
")",
":",
"return",
"True",
"elif",
"hasattr",
"(",
"m",
",",
"\"value\"",
")",
":",
"return",
"findloop",
"(",
"m",
".",
"value",
")",
"elif",
"hasattr",
"(",
"m",
",",
"\"__iter__\"",
")",
":",
"for",
"sm",
"in",
"m",
":",
"present",
"=",
"findloop",
"(",
"sm",
")",
"if",
"present",
":",
"break",
"else",
":",
"present",
"=",
"False",
"return",
"present",
"elif",
"hasattr",
"(",
"m",
",",
"\"body\"",
")",
"or",
"hasattr",
"(",
"m",
",",
"\"orelse\"",
")",
":",
"body",
"=",
"hasattr",
"(",
"m",
",",
"\"body\"",
")",
"and",
"findloop",
"(",
"m",
".",
"body",
")",
"orelse",
"=",
"hasattr",
"(",
"m",
",",
"\"orelse\"",
")",
"and",
"findloop",
"(",
"m",
".",
"orelse",
")",
"return",
"body",
"or",
"orelse",
"else",
":",
"return",
"False"
] |
Determines if the specified member of `_ast` contains any for or while loops
in its body definition.
|
[
"Determines",
"if",
"the",
"specified",
"member",
"of",
"_ast",
"contains",
"any",
"for",
"or",
"while",
"loops",
"in",
"its",
"body",
"definition",
"."
] |
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
|
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/ipython.py#L453-L478
|
243,251
|
rosenbrockc/acorn
|
acorn/ipython.py
|
record_markdown
|
def record_markdown(text, cellid):
    """Records the specified markdown text to the acorn database.

    Args:
        text (str): the *raw* markdown text entered into the cell in the ipython
          notebook.
        cellid (int): ipython cell number that the markdown was entered in.
    """
    from acorn.logging.database import record
    from time import time
    ekey = "nb-{}".format(cellid)

    global _cellid_map
    if cellid not in _cellid_map:
        from acorn.logging.database import active_db
        from difflib import SequenceMatcher
        from acorn.logging.diff import cascade
        taskdb = active_db()
        if ekey not in taskdb.entities:
            #Compute a new ekey if possible with the most similar markdown cell
            #in the database.
            possible = [k for k in taskdb.entities if k[0:3] == "nb-"]
            maxkey, maxvalue = None, 0.
            for pkey in possible:
                sequence = [e["c"] for e in taskdb.entities[pkey]]
                state = ''.join(cascade(sequence))
                matcher = SequenceMatcher(a=state, b=text)
                ratio = matcher.quick_ratio()
                if ratio > maxvalue and ratio > 0.5:
                    maxkey, maxvalue = pkey, ratio

            #We expect the similarity to be at least 0.5; otherwise we decide
            #that it is a new cell.
            if maxkey is not None:
                #BUGFIX: previously this assigned `pkey`, the *last* key the
                #loop iterated over, rather than `maxkey`, the best match
                #found above.
                ekey = maxkey

        _cellid_map[cellid] = ekey

    ekey = _cellid_map[cellid]
    entry = {
        "m": "md",
        "a": None,
        "s": time(),
        "r": None,
        "c": text,
    }
    record(ekey, entry, diff=True)
|
python
|
def record_markdown(text, cellid):
    """Records the specified markdown text to the acorn database.

    Args:
        text (str): the *raw* markdown text entered into the cell in the ipython
          notebook.
        cellid (int): ipython cell number that the markdown was entered in.
    """
    from acorn.logging.database import record
    from time import time
    ekey = "nb-{}".format(cellid)

    global _cellid_map
    if cellid not in _cellid_map:
        from acorn.logging.database import active_db
        from difflib import SequenceMatcher
        from acorn.logging.diff import cascade
        taskdb = active_db()
        if ekey not in taskdb.entities:
            #Compute a new ekey if possible with the most similar markdown cell
            #in the database.
            possible = [k for k in taskdb.entities if k[0:3] == "nb-"]
            maxkey, maxvalue = None, 0.
            for pkey in possible:
                sequence = [e["c"] for e in taskdb.entities[pkey]]
                state = ''.join(cascade(sequence))
                matcher = SequenceMatcher(a=state, b=text)
                ratio = matcher.quick_ratio()
                if ratio > maxvalue and ratio > 0.5:
                    maxkey, maxvalue = pkey, ratio

            #We expect the similarity to be at least 0.5; otherwise we decide
            #that it is a new cell.
            if maxkey is not None:
                #BUGFIX: previously this assigned `pkey`, the *last* key the
                #loop iterated over, rather than `maxkey`, the best match
                #found above.
                ekey = maxkey

        _cellid_map[cellid] = ekey

    ekey = _cellid_map[cellid]
    entry = {
        "m": "md",
        "a": None,
        "s": time(),
        "r": None,
        "c": text,
    }
    record(ekey, entry, diff=True)
|
[
"def",
"record_markdown",
"(",
"text",
",",
"cellid",
")",
":",
"from",
"acorn",
".",
"logging",
".",
"database",
"import",
"record",
"from",
"time",
"import",
"time",
"ekey",
"=",
"\"nb-{}\"",
".",
"format",
"(",
"cellid",
")",
"global",
"_cellid_map",
"if",
"cellid",
"not",
"in",
"_cellid_map",
":",
"from",
"acorn",
".",
"logging",
".",
"database",
"import",
"active_db",
"from",
"difflib",
"import",
"SequenceMatcher",
"from",
"acorn",
".",
"logging",
".",
"diff",
"import",
"cascade",
"taskdb",
"=",
"active_db",
"(",
")",
"if",
"ekey",
"not",
"in",
"taskdb",
".",
"entities",
":",
"#Compute a new ekey if possible with the most similar markdown cell",
"#in the database.",
"possible",
"=",
"[",
"k",
"for",
"k",
"in",
"taskdb",
".",
"entities",
"if",
"k",
"[",
"0",
":",
"3",
"]",
"==",
"\"nb-\"",
"]",
"maxkey",
",",
"maxvalue",
"=",
"None",
",",
"0.",
"for",
"pkey",
"in",
"possible",
":",
"sequence",
"=",
"[",
"e",
"[",
"\"c\"",
"]",
"for",
"e",
"in",
"taskdb",
".",
"entities",
"[",
"pkey",
"]",
"]",
"state",
"=",
"''",
".",
"join",
"(",
"cascade",
"(",
"sequence",
")",
")",
"matcher",
"=",
"SequenceMatcher",
"(",
"a",
"=",
"state",
",",
"b",
"=",
"text",
")",
"ratio",
"=",
"matcher",
".",
"quick_ratio",
"(",
")",
"if",
"ratio",
">",
"maxvalue",
"and",
"ratio",
">",
"0.5",
":",
"maxkey",
",",
"maxvalue",
"=",
"pkey",
",",
"ratio",
"#We expect the similarity to be at least 0.5; otherwise we decide",
"#that it is a new cell.",
"if",
"maxkey",
"is",
"not",
"None",
":",
"ekey",
"=",
"pkey",
"_cellid_map",
"[",
"cellid",
"]",
"=",
"ekey",
"ekey",
"=",
"_cellid_map",
"[",
"cellid",
"]",
"entry",
"=",
"{",
"\"m\"",
":",
"\"md\"",
",",
"\"a\"",
":",
"None",
",",
"\"s\"",
":",
"time",
"(",
")",
",",
"\"r\"",
":",
"None",
",",
"\"c\"",
":",
"text",
",",
"}",
"record",
"(",
"ekey",
",",
"entry",
",",
"diff",
"=",
"True",
")"
] |
Records the specified markdown text to the acorn database.
Args:
text (str): the *raw* markdown text entered into the cell in the ipython
notebook.
|
[
"Records",
"the",
"specified",
"markdown",
"text",
"to",
"the",
"acorn",
"database",
"."
] |
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
|
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/ipython.py#L488-L534
|
243,252
|
rosenbrockc/acorn
|
acorn/ipython.py
|
load_ipython_extension
|
def load_ipython_extension(ip):
    """Loads the interacting decorator that ships with `acorn` into the ipython
    interactive shell.

    Args:
        ip (IPython.core.interactiveshell.InteractiveShell): ipython shell
          instance for interacting with the shell variables.
    """
    decorator = InteractiveDecorator(ip)
    ip.events.register('post_run_cell', decorator.post_run_cell)

    #The built-in "pre-execute" and "pre-run" events fire *before* the cell's
    #input has been stored to history, so the code about to execute is not yet
    #available at that point. Instead, swap in our own :class:`HistoryManager`
    #whose overridden :meth:`store_inputs` lets us handle loop detection.
    ip.history_manager = AcornHistoryManager(ip.history_manager, decorator)
|
python
|
def load_ipython_extension(ip):
    """Loads the interacting decorator that ships with `acorn` into the ipython
    interactive shell.

    Args:
        ip (IPython.core.interactiveshell.InteractiveShell): ipython shell
          instance for interacting with the shell variables.
    """
    decorator = InteractiveDecorator(ip)
    ip.events.register('post_run_cell', decorator.post_run_cell)

    #The built-in "pre-execute" and "pre-run" events fire *before* the cell's
    #input has been stored to history, so the code about to execute is not yet
    #available at that point. Instead, swap in our own :class:`HistoryManager`
    #whose overridden :meth:`store_inputs` lets us handle loop detection.
    ip.history_manager = AcornHistoryManager(ip.history_manager, decorator)
|
[
"def",
"load_ipython_extension",
"(",
"ip",
")",
":",
"decor",
"=",
"InteractiveDecorator",
"(",
"ip",
")",
"ip",
".",
"events",
".",
"register",
"(",
"'post_run_cell'",
",",
"decor",
".",
"post_run_cell",
")",
"#Unfortunately, the built-in \"pre-execute\" and \"pre-run\" methods are",
"#triggered *before* the input from the cell has been stored to",
"#history. Thus, we don't have access to the actual code that is about to be",
"#executed. Instead, we use our own :class:`HistoryManager` that overrides",
"#the :meth:`store_inputs` so we can handle the loop detection.",
"newhist",
"=",
"AcornHistoryManager",
"(",
"ip",
".",
"history_manager",
",",
"decor",
")",
"ip",
".",
"history_manager",
"=",
"newhist"
] |
Loads the interacting decorator that ships with `acorn` into the ipython
interactive shell.
Args:
ip (IPython.core.interactiveshell.InteractiveShell): ipython shell instance
for interacting with the shell variables.
|
[
"Loads",
"the",
"interacting",
"decorator",
"that",
"ships",
"with",
"acorn",
"into",
"the",
"ipython",
"interactive",
"shell",
"."
] |
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
|
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/ipython.py#L536-L553
|
243,253
|
rosenbrockc/acorn
|
acorn/ipython.py
|
InteractiveDecorator._get_decoratables
|
def _get_decoratables(self, atype):
    """Returns a list of the objects that need to be decorated in the
    current user namespace based on their type.

    Args:
        atype (str): one of the values in :attr:`atypes`. Specifies the type of
          object to search.
    """
    skipmsg = "Skipping {}; not decoratable or already decorated."
    found = []
    for varname in self.shell.run_line_magic("who_ls", atype):
        varobj = self.shell.user_ns.get(varname, None)
        if varobj is None:  # Nothing useful can be done.
            continue

        decorate = False
        if atype in ("classobj", "type"):
            #Only classes *without* a __file__ attribute are relevant here;
            #all other classes should be decorated by the full acorn
            #machinery.
            decorate = (not hasattr(varobj, "__acorn__")
                        and hasattr(varobj, "__module__")
                        and varobj.__module__ == "__main__"
                        and not hasattr(varobj, "__file__"))
            if not decorate:
                msg.std(skipmsg.format(varname), 3)
        elif atype in ("function", "staticmethod"):
            # %who_ls will only return functions from the *user* namespace,
            # so there isn't a lot to worry about here.
            if atype == "staticmethod" and hasattr(varobj, "__func__"):
                func = varobj.__func__
            elif atype == "function":
                func = varobj
            else:
                func = None
            decorate = (func is not None
                        and not hasattr(func, "__acorn__")
                        and hasattr(func, "__code__")
                        and "<ipython-input" in func.__code__.co_filename)
            if not decorate:
                msg.std(skipmsg.format(varname), 3)

        if decorate:
            self.entities[atype][varname] = varobj
            found.append((varname, varobj))
    return found
|
python
|
def _get_decoratables(self, atype):
    """Returns a list of the objects that need to be decorated in the
    current user namespace based on their type.

    Args:
        atype (str): one of the values in :attr:`atypes`. Specifies the type of
          object to search.
    """
    skipmsg = "Skipping {}; not decoratable or already decorated."
    found = []
    for varname in self.shell.run_line_magic("who_ls", atype):
        varobj = self.shell.user_ns.get(varname, None)
        if varobj is None:  # Nothing useful can be done.
            continue

        decorate = False
        if atype in ("classobj", "type"):
            #Only classes *without* a __file__ attribute are relevant here;
            #all other classes should be decorated by the full acorn
            #machinery.
            decorate = (not hasattr(varobj, "__acorn__")
                        and hasattr(varobj, "__module__")
                        and varobj.__module__ == "__main__"
                        and not hasattr(varobj, "__file__"))
            if not decorate:
                msg.std(skipmsg.format(varname), 3)
        elif atype in ("function", "staticmethod"):
            # %who_ls will only return functions from the *user* namespace,
            # so there isn't a lot to worry about here.
            if atype == "staticmethod" and hasattr(varobj, "__func__"):
                func = varobj.__func__
            elif atype == "function":
                func = varobj
            else:
                func = None
            decorate = (func is not None
                        and not hasattr(func, "__acorn__")
                        and hasattr(func, "__code__")
                        and "<ipython-input" in func.__code__.co_filename)
            if not decorate:
                msg.std(skipmsg.format(varname), 3)

        if decorate:
            self.entities[atype][varname] = varobj
            found.append((varname, varobj))
    return found
|
[
"def",
"_get_decoratables",
"(",
"self",
",",
"atype",
")",
":",
"result",
"=",
"[",
"]",
"defmsg",
"=",
"\"Skipping {}; not decoratable or already decorated.\"",
"for",
"varname",
"in",
"self",
".",
"shell",
".",
"run_line_magic",
"(",
"\"who_ls\"",
",",
"atype",
")",
":",
"varobj",
"=",
"self",
".",
"shell",
".",
"user_ns",
".",
"get",
"(",
"varname",
",",
"None",
")",
"decorate",
"=",
"False",
"if",
"varobj",
"is",
"None",
":",
"# Nothing useful can be done.",
"continue",
"if",
"atype",
"in",
"[",
"\"classobj\"",
",",
"\"type\"",
"]",
":",
"#Classes are only relevant if they have no __file__",
"#attribute; all other classes should be decorated by the",
"#full acorn machinery.",
"if",
"(",
"not",
"hasattr",
"(",
"varobj",
",",
"\"__acorn__\"",
")",
"and",
"hasattr",
"(",
"varobj",
",",
"\"__module__\"",
")",
"and",
"varobj",
".",
"__module__",
"==",
"\"__main__\"",
"and",
"not",
"hasattr",
"(",
"varobj",
",",
"\"__file__\"",
")",
")",
":",
"decorate",
"=",
"True",
"else",
":",
"msg",
".",
"std",
"(",
"defmsg",
".",
"format",
"(",
"varname",
")",
",",
"3",
")",
"elif",
"atype",
"in",
"[",
"\"function\"",
",",
"\"staticmethod\"",
"]",
":",
"# %who_ls will only return functions from the *user*",
"# namespace, so we don't have a lot to worry about here.",
"func",
"=",
"None",
"if",
"atype",
"==",
"\"staticmethod\"",
"and",
"hasattr",
"(",
"varobj",
",",
"\"__func__\"",
")",
":",
"func",
"=",
"varobj",
".",
"__func__",
"elif",
"atype",
"==",
"\"function\"",
":",
"func",
"=",
"varobj",
"if",
"(",
"func",
"is",
"not",
"None",
"and",
"not",
"hasattr",
"(",
"func",
",",
"\"__acorn__\"",
")",
"and",
"hasattr",
"(",
"func",
",",
"\"__code__\"",
")",
"and",
"\"<ipython-input\"",
"in",
"func",
".",
"__code__",
".",
"co_filename",
")",
":",
"decorate",
"=",
"True",
"else",
":",
"msg",
".",
"std",
"(",
"defmsg",
".",
"format",
"(",
"varname",
")",
",",
"3",
")",
"if",
"decorate",
":",
"self",
".",
"entities",
"[",
"atype",
"]",
"[",
"varname",
"]",
"=",
"varobj",
"result",
".",
"append",
"(",
"(",
"varname",
",",
"varobj",
")",
")",
"return",
"result"
] |
Returns a list of the objects that need to be decorated in the
current user namespace based on their type.
Args:
atype (str): one of the values in :attr:`atypes`. Specifies the type of
object to search.
|
[
"Returns",
"a",
"list",
"of",
"the",
"objects",
"that",
"need",
"to",
"be",
"decorated",
"in",
"the",
"current",
"user",
"namespace",
"based",
"on",
"their",
"type",
"."
] |
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
|
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/ipython.py#L149-L199
|
243,254
|
rosenbrockc/acorn
|
acorn/ipython.py
|
InteractiveDecorator._logdef
|
def _logdef(self, n, o, otype):
    """Logs the definition of the object that was just auto-decorated inside
    the `ipython` notebook.

    Args:
        n (str): name of the object in the user namespace.
        o: the object itself (function or class).
        otype (str): object category (e.g. "classes" or "functions"); any
          other value simply skips the source-code extraction below.
    """
    import re
    #BUGFIX: initialize up front; previously `cellno` was unbound (NameError)
    #whenever `otype` matched neither branch below (e.g. "methods").
    cellno = None
    try:
        #The latest input cell will be the one that this got executed
        #from. TODO: actually, if acorn got imported after the fact, then
        #the import would have caused all the undecorated functions to be
        #decorated as soon as acorn imported. I suppose we just won't have
        #any code for that case.
        if otype == "classes":
            cellno = max([int(k[2:]) for k in self.shell.user_ns.keys()
                          if re.match(r"_i\d+", k)])
        elif otype == "functions":
            cellno = int(o.__code__.co_filename.strip("<>").split('-')[2])
    except:
        #This must not have been an ipython notebook declaration, so we
        #don't store the code.
        cellno = None

    code = ""
    if cellno is not None:
        cellstr = "_i{0:d}".format(cellno)
        if cellstr in self.shell.user_ns:
            cellcode = self.shell.user_ns[cellstr]
            import ast
            astm = ast.parse(cellcode)
            ab = astm.body
            #Map each *named* top-level node (def/class) to its line span;
            #the span ends where the next top-level statement begins.
            #BUGFIX: previously `ab[i].name` was read for *every* node, which
            #raised AttributeError when the cell also contained unnamed
            #statements (imports, expressions, assignments, ...).
            parts = {}
            for i, node in enumerate(ab):
                if not hasattr(node, "name"):
                    continue
                end = ab[i+1].lineno if i + 1 < len(ab) else None
                parts[node.name] = (node.lineno, end)
            if n in parts:
                celllines = cellcode.split('\n')
                start, end = parts[n]
                if end is not None:
                    code = celllines[start-1:end-1]
                else:
                    code = celllines[start-1:]

    #Now, we actually create the entry. Since the execution for function
    #definitions is almost instantaneous, we just log the pre and post
    #events at the same time.
    from time import time
    from acorn.logging.database import record
    entry = {
        "m": "def",
        "a": None,
        "s": time(),
        "r": None,
        "c": code,
    }

    from acorn import msg
    record("__main__.{}".format(n), entry, diff=True)
    msg.info(entry, 1)
|
python
|
def _logdef(self, n, o, otype):
    """Logs the definition of the object that was just auto-decorated inside
    the `ipython` notebook.

    Args:
        n (str): name of the object in the user namespace.
        o: the object itself (function or class).
        otype (str): object category (e.g. "classes" or "functions"); any
          other value simply skips the source-code extraction below.
    """
    import re
    #BUGFIX: initialize up front; previously `cellno` was unbound (NameError)
    #whenever `otype` matched neither branch below (e.g. "methods").
    cellno = None
    try:
        #The latest input cell will be the one that this got executed
        #from. TODO: actually, if acorn got imported after the fact, then
        #the import would have caused all the undecorated functions to be
        #decorated as soon as acorn imported. I suppose we just won't have
        #any code for that case.
        if otype == "classes":
            cellno = max([int(k[2:]) for k in self.shell.user_ns.keys()
                          if re.match(r"_i\d+", k)])
        elif otype == "functions":
            cellno = int(o.__code__.co_filename.strip("<>").split('-')[2])
    except:
        #This must not have been an ipython notebook declaration, so we
        #don't store the code.
        cellno = None

    code = ""
    if cellno is not None:
        cellstr = "_i{0:d}".format(cellno)
        if cellstr in self.shell.user_ns:
            cellcode = self.shell.user_ns[cellstr]
            import ast
            astm = ast.parse(cellcode)
            ab = astm.body
            #Map each *named* top-level node (def/class) to its line span;
            #the span ends where the next top-level statement begins.
            #BUGFIX: previously `ab[i].name` was read for *every* node, which
            #raised AttributeError when the cell also contained unnamed
            #statements (imports, expressions, assignments, ...).
            parts = {}
            for i, node in enumerate(ab):
                if not hasattr(node, "name"):
                    continue
                end = ab[i+1].lineno if i + 1 < len(ab) else None
                parts[node.name] = (node.lineno, end)
            if n in parts:
                celllines = cellcode.split('\n')
                start, end = parts[n]
                if end is not None:
                    code = celllines[start-1:end-1]
                else:
                    code = celllines[start-1:]

    #Now, we actually create the entry. Since the execution for function
    #definitions is almost instantaneous, we just log the pre and post
    #events at the same time.
    from time import time
    from acorn.logging.database import record
    entry = {
        "m": "def",
        "a": None,
        "s": time(),
        "r": None,
        "c": code,
    }

    from acorn import msg
    record("__main__.{}".format(n), entry, diff=True)
    msg.info(entry, 1)
|
[
"def",
"_logdef",
"(",
"self",
",",
"n",
",",
"o",
",",
"otype",
")",
":",
"import",
"re",
"try",
":",
"#The latest input cell will be the one that this got executed",
"#from. TODO: actually, if acorn got imported after the fact, then",
"#the import would have caused all the undecorated functions to be",
"#decorated as soon as acorn imported. I suppose we just won't have",
"#any code for that case.",
"if",
"otype",
"==",
"\"classes\"",
":",
"cellno",
"=",
"max",
"(",
"[",
"int",
"(",
"k",
"[",
"2",
":",
"]",
")",
"for",
"k",
"in",
"self",
".",
"shell",
".",
"user_ns",
".",
"keys",
"(",
")",
"if",
"re",
".",
"match",
"(",
"\"_i\\d+\"",
",",
"k",
")",
"]",
")",
"elif",
"otype",
"==",
"\"functions\"",
":",
"cellno",
"=",
"int",
"(",
"o",
".",
"__code__",
".",
"co_filename",
".",
"strip",
"(",
"\"<>\"",
")",
".",
"split",
"(",
"'-'",
")",
"[",
"2",
"]",
")",
"except",
":",
"#This must not have been an ipython notebook declaration, so we",
"#don't store the code.",
"cellno",
"=",
"None",
"pass",
"code",
"=",
"\"\"",
"if",
"cellno",
"is",
"not",
"None",
":",
"cellstr",
"=",
"\"_i{0:d}\"",
".",
"format",
"(",
"cellno",
")",
"if",
"cellstr",
"in",
"self",
".",
"shell",
".",
"user_ns",
":",
"cellcode",
"=",
"self",
".",
"shell",
".",
"user_ns",
"[",
"cellstr",
"]",
"import",
"ast",
"astm",
"=",
"ast",
".",
"parse",
"(",
"cellcode",
")",
"ab",
"=",
"astm",
".",
"body",
"parts",
"=",
"{",
"ab",
"[",
"i",
"]",
".",
"name",
":",
"(",
"ab",
"[",
"i",
"]",
".",
"lineno",
",",
"None",
"if",
"i",
"+",
"1",
">=",
"len",
"(",
"ab",
")",
"else",
"ab",
"[",
"i",
"+",
"1",
"]",
".",
"lineno",
")",
"for",
"i",
",",
"d",
"in",
"enumerate",
"(",
"ab",
")",
"}",
"if",
"n",
"in",
"parts",
":",
"celllines",
"=",
"cellcode",
".",
"split",
"(",
"'\\n'",
")",
"start",
",",
"end",
"=",
"parts",
"[",
"n",
"]",
"if",
"end",
"is",
"not",
"None",
":",
"code",
"=",
"celllines",
"[",
"start",
"-",
"1",
":",
"end",
"-",
"1",
"]",
"else",
":",
"code",
"=",
"celllines",
"[",
"start",
"-",
"1",
":",
"]",
"#Now, we actually create the entry. Since the execution for function",
"#definitions is almost instantaneous, we just log the pre and post",
"#events at the same time.",
"from",
"time",
"import",
"time",
"from",
"acorn",
".",
"logging",
".",
"database",
"import",
"record",
"entry",
"=",
"{",
"\"m\"",
":",
"\"def\"",
",",
"\"a\"",
":",
"None",
",",
"\"s\"",
":",
"time",
"(",
")",
",",
"\"r\"",
":",
"None",
",",
"\"c\"",
":",
"code",
",",
"}",
"from",
"acorn",
"import",
"msg",
"record",
"(",
"\"__main__.{}\"",
".",
"format",
"(",
"n",
")",
",",
"entry",
",",
"diff",
"=",
"True",
")",
"msg",
".",
"info",
"(",
"entry",
",",
"1",
")"
] |
Logs the definition of the object that was just auto-decorated inside
the `ipython` notebook.
|
[
"Logs",
"the",
"definition",
"of",
"the",
"object",
"that",
"was",
"just",
"auto",
"-",
"decorated",
"inside",
"the",
"ipython",
"notebook",
"."
] |
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
|
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/ipython.py#L201-L256
|
243,255
|
rosenbrockc/acorn
|
acorn/ipython.py
|
InteractiveDecorator._decorate
|
def _decorate(self, atype, n, o):
    """Decorates the specified object for automatic logging with acorn.

    Args:
        atype (str): one of the types specified in :attr:`atypes`.
        n (str): name of the object in the user namespace.
        o: object instance to decorate; no additional type checking is
          performed.
    """
    from acorn.logging.decoration import decorate_obj
    #Translate the %who_ls type name into acorn's object-category name.
    typemap = {"function": "functions",
               "classobj": "classes",
               "staticmethod": "methods",
               "type": "classes"}
    try:
        otype = typemap[atype]
        decorate_obj(self.shell.user_ns, n, o, otype)
        #Log this definition to the database as well; that lets a user track
        #the changes they make while prototyping function and class
        #definitions.
        self._logdef(n, o, otype)
        msg.okay("Auto-decorated {}: {}.".format(n, o))
    except:
        msg.err("Error auto-decorating {}: {}.".format(n, o))
        raise
|
python
|
def _decorate(self, atype, n, o):
    """Decorates the specified object for automatic logging with acorn.

    Args:
        atype (str): one of the types specified in :attr:`atypes`.
        n (str): name of the object in the user namespace.
        o: object instance to decorate; no additional type checking is
          performed.
    """
    from acorn.logging.decoration import decorate_obj
    #Translate the %who_ls type name into acorn's object-category name.
    typemap = {"function": "functions",
               "classobj": "classes",
               "staticmethod": "methods",
               "type": "classes"}
    try:
        otype = typemap[atype]
        decorate_obj(self.shell.user_ns, n, o, otype)
        #Log this definition to the database as well; that lets a user track
        #the changes they make while prototyping function and class
        #definitions.
        self._logdef(n, o, otype)
        msg.okay("Auto-decorated {}: {}.".format(n, o))
    except:
        msg.err("Error auto-decorating {}: {}.".format(n, o))
        raise
|
[
"def",
"_decorate",
"(",
"self",
",",
"atype",
",",
"n",
",",
"o",
")",
":",
"typemap",
"=",
"{",
"\"function\"",
":",
"\"functions\"",
",",
"\"classobj\"",
":",
"\"classes\"",
",",
"\"staticmethod\"",
":",
"\"methods\"",
",",
"\"type\"",
":",
"\"classes\"",
"}",
"from",
"acorn",
".",
"logging",
".",
"decoration",
"import",
"decorate_obj",
"try",
":",
"otype",
"=",
"typemap",
"[",
"atype",
"]",
"decorate_obj",
"(",
"self",
".",
"shell",
".",
"user_ns",
",",
"n",
",",
"o",
",",
"otype",
")",
"#Also create a log in the database for this execution; this allows a",
"#user to track the changes they make in prototyping function and",
"#class definitions.",
"self",
".",
"_logdef",
"(",
"n",
",",
"o",
",",
"otype",
")",
"msg",
".",
"okay",
"(",
"\"Auto-decorated {}: {}.\"",
".",
"format",
"(",
"n",
",",
"o",
")",
")",
"except",
":",
"msg",
".",
"err",
"(",
"\"Error auto-decorating {}: {}.\"",
".",
"format",
"(",
"n",
",",
"o",
")",
")",
"raise"
] |
Decorates the specified object for automatic logging with acorn.
Args:
atype (str): one of the types specified in :attr:`atypes`.
varobj: object instance to decorate; no additional type checking is
performed.
|
[
"Decorates",
"the",
"specified",
"object",
"for",
"automatic",
"logging",
"with",
"acorn",
"."
] |
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
|
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/ipython.py#L258-L281
|
243,256
|
rosenbrockc/acorn
|
acorn/ipython.py
|
InteractiveDecorator.post_run_cell
|
def post_run_cell(self):
    """Runs after the user-entered code in a cell has been executed. It
    detects any new, decoratable objects that haven't been decorated yet and
    then decorates them.
    """
    #Detect and decorate any new, decoratable objects that haven't been
    #decorated yet. (The unused `decorlist` accumulator that used to live
    #here was dead code and has been removed.)
    for atype in self.atypes:
        for n, o in self._get_decoratables(atype):
            self._decorate(atype, n, o)

    #Next, check whether we have an outstanding "loop intercept" that we
    #"wrapped" with respect to acorn by enabling streamlining.
    if self.pre is not None:
        #Re-enable the acorn logging systems so that it gets back to normal.
        from acorn.logging.decoration import set_streamlining
        set_streamlining(False)

        from acorn import msg
        from acorn.logging.database import record
        from time import time
        #Determine the elapsed time for the execution of the entire cell.
        entry = self.pre
        entry["e"] = time() - entry["s"]

        #See if we can match the executed cell's code up with one that we
        #intercepted in the past.
        cellid = self._find_cellid(entry["c"])
        if cellid is None:
            cellid = self.cellid

        #Store the contents of the cell *before* they get overwritten by a
        #diff.
        self.cellids[cellid] = entry["c"]
        record("__main__.{0:d}".format(cellid), entry, diff=True)
        msg.info(entry, 1)
        self.pre = None

    #Finally, check whether any new variables have shown up, or have had
    #their values changed; log the uuid of any tracked instances.
    from acorn.logging.database import tracker, active_db, Instance
    taskdb = active_db()
    for n, o in self._var_changes():
        otrack = tracker(o)
        if isinstance(otrack, Instance):
            taskdb.log_uuid(otrack.uuid)

    global thumb_uuid
    if thumb_uuid is not None:
        self._log_images()
        #Reset the image tracker list so that we don't save these images
        #again next cell execution.
        thumb_uuid = None

    self.cellid = None
|
python
|
def post_run_cell(self):
    """Runs after the user-entered code in a cell has been executed. It
    detects any new, decoratable objects that haven't been decorated yet and
    then decorates them.
    """
    #Detect and decorate any new, decoratable objects that haven't been
    #decorated yet. (The unused `decorlist` accumulator that used to live
    #here was dead code and has been removed.)
    for atype in self.atypes:
        for n, o in self._get_decoratables(atype):
            self._decorate(atype, n, o)

    #Next, check whether we have an outstanding "loop intercept" that we
    #"wrapped" with respect to acorn by enabling streamlining.
    if self.pre is not None:
        #Re-enable the acorn logging systems so that it gets back to normal.
        from acorn.logging.decoration import set_streamlining
        set_streamlining(False)

        from acorn import msg
        from acorn.logging.database import record
        from time import time
        #Determine the elapsed time for the execution of the entire cell.
        entry = self.pre
        entry["e"] = time() - entry["s"]

        #See if we can match the executed cell's code up with one that we
        #intercepted in the past.
        cellid = self._find_cellid(entry["c"])
        if cellid is None:
            cellid = self.cellid

        #Store the contents of the cell *before* they get overwritten by a
        #diff.
        self.cellids[cellid] = entry["c"]
        record("__main__.{0:d}".format(cellid), entry, diff=True)
        msg.info(entry, 1)
        self.pre = None

    #Finally, check whether any new variables have shown up, or have had
    #their values changed; log the uuid of any tracked instances.
    from acorn.logging.database import tracker, active_db, Instance
    taskdb = active_db()
    for n, o in self._var_changes():
        otrack = tracker(o)
        if isinstance(otrack, Instance):
            taskdb.log_uuid(otrack.uuid)

    global thumb_uuid
    if thumb_uuid is not None:
        self._log_images()
        #Reset the image tracker list so that we don't save these images
        #again next cell execution.
        thumb_uuid = None

    self.cellid = None
|
[
"def",
"post_run_cell",
"(",
"self",
")",
":",
"#We just want to detect any new, decoratable objects that haven't been",
"#decorated yet.",
"decorlist",
"=",
"{",
"k",
":",
"[",
"]",
"for",
"k",
"in",
"self",
".",
"atypes",
"}",
"for",
"atype",
"in",
"self",
".",
"atypes",
":",
"for",
"n",
",",
"o",
"in",
"self",
".",
"_get_decoratables",
"(",
"atype",
")",
":",
"self",
".",
"_decorate",
"(",
"atype",
",",
"n",
",",
"o",
")",
"#Next, check whether we have an outstanding \"loop intercept\" that we",
"#\"wrapped\" with respect to acorn by enabling streamlining.",
"if",
"self",
".",
"pre",
"is",
"not",
"None",
":",
"#Re-enable the acorn logging systems so that it gets back to normal.",
"from",
"acorn",
".",
"logging",
".",
"decoration",
"import",
"set_streamlining",
"set_streamlining",
"(",
"False",
")",
"from",
"acorn",
"import",
"msg",
"from",
"acorn",
".",
"logging",
".",
"database",
"import",
"record",
"from",
"time",
"import",
"time",
"#Determine the elapsed time for the execution of the entire cell.",
"entry",
"=",
"self",
".",
"pre",
"entry",
"[",
"\"e\"",
"]",
"=",
"time",
"(",
")",
"-",
"entry",
"[",
"\"s\"",
"]",
"#See if we can match the executed cell's code up with one that we",
"#intercepted in the past..",
"cellid",
"=",
"self",
".",
"_find_cellid",
"(",
"entry",
"[",
"\"c\"",
"]",
")",
"if",
"cellid",
"is",
"None",
":",
"cellid",
"=",
"self",
".",
"cellid",
"#Store the contents of the cell *before* they get overwritten by a",
"#diff.",
"self",
".",
"cellids",
"[",
"cellid",
"]",
"=",
"entry",
"[",
"\"c\"",
"]",
"record",
"(",
"\"__main__.{0:d}\"",
".",
"format",
"(",
"cellid",
")",
",",
"entry",
",",
"diff",
"=",
"True",
")",
"msg",
".",
"info",
"(",
"entry",
",",
"1",
")",
"self",
".",
"pre",
"=",
"None",
"#Finally, check whether any new variables have shown up, or have had",
"#their values changed.",
"from",
"acorn",
".",
"logging",
".",
"database",
"import",
"tracker",
",",
"active_db",
",",
"Instance",
"varchange",
"=",
"self",
".",
"_var_changes",
"(",
")",
"taskdb",
"=",
"active_db",
"(",
")",
"for",
"n",
",",
"o",
"in",
"varchange",
":",
"otrack",
"=",
"tracker",
"(",
"o",
")",
"if",
"isinstance",
"(",
"otrack",
",",
"Instance",
")",
":",
"taskdb",
".",
"log_uuid",
"(",
"otrack",
".",
"uuid",
")",
"global",
"thumb_uuid",
"if",
"thumb_uuid",
"is",
"not",
"None",
":",
"self",
".",
"_log_images",
"(",
")",
"#Reset the image tracker list so that we don't save these images",
"#again next cell execution.",
"thumb_uuid",
"=",
"None",
"self",
".",
"cellid",
"=",
"None"
] |
Runs after the user-entered code in a cell has been executed. It
detects any new, decoratable objects that haven't been decorated yet and
then decorates them.
|
[
"Runs",
"after",
"the",
"user",
"-",
"entered",
"code",
"in",
"a",
"cell",
"has",
"been",
"executed",
".",
"It",
"detects",
"any",
"new",
"decoratable",
"objects",
"that",
"haven",
"t",
"been",
"decorated",
"yet",
"and",
"then",
"decorates",
"them",
"."
] |
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
|
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/ipython.py#L329-L387
|
243,257
|
rosenbrockc/acorn
|
acorn/ipython.py
|
InteractiveDecorator._var_changes
|
def _var_changes(self):
    """Determines the list of variables whose values have changed since the
    last cell execution.
    """
    changed = []
    variables = self.shell.run_line_magic("who_ls", "")
    if variables is None:
        return changed

    import inspect
    for varname in variables:
        varobj = self.shell.user_ns.get(varname, None)
        if varobj is None:
            continue

        #Only plain variables are of interest here: classes and functions
        #are auto-decorated elsewhere, and modules and other programming
        #constructs are not variables at all.
        if any(getattr(inspect, ifunc)(varobj) for ifunc in inspectors):
            continue

        #A changed id() means the name was rebound to a new object since the
        #last cell execution (or is brand new).
        whoid = id(varobj)
        if self.who.get(varname) != whoid:
            changed.append((varname, varobj))
            self.who[varname] = whoid

    return changed
|
python
|
def _var_changes(self):
    """Determines the list of variables whose values have changed since the
    last cell execution.
    """
    changed = []
    variables = self.shell.run_line_magic("who_ls", "")
    if variables is None:
        return changed

    import inspect
    for varname in variables:
        varobj = self.shell.user_ns.get(varname, None)
        if varobj is None:
            continue

        #Only plain variables are of interest here: classes and functions
        #are auto-decorated elsewhere, and modules and other programming
        #constructs are not variables at all.
        if any(getattr(inspect, ifunc)(varobj) for ifunc in inspectors):
            continue

        #A changed id() means the name was rebound to a new object since the
        #last cell execution (or is brand new).
        whoid = id(varobj)
        if self.who.get(varname) != whoid:
            changed.append((varname, varobj))
            self.who[varname] = whoid

    return changed
|
[
"def",
"_var_changes",
"(",
"self",
")",
":",
"result",
"=",
"[",
"]",
"variables",
"=",
"self",
".",
"shell",
".",
"run_line_magic",
"(",
"\"who_ls\"",
",",
"\"\"",
")",
"if",
"variables",
"is",
"None",
":",
"return",
"result",
"import",
"inspect",
"for",
"varname",
"in",
"variables",
":",
"varobj",
"=",
"self",
".",
"shell",
".",
"user_ns",
".",
"get",
"(",
"varname",
",",
"None",
")",
"if",
"varobj",
"is",
"None",
":",
"continue",
"#We need to make sure that the objects have types that make",
"#sense. We auto-decorate all classes and functions; also modules and",
"#other programming constructs are not variables.",
"keep",
"=",
"False",
"for",
"ifunc",
"in",
"inspectors",
":",
"if",
"getattr",
"(",
"inspect",
",",
"ifunc",
")",
"(",
"varobj",
")",
":",
"break",
"else",
":",
"keep",
"=",
"True",
"if",
"keep",
":",
"whoid",
"=",
"id",
"(",
"varobj",
")",
"if",
"varname",
"not",
"in",
"self",
".",
"who",
"or",
"self",
".",
"who",
"[",
"varname",
"]",
"!=",
"whoid",
":",
"result",
".",
"append",
"(",
"(",
"varname",
",",
"varobj",
")",
")",
"self",
".",
"who",
"[",
"varname",
"]",
"=",
"whoid",
"return",
"result"
] |
Determines the list of variables whose values have changed since the
last cell execution.
|
[
"Determines",
"the",
"list",
"of",
"variables",
"whose",
"values",
"have",
"changed",
"since",
"the",
"last",
"cell",
"execution",
"."
] |
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
|
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/ipython.py#L389-L419
|
243,258
|
rosenbrockc/acorn
|
acorn/ipython.py
|
InteractiveDecorator.pre_run_cell
|
def pre_run_cell(self, cellno, code):
    """Executes before the user-entered code in `ipython` is run. This
    intercepts loops and other problematic code that would produce lots of
    database entries and streamlines it to produce only a single entry.

    Args:
        cellno (int): the cell number that is about to be executed.
        code (str): python source code that is about to be executed.
    """
    import ast
    #Remember the id of the cell that is about to execute.
    self.cellid = cellno

    #A loop (or comprehension) anywhere in the cell could generate millions
    #of database entries and make the notebook unusable; cells without one
    #need no special handling.
    if not findloop(ast.parse(code)):
        return

    #Disable the acorn logging systems so that we don't pollute the
    #database.
    from acorn.logging.decoration import set_streamlining
    set_streamlining(True)

    #Create the pre-execute entry for the database.
    from time import time
    self.pre = {
        "m": "loop",
        "a": None,
        "s": time(),
        "r": None,
        "c": code,
    }
|
python
|
def pre_run_cell(self, cellno, code):
"""Executes before the user-entered code in `ipython` is run. This
intercepts loops and other problematic code that would produce lots of
database entries and streamlines it to produce only a single entry.
Args:
cellno (int): the cell number that is about to be executed.
code (str): python source code that is about to be executed.
"""
#First, we look for loops and list/dict comprehensions in the code. Find
#the id of the latest cell that was executed.
self.cellid = cellno
#If there is a loop somewhere in the code, it could generate millions of
#database entries and make the notebook unusable.
import ast
if findloop(ast.parse(code)):
#Disable the acorn logging systems so that we don't pollute the
#database.
from acorn.logging.decoration import set_streamlining
set_streamlining(True)
#Create the pre-execute entry for the database.
from time import time
self.pre = {
"m": "loop",
"a": None,
"s": time(),
"r": None,
"c": code,
}
|
[
"def",
"pre_run_cell",
"(",
"self",
",",
"cellno",
",",
"code",
")",
":",
"#First, we look for loops and list/dict comprehensions in the code. Find",
"#the id of the latest cell that was executed.",
"self",
".",
"cellid",
"=",
"cellno",
"#If there is a loop somewhere in the code, it could generate millions of",
"#database entries and make the notebook unusable.",
"import",
"ast",
"if",
"findloop",
"(",
"ast",
".",
"parse",
"(",
"code",
")",
")",
":",
"#Disable the acorn logging systems so that we don't pollute the",
"#database.",
"from",
"acorn",
".",
"logging",
".",
"decoration",
"import",
"set_streamlining",
"set_streamlining",
"(",
"True",
")",
"#Create the pre-execute entry for the database.",
"from",
"time",
"import",
"time",
"self",
".",
"pre",
"=",
"{",
"\"m\"",
":",
"\"loop\"",
",",
"\"a\"",
":",
"None",
",",
"\"s\"",
":",
"time",
"(",
")",
",",
"\"r\"",
":",
"None",
",",
"\"c\"",
":",
"code",
",",
"}"
] |
Executes before the user-entered code in `ipython` is run. This
intercepts loops and other problematic code that would produce lots of
database entries and streamlines it to produce only a single entry.
Args:
cellno (int): the cell number that is about to be executed.
code (str): python source code that is about to be executed.
|
[
"Executes",
"before",
"the",
"user",
"-",
"entered",
"code",
"in",
"ipython",
"is",
"run",
".",
"This",
"intercepts",
"loops",
"and",
"other",
"problematic",
"code",
"that",
"would",
"produce",
"lots",
"of",
"database",
"entries",
"and",
"streamlines",
"it",
"to",
"produce",
"only",
"a",
"single",
"entry",
"."
] |
9a44d1a1ad8bfc2c54a6b56d9efe54433a797820
|
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/ipython.py#L421-L451
|
243,259
|
radjkarl/fancyTools
|
fancytools/stats/inverseHistogram.py
|
inverseHistogram
|
def inverseHistogram(hist, bin_range):
"""sample data from given histogram and min, max values within range
Returns:
np.array: data that would create the same histogram as given
"""
data = hist.astype(float) / np.min(hist[np.nonzero(hist)])
new_data = np.empty(shape=np.sum(data, dtype=int))
i = 0
xvals = np.linspace(bin_range[0], bin_range[1], len(data))
for d, x in zip(data, xvals):
new_data[i:i + d] = x
i += int(d)
return new_data
|
python
|
def inverseHistogram(hist, bin_range):
"""sample data from given histogram and min, max values within range
Returns:
np.array: data that would create the same histogram as given
"""
data = hist.astype(float) / np.min(hist[np.nonzero(hist)])
new_data = np.empty(shape=np.sum(data, dtype=int))
i = 0
xvals = np.linspace(bin_range[0], bin_range[1], len(data))
for d, x in zip(data, xvals):
new_data[i:i + d] = x
i += int(d)
return new_data
|
[
"def",
"inverseHistogram",
"(",
"hist",
",",
"bin_range",
")",
":",
"data",
"=",
"hist",
".",
"astype",
"(",
"float",
")",
"/",
"np",
".",
"min",
"(",
"hist",
"[",
"np",
".",
"nonzero",
"(",
"hist",
")",
"]",
")",
"new_data",
"=",
"np",
".",
"empty",
"(",
"shape",
"=",
"np",
".",
"sum",
"(",
"data",
",",
"dtype",
"=",
"int",
")",
")",
"i",
"=",
"0",
"xvals",
"=",
"np",
".",
"linspace",
"(",
"bin_range",
"[",
"0",
"]",
",",
"bin_range",
"[",
"1",
"]",
",",
"len",
"(",
"data",
")",
")",
"for",
"d",
",",
"x",
"in",
"zip",
"(",
"data",
",",
"xvals",
")",
":",
"new_data",
"[",
"i",
":",
"i",
"+",
"d",
"]",
"=",
"x",
"i",
"+=",
"int",
"(",
"d",
")",
"return",
"new_data"
] |
sample data from given histogram and min, max values within range
Returns:
np.array: data that would create the same histogram as given
|
[
"sample",
"data",
"from",
"given",
"histogram",
"and",
"min",
"max",
"values",
"within",
"range"
] |
4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b
|
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/stats/inverseHistogram.py#L7-L20
|
243,260
|
Aperture-py/aperture-lib
|
aperturelib/watermark.py
|
watermark_image
|
def watermark_image(image, wtrmrk_path, corner=2):
'''Adds a watermark image to an instance of a PIL Image.
If the provided watermark image (wtrmrk_path) is
larger than the provided base image (image), then
the watermark image will be automatically resized to
roughly 1/8 the size of the base image.
Args:
image: An instance of a PIL Image. This is the base image.
wtrmrk_path: Path to the watermark image to use.
corner: An integer between 0 and 3 representing the corner
where the watermark image should be placed on top of the
base image. 0 is top left, 1 is top right, 2 is bottom
right and 3 is bottom left. NOTE: Right now, this is
permanently set to 2 (bottom right) but this can be
changed in the future by either creating a new cmd-line
flag or putting this in the config file.
Returns: The watermarked image
'''
padding = 2
wtrmrk_img = Image.open(wtrmrk_path)
#Need to perform size check in here rather than in options.py because this is
# the only place where we know the size of the image that the watermark is
# being placed onto
if wtrmrk_img.width > (image.width - padding * 2) or wtrmrk_img.height > (
image.height - padding * 2):
res = (int(image.width / 8.0), int(image.height / 8.0))
resize_in_place(wtrmrk_img, res)
pos = get_pos(corner, image.size, wtrmrk_img.size, padding)
was_P = image.mode == 'P'
was_L = image.mode == 'L'
# Fix PIL palette issue by converting palette images to RGBA
if image.mode not in ['RGB', 'RGBA']:
if image.format in ['JPG', 'JPEG']:
image = image.convert('RGB')
else:
image = image.convert('RGBA')
image.paste(wtrmrk_img.convert('RGBA'), pos, wtrmrk_img.convert('RGBA'))
if was_P:
image = image.convert('P', palette=Image.ADAPTIVE, colors=256)
elif was_L:
image = image.convert('L')
return image
|
python
|
def watermark_image(image, wtrmrk_path, corner=2):
'''Adds a watermark image to an instance of a PIL Image.
If the provided watermark image (wtrmrk_path) is
larger than the provided base image (image), then
the watermark image will be automatically resized to
roughly 1/8 the size of the base image.
Args:
image: An instance of a PIL Image. This is the base image.
wtrmrk_path: Path to the watermark image to use.
corner: An integer between 0 and 3 representing the corner
where the watermark image should be placed on top of the
base image. 0 is top left, 1 is top right, 2 is bottom
right and 3 is bottom left. NOTE: Right now, this is
permanently set to 2 (bottom right) but this can be
changed in the future by either creating a new cmd-line
flag or putting this in the config file.
Returns: The watermarked image
'''
padding = 2
wtrmrk_img = Image.open(wtrmrk_path)
#Need to perform size check in here rather than in options.py because this is
# the only place where we know the size of the image that the watermark is
# being placed onto
if wtrmrk_img.width > (image.width - padding * 2) or wtrmrk_img.height > (
image.height - padding * 2):
res = (int(image.width / 8.0), int(image.height / 8.0))
resize_in_place(wtrmrk_img, res)
pos = get_pos(corner, image.size, wtrmrk_img.size, padding)
was_P = image.mode == 'P'
was_L = image.mode == 'L'
# Fix PIL palette issue by converting palette images to RGBA
if image.mode not in ['RGB', 'RGBA']:
if image.format in ['JPG', 'JPEG']:
image = image.convert('RGB')
else:
image = image.convert('RGBA')
image.paste(wtrmrk_img.convert('RGBA'), pos, wtrmrk_img.convert('RGBA'))
if was_P:
image = image.convert('P', palette=Image.ADAPTIVE, colors=256)
elif was_L:
image = image.convert('L')
return image
|
[
"def",
"watermark_image",
"(",
"image",
",",
"wtrmrk_path",
",",
"corner",
"=",
"2",
")",
":",
"padding",
"=",
"2",
"wtrmrk_img",
"=",
"Image",
".",
"open",
"(",
"wtrmrk_path",
")",
"#Need to perform size check in here rather than in options.py because this is",
"# the only place where we know the size of the image that the watermark is",
"# being placed onto",
"if",
"wtrmrk_img",
".",
"width",
">",
"(",
"image",
".",
"width",
"-",
"padding",
"*",
"2",
")",
"or",
"wtrmrk_img",
".",
"height",
">",
"(",
"image",
".",
"height",
"-",
"padding",
"*",
"2",
")",
":",
"res",
"=",
"(",
"int",
"(",
"image",
".",
"width",
"/",
"8.0",
")",
",",
"int",
"(",
"image",
".",
"height",
"/",
"8.0",
")",
")",
"resize_in_place",
"(",
"wtrmrk_img",
",",
"res",
")",
"pos",
"=",
"get_pos",
"(",
"corner",
",",
"image",
".",
"size",
",",
"wtrmrk_img",
".",
"size",
",",
"padding",
")",
"was_P",
"=",
"image",
".",
"mode",
"==",
"'P'",
"was_L",
"=",
"image",
".",
"mode",
"==",
"'L'",
"# Fix PIL palette issue by converting palette images to RGBA",
"if",
"image",
".",
"mode",
"not",
"in",
"[",
"'RGB'",
",",
"'RGBA'",
"]",
":",
"if",
"image",
".",
"format",
"in",
"[",
"'JPG'",
",",
"'JPEG'",
"]",
":",
"image",
"=",
"image",
".",
"convert",
"(",
"'RGB'",
")",
"else",
":",
"image",
"=",
"image",
".",
"convert",
"(",
"'RGBA'",
")",
"image",
".",
"paste",
"(",
"wtrmrk_img",
".",
"convert",
"(",
"'RGBA'",
")",
",",
"pos",
",",
"wtrmrk_img",
".",
"convert",
"(",
"'RGBA'",
")",
")",
"if",
"was_P",
":",
"image",
"=",
"image",
".",
"convert",
"(",
"'P'",
",",
"palette",
"=",
"Image",
".",
"ADAPTIVE",
",",
"colors",
"=",
"256",
")",
"elif",
"was_L",
":",
"image",
"=",
"image",
".",
"convert",
"(",
"'L'",
")",
"return",
"image"
] |
Adds a watermark image to an instance of a PIL Image.
If the provided watermark image (wtrmrk_path) is
larger than the provided base image (image), then
the watermark image will be automatically resized to
roughly 1/8 the size of the base image.
Args:
image: An instance of a PIL Image. This is the base image.
wtrmrk_path: Path to the watermark image to use.
corner: An integer between 0 and 3 representing the corner
where the watermark image should be placed on top of the
base image. 0 is top left, 1 is top right, 2 is bottom
right and 3 is bottom left. NOTE: Right now, this is
permanently set to 2 (bottom right) but this can be
changed in the future by either creating a new cmd-line
flag or putting this in the config file.
Returns: The watermarked image
|
[
"Adds",
"a",
"watermark",
"image",
"to",
"an",
"instance",
"of",
"a",
"PIL",
"Image",
"."
] |
5c54af216319f297ddf96181a16f088cf1ba23f3
|
https://github.com/Aperture-py/aperture-lib/blob/5c54af216319f297ddf96181a16f088cf1ba23f3/aperturelib/watermark.py#L5-L56
|
243,261
|
Aperture-py/aperture-lib
|
aperturelib/watermark.py
|
watermark_text
|
def watermark_text(image, text, corner=2):
'''Adds a text watermark to an instance of a PIL Image.
The text will be sized so that the height of the text is
roughly 1/20th the height of the base image. The text will
be white with a thin black outline.
Args:
image: An instance of a PIL Image. This is the base image.
text: Text to use as a watermark.
corner: An integer between 0 and 3 representing the corner
where the watermark image should be placed on top of the
base image. 0 is top left, 1 is top right, 2 is bottom
right and 3 is bottom left. NOTE: Right now, this is
permanently set to 2 (bottom right) but this can be
changed in the future by either creating a new cmd-line
flag or putting this in the config file.
Returns: The watermarked image
'''
# Load Font
FONT_PATH = ''
if resource_exists(__name__, 'resources/fonts/SourceSansPro-Regular.ttf'):
FONT_PATH = resource_filename(
__name__, 'resources/fonts/SourceSansPro-Regular.ttf')
padding = 5
was_P = image.mode == 'P'
was_L = image.mode == 'L'
# Fix PIL palette issue by converting palette images to RGBA
if image.mode not in ['RGB', 'RGBA']:
if image.format in ['JPG', 'JPEG']:
image = image.convert('RGB')
else:
image = image.convert('RGBA')
# Get drawable image
img_draw = ImageDraw.Draw(image)
fontsize = 1 # starting font size
# portion of image width you want text height to be.
# default font size will have a height that is ~1/20
# the height of the base image.
img_fraction = 0.05
# attempt to use Aperture default font. If that fails, use ImageFont default
try:
font = ImageFont.truetype(font=FONT_PATH, size=fontsize)
was_over = False
inc = 2
while True:
if font.getsize(text)[1] > img_fraction * image.height:
if not was_over:
was_over = True
inc = -1
else:
if was_over:
break
# iterate until the text size is just larger than the criteria
fontsize += inc
font = ImageFont.truetype(font=FONT_PATH, size=fontsize)
fontsize -= 1
font = ImageFont.truetype(font=FONT_PATH, size=fontsize)
except:
# replace with log message
print('Failed to load Aperture font. Using default font instead.')
font = ImageFont.load_default() # Bad because default is suuuuper small
# get position of text
pos = get_pos(corner, image.size, font.getsize(text), padding)
# draw a thin black border
img_draw.text((pos[0] - 1, pos[1]), text, font=font, fill='black')
img_draw.text((pos[0] + 1, pos[1]), text, font=font, fill='black')
img_draw.text((pos[0], pos[1] - 1), text, font=font, fill='black')
img_draw.text((pos[0], pos[1] + 1), text, font=font, fill='black')
# draw the actual text
img_draw.text(pos, text, font=font, fill='white')
# Remove cached font file
cleanup_resources()
del img_draw
if was_P:
image = image.convert('P', palette=Image.ADAPTIVE, colors=256)
elif was_L:
image = image.convert('L')
return image
|
python
|
def watermark_text(image, text, corner=2):
'''Adds a text watermark to an instance of a PIL Image.
The text will be sized so that the height of the text is
roughly 1/20th the height of the base image. The text will
be white with a thin black outline.
Args:
image: An instance of a PIL Image. This is the base image.
text: Text to use as a watermark.
corner: An integer between 0 and 3 representing the corner
where the watermark image should be placed on top of the
base image. 0 is top left, 1 is top right, 2 is bottom
right and 3 is bottom left. NOTE: Right now, this is
permanently set to 2 (bottom right) but this can be
changed in the future by either creating a new cmd-line
flag or putting this in the config file.
Returns: The watermarked image
'''
# Load Font
FONT_PATH = ''
if resource_exists(__name__, 'resources/fonts/SourceSansPro-Regular.ttf'):
FONT_PATH = resource_filename(
__name__, 'resources/fonts/SourceSansPro-Regular.ttf')
padding = 5
was_P = image.mode == 'P'
was_L = image.mode == 'L'
# Fix PIL palette issue by converting palette images to RGBA
if image.mode not in ['RGB', 'RGBA']:
if image.format in ['JPG', 'JPEG']:
image = image.convert('RGB')
else:
image = image.convert('RGBA')
# Get drawable image
img_draw = ImageDraw.Draw(image)
fontsize = 1 # starting font size
# portion of image width you want text height to be.
# default font size will have a height that is ~1/20
# the height of the base image.
img_fraction = 0.05
# attempt to use Aperture default font. If that fails, use ImageFont default
try:
font = ImageFont.truetype(font=FONT_PATH, size=fontsize)
was_over = False
inc = 2
while True:
if font.getsize(text)[1] > img_fraction * image.height:
if not was_over:
was_over = True
inc = -1
else:
if was_over:
break
# iterate until the text size is just larger than the criteria
fontsize += inc
font = ImageFont.truetype(font=FONT_PATH, size=fontsize)
fontsize -= 1
font = ImageFont.truetype(font=FONT_PATH, size=fontsize)
except:
# replace with log message
print('Failed to load Aperture font. Using default font instead.')
font = ImageFont.load_default() # Bad because default is suuuuper small
# get position of text
pos = get_pos(corner, image.size, font.getsize(text), padding)
# draw a thin black border
img_draw.text((pos[0] - 1, pos[1]), text, font=font, fill='black')
img_draw.text((pos[0] + 1, pos[1]), text, font=font, fill='black')
img_draw.text((pos[0], pos[1] - 1), text, font=font, fill='black')
img_draw.text((pos[0], pos[1] + 1), text, font=font, fill='black')
# draw the actual text
img_draw.text(pos, text, font=font, fill='white')
# Remove cached font file
cleanup_resources()
del img_draw
if was_P:
image = image.convert('P', palette=Image.ADAPTIVE, colors=256)
elif was_L:
image = image.convert('L')
return image
|
[
"def",
"watermark_text",
"(",
"image",
",",
"text",
",",
"corner",
"=",
"2",
")",
":",
"# Load Font",
"FONT_PATH",
"=",
"''",
"if",
"resource_exists",
"(",
"__name__",
",",
"'resources/fonts/SourceSansPro-Regular.ttf'",
")",
":",
"FONT_PATH",
"=",
"resource_filename",
"(",
"__name__",
",",
"'resources/fonts/SourceSansPro-Regular.ttf'",
")",
"padding",
"=",
"5",
"was_P",
"=",
"image",
".",
"mode",
"==",
"'P'",
"was_L",
"=",
"image",
".",
"mode",
"==",
"'L'",
"# Fix PIL palette issue by converting palette images to RGBA",
"if",
"image",
".",
"mode",
"not",
"in",
"[",
"'RGB'",
",",
"'RGBA'",
"]",
":",
"if",
"image",
".",
"format",
"in",
"[",
"'JPG'",
",",
"'JPEG'",
"]",
":",
"image",
"=",
"image",
".",
"convert",
"(",
"'RGB'",
")",
"else",
":",
"image",
"=",
"image",
".",
"convert",
"(",
"'RGBA'",
")",
"# Get drawable image",
"img_draw",
"=",
"ImageDraw",
".",
"Draw",
"(",
"image",
")",
"fontsize",
"=",
"1",
"# starting font size",
"# portion of image width you want text height to be.",
"# default font size will have a height that is ~1/20",
"# the height of the base image.",
"img_fraction",
"=",
"0.05",
"# attempt to use Aperture default font. If that fails, use ImageFont default",
"try",
":",
"font",
"=",
"ImageFont",
".",
"truetype",
"(",
"font",
"=",
"FONT_PATH",
",",
"size",
"=",
"fontsize",
")",
"was_over",
"=",
"False",
"inc",
"=",
"2",
"while",
"True",
":",
"if",
"font",
".",
"getsize",
"(",
"text",
")",
"[",
"1",
"]",
">",
"img_fraction",
"*",
"image",
".",
"height",
":",
"if",
"not",
"was_over",
":",
"was_over",
"=",
"True",
"inc",
"=",
"-",
"1",
"else",
":",
"if",
"was_over",
":",
"break",
"# iterate until the text size is just larger than the criteria",
"fontsize",
"+=",
"inc",
"font",
"=",
"ImageFont",
".",
"truetype",
"(",
"font",
"=",
"FONT_PATH",
",",
"size",
"=",
"fontsize",
")",
"fontsize",
"-=",
"1",
"font",
"=",
"ImageFont",
".",
"truetype",
"(",
"font",
"=",
"FONT_PATH",
",",
"size",
"=",
"fontsize",
")",
"except",
":",
"# replace with log message",
"print",
"(",
"'Failed to load Aperture font. Using default font instead.'",
")",
"font",
"=",
"ImageFont",
".",
"load_default",
"(",
")",
"# Bad because default is suuuuper small",
"# get position of text",
"pos",
"=",
"get_pos",
"(",
"corner",
",",
"image",
".",
"size",
",",
"font",
".",
"getsize",
"(",
"text",
")",
",",
"padding",
")",
"# draw a thin black border",
"img_draw",
".",
"text",
"(",
"(",
"pos",
"[",
"0",
"]",
"-",
"1",
",",
"pos",
"[",
"1",
"]",
")",
",",
"text",
",",
"font",
"=",
"font",
",",
"fill",
"=",
"'black'",
")",
"img_draw",
".",
"text",
"(",
"(",
"pos",
"[",
"0",
"]",
"+",
"1",
",",
"pos",
"[",
"1",
"]",
")",
",",
"text",
",",
"font",
"=",
"font",
",",
"fill",
"=",
"'black'",
")",
"img_draw",
".",
"text",
"(",
"(",
"pos",
"[",
"0",
"]",
",",
"pos",
"[",
"1",
"]",
"-",
"1",
")",
",",
"text",
",",
"font",
"=",
"font",
",",
"fill",
"=",
"'black'",
")",
"img_draw",
".",
"text",
"(",
"(",
"pos",
"[",
"0",
"]",
",",
"pos",
"[",
"1",
"]",
"+",
"1",
")",
",",
"text",
",",
"font",
"=",
"font",
",",
"fill",
"=",
"'black'",
")",
"# draw the actual text",
"img_draw",
".",
"text",
"(",
"pos",
",",
"text",
",",
"font",
"=",
"font",
",",
"fill",
"=",
"'white'",
")",
"# Remove cached font file",
"cleanup_resources",
"(",
")",
"del",
"img_draw",
"if",
"was_P",
":",
"image",
"=",
"image",
".",
"convert",
"(",
"'P'",
",",
"palette",
"=",
"Image",
".",
"ADAPTIVE",
",",
"colors",
"=",
"256",
")",
"elif",
"was_L",
":",
"image",
"=",
"image",
".",
"convert",
"(",
"'L'",
")",
"return",
"image"
] |
Adds a text watermark to an instance of a PIL Image.
The text will be sized so that the height of the text is
roughly 1/20th the height of the base image. The text will
be white with a thin black outline.
Args:
image: An instance of a PIL Image. This is the base image.
text: Text to use as a watermark.
corner: An integer between 0 and 3 representing the corner
where the watermark image should be placed on top of the
base image. 0 is top left, 1 is top right, 2 is bottom
right and 3 is bottom left. NOTE: Right now, this is
permanently set to 2 (bottom right) but this can be
changed in the future by either creating a new cmd-line
flag or putting this in the config file.
Returns: The watermarked image
|
[
"Adds",
"a",
"text",
"watermark",
"to",
"an",
"instance",
"of",
"a",
"PIL",
"Image",
"."
] |
5c54af216319f297ddf96181a16f088cf1ba23f3
|
https://github.com/Aperture-py/aperture-lib/blob/5c54af216319f297ddf96181a16f088cf1ba23f3/aperturelib/watermark.py#L59-L152
|
243,262
|
bazzisoft/webmake
|
webmake/modules/utils.py
|
log
|
def log(msg, *args, **kwargs):
"""
Print out a log message.
"""
if len(args) == 0 and len(kwargs) == 0:
print(msg)
else:
print(msg.format(*args, **kwargs))
|
python
|
def log(msg, *args, **kwargs):
"""
Print out a log message.
"""
if len(args) == 0 and len(kwargs) == 0:
print(msg)
else:
print(msg.format(*args, **kwargs))
|
[
"def",
"log",
"(",
"msg",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"0",
"and",
"len",
"(",
"kwargs",
")",
"==",
"0",
":",
"print",
"(",
"msg",
")",
"else",
":",
"print",
"(",
"msg",
".",
"format",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")"
] |
Print out a log message.
|
[
"Print",
"out",
"a",
"log",
"message",
"."
] |
c11918900529c801f1675647760ededc0ea5d0cd
|
https://github.com/bazzisoft/webmake/blob/c11918900529c801f1675647760ededc0ea5d0cd/webmake/modules/utils.py#L8-L15
|
243,263
|
bazzisoft/webmake
|
webmake/modules/utils.py
|
logv
|
def logv(msg, *args, **kwargs):
"""
Print out a log message, only if verbose mode.
"""
if settings.VERBOSE:
log(msg, *args, **kwargs)
|
python
|
def logv(msg, *args, **kwargs):
"""
Print out a log message, only if verbose mode.
"""
if settings.VERBOSE:
log(msg, *args, **kwargs)
|
[
"def",
"logv",
"(",
"msg",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"settings",
".",
"VERBOSE",
":",
"log",
"(",
"msg",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
Print out a log message, only if verbose mode.
|
[
"Print",
"out",
"a",
"log",
"message",
"only",
"if",
"verbose",
"mode",
"."
] |
c11918900529c801f1675647760ededc0ea5d0cd
|
https://github.com/bazzisoft/webmake/blob/c11918900529c801f1675647760ededc0ea5d0cd/webmake/modules/utils.py#L18-L23
|
243,264
|
rdo-management/python-rdomanager-oscplugin
|
rdomanager_oscplugin/v1/baremetal.py
|
_csv_to_nodes_dict
|
def _csv_to_nodes_dict(nodes_csv):
"""Convert CSV to a list of dicts formatted for os_cloud_config
Given a CSV file in the format below, convert it into the
structure expected by os_could_config JSON files.
pm_type, pm_addr, pm_user, pm_password, mac
"""
data = []
for row in csv.reader(nodes_csv):
node = {
"pm_user": row[2],
"pm_addr": row[1],
"pm_password": row[3],
"pm_type": row[0],
"mac": [
row[4]
]
}
data.append(node)
return data
|
python
|
def _csv_to_nodes_dict(nodes_csv):
"""Convert CSV to a list of dicts formatted for os_cloud_config
Given a CSV file in the format below, convert it into the
structure expected by os_could_config JSON files.
pm_type, pm_addr, pm_user, pm_password, mac
"""
data = []
for row in csv.reader(nodes_csv):
node = {
"pm_user": row[2],
"pm_addr": row[1],
"pm_password": row[3],
"pm_type": row[0],
"mac": [
row[4]
]
}
data.append(node)
return data
|
[
"def",
"_csv_to_nodes_dict",
"(",
"nodes_csv",
")",
":",
"data",
"=",
"[",
"]",
"for",
"row",
"in",
"csv",
".",
"reader",
"(",
"nodes_csv",
")",
":",
"node",
"=",
"{",
"\"pm_user\"",
":",
"row",
"[",
"2",
"]",
",",
"\"pm_addr\"",
":",
"row",
"[",
"1",
"]",
",",
"\"pm_password\"",
":",
"row",
"[",
"3",
"]",
",",
"\"pm_type\"",
":",
"row",
"[",
"0",
"]",
",",
"\"mac\"",
":",
"[",
"row",
"[",
"4",
"]",
"]",
"}",
"data",
".",
"append",
"(",
"node",
")",
"return",
"data"
] |
Convert CSV to a list of dicts formatted for os_cloud_config
Given a CSV file in the format below, convert it into the
structure expected by os_could_config JSON files.
pm_type, pm_addr, pm_user, pm_password, mac
|
[
"Convert",
"CSV",
"to",
"a",
"list",
"of",
"dicts",
"formatted",
"for",
"os_cloud_config"
] |
165a166fb2e5a2598380779b35812b8b8478c4fb
|
https://github.com/rdo-management/python-rdomanager-oscplugin/blob/165a166fb2e5a2598380779b35812b8b8478c4fb/rdomanager_oscplugin/v1/baremetal.py#L35-L58
|
243,265
|
mbodenhamer/syn
|
syn/base_utils/context.py
|
assign
|
def assign(A, attr, B, lock=False):
'''Assigns B to A.attr, yields, and then assigns A.attr back to its
original value.
'''
class NoAttr(object): pass
context = threading.Lock if lock else null_context
with context():
if not hasattr(A, attr):
tmp = NoAttr
else:
tmp = getattr(A, attr)
setattr(A, attr, B)
try:
yield B
finally:
if tmp is NoAttr:
delattr(A, attr)
else:
setattr(A, attr, tmp)
|
python
|
def assign(A, attr, B, lock=False):
'''Assigns B to A.attr, yields, and then assigns A.attr back to its
original value.
'''
class NoAttr(object): pass
context = threading.Lock if lock else null_context
with context():
if not hasattr(A, attr):
tmp = NoAttr
else:
tmp = getattr(A, attr)
setattr(A, attr, B)
try:
yield B
finally:
if tmp is NoAttr:
delattr(A, attr)
else:
setattr(A, attr, tmp)
|
[
"def",
"assign",
"(",
"A",
",",
"attr",
",",
"B",
",",
"lock",
"=",
"False",
")",
":",
"class",
"NoAttr",
"(",
"object",
")",
":",
"pass",
"context",
"=",
"threading",
".",
"Lock",
"if",
"lock",
"else",
"null_context",
"with",
"context",
"(",
")",
":",
"if",
"not",
"hasattr",
"(",
"A",
",",
"attr",
")",
":",
"tmp",
"=",
"NoAttr",
"else",
":",
"tmp",
"=",
"getattr",
"(",
"A",
",",
"attr",
")",
"setattr",
"(",
"A",
",",
"attr",
",",
"B",
")",
"try",
":",
"yield",
"B",
"finally",
":",
"if",
"tmp",
"is",
"NoAttr",
":",
"delattr",
"(",
"A",
",",
"attr",
")",
"else",
":",
"setattr",
"(",
"A",
",",
"attr",
",",
"tmp",
")"
] |
Assigns B to A.attr, yields, and then assigns A.attr back to its
original value.
|
[
"Assigns",
"B",
"to",
"A",
".",
"attr",
"yields",
"and",
"then",
"assigns",
"A",
".",
"attr",
"back",
"to",
"its",
"original",
"value",
"."
] |
aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258
|
https://github.com/mbodenhamer/syn/blob/aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258/syn/base_utils/context.py#L23-L45
|
243,266
|
mbodenhamer/syn
|
syn/base_utils/context.py
|
delete
|
def delete(*args):
'''For using then deleting objects.'''
from syn.base_utils import this_module
mod = this_module(npop=3)
yield
for arg in args:
name = arg
if not isinstance(name, STR):
name = arg.__name__
delattr(mod, name)
|
python
|
def delete(*args):
'''For using then deleting objects.'''
from syn.base_utils import this_module
mod = this_module(npop=3)
yield
for arg in args:
name = arg
if not isinstance(name, STR):
name = arg.__name__
delattr(mod, name)
|
[
"def",
"delete",
"(",
"*",
"args",
")",
":",
"from",
"syn",
".",
"base_utils",
"import",
"this_module",
"mod",
"=",
"this_module",
"(",
"npop",
"=",
"3",
")",
"yield",
"for",
"arg",
"in",
"args",
":",
"name",
"=",
"arg",
"if",
"not",
"isinstance",
"(",
"name",
",",
"STR",
")",
":",
"name",
"=",
"arg",
".",
"__name__",
"delattr",
"(",
"mod",
",",
"name",
")"
] |
For using then deleting objects.
|
[
"For",
"using",
"then",
"deleting",
"objects",
"."
] |
aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258
|
https://github.com/mbodenhamer/syn/blob/aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258/syn/base_utils/context.py#L90-L99
|
243,267
|
alexpearce/jobmonitor
|
jobmonitor/jobs.py
|
serialize_job
|
def serialize_job(job):
"""Return a dictionary representing the job."""
d = dict(
id=job.get_id(),
uri=url_for('jobs.get_job', job_id=job.get_id(), _external=True),
status=job.get_status(),
result=job.result
)
return d
|
python
|
def serialize_job(job):
"""Return a dictionary representing the job."""
d = dict(
id=job.get_id(),
uri=url_for('jobs.get_job', job_id=job.get_id(), _external=True),
status=job.get_status(),
result=job.result
)
return d
|
[
"def",
"serialize_job",
"(",
"job",
")",
":",
"d",
"=",
"dict",
"(",
"id",
"=",
"job",
".",
"get_id",
"(",
")",
",",
"uri",
"=",
"url_for",
"(",
"'jobs.get_job'",
",",
"job_id",
"=",
"job",
".",
"get_id",
"(",
")",
",",
"_external",
"=",
"True",
")",
",",
"status",
"=",
"job",
".",
"get_status",
"(",
")",
",",
"result",
"=",
"job",
".",
"result",
")",
"return",
"d"
] |
Return a dictionary representing the job.
|
[
"Return",
"a",
"dictionary",
"representing",
"the",
"job",
"."
] |
c08955ed3c357b2b3518aa0853b43bc237bc0814
|
https://github.com/alexpearce/jobmonitor/blob/c08955ed3c357b2b3518aa0853b43bc237bc0814/jobmonitor/jobs.py#L26-L34
|
243,268
|
noirbizarre/minibench
|
minibench/cli.py
|
resolve_pattern
|
def resolve_pattern(pattern):
'''Resolve a glob pattern into a filelist'''
if os.path.exists(pattern) and os.path.isdir(pattern):
pattern = os.path.join(pattern, '**/*.bench.py')
return recursive_glob(pattern)
|
python
|
def resolve_pattern(pattern):
'''Resolve a glob pattern into a filelist'''
if os.path.exists(pattern) and os.path.isdir(pattern):
pattern = os.path.join(pattern, '**/*.bench.py')
return recursive_glob(pattern)
|
[
"def",
"resolve_pattern",
"(",
"pattern",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"pattern",
")",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"pattern",
")",
":",
"pattern",
"=",
"os",
".",
"path",
".",
"join",
"(",
"pattern",
",",
"'**/*.bench.py'",
")",
"return",
"recursive_glob",
"(",
"pattern",
")"
] |
Resolve a glob pattern into a filelist
|
[
"Resolve",
"a",
"glob",
"pattern",
"into",
"a",
"filelist"
] |
a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab
|
https://github.com/noirbizarre/minibench/blob/a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab/minibench/cli.py#L144-L148
|
243,269
|
noirbizarre/minibench
|
minibench/cli.py
|
cli
|
def cli(patterns, times, json, csv, rst, md, ref, unit, precision, debug):
'''Execute minibench benchmarks'''
if ref:
ref = JSON.load(ref)
filenames = []
reporters = [CliReporter(ref=ref, debug=debug, unit=unit, precision=precision)]
kwargs = {}
for pattern in patterns or ['**/*.bench.py']:
filenames.extend(resolve_pattern(pattern))
if json:
reporters.append(JsonReporter(json, precision=precision))
if csv:
reporters.append(CsvReporter(csv, precision=precision))
if rst:
reporters.append(RstReporter(rst, precision=precision))
if md:
reporters.append(MarkdownReporter(md, precision=precision))
if times:
kwargs['times'] = times
runner = BenchmarkRunner(*filenames, reporters=reporters, debug=debug)
runner.run(**kwargs)
|
python
|
def cli(patterns, times, json, csv, rst, md, ref, unit, precision, debug):
'''Execute minibench benchmarks'''
if ref:
ref = JSON.load(ref)
filenames = []
reporters = [CliReporter(ref=ref, debug=debug, unit=unit, precision=precision)]
kwargs = {}
for pattern in patterns or ['**/*.bench.py']:
filenames.extend(resolve_pattern(pattern))
if json:
reporters.append(JsonReporter(json, precision=precision))
if csv:
reporters.append(CsvReporter(csv, precision=precision))
if rst:
reporters.append(RstReporter(rst, precision=precision))
if md:
reporters.append(MarkdownReporter(md, precision=precision))
if times:
kwargs['times'] = times
runner = BenchmarkRunner(*filenames, reporters=reporters, debug=debug)
runner.run(**kwargs)
|
[
"def",
"cli",
"(",
"patterns",
",",
"times",
",",
"json",
",",
"csv",
",",
"rst",
",",
"md",
",",
"ref",
",",
"unit",
",",
"precision",
",",
"debug",
")",
":",
"if",
"ref",
":",
"ref",
"=",
"JSON",
".",
"load",
"(",
"ref",
")",
"filenames",
"=",
"[",
"]",
"reporters",
"=",
"[",
"CliReporter",
"(",
"ref",
"=",
"ref",
",",
"debug",
"=",
"debug",
",",
"unit",
"=",
"unit",
",",
"precision",
"=",
"precision",
")",
"]",
"kwargs",
"=",
"{",
"}",
"for",
"pattern",
"in",
"patterns",
"or",
"[",
"'**/*.bench.py'",
"]",
":",
"filenames",
".",
"extend",
"(",
"resolve_pattern",
"(",
"pattern",
")",
")",
"if",
"json",
":",
"reporters",
".",
"append",
"(",
"JsonReporter",
"(",
"json",
",",
"precision",
"=",
"precision",
")",
")",
"if",
"csv",
":",
"reporters",
".",
"append",
"(",
"CsvReporter",
"(",
"csv",
",",
"precision",
"=",
"precision",
")",
")",
"if",
"rst",
":",
"reporters",
".",
"append",
"(",
"RstReporter",
"(",
"rst",
",",
"precision",
"=",
"precision",
")",
")",
"if",
"md",
":",
"reporters",
".",
"append",
"(",
"MarkdownReporter",
"(",
"md",
",",
"precision",
"=",
"precision",
")",
")",
"if",
"times",
":",
"kwargs",
"[",
"'times'",
"]",
"=",
"times",
"runner",
"=",
"BenchmarkRunner",
"(",
"*",
"filenames",
",",
"reporters",
"=",
"reporters",
",",
"debug",
"=",
"debug",
")",
"runner",
".",
"run",
"(",
"*",
"*",
"kwargs",
")"
] |
Execute minibench benchmarks
|
[
"Execute",
"minibench",
"benchmarks"
] |
a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab
|
https://github.com/noirbizarre/minibench/blob/a1ac66dc075181c62bb3c0d3a26beb5c46d5f4ab/minibench/cli.py#L165-L186
|
243,270
|
urbn/Caesium
|
caesium/handler.py
|
BaseHandler.load_json
|
def load_json(self):
"""Load JSON from the request body and store them in
self.request.arguments, like Tornado does by default for POSTed form
parameters.
If JSON cannot be decoded
:raises ValueError: JSON Could not be decoded
"""
try:
self.request.arguments = json.loads(self.request.body)
except ValueError:
msg = "Could not decode JSON: %s" % self.request.body
self.logger.debug(msg)
self.raise_error(400, msg)
|
python
|
def load_json(self):
"""Load JSON from the request body and store them in
self.request.arguments, like Tornado does by default for POSTed form
parameters.
If JSON cannot be decoded
:raises ValueError: JSON Could not be decoded
"""
try:
self.request.arguments = json.loads(self.request.body)
except ValueError:
msg = "Could not decode JSON: %s" % self.request.body
self.logger.debug(msg)
self.raise_error(400, msg)
|
[
"def",
"load_json",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"request",
".",
"arguments",
"=",
"json",
".",
"loads",
"(",
"self",
".",
"request",
".",
"body",
")",
"except",
"ValueError",
":",
"msg",
"=",
"\"Could not decode JSON: %s\"",
"%",
"self",
".",
"request",
".",
"body",
"self",
".",
"logger",
".",
"debug",
"(",
"msg",
")",
"self",
".",
"raise_error",
"(",
"400",
",",
"msg",
")"
] |
Load JSON from the request body and store them in
self.request.arguments, like Tornado does by default for POSTed form
parameters.
If JSON cannot be decoded
:raises ValueError: JSON Could not be decoded
|
[
"Load",
"JSON",
"from",
"the",
"request",
"body",
"and",
"store",
"them",
"in",
"self",
".",
"request",
".",
"arguments",
"like",
"Tornado",
"does",
"by",
"default",
"for",
"POSTed",
"form",
"parameters",
"."
] |
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
|
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L33-L47
|
243,271
|
urbn/Caesium
|
caesium/handler.py
|
BaseHandler.get_dict_of_all_args
|
def get_dict_of_all_args(self):
"""Generates a dictionary from a handler paths query string and returns it
:returns: Dictionary of all key/values in arguments list
:rtype: dict
"""
dictionary = {}
for arg in [arg for arg in self.request.arguments if arg not in self.settings.get("reserved_query_string_params", [])]:
val = self.get_argument(arg, default=None)
if val:
dictionary[arg] = val
return dictionary
|
python
|
def get_dict_of_all_args(self):
"""Generates a dictionary from a handler paths query string and returns it
:returns: Dictionary of all key/values in arguments list
:rtype: dict
"""
dictionary = {}
for arg in [arg for arg in self.request.arguments if arg not in self.settings.get("reserved_query_string_params", [])]:
val = self.get_argument(arg, default=None)
if val:
dictionary[arg] = val
return dictionary
|
[
"def",
"get_dict_of_all_args",
"(",
"self",
")",
":",
"dictionary",
"=",
"{",
"}",
"for",
"arg",
"in",
"[",
"arg",
"for",
"arg",
"in",
"self",
".",
"request",
".",
"arguments",
"if",
"arg",
"not",
"in",
"self",
".",
"settings",
".",
"get",
"(",
"\"reserved_query_string_params\"",
",",
"[",
"]",
")",
"]",
":",
"val",
"=",
"self",
".",
"get_argument",
"(",
"arg",
",",
"default",
"=",
"None",
")",
"if",
"val",
":",
"dictionary",
"[",
"arg",
"]",
"=",
"val",
"return",
"dictionary"
] |
Generates a dictionary from a handler paths query string and returns it
:returns: Dictionary of all key/values in arguments list
:rtype: dict
|
[
"Generates",
"a",
"dictionary",
"from",
"a",
"handler",
"paths",
"query",
"string",
"and",
"returns",
"it"
] |
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
|
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L74-L85
|
243,272
|
urbn/Caesium
|
caesium/handler.py
|
BaseHandler.get_arg_value_as_type
|
def get_arg_value_as_type(self, key, default=None, convert_int=False):
"""Allow users to pass through truthy type values like true, yes, no and get to a typed variable in your code
:param str val: The string reprensentation of the value you want to convert
:returns: adapted value
:rtype: dynamic
"""
val = self.get_query_argument(key, default)
if isinstance(val, int):
return val
if val.lower() in ['true', 'yes']:
return True
if val.lower() in ['false', 'no']:
return False
return val
|
python
|
def get_arg_value_as_type(self, key, default=None, convert_int=False):
"""Allow users to pass through truthy type values like true, yes, no and get to a typed variable in your code
:param str val: The string reprensentation of the value you want to convert
:returns: adapted value
:rtype: dynamic
"""
val = self.get_query_argument(key, default)
if isinstance(val, int):
return val
if val.lower() in ['true', 'yes']:
return True
if val.lower() in ['false', 'no']:
return False
return val
|
[
"def",
"get_arg_value_as_type",
"(",
"self",
",",
"key",
",",
"default",
"=",
"None",
",",
"convert_int",
"=",
"False",
")",
":",
"val",
"=",
"self",
".",
"get_query_argument",
"(",
"key",
",",
"default",
")",
"if",
"isinstance",
"(",
"val",
",",
"int",
")",
":",
"return",
"val",
"if",
"val",
".",
"lower",
"(",
")",
"in",
"[",
"'true'",
",",
"'yes'",
"]",
":",
"return",
"True",
"if",
"val",
".",
"lower",
"(",
")",
"in",
"[",
"'false'",
",",
"'no'",
"]",
":",
"return",
"False",
"return",
"val"
] |
Allow users to pass through truthy type values like true, yes, no and get to a typed variable in your code
:param str val: The string reprensentation of the value you want to convert
:returns: adapted value
:rtype: dynamic
|
[
"Allow",
"users",
"to",
"pass",
"through",
"truthy",
"type",
"values",
"like",
"true",
"yes",
"no",
"and",
"get",
"to",
"a",
"typed",
"variable",
"in",
"your",
"code"
] |
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
|
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L87-L106
|
243,273
|
urbn/Caesium
|
caesium/handler.py
|
BaseHandler.get_mongo_query_from_arguments
|
def get_mongo_query_from_arguments(self, reserved_attributes=[]):
"""Generate a mongo query from the given URL query parameters, handles OR query via multiples
:param list reserved_attributes: A list of attributes you want to exclude from this particular query
:return: dict
"""
query = {}
for arg in self.request.arguments:
if arg not in reserved_attributes:
if len(self.request.arguments.get(arg)) > 1:
query["$or"] = []
for val in self.request.arguments.get(arg):
query["$or"].append({arg: self.get_arg_value_as_type(val)})
else:
query[arg] = self.get_arg_value_as_type(self.request.arguments.get(arg)[0])
return query
|
python
|
def get_mongo_query_from_arguments(self, reserved_attributes=[]):
"""Generate a mongo query from the given URL query parameters, handles OR query via multiples
:param list reserved_attributes: A list of attributes you want to exclude from this particular query
:return: dict
"""
query = {}
for arg in self.request.arguments:
if arg not in reserved_attributes:
if len(self.request.arguments.get(arg)) > 1:
query["$or"] = []
for val in self.request.arguments.get(arg):
query["$or"].append({arg: self.get_arg_value_as_type(val)})
else:
query[arg] = self.get_arg_value_as_type(self.request.arguments.get(arg)[0])
return query
|
[
"def",
"get_mongo_query_from_arguments",
"(",
"self",
",",
"reserved_attributes",
"=",
"[",
"]",
")",
":",
"query",
"=",
"{",
"}",
"for",
"arg",
"in",
"self",
".",
"request",
".",
"arguments",
":",
"if",
"arg",
"not",
"in",
"reserved_attributes",
":",
"if",
"len",
"(",
"self",
".",
"request",
".",
"arguments",
".",
"get",
"(",
"arg",
")",
")",
">",
"1",
":",
"query",
"[",
"\"$or\"",
"]",
"=",
"[",
"]",
"for",
"val",
"in",
"self",
".",
"request",
".",
"arguments",
".",
"get",
"(",
"arg",
")",
":",
"query",
"[",
"\"$or\"",
"]",
".",
"append",
"(",
"{",
"arg",
":",
"self",
".",
"get_arg_value_as_type",
"(",
"val",
")",
"}",
")",
"else",
":",
"query",
"[",
"arg",
"]",
"=",
"self",
".",
"get_arg_value_as_type",
"(",
"self",
".",
"request",
".",
"arguments",
".",
"get",
"(",
"arg",
")",
"[",
"0",
"]",
")",
"return",
"query"
] |
Generate a mongo query from the given URL query parameters, handles OR query via multiples
:param list reserved_attributes: A list of attributes you want to exclude from this particular query
:return: dict
|
[
"Generate",
"a",
"mongo",
"query",
"from",
"the",
"given",
"URL",
"query",
"parameters",
"handles",
"OR",
"query",
"via",
"multiples"
] |
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
|
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L108-L125
|
243,274
|
urbn/Caesium
|
caesium/handler.py
|
BaseHandler._get_meta_data
|
def _get_meta_data(self):
"""Creates the meta data dictionary for a revision"""
return {
"comment": self.request.headers.get("comment", ""),
"author": self.get_current_user() or self.settings.get('annonymous_user')
}
|
python
|
def _get_meta_data(self):
"""Creates the meta data dictionary for a revision"""
return {
"comment": self.request.headers.get("comment", ""),
"author": self.get_current_user() or self.settings.get('annonymous_user')
}
|
[
"def",
"_get_meta_data",
"(",
"self",
")",
":",
"return",
"{",
"\"comment\"",
":",
"self",
".",
"request",
".",
"headers",
".",
"get",
"(",
"\"comment\"",
",",
"\"\"",
")",
",",
"\"author\"",
":",
"self",
".",
"get_current_user",
"(",
")",
"or",
"self",
".",
"settings",
".",
"get",
"(",
"'annonymous_user'",
")",
"}"
] |
Creates the meta data dictionary for a revision
|
[
"Creates",
"the",
"meta",
"data",
"dictionary",
"for",
"a",
"revision"
] |
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
|
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L146-L151
|
243,275
|
urbn/Caesium
|
caesium/handler.py
|
BaseHandler.arg_as_array
|
def arg_as_array(self, arg, split_char="|"):
"""Turns an argument into an array, split by the splitChar
:param str arg: The name of the query param you want to turn into an array based on the value
:param str split_char: The character the value should be split on.
:returns: A list of values
:rtype: list
"""
valuesString = self.get_argument(arg, default=None)
if valuesString:
valuesArray = valuesString.split(split_char)
return valuesArray
return None
|
python
|
def arg_as_array(self, arg, split_char="|"):
"""Turns an argument into an array, split by the splitChar
:param str arg: The name of the query param you want to turn into an array based on the value
:param str split_char: The character the value should be split on.
:returns: A list of values
:rtype: list
"""
valuesString = self.get_argument(arg, default=None)
if valuesString:
valuesArray = valuesString.split(split_char)
return valuesArray
return None
|
[
"def",
"arg_as_array",
"(",
"self",
",",
"arg",
",",
"split_char",
"=",
"\"|\"",
")",
":",
"valuesString",
"=",
"self",
".",
"get_argument",
"(",
"arg",
",",
"default",
"=",
"None",
")",
"if",
"valuesString",
":",
"valuesArray",
"=",
"valuesString",
".",
"split",
"(",
"split_char",
")",
"return",
"valuesArray",
"return",
"None"
] |
Turns an argument into an array, split by the splitChar
:param str arg: The name of the query param you want to turn into an array based on the value
:param str split_char: The character the value should be split on.
:returns: A list of values
:rtype: list
|
[
"Turns",
"an",
"argument",
"into",
"an",
"array",
"split",
"by",
"the",
"splitChar"
] |
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
|
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L167-L180
|
243,276
|
urbn/Caesium
|
caesium/handler.py
|
BaseHandler.raise_error
|
def raise_error(self, status=500, message="Generic server error. Out of luck..."):
"""
Sets an error status and returns a message to the user in JSON format
:param int status: The status code to use
:param str message: The message to return in the JSON response
"""
self.set_status(status)
self.write({"message" : message,
"status" : status})
|
python
|
def raise_error(self, status=500, message="Generic server error. Out of luck..."):
"""
Sets an error status and returns a message to the user in JSON format
:param int status: The status code to use
:param str message: The message to return in the JSON response
"""
self.set_status(status)
self.write({"message" : message,
"status" : status})
|
[
"def",
"raise_error",
"(",
"self",
",",
"status",
"=",
"500",
",",
"message",
"=",
"\"Generic server error. Out of luck...\"",
")",
":",
"self",
".",
"set_status",
"(",
"status",
")",
"self",
".",
"write",
"(",
"{",
"\"message\"",
":",
"message",
",",
"\"status\"",
":",
"status",
"}",
")"
] |
Sets an error status and returns a message to the user in JSON format
:param int status: The status code to use
:param str message: The message to return in the JSON response
|
[
"Sets",
"an",
"error",
"status",
"and",
"returns",
"a",
"message",
"to",
"the",
"user",
"in",
"JSON",
"format"
] |
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
|
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L182-L191
|
243,277
|
urbn/Caesium
|
caesium/handler.py
|
BaseHandler.return_resource
|
def return_resource(self, resource, status=200, statusMessage="OK"):
"""Return a resource response
:param str resource: The JSON String representation of a resource response
:param int status: Status code to use
:param str statusMessage: The message to use in the error response
"""
self.set_status(status, statusMessage)
self.write(json.loads(json_util.dumps(resource)))
|
python
|
def return_resource(self, resource, status=200, statusMessage="OK"):
"""Return a resource response
:param str resource: The JSON String representation of a resource response
:param int status: Status code to use
:param str statusMessage: The message to use in the error response
"""
self.set_status(status, statusMessage)
self.write(json.loads(json_util.dumps(resource)))
|
[
"def",
"return_resource",
"(",
"self",
",",
"resource",
",",
"status",
"=",
"200",
",",
"statusMessage",
"=",
"\"OK\"",
")",
":",
"self",
".",
"set_status",
"(",
"status",
",",
"statusMessage",
")",
"self",
".",
"write",
"(",
"json",
".",
"loads",
"(",
"json_util",
".",
"dumps",
"(",
"resource",
")",
")",
")"
] |
Return a resource response
:param str resource: The JSON String representation of a resource response
:param int status: Status code to use
:param str statusMessage: The message to use in the error response
|
[
"Return",
"a",
"resource",
"response"
] |
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
|
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L200-L208
|
243,278
|
urbn/Caesium
|
caesium/handler.py
|
BaseHandler.group_objects_by
|
def group_objects_by(self, list, attr, valueLabel="value", childrenLabel="children"):
"""
Generates a group object based on the attribute value on of the given attr value that is passed in.
:param list list: A list of dictionary objects
:param str attr: The attribute that the dictionaries should be sorted upon
:param str valueLabel: What to call the key of the field we're sorting upon
:param str childrenLabel: What to call the list of child objects on the group object
:returns: list of grouped objects by a given attribute
:rtype: list
"""
groups = []
for obj in list:
val = obj.get(attr)
if not val:
pass
newGroup = {"attribute": attr, valueLabel: val, childrenLabel: [obj]}
found = False
for i in range(0,len(groups)):
if val == groups[i].get(valueLabel):
found = True
groups[i][childrenLabel].append(obj)
pass
if not found:
groups.append(newGroup)
return groups
|
python
|
def group_objects_by(self, list, attr, valueLabel="value", childrenLabel="children"):
"""
Generates a group object based on the attribute value on of the given attr value that is passed in.
:param list list: A list of dictionary objects
:param str attr: The attribute that the dictionaries should be sorted upon
:param str valueLabel: What to call the key of the field we're sorting upon
:param str childrenLabel: What to call the list of child objects on the group object
:returns: list of grouped objects by a given attribute
:rtype: list
"""
groups = []
for obj in list:
val = obj.get(attr)
if not val:
pass
newGroup = {"attribute": attr, valueLabel: val, childrenLabel: [obj]}
found = False
for i in range(0,len(groups)):
if val == groups[i].get(valueLabel):
found = True
groups[i][childrenLabel].append(obj)
pass
if not found:
groups.append(newGroup)
return groups
|
[
"def",
"group_objects_by",
"(",
"self",
",",
"list",
",",
"attr",
",",
"valueLabel",
"=",
"\"value\"",
",",
"childrenLabel",
"=",
"\"children\"",
")",
":",
"groups",
"=",
"[",
"]",
"for",
"obj",
"in",
"list",
":",
"val",
"=",
"obj",
".",
"get",
"(",
"attr",
")",
"if",
"not",
"val",
":",
"pass",
"newGroup",
"=",
"{",
"\"attribute\"",
":",
"attr",
",",
"valueLabel",
":",
"val",
",",
"childrenLabel",
":",
"[",
"obj",
"]",
"}",
"found",
"=",
"False",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"groups",
")",
")",
":",
"if",
"val",
"==",
"groups",
"[",
"i",
"]",
".",
"get",
"(",
"valueLabel",
")",
":",
"found",
"=",
"True",
"groups",
"[",
"i",
"]",
"[",
"childrenLabel",
"]",
".",
"append",
"(",
"obj",
")",
"pass",
"if",
"not",
"found",
":",
"groups",
".",
"append",
"(",
"newGroup",
")",
"return",
"groups"
] |
Generates a group object based on the attribute value on of the given attr value that is passed in.
:param list list: A list of dictionary objects
:param str attr: The attribute that the dictionaries should be sorted upon
:param str valueLabel: What to call the key of the field we're sorting upon
:param str childrenLabel: What to call the list of child objects on the group object
:returns: list of grouped objects by a given attribute
:rtype: list
|
[
"Generates",
"a",
"group",
"object",
"based",
"on",
"the",
"attribute",
"value",
"on",
"of",
"the",
"given",
"attr",
"value",
"that",
"is",
"passed",
"in",
"."
] |
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
|
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L211-L242
|
243,279
|
urbn/Caesium
|
caesium/handler.py
|
BaseHandler.write_hyper_response
|
def write_hyper_response(self, links=[], meta={}, entity_name=None, entity=None, notifications=[], actions=[]):
"""Writes a hyper media response object
:param list links: A list of links to the resources
:param dict meta: The meta data for this response
:param str entity_name: The entity name
:param object entity: The Entity itself
:param list notifications: List of notifications
:param list actions: List of actions
"""
assert entity_name is not None
assert entity is not None
meta.update({
"status": self.get_status()
})
self.write({
"links": links,
"meta": meta,
entity_name: entity,
"notifications": notifications,
"actions": actions
})
|
python
|
def write_hyper_response(self, links=[], meta={}, entity_name=None, entity=None, notifications=[], actions=[]):
"""Writes a hyper media response object
:param list links: A list of links to the resources
:param dict meta: The meta data for this response
:param str entity_name: The entity name
:param object entity: The Entity itself
:param list notifications: List of notifications
:param list actions: List of actions
"""
assert entity_name is not None
assert entity is not None
meta.update({
"status": self.get_status()
})
self.write({
"links": links,
"meta": meta,
entity_name: entity,
"notifications": notifications,
"actions": actions
})
|
[
"def",
"write_hyper_response",
"(",
"self",
",",
"links",
"=",
"[",
"]",
",",
"meta",
"=",
"{",
"}",
",",
"entity_name",
"=",
"None",
",",
"entity",
"=",
"None",
",",
"notifications",
"=",
"[",
"]",
",",
"actions",
"=",
"[",
"]",
")",
":",
"assert",
"entity_name",
"is",
"not",
"None",
"assert",
"entity",
"is",
"not",
"None",
"meta",
".",
"update",
"(",
"{",
"\"status\"",
":",
"self",
".",
"get_status",
"(",
")",
"}",
")",
"self",
".",
"write",
"(",
"{",
"\"links\"",
":",
"links",
",",
"\"meta\"",
":",
"meta",
",",
"entity_name",
":",
"entity",
",",
"\"notifications\"",
":",
"notifications",
",",
"\"actions\"",
":",
"actions",
"}",
")"
] |
Writes a hyper media response object
:param list links: A list of links to the resources
:param dict meta: The meta data for this response
:param str entity_name: The entity name
:param object entity: The Entity itself
:param list notifications: List of notifications
:param list actions: List of actions
|
[
"Writes",
"a",
"hyper",
"media",
"response",
"object"
] |
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
|
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L252-L275
|
243,280
|
urbn/Caesium
|
caesium/handler.py
|
BaseRestfulMotorHandler.get
|
def get(self, id):
"""
Get an by object by unique identifier
:id string id: the bson id of an object
:rtype: JSON
"""
try:
if self.request.headers.get("Id"):
object_ = yield self.client.find_one({self.request.headers.get("Id"): id})
else:
object_ = yield self.client.find_one_by_id(id)
if object_:
self.write(object_)
return
self.raise_error(404, "%s/%s not found" % (self.object_name, id))
except InvalidId as ex:
self.raise_error(400, message="Your ID is malformed: %s" % id)
except Exception as ex:
self.logger.error(ex)
self.raise_error()
|
python
|
def get(self, id):
"""
Get an by object by unique identifier
:id string id: the bson id of an object
:rtype: JSON
"""
try:
if self.request.headers.get("Id"):
object_ = yield self.client.find_one({self.request.headers.get("Id"): id})
else:
object_ = yield self.client.find_one_by_id(id)
if object_:
self.write(object_)
return
self.raise_error(404, "%s/%s not found" % (self.object_name, id))
except InvalidId as ex:
self.raise_error(400, message="Your ID is malformed: %s" % id)
except Exception as ex:
self.logger.error(ex)
self.raise_error()
|
[
"def",
"get",
"(",
"self",
",",
"id",
")",
":",
"try",
":",
"if",
"self",
".",
"request",
".",
"headers",
".",
"get",
"(",
"\"Id\"",
")",
":",
"object_",
"=",
"yield",
"self",
".",
"client",
".",
"find_one",
"(",
"{",
"self",
".",
"request",
".",
"headers",
".",
"get",
"(",
"\"Id\"",
")",
":",
"id",
"}",
")",
"else",
":",
"object_",
"=",
"yield",
"self",
".",
"client",
".",
"find_one_by_id",
"(",
"id",
")",
"if",
"object_",
":",
"self",
".",
"write",
"(",
"object_",
")",
"return",
"self",
".",
"raise_error",
"(",
"404",
",",
"\"%s/%s not found\"",
"%",
"(",
"self",
".",
"object_name",
",",
"id",
")",
")",
"except",
"InvalidId",
"as",
"ex",
":",
"self",
".",
"raise_error",
"(",
"400",
",",
"message",
"=",
"\"Your ID is malformed: %s\"",
"%",
"id",
")",
"except",
"Exception",
"as",
"ex",
":",
"self",
".",
"logger",
".",
"error",
"(",
"ex",
")",
"self",
".",
"raise_error",
"(",
")"
] |
Get an by object by unique identifier
:id string id: the bson id of an object
:rtype: JSON
|
[
"Get",
"an",
"by",
"object",
"by",
"unique",
"identifier"
] |
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
|
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L294-L317
|
243,281
|
urbn/Caesium
|
caesium/handler.py
|
BaseRestfulMotorHandler.put
|
def put(self, id):
"""
Update a resource by bson ObjectId
:returns: json string representation
:rtype: JSON
"""
try:
#Async update flow
object_ = json_util.loads(self.request.body)
toa = self.request.headers.get("Caesium-TOA", None)
obj_check = yield self.client.find_one_by_id(id)
if not obj_check:
self.raise_error(404, "Resource not found: %s" % id)
self.finish()
return
if toa:
stack = AsyncSchedulableDocumentRevisionStack(self.client.collection_name, self.settings, master_id=id)
revision_id = yield stack.push(object_, int(toa), meta=self._get_meta_data())
if isinstance(revision_id, str):
self.set_header("Caesium-TOA", toa)
#We add the id of the original request, because we don't want to infer this
#On the client side, as the state of the client code could change easily
#We want this request to return with the originating ID as well.
object_["id"] = id
self.return_resource(object_)
else:
self.raise_error(404, "Revision not scheduled for object: %s" % id)
else:
if object_.get("_id"):
del object_["_id"]
response = yield self.client.update(id, object_)
if response.get("updatedExisting"):
object_ = yield self.client.find_one_by_id(id)
self.return_resource(object_)
else:
self.raise_error(404, "Resource not found: %s" % id)
except ValidationError as vex:
self.logger.error("%s validation error" % self.object_name, vex)
self.raise_error(400, "Your %s cannot be updated because it is missing required fields, see docs" % self.object_name)
except ValueError as ex:
self.raise_error(400, "Invalid JSON Body, check formatting. %s" % ex[0])
except InvalidId as ex:
self.raise_error(message="Your ID is malformed: %s" % id)
except Exception as ex:
self.logger.error(ex)
self.raise_error()
|
python
|
def put(self, id):
"""
Update a resource by bson ObjectId
:returns: json string representation
:rtype: JSON
"""
try:
#Async update flow
object_ = json_util.loads(self.request.body)
toa = self.request.headers.get("Caesium-TOA", None)
obj_check = yield self.client.find_one_by_id(id)
if not obj_check:
self.raise_error(404, "Resource not found: %s" % id)
self.finish()
return
if toa:
stack = AsyncSchedulableDocumentRevisionStack(self.client.collection_name, self.settings, master_id=id)
revision_id = yield stack.push(object_, int(toa), meta=self._get_meta_data())
if isinstance(revision_id, str):
self.set_header("Caesium-TOA", toa)
#We add the id of the original request, because we don't want to infer this
#On the client side, as the state of the client code could change easily
#We want this request to return with the originating ID as well.
object_["id"] = id
self.return_resource(object_)
else:
self.raise_error(404, "Revision not scheduled for object: %s" % id)
else:
if object_.get("_id"):
del object_["_id"]
response = yield self.client.update(id, object_)
if response.get("updatedExisting"):
object_ = yield self.client.find_one_by_id(id)
self.return_resource(object_)
else:
self.raise_error(404, "Resource not found: %s" % id)
except ValidationError as vex:
self.logger.error("%s validation error" % self.object_name, vex)
self.raise_error(400, "Your %s cannot be updated because it is missing required fields, see docs" % self.object_name)
except ValueError as ex:
self.raise_error(400, "Invalid JSON Body, check formatting. %s" % ex[0])
except InvalidId as ex:
self.raise_error(message="Your ID is malformed: %s" % id)
except Exception as ex:
self.logger.error(ex)
self.raise_error()
|
[
"def",
"put",
"(",
"self",
",",
"id",
")",
":",
"try",
":",
"#Async update flow",
"object_",
"=",
"json_util",
".",
"loads",
"(",
"self",
".",
"request",
".",
"body",
")",
"toa",
"=",
"self",
".",
"request",
".",
"headers",
".",
"get",
"(",
"\"Caesium-TOA\"",
",",
"None",
")",
"obj_check",
"=",
"yield",
"self",
".",
"client",
".",
"find_one_by_id",
"(",
"id",
")",
"if",
"not",
"obj_check",
":",
"self",
".",
"raise_error",
"(",
"404",
",",
"\"Resource not found: %s\"",
"%",
"id",
")",
"self",
".",
"finish",
"(",
")",
"return",
"if",
"toa",
":",
"stack",
"=",
"AsyncSchedulableDocumentRevisionStack",
"(",
"self",
".",
"client",
".",
"collection_name",
",",
"self",
".",
"settings",
",",
"master_id",
"=",
"id",
")",
"revision_id",
"=",
"yield",
"stack",
".",
"push",
"(",
"object_",
",",
"int",
"(",
"toa",
")",
",",
"meta",
"=",
"self",
".",
"_get_meta_data",
"(",
")",
")",
"if",
"isinstance",
"(",
"revision_id",
",",
"str",
")",
":",
"self",
".",
"set_header",
"(",
"\"Caesium-TOA\"",
",",
"toa",
")",
"#We add the id of the original request, because we don't want to infer this",
"#On the client side, as the state of the client code could change easily",
"#We want this request to return with the originating ID as well.",
"object_",
"[",
"\"id\"",
"]",
"=",
"id",
"self",
".",
"return_resource",
"(",
"object_",
")",
"else",
":",
"self",
".",
"raise_error",
"(",
"404",
",",
"\"Revision not scheduled for object: %s\"",
"%",
"id",
")",
"else",
":",
"if",
"object_",
".",
"get",
"(",
"\"_id\"",
")",
":",
"del",
"object_",
"[",
"\"_id\"",
"]",
"response",
"=",
"yield",
"self",
".",
"client",
".",
"update",
"(",
"id",
",",
"object_",
")",
"if",
"response",
".",
"get",
"(",
"\"updatedExisting\"",
")",
":",
"object_",
"=",
"yield",
"self",
".",
"client",
".",
"find_one_by_id",
"(",
"id",
")",
"self",
".",
"return_resource",
"(",
"object_",
")",
"else",
":",
"self",
".",
"raise_error",
"(",
"404",
",",
"\"Resource not found: %s\"",
"%",
"id",
")",
"except",
"ValidationError",
"as",
"vex",
":",
"self",
".",
"logger",
".",
"error",
"(",
"\"%s validation error\"",
"%",
"self",
".",
"object_name",
",",
"vex",
")",
"self",
".",
"raise_error",
"(",
"400",
",",
"\"Your %s cannot be updated because it is missing required fields, see docs\"",
"%",
"self",
".",
"object_name",
")",
"except",
"ValueError",
"as",
"ex",
":",
"self",
".",
"raise_error",
"(",
"400",
",",
"\"Invalid JSON Body, check formatting. %s\"",
"%",
"ex",
"[",
"0",
"]",
")",
"except",
"InvalidId",
"as",
"ex",
":",
"self",
".",
"raise_error",
"(",
"message",
"=",
"\"Your ID is malformed: %s\"",
"%",
"id",
")",
"except",
"Exception",
"as",
"ex",
":",
"self",
".",
"logger",
".",
"error",
"(",
"ex",
")",
"self",
".",
"raise_error",
"(",
")"
] |
Update a resource by bson ObjectId
:returns: json string representation
:rtype: JSON
|
[
"Update",
"a",
"resource",
"by",
"bson",
"ObjectId"
] |
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
|
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L321-L377
|
243,282
|
urbn/Caesium
|
caesium/handler.py
|
BaseRestfulMotorHandler.post
|
def post(self, id=None):
"""
Create a new object resource
:json: Object to create
:returns: json string representation
:rtype: JSON
"""
try:
try:
base_object = json_util.loads(self.request.body)
except TypeError:
base_object = json_util.loads(self.request.body.decode())
#assert not hasattr(base_object, "_id")
toa = self.request.headers.get("Caesium-TOA", None)
if toa:
# Async create flow
stack = AsyncSchedulableDocumentRevisionStack(self.client.collection_name, self.settings)
revision_id = yield stack.push(base_object, toa=int(toa), meta=self._get_meta_data())
resource = yield stack.preview(revision_id)
if isinstance(revision_id, str):
self.set_header("Caesium-TOA", toa)
self.return_resource(resource.get("snapshot"))
else:
self.raise_error(404, "Revision not scheduled for object: %s" % id)
else:
id = yield self.client.insert(base_object)
base_object = yield self.client.find_one_by_id(id)
self.return_resource(base_object)
except ValidationError as vex:
self.logger.error("%s validation error" % self.object_name, vex)
self.raise_error(400, "Your %s cannot be created because it is missing required fields, see docs" % self.object_name)
except ValueError as ex:
self.raise_error(400, "Invalid JSON Body, check formatting. %s" % ex[0])
except Exception as ex:
self.logger.error(ex)
self.raise_error()
|
python
|
def post(self, id=None):
"""
Create a new object resource
:json: Object to create
:returns: json string representation
:rtype: JSON
"""
try:
try:
base_object = json_util.loads(self.request.body)
except TypeError:
base_object = json_util.loads(self.request.body.decode())
#assert not hasattr(base_object, "_id")
toa = self.request.headers.get("Caesium-TOA", None)
if toa:
# Async create flow
stack = AsyncSchedulableDocumentRevisionStack(self.client.collection_name, self.settings)
revision_id = yield stack.push(base_object, toa=int(toa), meta=self._get_meta_data())
resource = yield stack.preview(revision_id)
if isinstance(revision_id, str):
self.set_header("Caesium-TOA", toa)
self.return_resource(resource.get("snapshot"))
else:
self.raise_error(404, "Revision not scheduled for object: %s" % id)
else:
id = yield self.client.insert(base_object)
base_object = yield self.client.find_one_by_id(id)
self.return_resource(base_object)
except ValidationError as vex:
self.logger.error("%s validation error" % self.object_name, vex)
self.raise_error(400, "Your %s cannot be created because it is missing required fields, see docs" % self.object_name)
except ValueError as ex:
self.raise_error(400, "Invalid JSON Body, check formatting. %s" % ex[0])
except Exception as ex:
self.logger.error(ex)
self.raise_error()
|
[
"def",
"post",
"(",
"self",
",",
"id",
"=",
"None",
")",
":",
"try",
":",
"try",
":",
"base_object",
"=",
"json_util",
".",
"loads",
"(",
"self",
".",
"request",
".",
"body",
")",
"except",
"TypeError",
":",
"base_object",
"=",
"json_util",
".",
"loads",
"(",
"self",
".",
"request",
".",
"body",
".",
"decode",
"(",
")",
")",
"#assert not hasattr(base_object, \"_id\")",
"toa",
"=",
"self",
".",
"request",
".",
"headers",
".",
"get",
"(",
"\"Caesium-TOA\"",
",",
"None",
")",
"if",
"toa",
":",
"# Async create flow",
"stack",
"=",
"AsyncSchedulableDocumentRevisionStack",
"(",
"self",
".",
"client",
".",
"collection_name",
",",
"self",
".",
"settings",
")",
"revision_id",
"=",
"yield",
"stack",
".",
"push",
"(",
"base_object",
",",
"toa",
"=",
"int",
"(",
"toa",
")",
",",
"meta",
"=",
"self",
".",
"_get_meta_data",
"(",
")",
")",
"resource",
"=",
"yield",
"stack",
".",
"preview",
"(",
"revision_id",
")",
"if",
"isinstance",
"(",
"revision_id",
",",
"str",
")",
":",
"self",
".",
"set_header",
"(",
"\"Caesium-TOA\"",
",",
"toa",
")",
"self",
".",
"return_resource",
"(",
"resource",
".",
"get",
"(",
"\"snapshot\"",
")",
")",
"else",
":",
"self",
".",
"raise_error",
"(",
"404",
",",
"\"Revision not scheduled for object: %s\"",
"%",
"id",
")",
"else",
":",
"id",
"=",
"yield",
"self",
".",
"client",
".",
"insert",
"(",
"base_object",
")",
"base_object",
"=",
"yield",
"self",
".",
"client",
".",
"find_one_by_id",
"(",
"id",
")",
"self",
".",
"return_resource",
"(",
"base_object",
")",
"except",
"ValidationError",
"as",
"vex",
":",
"self",
".",
"logger",
".",
"error",
"(",
"\"%s validation error\"",
"%",
"self",
".",
"object_name",
",",
"vex",
")",
"self",
".",
"raise_error",
"(",
"400",
",",
"\"Your %s cannot be created because it is missing required fields, see docs\"",
"%",
"self",
".",
"object_name",
")",
"except",
"ValueError",
"as",
"ex",
":",
"self",
".",
"raise_error",
"(",
"400",
",",
"\"Invalid JSON Body, check formatting. %s\"",
"%",
"ex",
"[",
"0",
"]",
")",
"except",
"Exception",
"as",
"ex",
":",
"self",
".",
"logger",
".",
"error",
"(",
"ex",
")",
"self",
".",
"raise_error",
"(",
")"
] |
Create a new object resource
:json: Object to create
:returns: json string representation
:rtype: JSON
|
[
"Create",
"a",
"new",
"object",
"resource"
] |
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
|
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L381-L428
|
243,283
|
urbn/Caesium
|
caesium/handler.py
|
BaseRevisionList.initialize
|
def initialize(self):
"""Initializer for the Search Handler"""
self.logger = logging.getLogger(self.__class__.__name__)
self.client = None
|
python
|
def initialize(self):
"""Initializer for the Search Handler"""
self.logger = logging.getLogger(self.__class__.__name__)
self.client = None
|
[
"def",
"initialize",
"(",
"self",
")",
":",
"self",
".",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"self",
".",
"__class__",
".",
"__name__",
")",
"self",
".",
"client",
"=",
"None"
] |
Initializer for the Search Handler
|
[
"Initializer",
"for",
"the",
"Search",
"Handler"
] |
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
|
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L456-L460
|
243,284
|
urbn/Caesium
|
caesium/handler.py
|
BaseRevisionList.__lazy_migration
|
def __lazy_migration(self, master_id):
"""
Creates a revision for a master id that didn't previously have a
revision, this allows you to easily turn on revisioning for a
collection that didn't previously allow for it.
:param master_id:
:returns: list of objects
"""
collection_name = self.request.headers.get("collection")
if collection_name:
stack = AsyncSchedulableDocumentRevisionStack(collection_name,
self.settings,
master_id=master_id,
)
objects = yield stack._lazy_migration(meta=self._get_meta_data())
raise Return(objects)
self.raise_error(500, "This object %s/%s didn't exist as a revision, "
"we tried to create it but we failed... Sorry. "
"Please check this object" % (collection_name,
master_id))
raise Return(None)
|
python
|
def __lazy_migration(self, master_id):
"""
Creates a revision for a master id that didn't previously have a
revision, this allows you to easily turn on revisioning for a
collection that didn't previously allow for it.
:param master_id:
:returns: list of objects
"""
collection_name = self.request.headers.get("collection")
if collection_name:
stack = AsyncSchedulableDocumentRevisionStack(collection_name,
self.settings,
master_id=master_id,
)
objects = yield stack._lazy_migration(meta=self._get_meta_data())
raise Return(objects)
self.raise_error(500, "This object %s/%s didn't exist as a revision, "
"we tried to create it but we failed... Sorry. "
"Please check this object" % (collection_name,
master_id))
raise Return(None)
|
[
"def",
"__lazy_migration",
"(",
"self",
",",
"master_id",
")",
":",
"collection_name",
"=",
"self",
".",
"request",
".",
"headers",
".",
"get",
"(",
"\"collection\"",
")",
"if",
"collection_name",
":",
"stack",
"=",
"AsyncSchedulableDocumentRevisionStack",
"(",
"collection_name",
",",
"self",
".",
"settings",
",",
"master_id",
"=",
"master_id",
",",
")",
"objects",
"=",
"yield",
"stack",
".",
"_lazy_migration",
"(",
"meta",
"=",
"self",
".",
"_get_meta_data",
"(",
")",
")",
"raise",
"Return",
"(",
"objects",
")",
"self",
".",
"raise_error",
"(",
"500",
",",
"\"This object %s/%s didn't exist as a revision, \"",
"\"we tried to create it but we failed... Sorry. \"",
"\"Please check this object\"",
"%",
"(",
"collection_name",
",",
"master_id",
")",
")",
"raise",
"Return",
"(",
"None",
")"
] |
Creates a revision for a master id that didn't previously have a
revision, this allows you to easily turn on revisioning for a
collection that didn't previously allow for it.
:param master_id:
:returns: list of objects
|
[
"Creates",
"a",
"revision",
"for",
"a",
"master",
"id",
"that",
"didn",
"t",
"previously",
"have",
"a",
"revision",
"this",
"allows",
"you",
"to",
"easily",
"turn",
"on",
"revisioning",
"for",
"a",
"collection",
"that",
"didn",
"t",
"previously",
"allow",
"for",
"it",
"."
] |
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
|
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L463-L486
|
243,285
|
urbn/Caesium
|
caesium/handler.py
|
BaseRevisionList.get
|
def get(self, master_id):
"""
Get a list of revisions by master ID
:param master_id:
:return:
"""
collection_name = self.request.headers.get("collection")
self.client = BaseAsyncMotorDocument("%s_revisions" % collection_name)
limit = self.get_query_argument("limit", 2)
add_current_revision = self.get_arg_value_as_type("addCurrent",
"false")
show_history = self.get_arg_value_as_type("showHistory", "false")
objects_processed = []
if isinstance(limit, unicode):
limit = int(limit)
objects = yield self.client.find({"master_id": master_id,
"processed": False},
orderby="toa",
order_by_direction=1,
page=0,
limit=20)
# If this is a document that should have a revision and doesn't we
# orchestratioin creation of the first one
if len(objects) == 0:
new_revision = yield self.__lazy_migration(master_id)
if not new_revision:
return
if show_history:
objects_processed = yield self.client.find({"master_id": master_id,
"processed": True},
orderby="toa",
order_by_direction=-1,
page=0,
limit=limit)
elif add_current_revision:
objects_processed = yield self.client.find({"master_id": master_id,
"processed": True},
orderby="toa",
order_by_direction=-1,
page=0,
limit=1)
if len(objects_processed) > 0:
objects_processed = objects_processed[::-1]
objects_processed[-1]["current"] = True
objects = objects_processed + objects
self.write({
"count": len(objects),
"results": objects
})
|
python
|
def get(self, master_id):
"""
Get a list of revisions by master ID
:param master_id:
:return:
"""
collection_name = self.request.headers.get("collection")
self.client = BaseAsyncMotorDocument("%s_revisions" % collection_name)
limit = self.get_query_argument("limit", 2)
add_current_revision = self.get_arg_value_as_type("addCurrent",
"false")
show_history = self.get_arg_value_as_type("showHistory", "false")
objects_processed = []
if isinstance(limit, unicode):
limit = int(limit)
objects = yield self.client.find({"master_id": master_id,
"processed": False},
orderby="toa",
order_by_direction=1,
page=0,
limit=20)
# If this is a document that should have a revision and doesn't we
# orchestratioin creation of the first one
if len(objects) == 0:
new_revision = yield self.__lazy_migration(master_id)
if not new_revision:
return
if show_history:
objects_processed = yield self.client.find({"master_id": master_id,
"processed": True},
orderby="toa",
order_by_direction=-1,
page=0,
limit=limit)
elif add_current_revision:
objects_processed = yield self.client.find({"master_id": master_id,
"processed": True},
orderby="toa",
order_by_direction=-1,
page=0,
limit=1)
if len(objects_processed) > 0:
objects_processed = objects_processed[::-1]
objects_processed[-1]["current"] = True
objects = objects_processed + objects
self.write({
"count": len(objects),
"results": objects
})
|
[
"def",
"get",
"(",
"self",
",",
"master_id",
")",
":",
"collection_name",
"=",
"self",
".",
"request",
".",
"headers",
".",
"get",
"(",
"\"collection\"",
")",
"self",
".",
"client",
"=",
"BaseAsyncMotorDocument",
"(",
"\"%s_revisions\"",
"%",
"collection_name",
")",
"limit",
"=",
"self",
".",
"get_query_argument",
"(",
"\"limit\"",
",",
"2",
")",
"add_current_revision",
"=",
"self",
".",
"get_arg_value_as_type",
"(",
"\"addCurrent\"",
",",
"\"false\"",
")",
"show_history",
"=",
"self",
".",
"get_arg_value_as_type",
"(",
"\"showHistory\"",
",",
"\"false\"",
")",
"objects_processed",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"limit",
",",
"unicode",
")",
":",
"limit",
"=",
"int",
"(",
"limit",
")",
"objects",
"=",
"yield",
"self",
".",
"client",
".",
"find",
"(",
"{",
"\"master_id\"",
":",
"master_id",
",",
"\"processed\"",
":",
"False",
"}",
",",
"orderby",
"=",
"\"toa\"",
",",
"order_by_direction",
"=",
"1",
",",
"page",
"=",
"0",
",",
"limit",
"=",
"20",
")",
"# If this is a document that should have a revision and doesn't we",
"# orchestratioin creation of the first one",
"if",
"len",
"(",
"objects",
")",
"==",
"0",
":",
"new_revision",
"=",
"yield",
"self",
".",
"__lazy_migration",
"(",
"master_id",
")",
"if",
"not",
"new_revision",
":",
"return",
"if",
"show_history",
":",
"objects_processed",
"=",
"yield",
"self",
".",
"client",
".",
"find",
"(",
"{",
"\"master_id\"",
":",
"master_id",
",",
"\"processed\"",
":",
"True",
"}",
",",
"orderby",
"=",
"\"toa\"",
",",
"order_by_direction",
"=",
"-",
"1",
",",
"page",
"=",
"0",
",",
"limit",
"=",
"limit",
")",
"elif",
"add_current_revision",
":",
"objects_processed",
"=",
"yield",
"self",
".",
"client",
".",
"find",
"(",
"{",
"\"master_id\"",
":",
"master_id",
",",
"\"processed\"",
":",
"True",
"}",
",",
"orderby",
"=",
"\"toa\"",
",",
"order_by_direction",
"=",
"-",
"1",
",",
"page",
"=",
"0",
",",
"limit",
"=",
"1",
")",
"if",
"len",
"(",
"objects_processed",
")",
">",
"0",
":",
"objects_processed",
"=",
"objects_processed",
"[",
":",
":",
"-",
"1",
"]",
"objects_processed",
"[",
"-",
"1",
"]",
"[",
"\"current\"",
"]",
"=",
"True",
"objects",
"=",
"objects_processed",
"+",
"objects",
"self",
".",
"write",
"(",
"{",
"\"count\"",
":",
"len",
"(",
"objects",
")",
",",
"\"results\"",
":",
"objects",
"}",
")"
] |
Get a list of revisions by master ID
:param master_id:
:return:
|
[
"Get",
"a",
"list",
"of",
"revisions",
"by",
"master",
"ID"
] |
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
|
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L489-L548
|
243,286
|
urbn/Caesium
|
caesium/handler.py
|
RevisionHandler.put
|
def put(self, id):
"""
Update a revision by ID
:param id: BSON id
:return:
"""
collection_name = self.request.headers.get("collection")
if not collection_name:
self.raise_error(400, "Missing a collection name header")
self.client = BaseAsyncMotorDocument("%s_revisions" % collection_name)
super(self.__class__, self).put(id)
|
python
|
def put(self, id):
"""
Update a revision by ID
:param id: BSON id
:return:
"""
collection_name = self.request.headers.get("collection")
if not collection_name:
self.raise_error(400, "Missing a collection name header")
self.client = BaseAsyncMotorDocument("%s_revisions" % collection_name)
super(self.__class__, self).put(id)
|
[
"def",
"put",
"(",
"self",
",",
"id",
")",
":",
"collection_name",
"=",
"self",
".",
"request",
".",
"headers",
".",
"get",
"(",
"\"collection\"",
")",
"if",
"not",
"collection_name",
":",
"self",
".",
"raise_error",
"(",
"400",
",",
"\"Missing a collection name header\"",
")",
"self",
".",
"client",
"=",
"BaseAsyncMotorDocument",
"(",
"\"%s_revisions\"",
"%",
"collection_name",
")",
"super",
"(",
"self",
".",
"__class__",
",",
"self",
")",
".",
"put",
"(",
"id",
")"
] |
Update a revision by ID
:param id: BSON id
:return:
|
[
"Update",
"a",
"revision",
"by",
"ID"
] |
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
|
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L559-L574
|
243,287
|
urbn/Caesium
|
caesium/handler.py
|
RevisionHandler.get
|
def get(self, id):
"""
Get revision based on the stack preview algorithm
:param id: BSON id
:return: JSON
"""
collection_name = self.request.headers.get("collection")
if not collection_name:
self.raise_error(400, "Missing a collection name for stack")
self.stack = AsyncSchedulableDocumentRevisionStack(collection_name, self.settings)
revision = yield self.stack.preview(id)
self.write(revision)
|
python
|
def get(self, id):
"""
Get revision based on the stack preview algorithm
:param id: BSON id
:return: JSON
"""
collection_name = self.request.headers.get("collection")
if not collection_name:
self.raise_error(400, "Missing a collection name for stack")
self.stack = AsyncSchedulableDocumentRevisionStack(collection_name, self.settings)
revision = yield self.stack.preview(id)
self.write(revision)
|
[
"def",
"get",
"(",
"self",
",",
"id",
")",
":",
"collection_name",
"=",
"self",
".",
"request",
".",
"headers",
".",
"get",
"(",
"\"collection\"",
")",
"if",
"not",
"collection_name",
":",
"self",
".",
"raise_error",
"(",
"400",
",",
"\"Missing a collection name for stack\"",
")",
"self",
".",
"stack",
"=",
"AsyncSchedulableDocumentRevisionStack",
"(",
"collection_name",
",",
"self",
".",
"settings",
")",
"revision",
"=",
"yield",
"self",
".",
"stack",
".",
"preview",
"(",
"id",
")",
"self",
".",
"write",
"(",
"revision",
")"
] |
Get revision based on the stack preview algorithm
:param id: BSON id
:return: JSON
|
[
"Get",
"revision",
"based",
"on",
"the",
"stack",
"preview",
"algorithm"
] |
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
|
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L613-L628
|
243,288
|
urbn/Caesium
|
caesium/handler.py
|
BaseMotorSearch.get
|
def get(self):
"""
Standard search end point for a resource of any type, override this get method as necessary
in any specifc sub class. This is mostly here as a convenience for basic querying functionality
on attribute
example URL::
foo?attr1=foo&attr2=true
will create a query of::
{
"attr1": "foo",
"attr2": true
}
"""
objects = yield self.client.find(self.get_mongo_query_from_arguments())
self.write({
"count" : len(objects),
"results": objects
})
self.finish()
|
python
|
def get(self):
"""
Standard search end point for a resource of any type, override this get method as necessary
in any specifc sub class. This is mostly here as a convenience for basic querying functionality
on attribute
example URL::
foo?attr1=foo&attr2=true
will create a query of::
{
"attr1": "foo",
"attr2": true
}
"""
objects = yield self.client.find(self.get_mongo_query_from_arguments())
self.write({
"count" : len(objects),
"results": objects
})
self.finish()
|
[
"def",
"get",
"(",
"self",
")",
":",
"objects",
"=",
"yield",
"self",
".",
"client",
".",
"find",
"(",
"self",
".",
"get_mongo_query_from_arguments",
"(",
")",
")",
"self",
".",
"write",
"(",
"{",
"\"count\"",
":",
"len",
"(",
"objects",
")",
",",
"\"results\"",
":",
"objects",
"}",
")",
"self",
".",
"finish",
"(",
")"
] |
Standard search end point for a resource of any type, override this get method as necessary
in any specifc sub class. This is mostly here as a convenience for basic querying functionality
on attribute
example URL::
foo?attr1=foo&attr2=true
will create a query of::
{
"attr1": "foo",
"attr2": true
}
|
[
"Standard",
"search",
"end",
"point",
"for",
"a",
"resource",
"of",
"any",
"type",
"override",
"this",
"get",
"method",
"as",
"necessary",
"in",
"any",
"specifc",
"sub",
"class",
".",
"This",
"is",
"mostly",
"here",
"as",
"a",
"convenience",
"for",
"basic",
"querying",
"functionality",
"on",
"attribute"
] |
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
|
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L640-L666
|
243,289
|
urbn/Caesium
|
caesium/handler.py
|
BaseBulkScheduleableUpdateHandler.put
|
def put(self, id=None):
"""Update many objects with a single PUT.
Example Request::
{
"ids": ["52b0ede98ac752b358b1bd69", "52b0ede98ac752b358b1bd70"],
"patch": {
"foo": "bar"
}
}
"""
toa = self.request.headers.get("Caesium-TOA")
if not toa:
self.raise_error(400, "Caesium-TOA header is required, none found")
self.finish(self.request.headers.get("Caesium-TOA"))
meta = self._get_meta_data()
meta["bulk_id"] = uuid.uuid4().get_hex()
ids = self.get_json_argument("ids")
patch = self.get_json_argument("patch")
self.get_json_argument("ids", [])
for id in ids:
stack = AsyncSchedulableDocumentRevisionStack(self.client.collection_name, self.settings, master_id=id)
stack.push(patch, toa=toa, meta=meta)
self.write({
"count": len(ids),
"result": {
"ids": ids,
"toa": toa,
"patch": patch,
}
})
self.finish()
|
python
|
def put(self, id=None):
"""Update many objects with a single PUT.
Example Request::
{
"ids": ["52b0ede98ac752b358b1bd69", "52b0ede98ac752b358b1bd70"],
"patch": {
"foo": "bar"
}
}
"""
toa = self.request.headers.get("Caesium-TOA")
if not toa:
self.raise_error(400, "Caesium-TOA header is required, none found")
self.finish(self.request.headers.get("Caesium-TOA"))
meta = self._get_meta_data()
meta["bulk_id"] = uuid.uuid4().get_hex()
ids = self.get_json_argument("ids")
patch = self.get_json_argument("patch")
self.get_json_argument("ids", [])
for id in ids:
stack = AsyncSchedulableDocumentRevisionStack(self.client.collection_name, self.settings, master_id=id)
stack.push(patch, toa=toa, meta=meta)
self.write({
"count": len(ids),
"result": {
"ids": ids,
"toa": toa,
"patch": patch,
}
})
self.finish()
|
[
"def",
"put",
"(",
"self",
",",
"id",
"=",
"None",
")",
":",
"toa",
"=",
"self",
".",
"request",
".",
"headers",
".",
"get",
"(",
"\"Caesium-TOA\"",
")",
"if",
"not",
"toa",
":",
"self",
".",
"raise_error",
"(",
"400",
",",
"\"Caesium-TOA header is required, none found\"",
")",
"self",
".",
"finish",
"(",
"self",
".",
"request",
".",
"headers",
".",
"get",
"(",
"\"Caesium-TOA\"",
")",
")",
"meta",
"=",
"self",
".",
"_get_meta_data",
"(",
")",
"meta",
"[",
"\"bulk_id\"",
"]",
"=",
"uuid",
".",
"uuid4",
"(",
")",
".",
"get_hex",
"(",
")",
"ids",
"=",
"self",
".",
"get_json_argument",
"(",
"\"ids\"",
")",
"patch",
"=",
"self",
".",
"get_json_argument",
"(",
"\"patch\"",
")",
"self",
".",
"get_json_argument",
"(",
"\"ids\"",
",",
"[",
"]",
")",
"for",
"id",
"in",
"ids",
":",
"stack",
"=",
"AsyncSchedulableDocumentRevisionStack",
"(",
"self",
".",
"client",
".",
"collection_name",
",",
"self",
".",
"settings",
",",
"master_id",
"=",
"id",
")",
"stack",
".",
"push",
"(",
"patch",
",",
"toa",
"=",
"toa",
",",
"meta",
"=",
"meta",
")",
"self",
".",
"write",
"(",
"{",
"\"count\"",
":",
"len",
"(",
"ids",
")",
",",
"\"result\"",
":",
"{",
"\"ids\"",
":",
"ids",
",",
"\"toa\"",
":",
"toa",
",",
"\"patch\"",
":",
"patch",
",",
"}",
"}",
")",
"self",
".",
"finish",
"(",
")"
] |
Update many objects with a single PUT.
Example Request::
{
"ids": ["52b0ede98ac752b358b1bd69", "52b0ede98ac752b358b1bd70"],
"patch": {
"foo": "bar"
}
}
|
[
"Update",
"many",
"objects",
"with",
"a",
"single",
"PUT",
"."
] |
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
|
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L675-L713
|
243,290
|
urbn/Caesium
|
caesium/handler.py
|
BaseBulkScheduleableUpdateHandler.delete
|
def delete(self, bulk_id):
"""Update many objects with a single toa
:param str bulk_id: The bulk id for the job you want to delete
"""
collection_name = self.request.headers.get("collection")
if not collection_name:
self.raise_error(400, "Missing a collection name header")
self.revisions = BaseAsyncMotorDocument("%s_revisions" % collection_name)
self.logger.info("Deleting revisions with bulk_id %s" % (bulk_id))
result = yield self.revisions.collection.remove({"meta.bulk_id": bulk_id})
self.write(result)
|
python
|
def delete(self, bulk_id):
"""Update many objects with a single toa
:param str bulk_id: The bulk id for the job you want to delete
"""
collection_name = self.request.headers.get("collection")
if not collection_name:
self.raise_error(400, "Missing a collection name header")
self.revisions = BaseAsyncMotorDocument("%s_revisions" % collection_name)
self.logger.info("Deleting revisions with bulk_id %s" % (bulk_id))
result = yield self.revisions.collection.remove({"meta.bulk_id": bulk_id})
self.write(result)
|
[
"def",
"delete",
"(",
"self",
",",
"bulk_id",
")",
":",
"collection_name",
"=",
"self",
".",
"request",
".",
"headers",
".",
"get",
"(",
"\"collection\"",
")",
"if",
"not",
"collection_name",
":",
"self",
".",
"raise_error",
"(",
"400",
",",
"\"Missing a collection name header\"",
")",
"self",
".",
"revisions",
"=",
"BaseAsyncMotorDocument",
"(",
"\"%s_revisions\"",
"%",
"collection_name",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"Deleting revisions with bulk_id %s\"",
"%",
"(",
"bulk_id",
")",
")",
"result",
"=",
"yield",
"self",
".",
"revisions",
".",
"collection",
".",
"remove",
"(",
"{",
"\"meta.bulk_id\"",
":",
"bulk_id",
"}",
")",
"self",
".",
"write",
"(",
"result",
")"
] |
Update many objects with a single toa
:param str bulk_id: The bulk id for the job you want to delete
|
[
"Update",
"many",
"objects",
"with",
"a",
"single",
"toa"
] |
2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1
|
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L716-L733
|
243,291
|
stephanepechard/projy
|
projy/collectors/AuthorCollector.py
|
AuthorCollector.author_from_git
|
def author_from_git(self):
""" Get the author name from git information. """
self.author = None
try:
encoding = locale.getdefaultlocale()[1]
# launch git command and get answer
cmd = Popen(["git", "config", "--get", "user.name"], stdout=PIPE)
stdoutdata = cmd.communicate().decode(encoding)
if (stdoutdata[0]):
import ipdb;ipdb.set_trace()
author = stdoutdata[0].rstrip(os.linesep)
self.author = author#.decode('utf8')
except ImportError:
pass
except CalledProcessError:
pass
except OSError:
pass
return self.author
|
python
|
def author_from_git(self):
""" Get the author name from git information. """
self.author = None
try:
encoding = locale.getdefaultlocale()[1]
# launch git command and get answer
cmd = Popen(["git", "config", "--get", "user.name"], stdout=PIPE)
stdoutdata = cmd.communicate().decode(encoding)
if (stdoutdata[0]):
import ipdb;ipdb.set_trace()
author = stdoutdata[0].rstrip(os.linesep)
self.author = author#.decode('utf8')
except ImportError:
pass
except CalledProcessError:
pass
except OSError:
pass
return self.author
|
[
"def",
"author_from_git",
"(",
"self",
")",
":",
"self",
".",
"author",
"=",
"None",
"try",
":",
"encoding",
"=",
"locale",
".",
"getdefaultlocale",
"(",
")",
"[",
"1",
"]",
"# launch git command and get answer",
"cmd",
"=",
"Popen",
"(",
"[",
"\"git\"",
",",
"\"config\"",
",",
"\"--get\"",
",",
"\"user.name\"",
"]",
",",
"stdout",
"=",
"PIPE",
")",
"stdoutdata",
"=",
"cmd",
".",
"communicate",
"(",
")",
".",
"decode",
"(",
"encoding",
")",
"if",
"(",
"stdoutdata",
"[",
"0",
"]",
")",
":",
"import",
"ipdb",
"ipdb",
".",
"set_trace",
"(",
")",
"author",
"=",
"stdoutdata",
"[",
"0",
"]",
".",
"rstrip",
"(",
"os",
".",
"linesep",
")",
"self",
".",
"author",
"=",
"author",
"#.decode('utf8')",
"except",
"ImportError",
":",
"pass",
"except",
"CalledProcessError",
":",
"pass",
"except",
"OSError",
":",
"pass",
"return",
"self",
".",
"author"
] |
Get the author name from git information.
|
[
"Get",
"the",
"author",
"name",
"from",
"git",
"information",
"."
] |
3146b0e3c207b977e1b51fcb33138746dae83c23
|
https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/collectors/AuthorCollector.py#L26-L45
|
243,292
|
gsamokovarov/frames.py
|
frames/compat.py
|
_getframe
|
def _getframe(level=0):
'''
A reimplementation of `sys._getframe`.
`sys._getframe` is a private function, and isn't guaranteed to exist in all
versions and implementations of Python.
This function is about 2 times slower than the native implementation. It
relies on the asumption that the traceback objects have `tb_frame`
attributues holding proper frame objects.
:param level:
The number of levels deep in the stack to return the frame from.
Defaults to `0`.
:returns:
A frame object `levels` deep from the top of the stack.
'''
if level < 0:
level = 0
try:
raise
except:
# `sys.exc_info` returns `(type, value, traceback)`.
_, _, traceback = sys.exc_info()
frame = traceback.tb_frame
# Account for our exception, this will stop at `-1`.
while ~level:
frame = frame.f_back
if frame is None:
break
level -= 1
finally:
sys.exc_clear()
# Act as close to `sys._getframe` as possible.
if frame is None:
raise ValueError('call stack is not deep enough')
return frame
|
python
|
def _getframe(level=0):
'''
A reimplementation of `sys._getframe`.
`sys._getframe` is a private function, and isn't guaranteed to exist in all
versions and implementations of Python.
This function is about 2 times slower than the native implementation. It
relies on the asumption that the traceback objects have `tb_frame`
attributues holding proper frame objects.
:param level:
The number of levels deep in the stack to return the frame from.
Defaults to `0`.
:returns:
A frame object `levels` deep from the top of the stack.
'''
if level < 0:
level = 0
try:
raise
except:
# `sys.exc_info` returns `(type, value, traceback)`.
_, _, traceback = sys.exc_info()
frame = traceback.tb_frame
# Account for our exception, this will stop at `-1`.
while ~level:
frame = frame.f_back
if frame is None:
break
level -= 1
finally:
sys.exc_clear()
# Act as close to `sys._getframe` as possible.
if frame is None:
raise ValueError('call stack is not deep enough')
return frame
|
[
"def",
"_getframe",
"(",
"level",
"=",
"0",
")",
":",
"if",
"level",
"<",
"0",
":",
"level",
"=",
"0",
"try",
":",
"raise",
"except",
":",
"# `sys.exc_info` returns `(type, value, traceback)`.",
"_",
",",
"_",
",",
"traceback",
"=",
"sys",
".",
"exc_info",
"(",
")",
"frame",
"=",
"traceback",
".",
"tb_frame",
"# Account for our exception, this will stop at `-1`.",
"while",
"~",
"level",
":",
"frame",
"=",
"frame",
".",
"f_back",
"if",
"frame",
"is",
"None",
":",
"break",
"level",
"-=",
"1",
"finally",
":",
"sys",
".",
"exc_clear",
"(",
")",
"# Act as close to `sys._getframe` as possible.",
"if",
"frame",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'call stack is not deep enough'",
")",
"return",
"frame"
] |
A reimplementation of `sys._getframe`.
`sys._getframe` is a private function, and isn't guaranteed to exist in all
versions and implementations of Python.
This function is about 2 times slower than the native implementation. It
relies on the asumption that the traceback objects have `tb_frame`
attributues holding proper frame objects.
:param level:
The number of levels deep in the stack to return the frame from.
Defaults to `0`.
:returns:
A frame object `levels` deep from the top of the stack.
|
[
"A",
"reimplementation",
"of",
"sys",
".",
"_getframe",
"."
] |
ba43782d043691fb5a388a1e749e0f0edb68a3d7
|
https://github.com/gsamokovarov/frames.py/blob/ba43782d043691fb5a388a1e749e0f0edb68a3d7/frames/compat.py#L4-L47
|
243,293
|
johnnoone/facts
|
facts/grafts/__init__.py
|
graft
|
def graft(func=None, *, namespace=None):
"""Decorator for marking a function as a graft.
Parameters:
namespace (str): namespace of data, same format as targeting.
Returns:
Graft
For example, these grafts::
@graft
def foo_data:
return {'foo', True}
@graft(namespace='bar')
def bar_data:
return False
will be redered has::
{
'foo': True,
'bar': False
}
"""
if not func:
return functools.partial(graft, namespace=namespace)
if isinstance(func, Graft):
return func
return Graft(func, namespace=namespace)
|
python
|
def graft(func=None, *, namespace=None):
"""Decorator for marking a function as a graft.
Parameters:
namespace (str): namespace of data, same format as targeting.
Returns:
Graft
For example, these grafts::
@graft
def foo_data:
return {'foo', True}
@graft(namespace='bar')
def bar_data:
return False
will be redered has::
{
'foo': True,
'bar': False
}
"""
if not func:
return functools.partial(graft, namespace=namespace)
if isinstance(func, Graft):
return func
return Graft(func, namespace=namespace)
|
[
"def",
"graft",
"(",
"func",
"=",
"None",
",",
"*",
",",
"namespace",
"=",
"None",
")",
":",
"if",
"not",
"func",
":",
"return",
"functools",
".",
"partial",
"(",
"graft",
",",
"namespace",
"=",
"namespace",
")",
"if",
"isinstance",
"(",
"func",
",",
"Graft",
")",
":",
"return",
"func",
"return",
"Graft",
"(",
"func",
",",
"namespace",
"=",
"namespace",
")"
] |
Decorator for marking a function as a graft.
Parameters:
namespace (str): namespace of data, same format as targeting.
Returns:
Graft
For example, these grafts::
@graft
def foo_data:
return {'foo', True}
@graft(namespace='bar')
def bar_data:
return False
will be redered has::
{
'foo': True,
'bar': False
}
|
[
"Decorator",
"for",
"marking",
"a",
"function",
"as",
"a",
"graft",
"."
] |
82d38a46c15d9c01200445526f4c0d1825fc1e51
|
https://github.com/johnnoone/facts/blob/82d38a46c15d9c01200445526f4c0d1825fc1e51/facts/grafts/__init__.py#L29-L61
|
243,294
|
johnnoone/facts
|
facts/grafts/__init__.py
|
load
|
def load(force=False):
    """Magical loading of all grafted functions.

    Collects every object recognised by ``is_graft`` from (a) submodules of
    this package, including a user-extensible directory, and (b) setuptools
    entry points in the configured group. Results accumulate in the
    module-level ``GRAFTS`` list, which also serves as the memoization cache.

    Parameters:
        force (bool): force reload
    """
    # Memoized: reuse the previously collected grafts unless forced.
    if GRAFTS and not force:
        return GRAFTS
    # insert missing paths
    # this could be a configurated item
    userpath = settings.userpath
    if os.path.isdir(userpath) and userpath not in __path__:
        __path__.append(userpath)
    def notify_error(name):
        # Shared error reporter for both discovery mechanisms.
        logging.error('unable to load %s package' % name)
    # autoload decorated functions
    # NOTE(review): find_module/load_module are deprecated APIs (since
    # Python 3.4); consider importlib.util equivalents — confirm target
    # Python versions before changing.
    walker = walk_packages(__path__, '%s.' % __name__, onerror=notify_error)
    for module_finder, name, ispkg in walker:
        loader = module_finder.find_module(name)
        mod = loader.load_module(name)
        # Keep every module-level object that was decorated as a graft.
        for func in mod.__dict__.values():
            if is_graft(func):
                GRAFTS.append(func)
    # append setuptools modules
    for entry_point in iter_entry_points(group=settings.entry_point):
        try:
            func = entry_point.load()
            if is_graft(func):
                GRAFTS.append(func)
            else:
                notify_error(entry_point.name)
        except Exception as error:
            # Entry points are third-party code: log the failure and keep
            # going rather than let one bad plugin abort discovery.
            logging.exception(error)
            notify_error(entry_point.name)
    return GRAFTS
|
python
|
def load(force=False):
    """Magical loading of all grafted functions.

    Collects every object recognised by ``is_graft`` from (a) submodules of
    this package, including a user-extensible directory, and (b) setuptools
    entry points in the configured group. Results accumulate in the
    module-level ``GRAFTS`` list, which also serves as the memoization cache.

    Parameters:
        force (bool): force reload
    """
    # Memoized: reuse the previously collected grafts unless forced.
    if GRAFTS and not force:
        return GRAFTS
    # insert missing paths
    # this could be a configurated item
    userpath = settings.userpath
    if os.path.isdir(userpath) and userpath not in __path__:
        __path__.append(userpath)
    def notify_error(name):
        # Shared error reporter for both discovery mechanisms.
        logging.error('unable to load %s package' % name)
    # autoload decorated functions
    # NOTE(review): find_module/load_module are deprecated APIs (since
    # Python 3.4); consider importlib.util equivalents — confirm target
    # Python versions before changing.
    walker = walk_packages(__path__, '%s.' % __name__, onerror=notify_error)
    for module_finder, name, ispkg in walker:
        loader = module_finder.find_module(name)
        mod = loader.load_module(name)
        # Keep every module-level object that was decorated as a graft.
        for func in mod.__dict__.values():
            if is_graft(func):
                GRAFTS.append(func)
    # append setuptools modules
    for entry_point in iter_entry_points(group=settings.entry_point):
        try:
            func = entry_point.load()
            if is_graft(func):
                GRAFTS.append(func)
            else:
                notify_error(entry_point.name)
        except Exception as error:
            # Entry points are third-party code: log the failure and keep
            # going rather than let one bad plugin abort discovery.
            logging.exception(error)
            notify_error(entry_point.name)
    return GRAFTS
|
[
"def",
"load",
"(",
"force",
"=",
"False",
")",
":",
"if",
"GRAFTS",
"and",
"not",
"force",
":",
"return",
"GRAFTS",
"# insert missing paths",
"# this could be a configurated item",
"userpath",
"=",
"settings",
".",
"userpath",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"userpath",
")",
"and",
"userpath",
"not",
"in",
"__path__",
":",
"__path__",
".",
"append",
"(",
"userpath",
")",
"def",
"notify_error",
"(",
"name",
")",
":",
"logging",
".",
"error",
"(",
"'unable to load %s package'",
"%",
"name",
")",
"# autoload decorated functions",
"walker",
"=",
"walk_packages",
"(",
"__path__",
",",
"'%s.'",
"%",
"__name__",
",",
"onerror",
"=",
"notify_error",
")",
"for",
"module_finder",
",",
"name",
",",
"ispkg",
"in",
"walker",
":",
"loader",
"=",
"module_finder",
".",
"find_module",
"(",
"name",
")",
"mod",
"=",
"loader",
".",
"load_module",
"(",
"name",
")",
"for",
"func",
"in",
"mod",
".",
"__dict__",
".",
"values",
"(",
")",
":",
"if",
"is_graft",
"(",
"func",
")",
":",
"GRAFTS",
".",
"append",
"(",
"func",
")",
"# append setuptools modules",
"for",
"entry_point",
"in",
"iter_entry_points",
"(",
"group",
"=",
"settings",
".",
"entry_point",
")",
":",
"try",
":",
"func",
"=",
"entry_point",
".",
"load",
"(",
")",
"if",
"is_graft",
"(",
"func",
")",
":",
"GRAFTS",
".",
"append",
"(",
"func",
")",
"else",
":",
"notify_error",
"(",
"entry_point",
".",
"name",
")",
"except",
"Exception",
"as",
"error",
":",
"logging",
".",
"exception",
"(",
"error",
")",
"notify_error",
"(",
"entry_point",
".",
"name",
")",
"return",
"GRAFTS"
] |
Magical loading of all grafted functions.
Parameters:
force (bool): force reload
|
[
"Magical",
"loading",
"of",
"all",
"grafted",
"functions",
"."
] |
82d38a46c15d9c01200445526f4c0d1825fc1e51
|
https://github.com/johnnoone/facts/blob/82d38a46c15d9c01200445526f4c0d1825fc1e51/facts/grafts/__init__.py#L70-L110
|
243,295
|
yougov/vr.builder
|
vr/builder/build.py
|
_write_buildproc_yaml
|
def _write_buildproc_yaml(build_data, env, user, cmd, volumes, app_folder):
    """
    Write a proc.yaml for the container and return the container path
    """
    # Describe the build as a proc; fields that only matter for deployed
    # procs (host, port, repo/buildpack metadata, ...) are left blank.
    proc_fields = {
        'app_folder': str(app_folder),
        'app_name': build_data.app_name,
        'app_repo_url': '',
        'app_repo_type': '',
        'buildpack_url': '',
        'buildpack_version': '',
        'config_name': 'build',
        'env': env,
        'host': '',
        'port': 0,
        'version': build_data.version,
        'release_hash': '',
        'settings': {},
        'user': user,
        'cmd': cmd,
        'volumes': volumes,
        'proc_name': 'build',
        'image_name': build_data.image_name,
        'image_url': build_data.image_url,
        'image_md5': build_data.image_md5,
    }
    buildproc = ProcData(proc_fields)
    # Persist the proc description so container tooling can pick it up.
    with open('buildproc.yaml', 'w') as outfile:
        outfile.write(buildproc.as_yaml())
    return get_container_path(buildproc)
|
python
|
def _write_buildproc_yaml(build_data, env, user, cmd, volumes, app_folder):
    """
    Write a proc.yaml for the container and return the container path
    """
    # Describe the build as a proc; fields that only matter for deployed
    # procs (host, port, repo/buildpack metadata, ...) are left blank.
    proc_fields = {
        'app_folder': str(app_folder),
        'app_name': build_data.app_name,
        'app_repo_url': '',
        'app_repo_type': '',
        'buildpack_url': '',
        'buildpack_version': '',
        'config_name': 'build',
        'env': env,
        'host': '',
        'port': 0,
        'version': build_data.version,
        'release_hash': '',
        'settings': {},
        'user': user,
        'cmd': cmd,
        'volumes': volumes,
        'proc_name': 'build',
        'image_name': build_data.image_name,
        'image_url': build_data.image_url,
        'image_md5': build_data.image_md5,
    }
    buildproc = ProcData(proc_fields)
    # Persist the proc description so container tooling can pick it up.
    with open('buildproc.yaml', 'w') as outfile:
        outfile.write(buildproc.as_yaml())
    return get_container_path(buildproc)
|
[
"def",
"_write_buildproc_yaml",
"(",
"build_data",
",",
"env",
",",
"user",
",",
"cmd",
",",
"volumes",
",",
"app_folder",
")",
":",
"buildproc",
"=",
"ProcData",
"(",
"{",
"'app_folder'",
":",
"str",
"(",
"app_folder",
")",
",",
"'app_name'",
":",
"build_data",
".",
"app_name",
",",
"'app_repo_url'",
":",
"''",
",",
"'app_repo_type'",
":",
"''",
",",
"'buildpack_url'",
":",
"''",
",",
"'buildpack_version'",
":",
"''",
",",
"'config_name'",
":",
"'build'",
",",
"'env'",
":",
"env",
",",
"'host'",
":",
"''",
",",
"'port'",
":",
"0",
",",
"'version'",
":",
"build_data",
".",
"version",
",",
"'release_hash'",
":",
"''",
",",
"'settings'",
":",
"{",
"}",
",",
"'user'",
":",
"user",
",",
"'cmd'",
":",
"cmd",
",",
"'volumes'",
":",
"volumes",
",",
"'proc_name'",
":",
"'build'",
",",
"'image_name'",
":",
"build_data",
".",
"image_name",
",",
"'image_url'",
":",
"build_data",
".",
"image_url",
",",
"'image_md5'",
":",
"build_data",
".",
"image_md5",
",",
"}",
")",
"# write a proc.yaml for the container.",
"with",
"open",
"(",
"'buildproc.yaml'",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"buildproc",
".",
"as_yaml",
"(",
")",
")",
"return",
"get_container_path",
"(",
"buildproc",
")"
] |
Write a proc.yaml for the container and return the container path
|
[
"Write",
"a",
"proc",
".",
"yaml",
"for",
"the",
"container",
"and",
"return",
"the",
"container",
"path"
] |
666b28f997d0cff52e82eed4ace1c73fee4b2136
|
https://github.com/yougov/vr.builder/blob/666b28f997d0cff52e82eed4ace1c73fee4b2136/vr/builder/build.py#L207-L238
|
243,296
|
yougov/vr.builder
|
vr/builder/build.py
|
assert_compile_finished
|
def assert_compile_finished(app_folder):
    """
    Once builder.sh has invoked the compile script, it should return and we
    should set a flag to the script returned. If that flag is missing, then
    it is an indication that the container crashed, and we generate an error.
    This function will clean up the flag after the check is performed, so only
    call this function once. See issue #141.
    """
    flag_path = os.path.join(app_folder, '.postbuild.flag')
    if not os.path.isfile(flag_path):
        raise AssertionError(
            'No postbuild flag set, LXC container may have crashed while '
            'building. Check compile logs for build.')
    # Best-effort cleanup: a failure to remove the flag is harmless.
    try:
        os.remove(flag_path)
    except OSError:
        pass
|
python
|
def assert_compile_finished(app_folder):
    """
    Once builder.sh has invoked the compile script, it should return and we
    should set a flag to the script returned. If that flag is missing, then
    it is an indication that the container crashed, and we generate an error.
    This function will clean up the flag after the check is performed, so only
    call this function once. See issue #141.
    """
    flag_path = os.path.join(app_folder, '.postbuild.flag')
    if not os.path.isfile(flag_path):
        raise AssertionError(
            'No postbuild flag set, LXC container may have crashed while '
            'building. Check compile logs for build.')
    # Best-effort cleanup: a failure to remove the flag is harmless.
    try:
        os.remove(flag_path)
    except OSError:
        pass
|
[
"def",
"assert_compile_finished",
"(",
"app_folder",
")",
":",
"fpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"app_folder",
",",
"'.postbuild.flag'",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"fpath",
")",
":",
"msg",
"=",
"(",
"'No postbuild flag set, LXC container may have crashed while '",
"'building. Check compile logs for build.'",
")",
"raise",
"AssertionError",
"(",
"msg",
")",
"try",
":",
"os",
".",
"remove",
"(",
"fpath",
")",
"except",
"OSError",
":",
"# It doesn't matter if it fails.",
"pass"
] |
Once builder.sh has invoked the compile script, it should return and we
should set a flag to the script returned. If that flag is missing, then
it is an indication that the container crashed, and we generate an error.
This function will clean up the flag after the check is performed, so only
call this function once. See issue #141.
|
[
"Once",
"builder",
".",
"sh",
"has",
"invoked",
"the",
"compile",
"script",
"it",
"should",
"return",
"and",
"we",
"should",
"set",
"a",
"flag",
"to",
"the",
"script",
"returned",
".",
"If",
"that",
"flag",
"is",
"missing",
"then",
"it",
"is",
"an",
"indication",
"that",
"the",
"container",
"crashed",
"and",
"we",
"generate",
"an",
"error",
"."
] |
666b28f997d0cff52e82eed4ace1c73fee4b2136
|
https://github.com/yougov/vr.builder/blob/666b28f997d0cff52e82eed4ace1c73fee4b2136/vr/builder/build.py#L241-L259
|
243,297
|
yougov/vr.builder
|
vr/builder/build.py
|
recover_release_data
|
def recover_release_data(app_folder):
    """
    Given the path to an app folder where an app was just built, return a
    dictionary containing the data emitted from running the buildpack's release
    script.
    Relies on the builder.sh script storing the release data in ./.release.yaml
    inside the app folder.
    """
    release_file = os.path.join(app_folder, '.release.yaml')
    # Binary mode: yaml.safe_load accepts bytes and detects the encoding.
    with open(release_file, 'rb') as stream:
        return yaml.safe_load(stream)
|
python
|
def recover_release_data(app_folder):
    """
    Given the path to an app folder where an app was just built, return a
    dictionary containing the data emitted from running the buildpack's release
    script.
    Relies on the builder.sh script storing the release data in ./.release.yaml
    inside the app folder.
    """
    release_file = os.path.join(app_folder, '.release.yaml')
    # Binary mode: yaml.safe_load accepts bytes and detects the encoding.
    with open(release_file, 'rb') as stream:
        return yaml.safe_load(stream)
|
[
"def",
"recover_release_data",
"(",
"app_folder",
")",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"app_folder",
",",
"'.release.yaml'",
")",
",",
"'rb'",
")",
"as",
"f",
":",
"return",
"yaml",
".",
"safe_load",
"(",
"f",
")"
] |
Given the path to an app folder where an app was just built, return a
dictionary containing the data emitted from running the buildpack's release
script.
Relies on the builder.sh script storing the release data in ./.release.yaml
inside the app folder.
|
[
"Given",
"the",
"path",
"to",
"an",
"app",
"folder",
"where",
"an",
"app",
"was",
"just",
"built",
"return",
"a",
"dictionary",
"containing",
"the",
"data",
"emitted",
"from",
"running",
"the",
"buildpack",
"s",
"release",
"script",
"."
] |
666b28f997d0cff52e82eed4ace1c73fee4b2136
|
https://github.com/yougov/vr.builder/blob/666b28f997d0cff52e82eed4ace1c73fee4b2136/vr/builder/build.py#L262-L272
|
243,298
|
yougov/vr.builder
|
vr/builder/build.py
|
recover_buildpack
|
def recover_buildpack(app_folder):
    """
    Given the path to an app folder where an app was just built, return a
    BuildPack object pointing to the dir for the buildpack used during the
    build.
    Relies on the builder.sh script storing the buildpack location in
    /.buildpack inside the container.
    """
    marker = os.path.join(app_folder, '.buildpack')
    with open(marker) as fh:
        recorded = fh.read()
    # The container records an absolute path with a trailing newline; drop
    # the leading slashes and newline so it can be re-rooted under cwd.
    relative = recorded.lstrip('/').rstrip('\n')
    return BuildPack(os.path.join(os.getcwd(), relative))
|
python
|
def recover_buildpack(app_folder):
    """
    Given the path to an app folder where an app was just built, return a
    BuildPack object pointing to the dir for the buildpack used during the
    build.
    Relies on the builder.sh script storing the buildpack location in
    /.buildpack inside the container.
    """
    marker = os.path.join(app_folder, '.buildpack')
    with open(marker) as fh:
        recorded = fh.read()
    # The container records an absolute path with a trailing newline; drop
    # the leading slashes and newline so it can be re-rooted under cwd.
    relative = recorded.lstrip('/').rstrip('\n')
    return BuildPack(os.path.join(os.getcwd(), relative))
|
[
"def",
"recover_buildpack",
"(",
"app_folder",
")",
":",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"app_folder",
",",
"'.buildpack'",
")",
"with",
"open",
"(",
"filepath",
")",
"as",
"f",
":",
"buildpack_picked",
"=",
"f",
".",
"read",
"(",
")",
"buildpack_picked",
"=",
"buildpack_picked",
".",
"lstrip",
"(",
"'/'",
")",
"buildpack_picked",
"=",
"buildpack_picked",
".",
"rstrip",
"(",
"'\\n'",
")",
"buildpack_picked",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"buildpack_picked",
")",
"return",
"BuildPack",
"(",
"buildpack_picked",
")"
] |
Given the path to an app folder where an app was just built, return a
BuildPack object pointing to the dir for the buildpack used during the
build.
Relies on the builder.sh script storing the buildpack location in
/.buildpack inside the container.
|
[
"Given",
"the",
"path",
"to",
"an",
"app",
"folder",
"where",
"an",
"app",
"was",
"just",
"built",
"return",
"a",
"BuildPack",
"object",
"pointing",
"to",
"the",
"dir",
"for",
"the",
"buildpack",
"used",
"during",
"the",
"build",
"."
] |
666b28f997d0cff52e82eed4ace1c73fee4b2136
|
https://github.com/yougov/vr.builder/blob/666b28f997d0cff52e82eed4ace1c73fee4b2136/vr/builder/build.py#L275-L290
|
243,299
|
yougov/vr.builder
|
vr/builder/build.py
|
pull_buildpack
|
def pull_buildpack(url):
    """
    Update a buildpack in its shared location, then make a copy into the
    current directory, using an md5 of the url.
    """
    defrag = _defrag(urllib.parse.urldefrag(url))
    # Serialize concurrent builds that share the same buildpack checkout.
    with lock_or_wait(defrag.url):
        buildpack = update_buildpack(url)
        dest = '%s-%s' % (buildpack.basename, hash_text(defrag.url))
        shutil.copytree(buildpack.folder, dest)
    # Make the buildpack dir writable, per
    # https://bitbucket.org/yougov/velociraptor/issues/178
    path.Path(dest).chmod('a+wx')
    return dest
|
python
|
def pull_buildpack(url):
    """
    Update a buildpack in its shared location, then make a copy into the
    current directory, using an md5 of the url.
    """
    defrag = _defrag(urllib.parse.urldefrag(url))
    # Serialize concurrent builds that share the same buildpack checkout.
    with lock_or_wait(defrag.url):
        buildpack = update_buildpack(url)
        dest = '%s-%s' % (buildpack.basename, hash_text(defrag.url))
        shutil.copytree(buildpack.folder, dest)
    # Make the buildpack dir writable, per
    # https://bitbucket.org/yougov/velociraptor/issues/178
    path.Path(dest).chmod('a+wx')
    return dest
|
[
"def",
"pull_buildpack",
"(",
"url",
")",
":",
"defrag",
"=",
"_defrag",
"(",
"urllib",
".",
"parse",
".",
"urldefrag",
"(",
"url",
")",
")",
"with",
"lock_or_wait",
"(",
"defrag",
".",
"url",
")",
":",
"bp",
"=",
"update_buildpack",
"(",
"url",
")",
"dest",
"=",
"bp",
".",
"basename",
"+",
"'-'",
"+",
"hash_text",
"(",
"defrag",
".",
"url",
")",
"shutil",
".",
"copytree",
"(",
"bp",
".",
"folder",
",",
"dest",
")",
"# Make the buildpack dir writable, per",
"# https://bitbucket.org/yougov/velociraptor/issues/178",
"path",
".",
"Path",
"(",
"dest",
")",
".",
"chmod",
"(",
"'a+wx'",
")",
"return",
"dest"
] |
Update a buildpack in its shared location, then make a copy into the
current directory, using an md5 of the url.
|
[
"Update",
"a",
"buildpack",
"in",
"its",
"shared",
"location",
"then",
"make",
"a",
"copy",
"into",
"the",
"current",
"directory",
"using",
"an",
"md5",
"of",
"the",
"url",
"."
] |
666b28f997d0cff52e82eed4ace1c73fee4b2136
|
https://github.com/yougov/vr.builder/blob/666b28f997d0cff52e82eed4ace1c73fee4b2136/vr/builder/build.py#L304-L317
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.