Each record in this corpus is one row with the following columns. `code` is identical to `original_string`, `docstring` is the docstring embedded in `code`, and `code_tokens`/`docstring_tokens` are mechanical tokenizations of those two fields, so the records below show each distinct field once.

| column | type | range |
|---|---|---|
| id | int32 | 0 – 252k |
| repo | string | 7 – 55 chars |
| path | string | 4 – 127 chars |
| func_name | string | 1 – 88 chars |
| original_string | string | 75 – 19.8k chars |
| language | string | 1 class: `python` |
| code | string | 75 – 19.8k chars (same as `original_string`) |
| code_tokens | list | tokenized `code` |
| docstring | string | 3 – 17.3k chars |
| docstring_tokens | list | tokenized `docstring` |
| sha | string | 40 chars |
| url | string | 87 – 242 chars |
---
**id:** 242,600 | **repo:** OpenGov/carpenter | **path:** carpenter/carpenter.py | **func_name:** remove_column | **language:** python

```python
def remove_column(table, remove_index):
    '''
    Removes the specified column from the table.
    '''
    for row_index in range(len(table)):
        old_row = table[row_index]
        new_row = []
        for column_index in range(len(old_row)):
            if column_index != remove_index:
                new_row.append(old_row[column_index])
        table[row_index] = new_row
    return table
```

**sha:** 0ab3c54c05133b9b0468c63e834a7ce3a6fb575b | **url:** https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/carpenter.py#L16-L27
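A quick usage sketch (not from the carpenter repo; it just exercises the function above): the table is a list of row lists, mutated in place and also returned.

```python
table = [[1, 2, 3],
         [4, 5, 6]]
assert remove_column(table, 1) == [[1, 3], [4, 6]]
assert table == [[1, 3], [4, 6]]  # the input is modified in place too
```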
---
**id:** 242,601 | **repo:** OpenGov/carpenter | **path:** carpenter/carpenter.py | **func_name:** row_content_length | **language:** python

```python
def row_content_length(row):
    '''
    Returns the length of non-empty content in a given row.
    '''
    if not row:
        return 0
    try:
        return (index + 1 for index, cell in reversed(list(enumerate(row))) if not is_empty_cell(cell)).next()
    except StopIteration:
        return len(row)
```

**sha:** 0ab3c54c05133b9b0468c63e834a7ce3a6fb575b | **url:** https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/carpenter.py#L99-L108
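`.next()` on a generator is Python 2 syntax and raises AttributeError under Python 3. A hedged Python 3 sketch of the same logic, with a stand-in `is_empty_cell` (the real helper lives elsewhere in carpenter.py):

```python
def is_empty_cell(cell):
    # Stand-in for carpenter's real helper: treat None and
    # whitespace-only strings as empty.
    return cell is None or (isinstance(cell, str) and not cell.strip())

def row_content_length_py3(row):
    '''Length of the row up to and including its last non-empty cell.'''
    if not row:
        return 0
    try:
        return next(index + 1
                    for index, cell in reversed(list(enumerate(row)))
                    if not is_empty_cell(cell))
    except StopIteration:
        return len(row)  # all cells empty: fall back to the full length

assert row_content_length_py3(['a', '', 'b', '', '']) == 3
assert row_content_length_py3([]) == 0
```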
---
**id:** 242,602 | **repo:** OpenGov/carpenter | **path:** carpenter/carpenter.py | **func_name:** split_block_by_row_length | **language:** python

```python
def split_block_by_row_length(block, split_row_length):
    '''
    Splits the block by finding all rows with fewer consecutive, non-empty
    cells than the split_row_length input.
    '''
    split_blocks = []
    current_block = []
    for row in block:
        if row_content_length(row) <= split_row_length:
            if current_block:
                split_blocks.append(current_block)
            split_blocks.append([row])
            current_block = []
        else:
            current_block.append(row)
    if current_block:
        split_blocks.append(current_block)
    return split_blocks
```

**sha:** 0ab3c54c05133b9b0468c63e834a7ce3a6fb575b | **url:** https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/carpenter.py#L110-L128
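A usage sketch, runnable under Python 2 with the original `row_content_length` (or with the Python 3 variant above substituted). A short marker row acts as a divider; no empty cells are involved, so any `is_empty_cell` definition gives the same result.

```python
block = [
    ['a', 'b', 'c'],
    ['d', 'e', 'f'],
    ['section'],        # content length 1, at or below the threshold
    ['g', 'h', 'i'],
]
assert split_block_by_row_length(block, 1) == [
    [['a', 'b', 'c'], ['d', 'e', 'f']],
    [['section']],
    [['g', 'h', 'i']],
]
```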
---
**id:** 242,603 | **repo:** MacHu-GWU/angora-project | **path:** angora/zzz_manual_install.py | **func_name:** check_need_install | **language:** python

```python
def check_need_install():
    """Check whether the installed package is exactly the same as this one.
    """
    md5_root, md5_dst = list(), list()
    need_install_flag = False
    for root, _, basename_list in os.walk(_ROOT):
        if os.path.basename(root) != "__pycache__":
            for basename in basename_list:
                src = os.path.join(root, basename)
                dst = os.path.join(root.replace(_ROOT, _DST), basename)
                if os.path.exists(dst):
                    if md5_of_file(src) != md5_of_file(dst):
                        return True
                else:
                    return True
    return need_install_flag
```

**sha:** 689a60da51cd88680ddbe26e28dbe81e6b01d275 | **url:** https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/zzz_manual_install.py#L145-L160
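The function leans on `_ROOT`, `_DST`, and an `md5_of_file` helper defined elsewhere in zzz_manual_install.py. A plausible stand-in for the hash helper (an assumption, not the repo's code) streams the file in chunks so large files never load into memory at once:

```python
import hashlib

def md5_of_file(path, chunk_size=1 << 20):
    # Hedged stand-in for the helper check_need_install relies on:
    # hash the file 1 MiB at a time and return the hex digest.
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            md5.update(chunk)
    return md5.hexdigest()
```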
---
**id:** 242,604 | **repo:** ebrelsford/feincms-pagepermissions | **path:** pagepermissions/extension.py | **func_name:** has_permission_to_view | **language:** python

```python
def has_permission_to_view(page, user):
    """
    Check whether the user has permission to view the page. If the user has
    any of the page's permissions, they have permission. If the page has no set
    permissions, they have permission.
    """
    if page.permissions.count() == 0:
        return True
    for perm in page.permissions.all():
        perm_label = '%s.%s' % (perm.content_type.app_label, perm.codename)
        if user.has_perm(perm_label):
            return True
    return False
```

**sha:** 097ebf810e794feb4219defc1dc0cd9dd394a734 | **url:** https://github.com/ebrelsford/feincms-pagepermissions/blob/097ebf810e794feb4219defc1dc0cd9dd394a734/pagepermissions/extension.py#L41-L54
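The key detail is the permission-label format Django's `User.has_perm` expects: `'<app_label>.<codename>'`. A tiny standalone check of that formatting with stub objects (illustrative, not Django models):

```python
class _Stub:
    # Minimal attribute bag standing in for Permission/ContentType.
    def __init__(self, **attrs):
        self.__dict__.update(attrs)

perm = _Stub(codename='view_secret_page',
             content_type=_Stub(app_label='pages'))
perm_label = '%s.%s' % (perm.content_type.app_label, perm.codename)
assert perm_label == 'pages.view_secret_page'
```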
---
**id:** 242,605 | **repo:** mariocesar/pengbot | **path:** src/pengbot/adapters/shell.py | **func_name:** Shell.do_directives | **language:** python

```python
def do_directives(self, line):
    """List all directives supported by the bot"""
    for name, cmd in self.adapter.directives.items():
        with colorize('blue'):
            print('bot %s:' % name)
            if cmd.__doc__:
                for line in cmd.__doc__.split('\n'):
                    print(' %s' % line)
            else:
                print()
```

**sha:** 070854f92ac1314ee56f7f6cb9d27430b8f0fda8 | **url:** https://github.com/mariocesar/pengbot/blob/070854f92ac1314ee56f7f6cb9d27430b8f0fda8/src/pengbot/adapters/shell.py#L30-L39
---
**id:** 242,606 | **repo:** mariocesar/pengbot | **path:** src/pengbot/adapters/shell.py | **func_name:** Shell.do_bot | **language:** python

```python
def do_bot(self, line):
    """Call the bot"""
    with colorize('blue'):
        if not line:
            self.say('what?')
        try:
            res = self.adapter.receive(message=line)
        except UnknownCommand:
            self.say("I do not known what the '%s' directive is" % line)
        else:
            self.say(res)
```

**sha:** 070854f92ac1314ee56f7f6cb9d27430b8f0fda8 | **url:** https://github.com/mariocesar/pengbot/blob/070854f92ac1314ee56f7f6cb9d27430b8f0fda8/src/pengbot/adapters/shell.py#L45-L56
---
**id:** 242,607 | **repo:** Vito2015/pyextend | **path:** pyextend/core/wrappers/timeout.py | **func_name:** timeout | **language:** python

```python
def timeout(seconds, error_message=None):
    """Timeout checking just for Linux-like platform, not working in Windows platform."""
    def decorated(func):
        result = ""

        def _handle_timeout(signum, frame):
            errmsg = error_message or 'Timeout: The action <%s> is timeout!' % func.__name__
            global result
            result = None
            import inspect
            stack_frame = inspect.stack()[4]
            file_name = os.path.basename(stack_frame[1])
            line_no = stack_frame[2]
            method_name = stack_frame[3]
            code_text = ','.join(stack_frame[4])
            stack_info = 'Stack: %s, %s:%s >%s' % (method_name, file_name, line_no, code_text)
            sys.stderr.write(errmsg + '\n')
            sys.stderr.write(stack_info + '\n')
            raise TimeoutError(errmsg)

        @sysx.platform(sysx.UNIX_LIKE, case_false_wraps=func)
        def wrapper(*args, **kwargs):
            global result
            signal.signal(signal.SIGALRM, _handle_timeout)
            signal.alarm(seconds)
            try:
                result = func(*args, **kwargs)
            finally:
                signal.alarm(0)
            return result

        return functools.wraps(func)(wrapper)
    return decorated
```

**sha:** 36861dfe1087e437ffe9b5a1da9345c85b4fa4a1 | **url:** https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/core/wrappers/timeout.py#L25-L60
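The decorator depends on pyextend's own `sysx.platform` helper, and it shuttles the return value through a module-level `global result`, which leaks state between calls. A minimal self-contained sketch of the same SIGALRM technique that returns directly instead (Unix-only; an illustration, not the pyextend implementation):

```python
import functools
import signal

def simple_timeout(seconds):
    # Same idea as the decorator above: arm SIGALRM before the call,
    # raise TimeoutError from the handler, always disarm afterwards.
    def decorated(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            def _handle(signum, frame):
                raise TimeoutError('%s timed out after %ss'
                                   % (func.__name__, seconds))
            previous = signal.signal(signal.SIGALRM, _handle)
            signal.alarm(seconds)
            try:
                return func(*args, **kwargs)
            finally:
                signal.alarm(0)                          # cancel pending alarm
                signal.signal(signal.SIGALRM, previous)  # restore old handler
        return wrapper
    return decorated

@simple_timeout(1)
def quick():
    return 'done'

assert quick() == 'done'  # finishes well inside the 1-second budget
```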
---
**id:** 242,608 | **repo:** luismasuelli/python-cantrips | **path:** cantrips/patterns/broadcast.py | **func_name:** IBroadcast.broadcast | **language:** python

```python
def broadcast(self, command, *args, **kwargs):
    """
    Notifies each user with a specified command.
    """
    criterion = kwargs.pop('criterion', self.BROADCAST_FILTER_ALL)
    for index, user in items(self.users()):
        if criterion(user, command, *args, **kwargs):
            self.notify(user, command, *args, **kwargs)
```

**sha:** dba2742c1d1a60863bb65f4a291464f6e68eb2ee | **url:** https://github.com/luismasuelli/python-cantrips/blob/dba2742c1d1a60863bb65f4a291464f6e68eb2ee/cantrips/patterns/broadcast.py#L88-L95
---
**id:** 242,609 | **repo:** KitB/compose-deploy | **path:** compose_deploy/_main.py | **func_name:** get_config | **language:** python

```python
def get_config(basedir, files):
    """ Returns the config object for the selected docker-compose.yml
    This is an instance of `compose.config.config.Config`.
    """
    config_details = config.find(
        basedir, files,
        environment.Environment.from_env_file(basedir))
    return config.load(config_details)
```

**sha:** 45e351ff4a3e8001e544655e7068d9cacdbb48e5 | **url:** https://github.com/KitB/compose-deploy/blob/45e351ff4a3e8001e544655e7068d9cacdbb48e5/compose_deploy/_main.py#L31-L40
---
**id:** 242,610 | **repo:** KitB/compose-deploy | **path:** compose_deploy/_main.py | **func_name:** build | **language:** python

```python
def build(config, services):
    """ Builds images and tags them appropriately.
    Where "appropriately" means with the output of:
        git describe --tags HEAD
    and 'latest' as well (so the "latest" image for each will always be the
    most recently built)
    """
    filtered_services = {name: service for name, service in services.iteritems() if 'build' in service}
    _call_output('docker-compose build {}'.format(' '.join(filtered_services.iterkeys())))
    version = _get_version()
    for service_name, service_dict in filtered_services.iteritems():
        # Tag with proper version, they're already tagged latest from build
        image = service_dict['image']
        _call('docker tag {image}:latest {image}:{version}'.format(
            image=image,
            version=version
        ))
```

**sha:** 45e351ff4a3e8001e544655e7068d9cacdbb48e5 | **url:** https://github.com/KitB/compose-deploy/blob/45e351ff4a3e8001e544655e7068d9cacdbb48e5/compose_deploy/_main.py#L99-L122
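`iteritems()`/`iterkeys()` make this Python 2 code; Python 3 would use `items()`/`keys()`. `build` (and `push` below) also call a `_get_version` helper that is not shown; going by the docstring, a stand-in consistent with it (an assumption, not the repo's code) would be:

```python
import subprocess

def _get_version():
    # Tag source per the docstring: the output of `git describe --tags HEAD`.
    return subprocess.check_output(
        ['git', 'describe', '--tags', 'HEAD']).decode().strip()
```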
---
**id:** 242,611 | **repo:** KitB/compose-deploy | **path:** compose_deploy/_main.py | **func_name:** push | **language:** python

```python
def push(config, services):
    """ Upload the defined services to their respective repositories.
    So's we can then tell the remote docker host to then pull and run them.
    """
    version = _get_version()
    for service_name, service_dict in services.iteritems():
        image = service_dict['image']
        things = {'image': image, 'version': version}
        _call_output('docker push {image}:latest'.format(**things))
        _call_output('docker push {image}:{version}'.format(**things))
```

**sha:** 45e351ff4a3e8001e544655e7068d9cacdbb48e5 | **url:** https://github.com/KitB/compose-deploy/blob/45e351ff4a3e8001e544655e7068d9cacdbb48e5/compose_deploy/_main.py#L125-L135
---
**id:** 242,612 | **repo:** hobson/pug-invest | **path:** pug/invest/plot.py | **func_name:** generate_bins | **language:** python

```python
def generate_bins(bins, values=None):
    """Compute bin edges for numpy.histogram based on values and requested bin parameters
    Unlike `range`, the largest value is included within the range of the last, largest value,
    so generate_bins(N) will produce a sequence with length N+1
    Arguments:
        bins (int or 2-tuple of floats or sequence of floats): the number of bins,
            or the first pair of bin edges
    >>> generate_bins(0, [])
    [0]
    >>> generate_bins(3, [])
    [0, 1, 2, 3]
    >>> generate_bins(0)
    [0]
    >>> generate_bins(10)
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    >>> generate_bins(10, range(21))
    [0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0]
    >>> generate_bins((0, 3), range(21))
    [0, 3, 6, 9, 12, 15, 18, 21]
    """
    if isinstance(bins, int):
        bins = (bins,)
    if isinstance(bins, float):
        bins = (0, bins)
    if not len(bins) in (1, 2):
        return bins
    if values is None or not hasattr(values, '__iter__') or not any(values) or not hasattr(values, '__len__') or len(values) < 1:
        values = [0]
    value_min, value_max = pd.np.min(values), pd.np.max(values)
    value_range = value_max - value_min
    if len(bins) == 1:
        if not value_range:
            return range(int(bins[0]) + 1)
        bins = (0, value_range / float(bins[0]))
    if len(bins) == 2:
        if not value_range:
            return bins
        binwidth = ((bins[1] - bins[0]) or 1)
        bin0 = bins[0] or pd.np.min(values)
        if (bin0 / value_range) <= .3:
            bin0 = 0
        numbins = int(value_range / float(binwidth))
        bins = list(pd.np.arange(numbins + 1) * binwidth + bin0)
    else:
        binwidth = pd.np.min(pd.np.diff(bins)) or pd.np.mean(pd.np.diff(bins)) or 1.
        bins = list(bins)
        while bins[-1] < value_max:
            bins.append(bins[-1] + binwidth)
    return bins
```

**sha:** 836911258a0e920083a88c91beae88eefdebb20c | **url:** https://github.com/hobson/pug-invest/blob/836911258a0e920083a88c91beae88eefdebb20c/pug/invest/plot.py#L203-L256
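The edges feed straight into `numpy.histogram` (`pd.np` is pandas' old re-export of numpy, removed in pandas 2.0, so the function as written needs an older pandas). A quick check mirroring the doctest, assuming pandas is imported as `pd` the way plot.py does:

```python
import numpy as np

values = list(range(21))
edges = generate_bins(10, values)   # ten equal-width bins: [0.0, 2.0, ..., 20.0]
counts, _ = np.histogram(values, bins=edges)
assert len(edges) == 11             # N bins means N + 1 edges
assert counts.sum() == len(values)  # the rightmost edge is inclusive
```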
---
**id:** 242,613 | **repo:** appstore-zencore/dictop | **path:** dictop.py | **func_name:** select | **language:** python

```python
def select(target, path, default=None, slient=True):
    """Select item with path from target.
    If the item is not found and slient is True, return the default value.
    If the item is not found and slient is False, raise KeyError.
    """
    def _(value, slient):
        if slient:
            return value
        else:
            raise KeyError("")
    default = partial(_, default, slient)
    names = path.split(".")
    node = target
    for name in names:
        if isinstance(node, dict):
            try:
                node = node[name]
            except:
                return default()
        elif isinstance(node, list) and name.isdigit():
            try:
                node = node[int(name)]
            except:
                return default()
        elif hasattr(node, name):
            node = getattr(node, name)
        else:
            return default()
    return node
```

**sha:** d730f74f6db9c65b7679db237ccf9b1c4031266b | **url:** https://github.com/appstore-zencore/dictop/blob/d730f74f6db9c65b7679db237ccf9b1c4031266b/dictop.py#L5-L34
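A usage sketch: dotted path segments traverse dict keys, list indices, and object attributes in turn. (`select` relies on `functools.partial` being imported as `partial` in dictop.py.)

```python
data = {'db': {'hosts': [{'name': 'primary'}, {'name': 'replica'}]}}
assert select(data, 'db.hosts.1.name') == 'replica'     # dict -> list -> dict
assert select(data, 'db.port', default=5432) == 5432    # missing key, silent
```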
---
**id:** 242,614 | **repo:** appstore-zencore/dictop | **path:** dictop.py | **func_name:** update | **language:** python

```python
def update(target, path, value):
    """Update item in path of target with given value.
    """
    names = path.split(".")
    names_length = len(names)
    node = target
    for index in range(names_length):
        name = names[index]
        if index == names_length - 1:
            last = True
        else:
            last = False
        if isinstance(node, dict):
            if last:
                node[name] = value
                return
            else:
                if not name in node:
                    node[name] = {}
                node = node[name]
        elif isinstance(node, list):
            name = int(name)
            listpad(node, name + 1)
            if last:
                node[name] = value
                return
            else:
                node[name] = {}
                node = node[name]
        else:
            if last:
                setattr(node, name, value)
            else:
                setattr(node, name, {})
                node = getattr(node, name)
```

**sha:** d730f74f6db9c65b7679db237ccf9b1c4031266b | **url:** https://github.com/appstore-zencore/dictop/blob/d730f74f6db9c65b7679db237ccf9b1c4031266b/dictop.py#L44-L78
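A usage sketch for the dict path, where intermediate dicts are created on demand. (The list branch additionally needs dictop's `listpad` helper, which is not shown in this record.)

```python
target = {}
update(target, 'server.ports', [80, 443])   # creates target['server'] first
update(target, 'server.host', 'localhost')  # reuses the existing dict
assert target == {'server': {'ports': [80, 443], 'host': 'localhost'}}
```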
---
**id:** 242,615 | **repo:** cdeboever3/cdpybio | **path:** cdpybio/picard.py | **func_name:** parse_bam_index_stats | **language:** python

```python
def parse_bam_index_stats(fn):
    """
    Parse the output from Picard's BamIndexStats and return as pandas DataFrame.
    Parameters
    ----------
    filename : str of filename or file handle
        Filename of the Picard output you want to parse.
    Returns
    -------
    df : pandas.DataFrame
        Data from output file.
    """
    with open(fn) as f:
        lines = [x.strip().split() for x in f.readlines()]
    no_counts = int(lines[-1][-1])
    lines = lines[:-1]
    chrom = [x[0] for x in lines]
    length = [int(x[2]) for x in lines]
    aligned = [int(x[4]) for x in lines]
    unaligned = [int(x[6]) for x in lines]
    df = pd.DataFrame([length, aligned, unaligned], columns=chrom,
                      index=['length', 'aligned', 'unaligned']).T
    df = df.ix[sorted(df.index)]
    return df
```

**sha:** 38efdf0e11d01bc00a135921cb91a19c03db5d5c | **url:** https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/picard.py#L4-L30
---
**id:** 242,616 | **repo:** cdeboever3/cdpybio | **path:** cdpybio/picard.py | **func_name:** parse_alignment_summary_metrics | **language:** python

```python
def parse_alignment_summary_metrics(fn):
    """
    Parse the output from Picard's CollectAlignmentSummaryMetrics and return as
    pandas DataFrame.
    Parameters
    ----------
    filename : str of filename or file handle
        Filename of the Picard output you want to parse.
    Returns
    -------
    df : pandas.DataFrame
        Data from output file.
    """
    df = pd.read_table(fn, index_col=0, skiprows=range(6) + [10, 11]).T
    return df
```

**sha:** 38efdf0e11d01bc00a135921cb91a19c03db5d5c | **url:** https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/picard.py#L32-L49
---
**id:** 242,617 | **repo:** cdeboever3/cdpybio | **path:** cdpybio/picard.py | **func_name:** parse_mark_duplicate_metrics | **language:** python

```python
def parse_mark_duplicate_metrics(fn):
    """
    Parse the output from Picard's MarkDuplicates and return as pandas
    Series.
    Parameters
    ----------
    filename : str of filename or file handle
        Filename of the Picard output you want to parse.
    Returns
    -------
    metrics : pandas.Series
        Duplicate metrics.
    hist : pandas.Series
        Duplicate histogram.
    """
    with open(fn) as f:
        lines = [x.strip().split('\t') for x in f.readlines()]
    metrics = pd.Series(lines[7], lines[6])
    m = pd.to_numeric(metrics[metrics.index[1:]])
    metrics[m.index] = m.values
    vals = np.array(lines[11:-1])
    hist = pd.Series(vals[:, 1], index=[int(float(x)) for x in vals[:, 0]])
    hist = pd.to_numeric(hist)
    return metrics, hist
```

**sha:** 38efdf0e11d01bc00a135921cb91a19c03db5d5c | **url:** https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/picard.py#L51-L79
---
**id:** 242,618 | **repo:** cdeboever3/cdpybio | **path:** cdpybio/picard.py | **func_name:** parse_insert_metrics | **language:** python

```python
def parse_insert_metrics(fn):
    """
    Parse the output from Picard's CollectInsertSizeMetrics and return as pandas
    Series.
    Parameters
    ----------
    filename : str of filename or file handle
        Filename of the Picard output you want to parse.
    Returns
    -------
    metrics : pandas.Series
        Insert size metrics.
    hist : pandas.Series
        Insert size histogram.
    """
    with open(fn) as f:
        lines = [x.strip().split('\t') for x in f.readlines()]
    index = lines[6]
    vals = lines[7]
    for i in range(len(index) - len(vals)):
        vals.append(np.nan)
    for i, v in enumerate(vals):
        if type(v) == str:
            try:
                vals[i] = int(v)
            except ValueError:
                try:
                    vals[i] = float(v)
                except ValueError:
                    continue
    metrics = pd.Series(vals, index=index)
    vals = np.array(lines[11:-1])
    hist = pd.Series(vals[:, 1], index=[int(float(x)) for x in vals[:, 0]])
    hist = pd.to_numeric(hist)
    return metrics, hist
```

**sha:** 38efdf0e11d01bc00a135921cb91a19c03db5d5c | **url:** https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/picard.py#L81-L121
---
**id:** 242,619 | **repo:** bahattincinic/apistar_shell | **path:** apistar_shell/commands.py | **func_name:** shell_sqlalchemy | **language:** python

```python
def shell_sqlalchemy(session: SqlalchemySession, backend: ShellBackend):
    """
    This command includes SQLAlchemy DB Session
    """
    namespace = {
        'session': session
    }
    namespace.update(backend.get_namespace())
    embed(user_ns=namespace, header=backend.header)
```

**sha:** 8b291fc514d668d6f8ff159da488adae242a338a | **url:** https://github.com/bahattincinic/apistar_shell/blob/8b291fc514d668d6f8ff159da488adae242a338a/apistar_shell/commands.py#L9-L17
---
**id:** 242,620 | **repo:** bahattincinic/apistar_shell | **path:** apistar_shell/commands.py | **func_name:** shell_django | **language:** python

```python
def shell_django(session: DjangoSession, backend: ShellBackend):
    """
    This command includes Django DB Session
    """
    namespace = {
        'session': session
    }
    namespace.update(backend.get_namespace())
    embed(user_ns=namespace, header=backend.header)
```

**sha:** 8b291fc514d668d6f8ff159da488adae242a338a | **url:** https://github.com/bahattincinic/apistar_shell/blob/8b291fc514d668d6f8ff159da488adae242a338a/apistar_shell/commands.py#L20-L28
---
**id:** 242,621 | **repo:** Othernet-Project/squery-pg | **path:** squery_pg/squery_pg.py | **func_name:** Database.serialize_query | **language:** python

```python
def serialize_query(func):
    """ Ensure any SQLExpression instances are serialized"""
    @functools.wraps(func)
    def wrapper(self, query, *args, **kwargs):
        if hasattr(query, 'serialize'):
            query = query.serialize()
        assert isinstance(query, basestring), 'Expected query to be string'
        if self.debug:
            print('SQL:', query)
        return func(self, query, *args, **kwargs)
    return wrapper
```

**sha:** eaa695c3719e2d2b7e1b049bb58c987c132b6b34 | **url:** https://github.com/Othernet-Project/squery-pg/blob/eaa695c3719e2d2b7e1b049bb58c987c132b6b34/squery_pg/squery_pg.py#L68-L80
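`basestring` makes this Python 2 code; under Python 3 it needs a one-line shim in the decorator's module. A stub-based usage sketch (all names here are illustrative, not from squery-pg):

```python
import functools  # used by serialize_query itself

basestring = str  # Python 3 shim for the decorator's isinstance check

class FakeQuery:
    # Anything exposing serialize() stands in for an SQLExpression.
    def serialize(self):
        return 'SELECT 1;'

class FakeDatabase:
    debug = True  # triggers the 'SQL:' debug print

    @serialize_query
    def execute(self, query):
        return query

assert FakeDatabase().execute(FakeQuery()) == 'SELECT 1;'
```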
242,622
|
rsalmaso/django-fluo
|
fluo/admin/models.py
|
ModelAdmin.autocomplete_view
|
def autocomplete_view(self, request):
"""
Searches in the fields of the given related model and returns the
result as a simple string to be used by the jQuery Autocomplete plugin
"""
query = request.GET.get('q', None)
app_label = request.GET.get('app_label', None)
model_name = request.GET.get('model_name', None)
search_fields = request.GET.get('search_fields', None)
object_pk = request.GET.get('object_pk', None)
try:
to_string_function = self.related_string_functions[model_name]
except KeyError:
to_string_function = lambda x: str(x)
if search_fields and app_label and model_name and (query or object_pk):
def construct_search(field_name):
# use different lookup methods depending on the notation
if field_name.startswith('^'):
fmt, name = "{}__istartswith", field_name[1:]
elif field_name.startswith('='):
fmt, name = "{}__iexact", field_name[1:]
elif field_name.startswith('@'):
fmt, name = "{}__search", field_name[1:]
else:
fmt, name = "{}__icontains", field_name
return fmt.format(name)
model = apps.get_model(app_label, model_name)
queryset = model._default_manager.all()
data = ''
if query:
for bit in query.split():
or_queries = [
models.Q(**{construct_search(smart_str(field_name)): smart_str(bit)})
for field_name
in search_fields.split(',')
]
other_qs = QuerySet(model)
other_qs.query.select_related = queryset.query.select_related
other_qs = other_qs.filter(reduce(operator.or_, or_queries))
queryset = queryset & other_qs
if self.autocomplete_limit:
queryset = queryset[:self.autocomplete_limit]
data = ''.join([
'{}|{}\n'.format(to_string_function(f), f.pk)
for f
in queryset
])
elif object_pk:
try:
obj = queryset.get(pk=object_pk)
except:
pass
else:
data = to_string_function(obj)
return HttpResponse(data)
return HttpResponseNotFound()
|
python
|
def autocomplete_view(self, request):
"""
Searches in the fields of the given related model and returns the
result as a simple string to be used by the jQuery Autocomplete plugin
"""
query = request.GET.get('q', None)
app_label = request.GET.get('app_label', None)
model_name = request.GET.get('model_name', None)
search_fields = request.GET.get('search_fields', None)
object_pk = request.GET.get('object_pk', None)
try:
to_string_function = self.related_string_functions[model_name]
except KeyError:
to_string_function = lambda x: str(x)
if search_fields and app_label and model_name and (query or object_pk):
def construct_search(field_name):
# use different lookup methods depending on the notation
if field_name.startswith('^'):
fmt, name = "{}__istartswith", field_name[1:]
elif field_name.startswith('='):
fmt, name = "{}__iexact", field_name[1:]
elif field_name.startswith('@'):
fmt, name = "{}__search", field_name[1:]
else:
fmt, name = "{}__icontains", field_name
return fmt.format(name)
model = apps.get_model(app_label, model_name)
queryset = model._default_manager.all()
data = ''
if query:
for bit in query.split():
or_queries = [
models.Q(**{construct_search(smart_str(field_name)): smart_str(bit)})
for field_name
in search_fields.split(',')
]
other_qs = QuerySet(model)
other_qs.query.select_related = queryset.query.select_related
other_qs = other_qs.filter(reduce(operator.or_, or_queries))
queryset = queryset & other_qs
if self.autocomplete_limit:
queryset = queryset[:self.autocomplete_limit]
data = ''.join([
'{}|{}\n'.format(to_string_function(f), f.pk)
for f
in queryset
])
elif object_pk:
try:
obj = queryset.get(pk=object_pk)
except:
pass
else:
data = to_string_function(obj)
return HttpResponse(data)
return HttpResponseNotFound()
|
[
"def",
"autocomplete_view",
"(",
"self",
",",
"request",
")",
":",
"query",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'q'",
",",
"None",
")",
"app_label",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'app_label'",
",",
"None",
")",
"model_name",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'model_name'",
",",
"None",
")",
"search_fields",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'search_fields'",
",",
"None",
")",
"object_pk",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'object_pk'",
",",
"None",
")",
"try",
":",
"to_string_function",
"=",
"self",
".",
"related_string_functions",
"[",
"model_name",
"]",
"except",
"KeyError",
":",
"to_string_function",
"=",
"lambda",
"x",
":",
"str",
"(",
"x",
")",
"if",
"search_fields",
"and",
"app_label",
"and",
"model_name",
"and",
"(",
"query",
"or",
"object_pk",
")",
":",
"def",
"construct_search",
"(",
"field_name",
")",
":",
"# use different lookup methods depending on the notation",
"if",
"field_name",
".",
"startswith",
"(",
"'^'",
")",
":",
"fmt",
",",
"name",
"=",
"\"{}__istartswith\"",
",",
"field_name",
"[",
"1",
":",
"]",
"elif",
"field_name",
".",
"startswith",
"(",
"'='",
")",
":",
"fmt",
",",
"name",
"=",
"\"{}__iexact\"",
",",
"field_name",
"[",
"1",
":",
"]",
"elif",
"field_name",
".",
"startswith",
"(",
"'@'",
")",
":",
"fmt",
",",
"name",
"=",
"\"{}__search\"",
",",
"field_name",
"[",
"1",
":",
"]",
"else",
":",
"fmt",
",",
"name",
"=",
"\"{}__icontains\"",
",",
"field_name",
"return",
"fmt",
".",
"format",
"(",
"name",
")",
"model",
"=",
"apps",
".",
"get_model",
"(",
"app_label",
",",
"model_name",
")",
"queryset",
"=",
"model",
".",
"_default_manager",
".",
"all",
"(",
")",
"data",
"=",
"''",
"if",
"query",
":",
"for",
"bit",
"in",
"query",
".",
"split",
"(",
")",
":",
"or_queries",
"=",
"[",
"models",
".",
"Q",
"(",
"*",
"*",
"{",
"construct_search",
"(",
"smart_str",
"(",
"field_name",
")",
")",
":",
"smart_str",
"(",
"bit",
")",
"}",
")",
"for",
"field_name",
"in",
"search_fields",
".",
"split",
"(",
"','",
")",
"]",
"other_qs",
"=",
"QuerySet",
"(",
"model",
")",
"other_qs",
".",
"query",
".",
"select_related",
"=",
"queryset",
".",
"query",
".",
"select_related",
"other_qs",
"=",
"other_qs",
".",
"filter",
"(",
"reduce",
"(",
"operator",
".",
"or_",
",",
"or_queries",
")",
")",
"queryset",
"=",
"queryset",
"&",
"other_qs",
"if",
"self",
".",
"autocomplete_limit",
":",
"queryset",
"=",
"queryset",
"[",
":",
"self",
".",
"autocomplete_limit",
"]",
"data",
"=",
"''",
".",
"join",
"(",
"[",
"'{}|{}\\n'",
".",
"format",
"(",
"to_string_function",
"(",
"f",
")",
",",
"f",
".",
"pk",
")",
"for",
"f",
"in",
"queryset",
"]",
")",
"elif",
"object_pk",
":",
"try",
":",
"obj",
"=",
"queryset",
".",
"get",
"(",
"pk",
"=",
"object_pk",
")",
"except",
":",
"pass",
"else",
":",
"data",
"=",
"to_string_function",
"(",
"obj",
")",
"return",
"HttpResponse",
"(",
"data",
")",
"return",
"HttpResponseNotFound",
"(",
")"
] |
Searches in the fields of the given related model and returns the
result as a simple string to be used by the jQuery Autocomplete plugin
|
[
"Searches",
"in",
"the",
"fields",
"of",
"the",
"given",
"related",
"model",
"and",
"returns",
"the",
"result",
"as",
"a",
"simple",
"string",
"to",
"be",
"used",
"by",
"the",
"jQuery",
"Autocomplete",
"plugin"
] |
1321c1e7d6a912108f79be02a9e7f2108c57f89f
|
https://github.com/rsalmaso/django-fluo/blob/1321c1e7d6a912108f79be02a9e7f2108c57f89f/fluo/admin/models.py#L137-L198
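A minimal, dependency-free sketch of the prefix-to-lookup mapping that the inner construct_search helper implements (the notation matches the Django admin's search_fields convention):

PREFIX_LOOKUPS = {'^': '{}__istartswith', '=': '{}__iexact', '@': '{}__search'}

def construct_search(field_name):
    # '^' -> istartswith, '=' -> iexact, '@' -> search, no prefix -> icontains
    fmt = PREFIX_LOOKUPS.get(field_name[0])
    if fmt:
        return fmt.format(field_name[1:])
    return '{}__icontains'.format(field_name)

assert construct_search('^username') == 'username__istartswith'
assert construct_search('email') == 'email__icontains'

The view answers with one "label|pk" pair per line, so a request such as GET ...?app_label=auth&model_name=user&search_fields=^username&q=jo (hypothetical parameters) would yield lines like jo.smith|42.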
|
242,623
|
cuescience/goat
|
goat/model.py
|
Match.run
|
def run(self, context):
"""We have to overwrite this method because we don't want an implicit context
"""
args = []
kwargs = {}
for arg in self.explicit_arguments:
if arg.name is not None:
kwargs[arg.name] = arg.value
else:
args.append(arg.value)
for arg in self.implicit_arguments:
if arg.name is not None:
annotation = self.signature.parameters[arg.name].annotation
annotation_name = annotation
if not isinstance(annotation, str):
annotation_name = annotation.__name__
if annotation is Table:
value = context.table
elif annotation is Context:
value = context
elif annotation is Text:
value = context.text
elif annotation is inspect._empty:
raise RuntimeError(
"Parameter '{}' of step implementation '{}{}' does not have a type! Please specify it in the correct steps file.".format(
arg.name,
self.func.__qualname__,
self.signature,
)
)
elif CONTEXT_NAMESPACE.format(annotation_name) in context:
value = context.__getattr__(CONTEXT_NAMESPACE.format(annotation_name))
else:
raise RuntimeError(
"'{}' was not found in context. Is a context parameter missing?".format(arg.name))
kwargs[arg.name] = value
else:
raise RuntimeError("Argument name shouldn't be None")
with context.user_mode():
return_value = self.func(*args, **kwargs)
return_annotation = self.signature.return_annotation
if return_annotation == inspect.Signature.empty:
return
if not isinstance(return_annotation, str):
return_annotation = return_annotation.__name__
context.__setattr__(CONTEXT_NAMESPACE.format(return_annotation), return_value)
|
python
|
def run(self, context):
"""We have to overwrite this method because we don't want an implicit context
"""
args = []
kwargs = {}
for arg in self.explicit_arguments:
if arg.name is not None:
kwargs[arg.name] = arg.value
else:
args.append(arg.value)
for arg in self.implicit_arguments:
if arg.name is not None:
annotation = self.signature.parameters[arg.name].annotation
annotation_name = annotation
if not isinstance(annotation, str):
annotation_name = annotation.__name__
if annotation is Table:
value = context.table
elif annotation is Context:
value = context
elif annotation is Text:
value = context.text
elif annotation is inspect._empty:
raise RuntimeError(
"Parameter '{}' of step implementation '{}{}' does not have a type! Please specify it in the correct steps file.".format(
arg.name,
self.func.__qualname__,
self.signature,
)
)
elif CONTEXT_NAMESPACE.format(annotation_name) in context:
value = context.__getattr__(CONTEXT_NAMESPACE.format(annotation_name))
else:
raise RuntimeError(
"'{}' was not found in context. Is a context parameter missing?".format(arg.name))
kwargs[arg.name] = value
else:
raise RuntimeError("Argument name shouldn't be None")
with context.user_mode():
return_value = self.func(*args, **kwargs)
return_annotation = self.signature.return_annotation
if return_annotation == inspect.Signature.empty:
return
if not isinstance(return_annotation, str):
return_annotation = return_annotation.__name__
context.__setattr__(CONTEXT_NAMESPACE.format(return_annotation), return_value)
|
[
"def",
"run",
"(",
"self",
",",
"context",
")",
":",
"args",
"=",
"[",
"]",
"kwargs",
"=",
"{",
"}",
"for",
"arg",
"in",
"self",
".",
"explicit_arguments",
":",
"if",
"arg",
".",
"name",
"is",
"not",
"None",
":",
"kwargs",
"[",
"arg",
".",
"name",
"]",
"=",
"arg",
".",
"value",
"else",
":",
"args",
".",
"append",
"(",
"arg",
".",
"value",
")",
"for",
"arg",
"in",
"self",
".",
"implicit_arguments",
":",
"if",
"arg",
".",
"name",
"is",
"not",
"None",
":",
"annotation",
"=",
"self",
".",
"signature",
".",
"parameters",
"[",
"arg",
".",
"name",
"]",
".",
"annotation",
"annotation_name",
"=",
"annotation",
"if",
"not",
"isinstance",
"(",
"annotation",
",",
"str",
")",
":",
"annotation_name",
"=",
"annotation",
".",
"__name__",
"if",
"annotation",
"is",
"Table",
":",
"value",
"=",
"context",
".",
"table",
"elif",
"annotation",
"is",
"Context",
":",
"value",
"=",
"context",
"elif",
"annotation",
"is",
"Text",
":",
"value",
"=",
"context",
".",
"text",
"elif",
"annotation",
"is",
"inspect",
".",
"_empty",
":",
"raise",
"RuntimeError",
"(",
"\"Parameter '{}' of step implementation '{}{}' does not have a type! Please specify it in the correct steps file.\"",
".",
"format",
"(",
"arg",
".",
"name",
",",
"self",
".",
"func",
".",
"__qualname__",
",",
"self",
".",
"signature",
",",
")",
")",
"elif",
"CONTEXT_NAMESPACE",
".",
"format",
"(",
"annotation_name",
")",
"in",
"context",
":",
"value",
"=",
"context",
".",
"__getattr__",
"(",
"CONTEXT_NAMESPACE",
".",
"format",
"(",
"annotation_name",
")",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"'{}' was not found in context. Is a context parameter missing?\"",
".",
"format",
"(",
"arg",
".",
"name",
")",
")",
"kwargs",
"[",
"arg",
".",
"name",
"]",
"=",
"value",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Argument name shouldn't be None\"",
")",
"with",
"context",
".",
"user_mode",
"(",
")",
":",
"return_value",
"=",
"self",
".",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return_annotation",
"=",
"self",
".",
"signature",
".",
"return_annotation",
"if",
"return_annotation",
"==",
"inspect",
".",
"Signature",
".",
"empty",
":",
"return",
"if",
"not",
"isinstance",
"(",
"return_annotation",
",",
"str",
")",
":",
"return_annotation",
"=",
"return_annotation",
".",
"__name__",
"context",
".",
"__setattr__",
"(",
"CONTEXT_NAMESPACE",
".",
"format",
"(",
"return_annotation",
")",
",",
"return_value",
")"
] |
We have to overwrite this method because we don't want an implicit context
|
[
"We",
"have",
"to",
"overwrite",
"this",
"method",
"because",
"we",
"don",
"t",
"want",
"an",
"implicit",
"context"
] |
d76f44b9ec5dc40ad33abca50830c0d7492ef152
|
https://github.com/cuescience/goat/blob/d76f44b9ec5dc40ad33abca50830c0d7492ef152/goat/model.py#L46-L99
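A minimal sketch of the annotation-driven injection this method performs, using only inspect and stand-in classes (Table and Text here are placeholders, not goat's real types):

import inspect

class Table: pass   # stand-in for goat's Table
class Text: pass    # stand-in for goat's Text

def step(table: Table, note: Text):
    pass

# Match.run branches on exactly these annotations to pick the value to inject.
for name, param in inspect.signature(step).parameters.items():
    print(name, '->', param.annotation.__name__)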
|
242,624
|
cdeboever3/cdpybio
|
cdpybio/analysis.py
|
liftover_bed
|
def liftover_bed(
bed,
chain,
mapped=None,
unmapped=None,
liftOver_path='liftOver',
):
"""
Lift over a bed file using a given chain file.
Parameters
----------
bed : str or pybedtools.BedTool
Coordinates to lift over.
chain : str
Path to chain file to use for lift over.
mapped : str
Path for bed file with coordinates that are lifted over correctly.
unmapped : str
Path for text file to store coordinates that did not lift over
correctly. If this is not provided, these are discarded.
liftOver_path : str
Path to liftOver executable if not in path.
Returns
-------
new_coords : pandas.DataFrame
Pandas data frame with lift over results. Index is old coordinates in
the form chrom:start-end and columns are chrom, start, end and loc
(chrom:start-end) in new coordinate system.
"""
import subprocess
import pybedtools as pbt
if mapped == None:
import tempfile
mapped = tempfile.NamedTemporaryFile()
mname = mapped.name
else:
mname = mapped
if unmapped == None:
import tempfile
unmapped = tempfile.NamedTemporaryFile()
uname = unmapped.name
else:
uname = unmapped
if type(bed) == str:
bt = pbt.BedTool(bed)
elif type(bed) == pbt.bedtool.BedTool:
bt = bed
else:
sys.exit(1)
bt = bt.sort()
c = '{} {} {} {} {}'.format(liftOver_path, bt.fn, chain, mname, uname)
subprocess.check_call(c, shell=True)
with open(uname) as f:
missing = pbt.BedTool(''.join([x for x in f.readlines()[1::2]]),
from_string=True)
bt = bt.subtract(missing)
bt_mapped = pbt.BedTool(mname)
old_loc = []
for r in bt:
old_loc.append('{}:{}-{}'.format(r.chrom, r.start, r.end))
new_loc = []
new_chrom = []
new_start = []
new_end = []
for r in bt_mapped:
new_loc.append('{}:{}-{}'.format(r.chrom, r.start, r.end))
new_chrom.append(r.chrom)
new_start.append(r.start)
new_end.append(r.end)
new_coords = pd.DataFrame({'loc':new_loc, 'chrom': new_chrom,
'start': new_start, 'end': new_end},
index=old_loc)
for f in [mapped, unmapped]:
try:
f.close()
except AttributeError:
continue
return new_coords
|
python
|
def liftover_bed(
bed,
chain,
mapped=None,
unmapped=None,
liftOver_path='liftOver',
):
"""
Lift over a bed file using a given chain file.
Parameters
----------
bed : str or pybedtools.BedTool
Coordinates to lift over.
chain : str
Path to chain file to use for lift over.
mapped : str
Path for bed file with coordinates that are lifted over correctly.
unmapped : str
Path for text file to store coordinates that did not lift over
correctly. If this is not provided, these are discarded.
liftOver_path : str
Path to liftOver executable if not in path.
Returns
-------
new_coords : pandas.DataFrame
Pandas data frame with lift over results. Index is old coordinates in
the form chrom:start-end and columns are chrom, start, end and loc
(chrom:start-end) in new coordinate system.
"""
import subprocess
import pybedtools as pbt
if mapped == None:
import tempfile
mapped = tempfile.NamedTemporaryFile()
mname = mapped.name
else:
mname = mapped
if unmapped == None:
import tempfile
unmapped = tempfile.NamedTemporaryFile()
uname = unmapped.name
else:
uname = unmapped
if type(bed) == str:
bt = pbt.BedTool(bed)
elif type(bed) == pbt.bedtool.BedTool:
bt = bed
else:
sys.exit(1)
bt = bt.sort()
c = '{} {} {} {} {}'.format(liftOver_path, bt.fn, chain, mname, uname)
subprocess.check_call(c, shell=True)
with open(uname) as f:
missing = pbt.BedTool(''.join([x for x in f.readlines()[1::2]]),
from_string=True)
bt = bt.subtract(missing)
bt_mapped = pbt.BedTool(mname)
old_loc = []
for r in bt:
old_loc.append('{}:{}-{}'.format(r.chrom, r.start, r.end))
new_loc = []
new_chrom = []
new_start = []
new_end = []
for r in bt_mapped:
new_loc.append('{}:{}-{}'.format(r.chrom, r.start, r.end))
new_chrom.append(r.chrom)
new_start.append(r.start)
new_end.append(r.end)
new_coords = pd.DataFrame({'loc':new_loc, 'chrom': new_chrom,
'start': new_start, 'end': new_end},
index=old_loc)
for f in [mapped, unmapped]:
try:
f.close()
except AttributeError:
continue
return new_coords
|
[
"def",
"liftover_bed",
"(",
"bed",
",",
"chain",
",",
"mapped",
"=",
"None",
",",
"unmapped",
"=",
"None",
",",
"liftOver_path",
"=",
"'liftOver'",
",",
")",
":",
"import",
"subprocess",
"import",
"pybedtools",
"as",
"pbt",
"if",
"mapped",
"==",
"None",
":",
"import",
"tempfile",
"mapped",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
")",
"mname",
"=",
"mapped",
".",
"name",
"else",
":",
"mname",
"=",
"mapped",
"if",
"unmapped",
"==",
"None",
":",
"import",
"tempfile",
"unmapped",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
")",
"uname",
"=",
"unmapped",
".",
"name",
"else",
":",
"uname",
"=",
"unmapped",
"if",
"type",
"(",
"bed",
")",
"==",
"str",
":",
"bt",
"=",
"pbt",
".",
"BedTool",
"(",
"bed",
")",
"elif",
"type",
"(",
"bed",
")",
"==",
"pbt",
".",
"bedtool",
".",
"BedTool",
":",
"bt",
"=",
"bed",
"else",
":",
"sys",
".",
"exit",
"(",
"1",
")",
"bt",
"=",
"bt",
".",
"sort",
"(",
")",
"c",
"=",
"'{} {} {} {} {}'",
".",
"format",
"(",
"liftOver_path",
",",
"bt",
".",
"fn",
",",
"chain",
",",
"mname",
",",
"uname",
")",
"subprocess",
".",
"check_call",
"(",
"c",
",",
"shell",
"=",
"True",
")",
"with",
"open",
"(",
"uname",
")",
"as",
"f",
":",
"missing",
"=",
"pbt",
".",
"BedTool",
"(",
"''",
".",
"join",
"(",
"[",
"x",
"for",
"x",
"in",
"f",
".",
"readlines",
"(",
")",
"[",
"1",
":",
":",
"2",
"]",
"]",
")",
",",
"from_string",
"=",
"True",
")",
"bt",
"=",
"bt",
".",
"subtract",
"(",
"missing",
")",
"bt_mapped",
"=",
"pbt",
".",
"BedTool",
"(",
"mname",
")",
"old_loc",
"=",
"[",
"]",
"for",
"r",
"in",
"bt",
":",
"old_loc",
".",
"append",
"(",
"'{}:{}-{}'",
".",
"format",
"(",
"r",
".",
"chrom",
",",
"r",
".",
"start",
",",
"r",
".",
"end",
")",
")",
"new_loc",
"=",
"[",
"]",
"new_chrom",
"=",
"[",
"]",
"new_start",
"=",
"[",
"]",
"new_end",
"=",
"[",
"]",
"for",
"r",
"in",
"bt_mapped",
":",
"new_loc",
".",
"append",
"(",
"'{}:{}-{}'",
".",
"format",
"(",
"r",
".",
"chrom",
",",
"r",
".",
"start",
",",
"r",
".",
"end",
")",
")",
"new_chrom",
".",
"append",
"(",
"r",
".",
"chrom",
")",
"new_start",
".",
"append",
"(",
"r",
".",
"start",
")",
"new_end",
".",
"append",
"(",
"r",
".",
"end",
")",
"new_coords",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"'loc'",
":",
"new_loc",
",",
"'chrom'",
":",
"new_chrom",
",",
"'start'",
":",
"new_start",
",",
"'end'",
":",
"new_end",
"}",
",",
"index",
"=",
"old_loc",
")",
"for",
"f",
"in",
"[",
"mapped",
",",
"unmapped",
"]",
":",
"try",
":",
"f",
".",
"close",
"(",
")",
"except",
"AttributeError",
":",
"continue",
"return",
"new_coords"
] |
Lift over a bed file using a given chain file.
Parameters
----------
bed : str or pybedtools.BedTool
Coordinates to lift over.
chain : str
Path to chain file to use for lift over.
mapped : str
Path for bed file with coordinates that are lifted over correctly.
unmapped : str
Path for text file to store coordinates that did not lift over
correctly. If this is not provided, these are discarded.
liftOver_path : str
Path to liftOver executable if not in path.
Returns
-------
new_coords : pandas.DataFrame
Pandas data frame with lift over results. Index is old coordinates in
the form chrom:start-end and columns are chrom, start, end and loc
(chrom:start-end) in new coordinate system.
|
[
"Lift",
"over",
"a",
"bed",
"file",
"using",
"a",
"given",
"chain",
"file",
"."
] |
38efdf0e11d01bc00a135921cb91a19c03db5d5c
|
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/analysis.py#L354-L437
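A hypothetical invocation (file names are illustrative; the UCSC liftOver binary, a chain file, pybedtools, and pandas are required):

new_coords = liftover_bed('peaks_hg19.bed', 'hg19ToHg38.over.chain.gz',
                          mapped='peaks_hg38.bed',
                          unmapped='peaks_unmapped.txt')
# Index is the old chrom:start-end string; columns are chrom, start, end, loc.
print(new_coords.head())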
|
242,625
|
cdeboever3/cdpybio
|
cdpybio/analysis.py
|
deseq2_size_factors
|
def deseq2_size_factors(counts, meta, design):
"""
Get size factors for counts using DESeq2.
Parameters
----------
counts : pandas.DataFrame
Counts to pass to DESeq2.
meta : pandas.DataFrame
Pandas dataframe whose index matches the columns of counts. This is
passed to DESeq2's colData.
design : str
Design like ~subject_id that will be passed to DESeq2. The design
variables should match columns in meta.
Returns
-------
sf : pandas.Series
Series whose index matches the columns of counts and whose values are
the size factors from DESeq2. Divide each column by its size factor to
obtain normalized counts.
"""
import rpy2.robjects as r
from rpy2.robjects import pandas2ri
pandas2ri.activate()
r.r('suppressMessages(library(DESeq2))')
r.globalenv['counts'] = counts
r.globalenv['meta'] = meta
r.r('dds = DESeqDataSetFromMatrix(countData=counts, colData=meta, '
'design={})'.format(design))
r.r('dds = estimateSizeFactors(dds)')
r.r('sf = sizeFactors(dds)')
sf = r.globalenv['sf']
return pd.Series(sf, index=counts.columns)
|
python
|
def deseq2_size_factors(counts, meta, design):
"""
Get size factors for counts using DESeq2.
Parameters
----------
counts : pandas.DataFrame
Counts to pass to DESeq2.
meta : pandas.DataFrame
Pandas dataframe whose index matches the columns of counts. This is
passed to DESeq2's colData.
design : str
Design like ~subject_id that will be passed to DESeq2. The design
variables should match columns in meta.
Returns
-------
sf : pandas.Series
Series whose index matches the columns of counts and whose values are
the size factors from DESeq2. Divide each column by its size factor to
obtain normalized counts.
"""
import rpy2.robjects as r
from rpy2.robjects import pandas2ri
pandas2ri.activate()
r.r('suppressMessages(library(DESeq2))')
r.globalenv['counts'] = counts
r.globalenv['meta'] = meta
r.r('dds = DESeqDataSetFromMatrix(countData=counts, colData=meta, '
'design={})'.format(design))
r.r('dds = estimateSizeFactors(dds)')
r.r('sf = sizeFactors(dds)')
sf = r.globalenv['sf']
return pd.Series(sf, index=counts.columns)
|
[
"def",
"deseq2_size_factors",
"(",
"counts",
",",
"meta",
",",
"design",
")",
":",
"import",
"rpy2",
".",
"robjects",
"as",
"r",
"from",
"rpy2",
".",
"robjects",
"import",
"pandas2ri",
"pandas2ri",
".",
"activate",
"(",
")",
"r",
".",
"r",
"(",
"'suppressMessages(library(DESeq2))'",
")",
"r",
".",
"globalenv",
"[",
"'counts'",
"]",
"=",
"counts",
"r",
".",
"globalenv",
"[",
"'meta'",
"]",
"=",
"meta",
"r",
".",
"r",
"(",
"'dds = DESeqDataSetFromMatrix(countData=counts, colData=meta, '",
"'design={})'",
".",
"format",
"(",
"design",
")",
")",
"r",
".",
"r",
"(",
"'dds = estimateSizeFactors(dds)'",
")",
"r",
".",
"r",
"(",
"'sf = sizeFactors(dds)'",
")",
"sf",
"=",
"r",
".",
"globalenv",
"[",
"'sf'",
"]",
"return",
"pd",
".",
"Series",
"(",
"sf",
",",
"index",
"=",
"counts",
".",
"columns",
")"
] |
Get size factors for counts using DESeq2.
Parameters
----------
counts : pandas.DataFrame
Counts to pass to DESeq2.
meta : pandas.DataFrame
Pandas dataframe whose index matches the columns of counts. This is
passed to DESeq2's colData.
design : str
Design like ~subject_id that will be passed to DESeq2. The design
variables should match columns in meta.
Returns
-------
sf : pandas.Series
Series whose index matches the columns of counts and whose values are
the size factors from DESeq2. Divide each column by its size factor to
obtain normalized counts.
|
[
"Get",
"size",
"factors",
"for",
"counts",
"using",
"DESeq2",
"."
] |
38efdf0e11d01bc00a135921cb91a19c03db5d5c
|
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/analysis.py#L439-L475
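A toy invocation sketch (requires rpy2 and the DESeq2 Bioconductor package; the counts are illustrative):

import pandas as pd

counts = pd.DataFrame({'s1': [10, 0, 25], 's2': [20, 1, 50]},
                      index=['geneA', 'geneB', 'geneC'])
meta = pd.DataFrame({'subject_id': ['a', 'b']}, index=counts.columns)
sf = deseq2_size_factors(counts, meta, design='~subject_id')
normalized = counts / sf  # divide each column by its size factor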
|
242,626
|
cdeboever3/cdpybio
|
cdpybio/analysis.py
|
goseq_gene_enrichment
|
def goseq_gene_enrichment(genes, sig, plot_fn=None, length_correct=True):
"""
Perform goseq enrichment for an Ensembl gene set.
Parameters
----------
genes : list
List of all genes as Ensembl IDs.
sig : list
List of boolean values indicating whether each gene is significant or
not.
plot_fn : str
Path to save length bias plot to. If not provided, the plot is deleted.
length_correct : bool
Correct for length bias.
Returns
-------
go_results : pandas.DataFrame
        Dataframe with goseq results as well as Benjamini-Hochberg corrected
p-values.
"""
import os
import readline
import statsmodels.stats.multitest as smm
import rpy2.robjects as r
genes = list(genes)
sig = [bool(x) for x in sig]
r.r('suppressMessages(library(goseq))')
r.globalenv['genes'] = list(genes)
r.globalenv['group'] = list(sig)
r.r('group = as.logical(group)')
r.r('names(group) = genes')
r.r('pwf = nullp(group, "hg19", "ensGene")')
if length_correct:
r.r('wall = goseq(pwf, "hg19", "ensGene")')
else:
r.r('wall = goseq(pwf, "hg19", "ensGene", method="Hypergeometric")')
r.r('t = as.data.frame(wall)')
t = r.globalenv['t']
go_results = pd.DataFrame(columns=list(t.colnames))
for i, c in enumerate(go_results.columns):
go_results[c] = list(t[i])
r, c, ask, abf = smm.multipletests(
go_results.over_represented_pvalue, alpha=0.05, method='fdr_i')
go_results['over_represented_pvalue_bh'] = c
r, c, ask, abf = smm.multipletests(
go_results.under_represented_pvalue, alpha=0.05, method='fdr_i')
go_results['under_represented_pvalue_bh'] = c
go_results.index = go_results.category
go_results = go_results.drop('category', axis=1)
if plot_fn and os.path.exists('Rplots.pdf'):
from os import rename
rename('Rplots.pdf', plot_fn)
elif os.path.exists('Rplots.pdf'):
from os import remove
remove('Rplots.pdf')
return go_results
|
python
|
def goseq_gene_enrichment(genes, sig, plot_fn=None, length_correct=True):
"""
Perform goseq enrichment for an Ensembl gene set.
Parameters
----------
genes : list
List of all genes as Ensembl IDs.
sig : list
List of boolean values indicating whether each gene is significant or
not.
plot_fn : str
Path to save length bias plot to. If not provided, the plot is deleted.
length_correct : bool
Correct for length bias.
Returns
-------
go_results : pandas.DataFrame
        Dataframe with goseq results as well as Benjamini-Hochberg corrected
p-values.
"""
import os
import readline
import statsmodels.stats.multitest as smm
import rpy2.robjects as r
genes = list(genes)
sig = [bool(x) for x in sig]
r.r('suppressMessages(library(goseq))')
r.globalenv['genes'] = list(genes)
r.globalenv['group'] = list(sig)
r.r('group = as.logical(group)')
r.r('names(group) = genes')
r.r('pwf = nullp(group, "hg19", "ensGene")')
if length_correct:
r.r('wall = goseq(pwf, "hg19", "ensGene")')
else:
r.r('wall = goseq(pwf, "hg19", "ensGene", method="Hypergeometric")')
r.r('t = as.data.frame(wall)')
t = r.globalenv['t']
go_results = pd.DataFrame(columns=list(t.colnames))
for i, c in enumerate(go_results.columns):
go_results[c] = list(t[i])
r, c, ask, abf = smm.multipletests(
go_results.over_represented_pvalue, alpha=0.05, method='fdr_i')
go_results['over_represented_pvalue_bh'] = c
r, c, ask, abf = smm.multipletests(
go_results.under_represented_pvalue, alpha=0.05, method='fdr_i')
go_results['under_represented_pvalue_bh'] = c
go_results.index = go_results.category
go_results = go_results.drop('category', axis=1)
if plot_fn and os.path.exists('Rplots.pdf'):
from os import rename
rename('Rplots.pdf', plot_fn)
elif os.path.exists('Rplots.pdf'):
from os import remove
remove('Rplots.pdf')
return go_results
|
[
"def",
"goseq_gene_enrichment",
"(",
"genes",
",",
"sig",
",",
"plot_fn",
"=",
"None",
",",
"length_correct",
"=",
"True",
")",
":",
"import",
"os",
"import",
"readline",
"import",
"statsmodels",
".",
"stats",
".",
"multitest",
"as",
"smm",
"import",
"rpy2",
".",
"robjects",
"as",
"r",
"genes",
"=",
"list",
"(",
"genes",
")",
"sig",
"=",
"[",
"bool",
"(",
"x",
")",
"for",
"x",
"in",
"sig",
"]",
"r",
".",
"r",
"(",
"'suppressMessages(library(goseq))'",
")",
"r",
".",
"globalenv",
"[",
"'genes'",
"]",
"=",
"list",
"(",
"genes",
")",
"r",
".",
"globalenv",
"[",
"'group'",
"]",
"=",
"list",
"(",
"sig",
")",
"r",
".",
"r",
"(",
"'group = as.logical(group)'",
")",
"r",
".",
"r",
"(",
"'names(group) = genes'",
")",
"r",
".",
"r",
"(",
"'pwf = nullp(group, \"hg19\", \"ensGene\")'",
")",
"if",
"length_correct",
":",
"r",
".",
"r",
"(",
"'wall = goseq(pwf, \"hg19\", \"ensGene\")'",
")",
"else",
":",
"r",
".",
"r",
"(",
"'wall = goseq(pwf, \"hg19\", \"ensGene\", method=\"Hypergeometric\")'",
")",
"r",
".",
"r",
"(",
"'t = as.data.frame(wall)'",
")",
"t",
"=",
"r",
".",
"globalenv",
"[",
"'t'",
"]",
"go_results",
"=",
"pd",
".",
"DataFrame",
"(",
"columns",
"=",
"list",
"(",
"t",
".",
"colnames",
")",
")",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"go_results",
".",
"columns",
")",
":",
"go_results",
"[",
"c",
"]",
"=",
"list",
"(",
"t",
"[",
"i",
"]",
")",
"r",
",",
"c",
",",
"ask",
",",
"abf",
"=",
"smm",
".",
"multipletests",
"(",
"go_results",
".",
"over_represented_pvalue",
",",
"alpha",
"=",
"0.05",
",",
"method",
"=",
"'fdr_i'",
")",
"go_results",
"[",
"'over_represented_pvalue_bh'",
"]",
"=",
"c",
"r",
",",
"c",
",",
"ask",
",",
"abf",
"=",
"smm",
".",
"multipletests",
"(",
"go_results",
".",
"under_represented_pvalue",
",",
"alpha",
"=",
"0.05",
",",
"method",
"=",
"'fdr_i'",
")",
"go_results",
"[",
"'under_represented_pvalue_bh'",
"]",
"=",
"c",
"go_results",
".",
"index",
"=",
"go_results",
".",
"category",
"go_results",
"=",
"go_results",
".",
"drop",
"(",
"'category'",
",",
"axis",
"=",
"1",
")",
"if",
"plot_fn",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"'Rplots.pdf'",
")",
":",
"from",
"os",
"import",
"rename",
"rename",
"(",
"'Rplots.pdf'",
",",
"plot_fn",
")",
"elif",
"os",
".",
"path",
".",
"exists",
"(",
"'Rplots.pdf'",
")",
":",
"from",
"os",
"import",
"remove",
"remove",
"(",
"'Rplots.pdf'",
")",
"return",
"go_results"
] |
Perform goseq enrichment for an Ensembl gene set.
Parameters
----------
genes : list
List of all genes as Ensembl IDs.
sig : list
List of boolean values indicating whether each gene is significant or
not.
plot_fn : str
Path to save length bias plot to. If not provided, the plot is deleted.
length_correct : bool
Correct for length bias.
Returns
-------
go_results : pandas.DataFrame
    Dataframe with goseq results as well as Benjamini-Hochberg corrected
p-values.
|
[
"Perform",
"goseq",
"enrichment",
"for",
"an",
"Ensembl",
"gene",
"set",
"."
] |
38efdf0e11d01bc00a135921cb91a19c03db5d5c
|
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/analysis.py#L477-L537
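A hypothetical call (requires rpy2, the goseq R package, and its hg19/ensGene annotation data; the gene IDs are illustrative):

genes = ['ENSG00000141510', 'ENSG00000012048', 'ENSG00000139618']
sig = [True, False, True]
go_results = goseq_gene_enrichment(genes, sig, plot_fn='length_bias.pdf')
# Smallest BH-corrected over-representation p-values first.
print(go_results.sort_values('over_represented_pvalue_bh').head())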
|
242,627
|
cdeboever3/cdpybio
|
cdpybio/analysis.py
|
categories_to_colors
|
def categories_to_colors(cats, colormap=None):
"""
Map categorical data to colors.
Parameters
----------
cats : pandas.Series or list
Categorical data as a list or in a Series.
colormap : list
List of RGB triples. If not provided, the tableau20 colormap defined in
this module will be used.
Returns
-------
legend : pd.Series
Series whose values are colors and whose index are the original
categories that correspond to those colors.
"""
if colormap is None:
colormap = tableau20
if type(cats) != pd.Series:
cats = pd.Series(cats)
legend = pd.Series(dict(zip(set(cats), colormap)))
# colors = pd.Series([legend[x] for x in cats.values], index=cats.index)
# I've removed this output:
# colors : pd.Series
# Series whose values are the colors for each category. If cats was a
# Series, then out will have the same index as cats.
return(legend)
|
python
|
def categories_to_colors(cats, colormap=None):
"""
Map categorical data to colors.
Parameters
----------
cats : pandas.Series or list
Categorical data as a list or in a Series.
colormap : list
List of RGB triples. If not provided, the tableau20 colormap defined in
this module will be used.
Returns
-------
legend : pd.Series
Series whose values are colors and whose index are the original
categories that correspond to those colors.
"""
if colormap is None:
colormap = tableau20
if type(cats) != pd.Series:
cats = pd.Series(cats)
legend = pd.Series(dict(zip(set(cats), colormap)))
# colors = pd.Series([legend[x] for x in cats.values], index=cats.index)
# I've removed this output:
# colors : pd.Series
# Series whose values are the colors for each category. If cats was a
# Series, then out will have the same index as cats.
return(legend)
|
[
"def",
"categories_to_colors",
"(",
"cats",
",",
"colormap",
"=",
"None",
")",
":",
"if",
"colormap",
"is",
"None",
":",
"colormap",
"=",
"tableau20",
"if",
"type",
"(",
"cats",
")",
"!=",
"pd",
".",
"Series",
":",
"cats",
"=",
"pd",
".",
"Series",
"(",
"cats",
")",
"legend",
"=",
"pd",
".",
"Series",
"(",
"dict",
"(",
"zip",
"(",
"set",
"(",
"cats",
")",
",",
"colormap",
")",
")",
")",
"# colors = pd.Series([legend[x] for x in cats.values], index=cats.index)",
"# I've removed this output:",
"# colors : pd.Series",
"# Series whose values are the colors for each category. If cats was a",
"# Series, then out will have the same index as cats.",
"return",
"(",
"legend",
")"
] |
Map categorical data to colors.
Parameters
----------
cats : pandas.Series or list
Categorical data as a list or in a Series.
colormap : list
List of RGB triples. If not provided, the tableau20 colormap defined in
this module will be used.
Returns
-------
legend : pd.Series
Series whose values are colors and whose index are the original
categories that correspond to those colors.
|
[
"Map",
"categorical",
"data",
"to",
"colors",
"."
] |
38efdf0e11d01bc00a135921cb91a19c03db5d5c
|
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/analysis.py#L539-L569
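A small usage sketch. Note that the mapping is built from set(cats), whose iteration order is arbitrary, so the category-to-color pairing is not guaranteed to be stable across runs:

cats = ['tumor', 'normal', 'tumor', 'normal']
legend = categories_to_colors(cats)
# legend is a Series: index = unique categories, values = RGB triples
# drawn from tableau20 (or from the colormap argument, if supplied).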
|
242,628
|
cdeboever3/cdpybio
|
cdpybio/analysis.py
|
plot_color_legend
|
def plot_color_legend(legend, horizontal=False, ax=None):
"""
Plot a pandas Series with labels and colors.
Parameters
----------
legend : pandas.Series
Pandas Series whose values are RGB triples and whose index contains
categorical labels.
horizontal : bool
If True, plot horizontally.
ax : matplotlib.axis
Axis to plot on.
Returns
-------
ax : matplotlib.axis
Plot axis.
"""
import matplotlib.pyplot as plt
import numpy as np
t = np.array([np.array([x for x in legend])])
if ax is None:
fig, ax = plt.subplots(1, 1)
if horizontal:
ax.imshow(t, interpolation='none')
ax.set_yticks([])
ax.set_xticks(np.arange(0, legend.shape[0]))
t = ax.set_xticklabels(legend.index)
else:
t = t.reshape([legend.shape[0], 1, 3])
ax.imshow(t, interpolation='none')
ax.set_xticks([])
ax.set_yticks(np.arange(0, legend.shape[0]))
t = ax.set_yticklabels(legend.index)
return ax
|
python
|
def plot_color_legend(legend, horizontal=False, ax=None):
"""
Plot a pandas Series with labels and colors.
Parameters
----------
legend : pandas.Series
Pandas Series whose values are RGB triples and whose index contains
categorical labels.
horizontal : bool
If True, plot horizontally.
ax : matplotlib.axis
Axis to plot on.
Returns
-------
ax : matplotlib.axis
Plot axis.
"""
import matplotlib.pyplot as plt
import numpy as np
t = np.array([np.array([x for x in legend])])
if ax is None:
fig, ax = plt.subplots(1, 1)
if horizontal:
ax.imshow(t, interpolation='none')
ax.set_yticks([])
ax.set_xticks(np.arange(0, legend.shape[0]))
t = ax.set_xticklabels(legend.index)
else:
t = t.reshape([legend.shape[0], 1, 3])
ax.imshow(t, interpolation='none')
ax.set_xticks([])
ax.set_yticks(np.arange(0, legend.shape[0]))
t = ax.set_yticklabels(legend.index)
return ax
|
[
"def",
"plot_color_legend",
"(",
"legend",
",",
"horizontal",
"=",
"False",
",",
"ax",
"=",
"None",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"import",
"numpy",
"as",
"np",
"t",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"array",
"(",
"[",
"x",
"for",
"x",
"in",
"legend",
"]",
")",
"]",
")",
"if",
"ax",
"is",
"None",
":",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"1",
",",
"1",
")",
"if",
"horizontal",
":",
"ax",
".",
"imshow",
"(",
"t",
",",
"interpolation",
"=",
"'none'",
")",
"ax",
".",
"set_yticks",
"(",
"[",
"]",
")",
"ax",
".",
"set_xticks",
"(",
"np",
".",
"arange",
"(",
"0",
",",
"legend",
".",
"shape",
"[",
"0",
"]",
")",
")",
"t",
"=",
"ax",
".",
"set_xticklabels",
"(",
"legend",
".",
"index",
")",
"else",
":",
"t",
"=",
"t",
".",
"reshape",
"(",
"[",
"legend",
".",
"shape",
"[",
"0",
"]",
",",
"1",
",",
"3",
"]",
")",
"ax",
".",
"imshow",
"(",
"t",
",",
"interpolation",
"=",
"'none'",
")",
"ax",
".",
"set_xticks",
"(",
"[",
"]",
")",
"ax",
".",
"set_yticks",
"(",
"np",
".",
"arange",
"(",
"0",
",",
"legend",
".",
"shape",
"[",
"0",
"]",
")",
")",
"t",
"=",
"ax",
".",
"set_yticklabels",
"(",
"legend",
".",
"index",
")",
"return",
"ax"
] |
Plot a pandas Series with labels and colors.
Parameters
----------
legend : pandas.Series
Pandas Series whose values are RGB triples and whose index contains
categorical labels.
horizontal : bool
If True, plot horizontally.
ax : matplotlib.axis
Axis to plot on.
Returns
-------
ax : matplotlib.axis
Plot axis.
|
[
"Plot",
"a",
"pandas",
"Series",
"with",
"labels",
"and",
"colors",
"."
] |
38efdf0e11d01bc00a135921cb91a19c03db5d5c
|
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/analysis.py#L571-L609
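A usage sketch with a hand-built legend Series (RGB triples must be in the 0-1 range for imshow to render them as colors):

import pandas as pd
import matplotlib.pyplot as plt

legend = pd.Series({'tumor': (0.8, 0.2, 0.2), 'normal': (0.2, 0.4, 0.8)})
ax = plot_color_legend(legend, horizontal=True)
plt.show()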
|
242,629
|
cdeboever3/cdpybio
|
cdpybio/analysis.py
|
make_color_legend_rects
|
def make_color_legend_rects(colors, labels=None):
"""
Make list of rectangles and labels for making legends.
Parameters
----------
colors : pandas.Series or list
Pandas series whose values are colors and index is labels.
Alternatively, you can provide a list with colors and provide the labels
as a list.
labels : list
If colors is a list, this should be the list of corresponding labels.
Returns
-------
out : pd.Series
Pandas series whose values are matplotlib rectangles and whose index are
the legend labels for those rectangles. You can add each of these
rectangles to your axis using ax.add_patch(r) for r in out then create a
        legend whose handles are out.values and whose labels are
legend_rects.index:
for r in legend_rects:
ax.add_patch(r)
lgd = ax.legend(legend_rects.values, labels=legend_rects.index)
"""
from matplotlib.pyplot import Rectangle
if labels:
d = dict(zip(labels, colors))
se = pd.Series(d)
else:
se = colors
rects = []
for i in se.index:
r = Rectangle((0, 0), 0, 0, fc=se[i])
rects.append(r)
out = pd.Series(rects, index=se.index)
return out
|
python
|
def make_color_legend_rects(colors, labels=None):
"""
Make list of rectangles and labels for making legends.
Parameters
----------
colors : pandas.Series or list
Pandas series whose values are colors and index is labels.
Alternatively, you can provide a list with colors and provide the labels
as a list.
labels : list
If colors is a list, this should be the list of corresponding labels.
Returns
-------
out : pd.Series
Pandas series whose values are matplotlib rectangles and whose index are
the legend labels for those rectangles. You can add each of these
rectangles to your axis using ax.add_patch(r) for r in out then create a
        legend whose handles are out.values and whose labels are
legend_rects.index:
for r in legend_rects:
ax.add_patch(r)
lgd = ax.legend(legend_rects.values, labels=legend_rects.index)
"""
from matplotlib.pyplot import Rectangle
if labels:
d = dict(zip(labels, colors))
se = pd.Series(d)
else:
se = colors
rects = []
for i in se.index:
r = Rectangle((0, 0), 0, 0, fc=se[i])
rects.append(r)
out = pd.Series(rects, index=se.index)
return out
|
[
"def",
"make_color_legend_rects",
"(",
"colors",
",",
"labels",
"=",
"None",
")",
":",
"from",
"matplotlib",
".",
"pyplot",
"import",
"Rectangle",
"if",
"labels",
":",
"d",
"=",
"dict",
"(",
"zip",
"(",
"labels",
",",
"colors",
")",
")",
"se",
"=",
"pd",
".",
"Series",
"(",
"d",
")",
"else",
":",
"se",
"=",
"colors",
"rects",
"=",
"[",
"]",
"for",
"i",
"in",
"se",
".",
"index",
":",
"r",
"=",
"Rectangle",
"(",
"(",
"0",
",",
"0",
")",
",",
"0",
",",
"0",
",",
"fc",
"=",
"se",
"[",
"i",
"]",
")",
"rects",
".",
"append",
"(",
"r",
")",
"out",
"=",
"pd",
".",
"Series",
"(",
"rects",
",",
"index",
"=",
"se",
".",
"index",
")",
"return",
"out"
] |
Make list of rectangles and labels for making legends.
Parameters
----------
colors : pandas.Series or list
Pandas series whose values are colors and index is labels.
Alternatively, you can provide a list with colors and provide the labels
as a list.
labels : list
If colors is a list, this should be the list of corresponding labels.
Returns
-------
out : pd.Series
Pandas series whose values are matplotlib rectangles and whose index are
the legend labels for those rectangles. You can add each of these
rectangles to your axis using ax.add_patch(r) for r in out then create a
legend whose labels are out.values and whose labels are
legend_rects.index:
for r in legend_rects:
ax.add_patch(r)
lgd = ax.legend(legend_rects.values, labels=legend_rects.index)
|
[
"Make",
"list",
"of",
"rectangles",
"and",
"labels",
"for",
"making",
"legends",
"."
] |
38efdf0e11d01bc00a135921cb91a19c03db5d5c
|
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/analysis.py#L611-L649
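A usage sketch following the recipe in the docstring; the keyword form of ax.legend is used here because it is unambiguous in current matplotlib:

import matplotlib.pyplot as plt
import pandas as pd

legend = pd.Series({'tumor': (0.8, 0.2, 0.2), 'normal': (0.2, 0.4, 0.8)})
legend_rects = make_color_legend_rects(legend)
fig, ax = plt.subplots()
for r in legend_rects:
    ax.add_patch(r)
lgd = ax.legend(handles=list(legend_rects.values),
                labels=list(legend_rects.index))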
|
242,630
|
cdeboever3/cdpybio
|
cdpybio/analysis.py
|
SVD.pc_correlation
|
def pc_correlation(self, covariates, num_pc=5):
"""
        Calculate the correlation between the first num_pc principal components
and known covariates. The size and index of covariates determines
whether u or v is used.
Parameters
----------
covariates : pandas.DataFrame
Dataframe of covariates whose index corresponds to the index of
either u or v.
num_pc : int
Number of principal components to correlate with.
Returns
-------
corr : pandas.Panel
Panel with correlation values and p-values.
"""
from scipy.stats import spearmanr
if (covariates.shape[0] == self.u.shape[0] and
len(set(covariates.index) & set(self.u.index)) == self.u.shape[0]):
mat = self.u
elif (covariates.shape[0] == self.v.shape[0] and
len(set(covariates.index) & set(self.v.index)) == self.v.shape[0]):
mat = self.v
else:
import sys
sys.stderr.write('Covariates differ in size from input data.\n')
sys.exit(1)
corr = pd.Panel(items=['rho', 'pvalue'],
major_axis=covariates.columns,
minor_axis=mat.columns[0:num_pc])
for i in corr.major_axis:
for j in corr.minor_axis:
rho, p = spearmanr(covariates[i], mat[j])
corr.ix['rho', i, j] = rho
corr.ix['pvalue', i, j] = p
return corr
|
python
|
def pc_correlation(self, covariates, num_pc=5):
"""
        Calculate the correlation between the first num_pc principal components
and known covariates. The size and index of covariates determines
whether u or v is used.
Parameters
----------
covariates : pandas.DataFrame
Dataframe of covariates whose index corresponds to the index of
either u or v.
num_pc : int
Number of principal components to correlate with.
Returns
-------
corr : pandas.Panel
Panel with correlation values and p-values.
"""
from scipy.stats import spearmanr
if (covariates.shape[0] == self.u.shape[0] and
len(set(covariates.index) & set(self.u.index)) == self.u.shape[0]):
mat = self.u
elif (covariates.shape[0] == self.v.shape[0] and
len(set(covariates.index) & set(self.v.index)) == self.v.shape[0]):
mat = self.v
else:
import sys
sys.stderr.write('Covariates differ in size from input data.\n')
sys.exit(1)
corr = pd.Panel(items=['rho', 'pvalue'],
major_axis=covariates.columns,
minor_axis=mat.columns[0:num_pc])
for i in corr.major_axis:
for j in corr.minor_axis:
rho, p = spearmanr(covariates[i], mat[j])
corr.ix['rho', i, j] = rho
corr.ix['pvalue', i, j] = p
return corr
|
[
"def",
"pc_correlation",
"(",
"self",
",",
"covariates",
",",
"num_pc",
"=",
"5",
")",
":",
"from",
"scipy",
".",
"stats",
"import",
"spearmanr",
"if",
"(",
"covariates",
".",
"shape",
"[",
"0",
"]",
"==",
"self",
".",
"u",
".",
"shape",
"[",
"0",
"]",
"and",
"len",
"(",
"set",
"(",
"covariates",
".",
"index",
")",
"&",
"set",
"(",
"self",
".",
"u",
".",
"index",
")",
")",
"==",
"self",
".",
"u",
".",
"shape",
"[",
"0",
"]",
")",
":",
"mat",
"=",
"self",
".",
"u",
"elif",
"(",
"covariates",
".",
"shape",
"[",
"0",
"]",
"==",
"self",
".",
"v",
".",
"shape",
"[",
"0",
"]",
"and",
"len",
"(",
"set",
"(",
"covariates",
".",
"index",
")",
"&",
"set",
"(",
"self",
".",
"v",
".",
"index",
")",
")",
"==",
"self",
".",
"v",
".",
"shape",
"[",
"0",
"]",
")",
":",
"mat",
"=",
"self",
".",
"v",
"else",
":",
"import",
"sys",
"sys",
".",
"stderr",
".",
"write",
"(",
"'Covariates differ in size from input data.\\n'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"corr",
"=",
"pd",
".",
"Panel",
"(",
"items",
"=",
"[",
"'rho'",
",",
"'pvalue'",
"]",
",",
"major_axis",
"=",
"covariates",
".",
"columns",
",",
"minor_axis",
"=",
"mat",
".",
"columns",
"[",
"0",
":",
"num_pc",
"]",
")",
"for",
"i",
"in",
"corr",
".",
"major_axis",
":",
"for",
"j",
"in",
"corr",
".",
"minor_axis",
":",
"rho",
",",
"p",
"=",
"spearmanr",
"(",
"covariates",
"[",
"i",
"]",
",",
"mat",
"[",
"j",
"]",
")",
"corr",
".",
"ix",
"[",
"'rho'",
",",
"i",
",",
"j",
"]",
"=",
"rho",
"corr",
".",
"ix",
"[",
"'pvalue'",
",",
"i",
",",
"j",
"]",
"=",
"p",
"return",
"corr"
] |
Calculate the correlation between the first num_pc principal components
and known covariates. The size and index of covariates determines
whether u or v is used.
Parameters
----------
covariates : pandas.DataFrame
Dataframe of covariates whose index corresponds to the index of
either u or v.
num_pc : int
Number of principal components to correlate with.
Returns
-------
corr : pandas.Panel
Panel with correlation values and p-values.
|
[
"Calculate",
"the",
"correlation",
"between",
"the",
"first",
"num_pc",
"prinicipal",
"components",
"and",
"known",
"covariates",
".",
"The",
"size",
"and",
"index",
"of",
"covariates",
"determines",
"whether",
"u",
"or",
"v",
"is",
"used",
"."
] |
38efdf0e11d01bc00a135921cb91a19c03db5d5c
|
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/analysis.py#L911-L951
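A hypothetical call (svd and covariates are illustrative names). Note that pandas.Panel and .ix indexing have been removed from modern pandas, so this method only runs on the older pandas versions that still provide them:

corr = svd.pc_correlation(covariates, num_pc=3)
print(corr.ix['rho'])     # covariates x first 3 PCs, Spearman rho
print(corr.ix['pvalue'])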
|
242,631
|
siku2/Loglette
|
loglette/parser/__init__.py
|
Parser.can_handle
|
def can_handle(self, text: str) -> bool:
"""Check whether this parser can parse the text"""
try:
changelogs = self.split_changelogs(text)
if not changelogs:
return False
for changelog in changelogs:
_header, _changes = self.split_changelog(changelog)
if not any((_header, _changes)):
return False
header = self.parse_header(_header)
changes = self.parse_changes(_changes)
if not any((header, changes)):
return False
except Exception:
return False
else:
return True
|
python
|
def can_handle(self, text: str) -> bool:
"""Check whether this parser can parse the text"""
try:
changelogs = self.split_changelogs(text)
if not changelogs:
return False
for changelog in changelogs:
_header, _changes = self.split_changelog(changelog)
if not any((_header, _changes)):
return False
header = self.parse_header(_header)
changes = self.parse_changes(_changes)
if not any((header, changes)):
return False
except Exception:
return False
else:
return True
|
[
"def",
"can_handle",
"(",
"self",
",",
"text",
":",
"str",
")",
"->",
"bool",
":",
"try",
":",
"changelogs",
"=",
"self",
".",
"split_changelogs",
"(",
"text",
")",
"if",
"not",
"changelogs",
":",
"return",
"False",
"for",
"changelog",
"in",
"changelogs",
":",
"_header",
",",
"_changes",
"=",
"self",
".",
"split_changelog",
"(",
"changelog",
")",
"if",
"not",
"any",
"(",
"(",
"_header",
",",
"_changes",
")",
")",
":",
"return",
"False",
"header",
"=",
"self",
".",
"parse_header",
"(",
"_header",
")",
"changes",
"=",
"self",
".",
"parse_changes",
"(",
"_changes",
")",
"if",
"not",
"any",
"(",
"(",
"header",
",",
"changes",
")",
")",
":",
"return",
"False",
"except",
"Exception",
":",
"return",
"False",
"else",
":",
"return",
"True"
] |
Check whether this parser can parse the text
|
[
"Check",
"whether",
"this",
"parser",
"can",
"parse",
"the",
"text"
] |
d69f99c3ead2bb24f2aa491a61a7f82cb9ca8095
|
https://github.com/siku2/Loglette/blob/d69f99c3ead2bb24f2aa491a61a7f82cb9ca8095/loglette/parser/__init__.py#L23-L41
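The method is a try-parse probe: any exception anywhere in the split/parse pipeline means "cannot handle". A minimal dispatch sketch over a list of candidate parsers (the parse() entry point is a hypothetical name):

for parser in parsers:
    if parser.can_handle(text):
        changelog = parser.parse(text)  # hypothetical follow-up call
        break
else:
    raise ValueError('no parser can handle this changelog format')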
|
242,632
|
lvh/maxims
|
maxims/named.py
|
remember
|
def remember(empowered, powerupClass, interface):
"""
Adds a powerup to ``empowered`` that will instantiate ``powerupClass``
with the empowered's store when adapted to the given interface.
:param empowered: The Empowered (Store or Item) to be powered up.
:type empowered: ``axiom.item.Empowered``
:param powerupClass: The class that will be powered up to.
:type powerupClass: class
:param interface: The interface of the powerup.
:type interface: ``zope.interface.Interface``
:returns: ``None``
"""
className = fullyQualifiedName(powerupClass)
powerup = _StoredByName(store=empowered.store, className=className)
empowered.powerUp(powerup, interface)
|
python
|
def remember(empowered, powerupClass, interface):
"""
Adds a powerup to ``empowered`` that will instantiate ``powerupClass``
with the empowered's store when adapted to the given interface.
:param empowered: The Empowered (Store or Item) to be powered up.
:type empowered: ``axiom.item.Empowered``
:param powerupClass: The class that will be powered up to.
:type powerupClass: class
:param interface: The interface of the powerup.
:type interface: ``zope.interface.Interface``
:returns: ``None``
"""
className = fullyQualifiedName(powerupClass)
powerup = _StoredByName(store=empowered.store, className=className)
empowered.powerUp(powerup, interface)
|
[
"def",
"remember",
"(",
"empowered",
",",
"powerupClass",
",",
"interface",
")",
":",
"className",
"=",
"fullyQualifiedName",
"(",
"powerupClass",
")",
"powerup",
"=",
"_StoredByName",
"(",
"store",
"=",
"empowered",
".",
"store",
",",
"className",
"=",
"className",
")",
"empowered",
".",
"powerUp",
"(",
"powerup",
",",
"interface",
")"
] |
Adds a powerup to ``empowered`` that will instantiate ``powerupClass``
with the empowered's store when adapted to the given interface.
:param empowered: The Empowered (Store or Item) to be powered up.
:type empowered: ``axiom.item.Empowered``
:param powerupClass: The class that will be powered up to.
:type powerupClass: class
:param interface: The interface of the powerup.
:type interface: ``zope.interface.Interface``
:returns: ``None``
|
[
"Adds",
"a",
"powerup",
"to",
"empowered",
"that",
"will",
"instantiate",
"powerupClass",
"with",
"the",
"empowered",
"s",
"store",
"when",
"adapted",
"to",
"the",
"given",
"interface",
"."
] |
5c53b25d2cc4ccecbfe90193ade9ce0dbfbe4623
|
https://github.com/lvh/maxims/blob/5c53b25d2cc4ccecbfe90193ade9ce0dbfbe4623/maxims/named.py#L26-L41
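A hypothetical usage, assuming an Axiom store plus an IFoo interface implemented by FooPowerup (both names are illustrative):

from axiom.store import Store

store = Store()
remember(store, FooPowerup, IFoo)
# Later, adapting the store looks up the stored class name and
# instantiates FooPowerup with the store:
foo = IFoo(store)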
|
242,633
|
lvh/maxims
|
maxims/named.py
|
forget
|
def forget(empowered, powerupClass, interface):
"""
Forgets powerups previously stored with ``remember``.
:param empowered: The Empowered (Store or Item) to be powered down.
:type empowered: ``axiom.item.Empowered``
:param powerupClass: The class for which powerups will be forgotten.
:type powerupClass: class
:param interface: The interface the powerups were installed for.
:type interface: ``zope.interface.Interface``
:returns: ``None``
:raises ValueError: Class wasn't previously remembered.
"""
className = fullyQualifiedName(powerupClass)
withThisName = _StoredByName.className == className
items = empowered.store.query(_StoredByName, withThisName)
if items.count() == 0:
template = "No named powerups for {} (interface: {})".format
raise ValueError(template(powerupClass, interface))
for stored in items:
empowered.powerDown(stored, interface)
stored.deleteFromStore()
|
python
|
def forget(empowered, powerupClass, interface):
"""
Forgets powerups previously stored with ``remember``.
:param empowered: The Empowered (Store or Item) to be powered down.
:type empowered: ``axiom.item.Empowered``
:param powerupClass: The class for which powerups will be forgotten.
:type powerupClass: class
:param interface: The interface the powerups were installed for.
:type interface: ``zope.interface.Interface``
:returns: ``None``
:raises ValueError: Class wasn't previously remembered.
"""
className = fullyQualifiedName(powerupClass)
withThisName = _StoredByName.className == className
items = empowered.store.query(_StoredByName, withThisName)
if items.count() == 0:
template = "No named powerups for {} (interface: {})".format
raise ValueError(template(powerupClass, interface))
for stored in items:
empowered.powerDown(stored, interface)
stored.deleteFromStore()
|
[
"def",
"forget",
"(",
"empowered",
",",
"powerupClass",
",",
"interface",
")",
":",
"className",
"=",
"fullyQualifiedName",
"(",
"powerupClass",
")",
"withThisName",
"=",
"_StoredByName",
".",
"className",
"==",
"className",
"items",
"=",
"empowered",
".",
"store",
".",
"query",
"(",
"_StoredByName",
",",
"withThisName",
")",
"if",
"items",
".",
"count",
"(",
")",
"==",
"0",
":",
"template",
"=",
"\"No named powerups for {} (interface: {})\"",
".",
"format",
"raise",
"ValueError",
"(",
"template",
"(",
"powerupClass",
",",
"interface",
")",
")",
"for",
"stored",
"in",
"items",
":",
"empowered",
".",
"powerDown",
"(",
"stored",
",",
"interface",
")",
"stored",
".",
"deleteFromStore",
"(",
")"
] |
Forgets powerups previously stored with ``remember``.
:param empowered: The Empowered (Store or Item) to be powered down.
:type empowered: ``axiom.item.Empowered``
:param powerupClass: The class for which powerups will be forgotten.
:type powerupClass: class
:param interface: The interface the powerups were installed for.
:type interface: ``zope.interface.Interface``
:returns: ``None``
:raises ValueError: Class wasn't previously remembered.
|
[
"Forgets",
"powerups",
"previously",
"stored",
"with",
"remember",
"."
] |
5c53b25d2cc4ccecbfe90193ade9ce0dbfbe4623
|
https://github.com/lvh/maxims/blob/5c53b25d2cc4ccecbfe90193ade9ce0dbfbe4623/maxims/named.py#L44-L67
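Undoing the registration from the remember sketch above; a ValueError is raised if the class was never remembered for this store:

forget(store, FooPowerup, IFoo)  # store/FooPowerup/IFoo as in the remember sketch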
|
242,634
|
mayfield/cellulario
|
cellulario/iocell.py
|
IOCell.init_event_loop
|
def init_event_loop(self):
""" Every cell should have its own event loop for proper containment.
The type of event loop is not so important however. """
self.loop = asyncio.new_event_loop()
self.loop.set_debug(self.debug)
if hasattr(self.loop, '_set_coroutine_wrapper'):
self.loop._set_coroutine_wrapper(self.debug)
elif self.debug:
warnings.warn("Cannot set debug on loop: %s" % self.loop)
self.loop_policy = IOCellEventLoopPolicy(self.loop)
if not hasattr(self.loop, '_exception_handler'):
warnings.warn("Cannot save exception handler for: %s" % self.loop)
self.loop_exception_handler_save = None
else:
self.loop_exception_handler_save = self.loop._exception_handler
self.loop.set_exception_handler(self.loop_exception_handler)
|
python
|
def init_event_loop(self):
""" Every cell should have its own event loop for proper containment.
The type of event loop is not so important however. """
self.loop = asyncio.new_event_loop()
self.loop.set_debug(self.debug)
if hasattr(self.loop, '_set_coroutine_wrapper'):
self.loop._set_coroutine_wrapper(self.debug)
elif self.debug:
warnings.warn("Cannot set debug on loop: %s" % self.loop)
self.loop_policy = IOCellEventLoopPolicy(self.loop)
if not hasattr(self.loop, '_exception_handler'):
warnings.warn("Cannot save exception handler for: %s" % self.loop)
self.loop_exception_handler_save = None
else:
self.loop_exception_handler_save = self.loop._exception_handler
self.loop.set_exception_handler(self.loop_exception_handler)
|
[
"def",
"init_event_loop",
"(",
"self",
")",
":",
"self",
".",
"loop",
"=",
"asyncio",
".",
"new_event_loop",
"(",
")",
"self",
".",
"loop",
".",
"set_debug",
"(",
"self",
".",
"debug",
")",
"if",
"hasattr",
"(",
"self",
".",
"loop",
",",
"'_set_coroutine_wrapper'",
")",
":",
"self",
".",
"loop",
".",
"_set_coroutine_wrapper",
"(",
"self",
".",
"debug",
")",
"elif",
"self",
".",
"debug",
":",
"warnings",
".",
"warn",
"(",
"\"Cannot set debug on loop: %s\"",
"%",
"self",
".",
"loop",
")",
"self",
".",
"loop_policy",
"=",
"IOCellEventLoopPolicy",
"(",
"self",
".",
"loop",
")",
"if",
"not",
"hasattr",
"(",
"self",
".",
"loop",
",",
"'_exception_handler'",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Cannot save exception handler for: %s\"",
"%",
"self",
".",
"loop",
")",
"self",
".",
"loop_exception_handler_save",
"=",
"None",
"else",
":",
"self",
".",
"loop_exception_handler_save",
"=",
"self",
".",
"loop",
".",
"_exception_handler",
"self",
".",
"loop",
".",
"set_exception_handler",
"(",
"self",
".",
"loop_exception_handler",
")"
] |
Every cell should have its own event loop for proper containment.
The type of event loop is not so important however.
|
[
"Every",
"cell",
"should",
"have",
"its",
"own",
"event",
"loop",
"for",
"proper",
"containment",
".",
"The",
"type",
"of",
"event",
"loop",
"is",
"not",
"so",
"important",
"however",
"."
] |
e9dc10532a0357bc90ebaa2655b36822f9249673
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/iocell.py#L82-L97
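A minimal standalone sketch of the same per-object loop isolation (without the private-attribute handling, which targets older asyncio internals):

import asyncio

loop = asyncio.new_event_loop()
loop.set_debug(False)
try:
    result = loop.run_until_complete(asyncio.sleep(0, result='done'))
finally:
    loop.close()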
|
242,635
|
mayfield/cellulario
|
cellulario/iocell.py
|
IOCell.cleanup_event_loop
|
def cleanup_event_loop(self):
""" Cleanup an event loop and close it down forever. """
for task in asyncio.Task.all_tasks(loop=self.loop):
if self.debug:
warnings.warn('Cancelling task: %s' % task)
task._log_destroy_pending = False
task.cancel()
self.loop.close()
self.loop.set_exception_handler(self.loop_exception_handler_save)
self.loop_exception_handler_save = None
self.loop_policy = None
self.loop = None
|
python
|
def cleanup_event_loop(self):
""" Cleanup an event loop and close it down forever. """
for task in asyncio.Task.all_tasks(loop=self.loop):
if self.debug:
warnings.warn('Cancelling task: %s' % task)
task._log_destroy_pending = False
task.cancel()
self.loop.close()
self.loop.set_exception_handler(self.loop_exception_handler_save)
self.loop_exception_handler_save = None
self.loop_policy = None
self.loop = None
|
[
"def",
"cleanup_event_loop",
"(",
"self",
")",
":",
"for",
"task",
"in",
"asyncio",
".",
"Task",
".",
"all_tasks",
"(",
"loop",
"=",
"self",
".",
"loop",
")",
":",
"if",
"self",
".",
"debug",
":",
"warnings",
".",
"warn",
"(",
"'Cancelling task: %s'",
"%",
"task",
")",
"task",
".",
"_log_destroy_pending",
"=",
"False",
"task",
".",
"cancel",
"(",
")",
"self",
".",
"loop",
".",
"close",
"(",
")",
"self",
".",
"loop",
".",
"set_exception_handler",
"(",
"self",
".",
"loop_exception_handler_save",
")",
"self",
".",
"loop_exception_handler_save",
"=",
"None",
"self",
".",
"loop_policy",
"=",
"None",
"self",
".",
"loop",
"=",
"None"
] |
Cleanup an event loop and close it down forever.
|
[
"Cleanup",
"an",
"event",
"loop",
"and",
"close",
"it",
"down",
"forever",
"."
] |
e9dc10532a0357bc90ebaa2655b36822f9249673
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/iocell.py#L99-L110
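Note that asyncio.Task.all_tasks() was removed in Python 3.9; in modern asyncio the equivalent cancellation sweep would look roughly like:

import asyncio

def cancel_pending(loop):
    # asyncio.all_tasks replaced Task.all_tasks in newer Python versions.
    for task in asyncio.all_tasks(loop):
        task.cancel()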
|
242,636
|
mayfield/cellulario
|
cellulario/iocell.py
|
IOCell.add_tier
|
def add_tier(self, coro, **kwargs):
""" Add a coroutine to the cell as a task tier. The source can be a
single value or a list of either `Tier` types or coroutine functions
already added to a `Tier` via `add_tier`. """
self.assertNotFinalized()
assert asyncio.iscoroutinefunction(coro)
tier = self.Tier(self, coro, **kwargs)
self.tiers.append(tier)
self.tiers_coro_map[coro] = tier
return tier
|
python
|
def add_tier(self, coro, **kwargs):
""" Add a coroutine to the cell as a task tier. The source can be a
single value or a list of either `Tier` types or coroutine functions
already added to a `Tier` via `add_tier`. """
self.assertNotFinalized()
assert asyncio.iscoroutinefunction(coro)
tier = self.Tier(self, coro, **kwargs)
self.tiers.append(tier)
self.tiers_coro_map[coro] = tier
return tier
|
[
"def",
"add_tier",
"(",
"self",
",",
"coro",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"assertNotFinalized",
"(",
")",
"assert",
"asyncio",
".",
"iscoroutinefunction",
"(",
"coro",
")",
"tier",
"=",
"self",
".",
"Tier",
"(",
"self",
",",
"coro",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"tiers",
".",
"append",
"(",
"tier",
")",
"self",
".",
"tiers_coro_map",
"[",
"coro",
"]",
"=",
"tier",
"return",
"tier"
] |
Add a coroutine to the cell as a task tier. The source can be a
single value or a list of either `Tier` types or coroutine functions
already added to a `Tier` via `add_tier`.
|
[
"Add",
"a",
"coroutine",
"to",
"the",
"cell",
"as",
"a",
"task",
"tier",
".",
"The",
"source",
"can",
"be",
"a",
"single",
"value",
"or",
"a",
"list",
"of",
"either",
"Tier",
"types",
"or",
"coroutine",
"functions",
"already",
"added",
"to",
"a",
"Tier",
"via",
"add_tier",
"."
] |
e9dc10532a0357bc90ebaa2655b36822f9249673
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/iocell.py#L123-L132
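A hypothetical two-tier wiring (the coroutine signatures and the emit call are illustrative, inferred from the Tier abstraction rather than documented in this record):

cell = IOCell()

async def produce(route):
    await route.emit(1)          # hypothetical emit API on the tier handle

async def consume(route, value):
    print(value)

t1 = cell.add_tier(produce)
t2 = cell.add_tier(consume, source=t1)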
|
242,637
|
mayfield/cellulario
|
cellulario/iocell.py
|
IOCell.append_tier
|
def append_tier(self, coro, **kwargs):
""" Implicitly source from the tail tier like a pipe. """
source = self.tiers[-1] if self.tiers else None
return self.add_tier(coro, source=source, **kwargs)
|
python
|
def append_tier(self, coro, **kwargs):
""" Implicitly source from the tail tier like a pipe. """
source = self.tiers[-1] if self.tiers else None
return self.add_tier(coro, source=source, **kwargs)
|
[
"def",
"append_tier",
"(",
"self",
",",
"coro",
",",
"*",
"*",
"kwargs",
")",
":",
"source",
"=",
"self",
".",
"tiers",
"[",
"-",
"1",
"]",
"if",
"self",
".",
"tiers",
"else",
"None",
"return",
"self",
".",
"add_tier",
"(",
"coro",
",",
"source",
"=",
"source",
",",
"*",
"*",
"kwargs",
")"
] |
Implicitly source from the tail tier like a pipe.
|
[
"Implicitly",
"source",
"from",
"the",
"tail",
"tier",
"like",
"a",
"pipe",
"."
] |
e9dc10532a0357bc90ebaa2655b36822f9249673
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/iocell.py#L134-L137
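append_tier chains tiers like a shell pipe; continuing the produce/consume sketch from the add_tier note above:

cell = IOCell()
cell.append_tier(produce)   # first tier: no predecessor, so source=None
cell.append_tier(consume)   # implicitly sourced from produce's tier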
|
242,638
|
mayfield/cellulario
|
cellulario/iocell.py
|
IOCell.tier
|
def tier(self, *args, append=True, source=None, **kwargs):
""" Function decorator for a tier coroutine. If the function being
decorated is not already a coroutine function it will be wrapped. """
if len(args) == 1 and not kwargs and callable(args[0]):
raise TypeError('Uncalled decorator syntax is invalid')
def decorator(coro):
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
if append and source is None:
self.append_tier(coro, *args, **kwargs)
else:
self.add_tier(coro, *args, source=source, **kwargs)
return coro
return decorator
|
python
|
def tier(self, *args, append=True, source=None, **kwargs):
""" Function decorator for a tier coroutine. If the function being
decorated is not already a coroutine function it will be wrapped. """
if len(args) == 1 and not kwargs and callable(args[0]):
raise TypeError('Uncalled decorator syntax is invalid')
def decorator(coro):
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
if append and source is None:
self.append_tier(coro, *args, **kwargs)
else:
self.add_tier(coro, *args, source=source, **kwargs)
return coro
return decorator
|
[
"def",
"tier",
"(",
"self",
",",
"*",
"args",
",",
"append",
"=",
"True",
",",
"source",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"1",
"and",
"not",
"kwargs",
"and",
"callable",
"(",
"args",
"[",
"0",
"]",
")",
":",
"raise",
"TypeError",
"(",
"'Uncalled decorator syntax is invalid'",
")",
"def",
"decorator",
"(",
"coro",
")",
":",
"if",
"not",
"asyncio",
".",
"iscoroutinefunction",
"(",
"coro",
")",
":",
"coro",
"=",
"asyncio",
".",
"coroutine",
"(",
"coro",
")",
"if",
"append",
"and",
"source",
"is",
"None",
":",
"self",
".",
"append_tier",
"(",
"coro",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"self",
".",
"add_tier",
"(",
"coro",
",",
"*",
"args",
",",
"source",
"=",
"source",
",",
"*",
"*",
"kwargs",
")",
"return",
"coro",
"return",
"decorator"
] |
Function decorator for a tier coroutine. If the function being
decorated is not already a coroutine function it will be wrapped.
|
[
"Function",
"decorator",
"for",
"a",
"tier",
"coroutine",
".",
"If",
"the",
"function",
"being",
"decorated",
"is",
"not",
"already",
"a",
"coroutine",
"function",
"it",
"will",
"be",
"wrapped",
"."
] |
e9dc10532a0357bc90ebaa2655b36822f9249673
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/iocell.py#L145-L159
|
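Two details of the tier decorator are easy to miss: bare @cell.tier (without parentheses) is rejected explicitly, and plain functions are promoted to coroutine functions via asyncio.coroutine, which was deprecated in Python 3.8 and removed in 3.11. A sketch of the same guard and promotion on modern Python (the registry callback here is hypothetical, not the cellulario internals):

import asyncio
import functools

def make_tier_decorator(register):
    def tier(*args, **kwargs):
        if len(args) == 1 and not kwargs and callable(args[0]):
            raise TypeError('Uncalled decorator syntax is invalid')
        def decorator(func):
            if asyncio.iscoroutinefunction(func):
                coro = func
            else:
                @functools.wraps(func)
                async def coro(*a, **kw):  # promote plain functions
                    return func(*a, **kw)
            register(coro, **kwargs)
            return coro
        return decorator
    return tier

registered = []
tier = make_tier_decorator(lambda coro, **kw: registered.append(coro))

@tier()          # parentheses required, as in the original
def double(x):   # plain function, promoted to a coroutine function
    return x * 2

print(asyncio.run(registered[0](21)))  # 42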
242,639
|
mayfield/cellulario
|
cellulario/iocell.py
|
IOCell.cleaner
|
def cleaner(self, coro):
""" Function decorator for a cleanup coroutine. """
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
self.add_cleaner(coro)
return coro
|
python
|
def cleaner(self, coro):
""" Function decorator for a cleanup coroutine. """
if not asyncio.iscoroutinefunction(coro):
coro = asyncio.coroutine(coro)
self.add_cleaner(coro)
return coro
|
[
"def",
"cleaner",
"(",
"self",
",",
"coro",
")",
":",
"if",
"not",
"asyncio",
".",
"iscoroutinefunction",
"(",
"coro",
")",
":",
"coro",
"=",
"asyncio",
".",
"coroutine",
"(",
"coro",
")",
"self",
".",
"add_cleaner",
"(",
"coro",
")",
"return",
"coro"
] |
Function decorator for a cleanup coroutine.
|
[
"Function",
"decorator",
"for",
"a",
"cleanup",
"coroutine",
"."
] |
e9dc10532a0357bc90ebaa2655b36822f9249673
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/iocell.py#L161-L166
|
242,640
|
mayfield/cellulario
|
cellulario/iocell.py
|
IOCell.finalize
|
def finalize(self):
""" Look at our tiers and setup the final data flow. Once this is run
a cell can not be modified again. """
self.assertNotFinalized()
starters = []
finishers = []
for x in self.tiers:
if not x.sources:
starters.append(x)
if not x.dests:
finishers.append(x)
self.add_tier(self.output_feed, source=finishers)
self.coord.setup_wrap(self)
self.finalized = True
return starters
|
python
|
def finalize(self):
""" Look at our tiers and setup the final data flow. Once this is run
a cell can not be modified again. """
self.assertNotFinalized()
starters = []
finishers = []
for x in self.tiers:
if not x.sources:
starters.append(x)
if not x.dests:
finishers.append(x)
self.add_tier(self.output_feed, source=finishers)
self.coord.setup_wrap(self)
self.finalized = True
return starters
|
[
"def",
"finalize",
"(",
"self",
")",
":",
"self",
".",
"assertNotFinalized",
"(",
")",
"starters",
"=",
"[",
"]",
"finishers",
"=",
"[",
"]",
"for",
"x",
"in",
"self",
".",
"tiers",
":",
"if",
"not",
"x",
".",
"sources",
":",
"starters",
".",
"append",
"(",
"x",
")",
"if",
"not",
"x",
".",
"dests",
":",
"finishers",
".",
"append",
"(",
"x",
")",
"self",
".",
"add_tier",
"(",
"self",
".",
"output_feed",
",",
"source",
"=",
"finishers",
")",
"self",
".",
"coord",
".",
"setup_wrap",
"(",
"self",
")",
"self",
".",
"finalized",
"=",
"True",
"return",
"starters"
] |
Look at our tiers and setup the final data flow. Once this is run
a cell can not be modified again.
|
[
"Look",
"at",
"our",
"tiers",
"and",
"setup",
"the",
"final",
"data",
"flow",
".",
"Once",
"this",
"is",
"run",
"a",
"cell",
"can",
"not",
"be",
"modified",
"again",
"."
] |
e9dc10532a0357bc90ebaa2655b36822f9249673
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/iocell.py#L168-L182
|
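finalize classifies tiers by their position in the flow graph: tiers with no sources are entry points ("starters"), tiers with no destinations are exits ("finishers"), and the finishers are then wired into a synthetic output tier. The same classification over a plain structure (a sketch; Tier here is just a namedtuple):

from collections import namedtuple

Tier = namedtuple('Tier', 'name sources dests')

tiers = [
    Tier('fetch', sources=[], dests=['parse']),
    Tier('parse', sources=['fetch'], dests=['rank']),
    Tier('rank', sources=['parse'], dests=[]),
]

starters = [t.name for t in tiers if not t.sources]
finishers = [t.name for t in tiers if not t.dests]
print(starters, finishers)  # ['fetch'] ['rank']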
242,641
|
mayfield/cellulario
|
cellulario/iocell.py
|
IOCell.output
|
def output(self):
""" Produce a classic generator for this cell's final results. """
starters = self.finalize()
try:
yield from self._output(starters)
finally:
self.close()
|
python
|
def output(self):
""" Produce a classic generator for this cell's final results. """
starters = self.finalize()
try:
yield from self._output(starters)
finally:
self.close()
|
[
"def",
"output",
"(",
"self",
")",
":",
"starters",
"=",
"self",
".",
"finalize",
"(",
")",
"try",
":",
"yield",
"from",
"self",
".",
"_output",
"(",
"starters",
")",
"finally",
":",
"self",
".",
"close",
"(",
")"
] |
Produce a classic generator for this cell's final results.
|
[
"Produce",
"a",
"classic",
"generator",
"for",
"this",
"cell",
"s",
"final",
"results",
"."
] |
e9dc10532a0357bc90ebaa2655b36822f9249673
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/iocell.py#L200-L206
|
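output wraps the async pipeline in a classic generator and leans on try/finally so that close() runs even if the consumer abandons the generator early (Python raises GeneratorExit into the frame at that point). A tiny sketch of that guarantee in isolation:

def guarded_output(items):
    try:
        yield from items
    finally:
        print('cleaned up')  # runs on exhaustion, error, or early close

gen = guarded_output([1, 2, 3])
print(next(gen))  # 1
gen.close()       # consumer walks away early -> 'cleaned up'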
242,642
|
mayfield/cellulario
|
cellulario/iocell.py
|
IOCell.event_loop
|
def event_loop(self):
""" Run the event loop once. """
    if hasattr(self.loop, '_run_once'):
self.loop._thread_id = threading.get_ident()
try:
self.loop._run_once()
finally:
self.loop._thread_id = None
else:
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
|
python
|
def event_loop(self):
""" Run the event loop once. """
    if hasattr(self.loop, '_run_once'):
self.loop._thread_id = threading.get_ident()
try:
self.loop._run_once()
finally:
self.loop._thread_id = None
else:
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
|
[
"def",
"event_loop",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
".",
"loop",
",",
"'._run_once'",
")",
":",
"self",
".",
"loop",
".",
"_thread_id",
"=",
"threading",
".",
"get_ident",
"(",
")",
"try",
":",
"self",
".",
"loop",
".",
"_run_once",
"(",
")",
"finally",
":",
"self",
".",
"loop",
".",
"_thread_id",
"=",
"None",
"else",
":",
"self",
".",
"loop",
".",
"call_soon",
"(",
"self",
".",
"loop",
".",
"stop",
")",
"self",
".",
"loop",
".",
"run_forever",
"(",
")"
] |
Run the event loop once.
|
[
"Run",
"the",
"event",
"loop",
"once",
"."
] |
e9dc10532a0357bc90ebaa2655b36822f9249673
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/iocell.py#L208-L218
|
242,643
|
mayfield/cellulario
|
cellulario/iocell.py
|
IOCell.clean
|
def clean(self):
""" Run all of the cleaners added by the user. """
if self.cleaners:
yield from asyncio.wait([x() for x in self.cleaners],
loop=self.loop)
|
python
|
def clean(self):
""" Run all of the cleaners added by the user. """
if self.cleaners:
yield from asyncio.wait([x() for x in self.cleaners],
loop=self.loop)
|
[
"def",
"clean",
"(",
"self",
")",
":",
"if",
"self",
".",
"cleaners",
":",
"yield",
"from",
"asyncio",
".",
"wait",
"(",
"[",
"x",
"(",
")",
"for",
"x",
"in",
"self",
".",
"cleaners",
"]",
",",
"loop",
"=",
"self",
".",
"loop",
")"
] |
Run all of the cleaners added by the user.
|
[
"Run",
"all",
"of",
"the",
"cleaners",
"added",
"by",
"the",
"user",
"."
] |
e9dc10532a0357bc90ebaa2655b36822f9249673
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/iocell.py#L248-L252
|
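clean drives the cleaners with yield from asyncio.wait(..., loop=...), a pre-async/await style; the loop keyword was deprecated in Python 3.8 and removed in 3.10, and asyncio.wait stopped accepting bare coroutines in 3.11. An equivalent fan-out on modern asyncio (a sketch, not the cellulario implementation):

import asyncio

async def run_cleaners(cleaners):
    if cleaners:
        # Schedule every cleaner concurrently and wait for all of them.
        await asyncio.gather(*(cleaner() for cleaner in cleaners))

async def main():
    async def close_db():
        print('db closed')
    async def close_http():
        print('http closed')
    await run_cleaners([close_db, close_http])

asyncio.run(main())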
242,644
|
MacHu-GWU/pyknackhq-project
|
pyknackhq/client.py
|
Collection.get_html_values
|
def get_html_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using html data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
if field.key in pydict:
if recovery_name:
new_dict[field.name] = pydict[field.key]
else:
new_dict[field.key] = pydict[field.key]
return new_dict
|
python
|
def get_html_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using html data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
if field.key in pydict:
if recovery_name:
new_dict[field.name] = pydict[field.key]
else:
new_dict[field.key] = pydict[field.key]
return new_dict
|
[
"def",
"get_html_values",
"(",
"self",
",",
"pydict",
",",
"recovery_name",
"=",
"True",
")",
":",
"new_dict",
"=",
"{",
"\"id\"",
":",
"pydict",
"[",
"\"id\"",
"]",
"}",
"for",
"field",
"in",
"self",
":",
"if",
"field",
".",
"key",
"in",
"pydict",
":",
"if",
"recovery_name",
":",
"new_dict",
"[",
"field",
".",
"name",
"]",
"=",
"pydict",
"[",
"field",
".",
"key",
"]",
"else",
":",
"new_dict",
"[",
"field",
".",
"key",
"]",
"=",
"pydict",
"[",
"field",
".",
"key",
"]",
"return",
"new_dict"
] |
Convert naive get response data to human readable field name format.
using html data format.
|
[
"Convert",
"naive",
"get",
"response",
"data",
"to",
"human",
"readable",
"field",
"name",
"format",
".",
"using",
"html",
"data",
"format",
"."
] |
dd937f24d7b0a351ba3818eb746c31b29a8cc341
|
https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L50-L62
|
242,645
|
MacHu-GWU/pyknackhq-project
|
pyknackhq/client.py
|
Collection.get_raw_values
|
def get_raw_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using raw data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
raw_key = "%s_raw" % field.key
if raw_key in pydict:
if recovery_name:
new_dict[field.name] = pydict[raw_key]
else:
new_dict[field.key] = pydict[raw_key]
return new_dict
|
python
|
def get_raw_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using raw data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
raw_key = "%s_raw" % field.key
if raw_key in pydict:
if recovery_name:
new_dict[field.name] = pydict[raw_key]
else:
new_dict[field.key] = pydict[raw_key]
return new_dict
|
[
"def",
"get_raw_values",
"(",
"self",
",",
"pydict",
",",
"recovery_name",
"=",
"True",
")",
":",
"new_dict",
"=",
"{",
"\"id\"",
":",
"pydict",
"[",
"\"id\"",
"]",
"}",
"for",
"field",
"in",
"self",
":",
"raw_key",
"=",
"\"%s_raw\"",
"%",
"field",
".",
"key",
"if",
"raw_key",
"in",
"pydict",
":",
"if",
"recovery_name",
":",
"new_dict",
"[",
"field",
".",
"name",
"]",
"=",
"pydict",
"[",
"raw_key",
"]",
"else",
":",
"new_dict",
"[",
"field",
".",
"key",
"]",
"=",
"pydict",
"[",
"raw_key",
"]",
"return",
"new_dict"
] |
Convert naive get response data to human readable field name format.
using raw data format.
|
[
"Convert",
"naive",
"get",
"response",
"data",
"to",
"human",
"readable",
"field",
"name",
"format",
".",
"using",
"raw",
"data",
"format",
"."
] |
dd937f24d7b0a351ba3818eb746c31b29a8cc341
|
https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L64-L77
|
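get_html_values and get_raw_values above are identical except for the key they look up (field.key versus field.key + '_raw'). A hypothetical generic helper capturing both, with a plain dict standing in for the field collection:

def remap_record(record, key_to_name, raw=False, recovery_name=True):
    new_dict = {'id': record['id']}
    for key, name in key_to_name.items():
        lookup = key + '_raw' if raw else key
        if lookup in record:
            new_dict[name if recovery_name else key] = record[lookup]
    return new_dict

rec = {'id': '1', 'field_7': '<b>Ann</b>', 'field_7_raw': 'Ann'}
print(remap_record(rec, {'field_7': 'name'}))            # html value
print(remap_record(rec, {'field_7': 'name'}, raw=True))  # raw value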
242,646
|
MacHu-GWU/pyknackhq-project
|
pyknackhq/client.py
|
Collection.convert_values
|
def convert_values(self, pydict):
"""Convert knackhq data type instance to json friendly data.
"""
new_dict = dict()
for key, value in pydict.items():
try: # is it's BaseDataType Instance
new_dict[key] = value._data
except AttributeError:
new_dict[key] = value
return new_dict
|
python
|
def convert_values(self, pydict):
"""Convert knackhq data type instance to json friendly data.
"""
new_dict = dict()
for key, value in pydict.items():
try: # is it's BaseDataType Instance
new_dict[key] = value._data
except AttributeError:
new_dict[key] = value
return new_dict
|
[
"def",
"convert_values",
"(",
"self",
",",
"pydict",
")",
":",
"new_dict",
"=",
"dict",
"(",
")",
"for",
"key",
",",
"value",
"in",
"pydict",
".",
"items",
"(",
")",
":",
"try",
":",
"# is it's BaseDataType Instance",
"new_dict",
"[",
"key",
"]",
"=",
"value",
".",
"_data",
"except",
"AttributeError",
":",
"new_dict",
"[",
"key",
"]",
"=",
"value",
"return",
"new_dict"
] |
Convert knackhq data type instance to json friendly data.
|
[
"Convert",
"knackhq",
"data",
"type",
"instance",
"to",
"json",
"friendly",
"data",
"."
] |
dd937f24d7b0a351ba3818eb746c31b29a8cc341
|
https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L79-L88
|
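convert_values is a textbook use of the EAFP idiom: instead of type-checking for the wrapper class, it tries the ._data attribute and falls back on AttributeError. The same idiom in isolation (the names here are illustrative):

class Wrapped:
    def __init__(self, data):
        self._data = data

def to_plain(value):
    try:                    # assume it is a wrapper instance...
        return value._data
    except AttributeError:  # ...and fall back if it is not
        return value

print(to_plain(Wrapped(42)))  # 42
print(to_plain('plain'))      # plain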
242,647
|
MacHu-GWU/pyknackhq-project
|
pyknackhq/client.py
|
Collection.insert
|
def insert(self, data, using_name=True):
"""Insert one or many records.
:param data: dict type data or list of dict
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
    **Chinese documentation**
    Insert multiple records.
"""
if isinstance(data, list): # if iterable, insert one by one
for d in data:
self.insert_one(d, using_name=using_name)
else: # not iterable, execute insert_one
self.insert_one(data, using_name=using_name)
|
python
|
def insert(self, data, using_name=True):
"""Insert one or many records.
:param data: dict type data or list of dict
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
    **Chinese documentation**
    Insert multiple records.
"""
if isinstance(data, list): # if iterable, insert one by one
for d in data:
self.insert_one(d, using_name=using_name)
else: # not iterable, execute insert_one
self.insert_one(data, using_name=using_name)
|
[
"def",
"insert",
"(",
"self",
",",
"data",
",",
"using_name",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"# if iterable, insert one by one",
"for",
"d",
"in",
"data",
":",
"self",
".",
"insert_one",
"(",
"d",
",",
"using_name",
"=",
"using_name",
")",
"else",
":",
"# not iterable, execute insert_one",
"self",
".",
"insert_one",
"(",
"data",
",",
"using_name",
"=",
"using_name",
")"
] |
Insert one or many records.
:param data: dict type data or list of dict
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**Chinese documentation**
Insert multiple records.
|
[
"Insert",
"one",
"or",
"many",
"records",
"."
] |
dd937f24d7b0a351ba3818eb746c31b29a8cc341
|
https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L115-L130
|
242,648
|
MacHu-GWU/pyknackhq-project
|
pyknackhq/client.py
|
KnackhqAuth.get
|
def get(self, url, params=dict()):
"""Http get method wrapper, to support search.
"""
try:
res = requests.get(url, headers=self.headers, params=params)
return json.loads(res.text)
except Exception as e:
print(e)
return "error"
|
python
|
def get(self, url, params=dict()):
"""Http get method wrapper, to support search.
"""
try:
res = requests.get(url, headers=self.headers, params=params)
return json.loads(res.text)
except Exception as e:
print(e)
return "error"
|
[
"def",
"get",
"(",
"self",
",",
"url",
",",
"params",
"=",
"dict",
"(",
")",
")",
":",
"try",
":",
"res",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"params",
"=",
"params",
")",
"return",
"json",
".",
"loads",
"(",
"res",
".",
"text",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"return",
"\"error\""
] |
Http get method wrapper, to support search.
|
[
"Http",
"get",
"method",
"wrapper",
"to",
"support",
"search",
"."
] |
dd937f24d7b0a351ba3818eb746c31b29a8cc341
|
https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L330-L338
|
242,649
|
MacHu-GWU/pyknackhq-project
|
pyknackhq/client.py
|
KnackhqAuth.post
|
def post(self, url, data):
"""Http post method wrapper, to support insert.
"""
try:
res = requests.post(
url, headers=self.headers, data=json.dumps(data))
return json.loads(res.text)
except Exception as e:
print(e)
return "error"
|
python
|
def post(self, url, data):
"""Http post method wrapper, to support insert.
"""
try:
res = requests.post(
url, headers=self.headers, data=json.dumps(data))
return json.loads(res.text)
except Exception as e:
print(e)
return "error"
|
[
"def",
"post",
"(",
"self",
",",
"url",
",",
"data",
")",
":",
"try",
":",
"res",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"headers",
"=",
"self",
".",
"headers",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
")",
"return",
"json",
".",
"loads",
"(",
"res",
".",
"text",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"return",
"\"error\""
] |
Http post method wrapper, to support insert.
|
[
"Http",
"post",
"method",
"wrapper",
"to",
"support",
"insert",
"."
] |
dd937f24d7b0a351ba3818eb746c31b29a8cc341
|
https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L340-L349
|
242,650
|
MacHu-GWU/pyknackhq-project
|
pyknackhq/client.py
|
KnackhqAuth.delete
|
def delete(self, url):
"""Http delete method wrapper, to support delete.
"""
try:
res = requests.delete(url, headers=self.headers)
return json.loads(res.text)
except Exception as e:
print(e)
return "error"
|
python
|
def delete(self, url):
"""Http delete method wrapper, to support delete.
"""
try:
res = requests.delete(url, headers=self.headers)
return json.loads(res.text)
except Exception as e:
print(e)
return "error"
|
[
"def",
"delete",
"(",
"self",
",",
"url",
")",
":",
"try",
":",
"res",
"=",
"requests",
".",
"delete",
"(",
"url",
",",
"headers",
"=",
"self",
".",
"headers",
")",
"return",
"json",
".",
"loads",
"(",
"res",
".",
"text",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"return",
"\"error\""
] |
Http delete method wrapper, to support delete.
|
[
"Http",
"delete",
"method",
"wrapper",
"to",
"support",
"delete",
"."
] |
dd937f24d7b0a351ba3818eb746c31b29a8cc341
|
https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L362-L370
|
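The three HTTP wrappers above share two fragile habits: a broad except Exception that prints and returns the literal string "error" (so callers cannot tell a failure from a response), and json.loads(res.text) where requests can decode directly. get's params=dict() is also a mutable default argument, harmless here but a classic pitfall. A sturdier sketch of the same wrapper idea (the function name and timeout are illustrative choices, not part of the original client):

import requests

def request_json(method, url, headers=None, timeout=10, **kwargs):
    # Raise on transport and HTTP errors instead of masking them,
    # and let requests handle the JSON decoding.
    resp = requests.request(method, url, headers=headers,
                            timeout=timeout, **kwargs)
    resp.raise_for_status()
    return resp.json()

# usage: request_json('GET', url, params={...})
#        request_json('POST', url, json={...})
#        request_json('DELETE', url)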
242,651
|
cdeboever3/cdpybio
|
cdpybio/express.py
|
combine_express_output
|
def combine_express_output(fnL,
column='eff_counts',
names=None,
tg=None,
define_sample_name=None,
debug=False):
"""
Combine eXpress output files
Parameters:
-----------
fnL : list of strs of filenames
List of paths to results.xprs files.
column : string
Column name of eXpress output to combine.
names : list of strings
Names to use for columns of output files. Overrides define_sample_name
if provided.
tg : string
File with transcript-to-gene mapping. Transcripts should be in first
column and genes in second column.
define_sample_name : function that takes string as input
Function mapping filename to sample name (or basename). For instance,
you may have the basename in the path and use a regex to extract it.
The basenames will be used as the column names. If this is not provided,
the columns will be named as the input files.
debug : boolean
Passing True will trigger any debugging statements.
"""
if names is not None:
assert len(names) == len(fnL)
if define_sample_name is None:
define_sample_name = lambda x: x
transcriptL = []
for i,fn in enumerate(fnL):
if names is not None:
bn = names[i]
else:
bn = define_sample_name(fn)
tDF = pd.read_table(fn, index_col=1, header=0)
se = tDF[column]
se.name = bn
transcriptL.append(se)
transcriptDF = pd.DataFrame(transcriptL).T
transcriptDF.index.name = 'transcript'
# There should not be any missing values.
if transcriptDF.shape != transcriptDF.dropna().shape:
sys.stderr.write('''Missing values in eXpress output. Check that the
same reference was used for all output files.\n''')
sys.exit(1)
if tg is not None:
tgDF = pd.read_table(tg,
index_col=0,
header=None,
names=['gene_id'])
import copy
geneDF = copy.deepcopy(transcriptDF)
geneDF['gene'] = tgDF.ix[geneDF.index]
geneDF = geneDF.groupby('gene').sum()
return transcriptDF, geneDF
else:
return transcriptDF, None
|
python
|
def combine_express_output(fnL,
column='eff_counts',
names=None,
tg=None,
define_sample_name=None,
debug=False):
"""
Combine eXpress output files
Parameters:
-----------
fnL : list of strs of filenames
List of paths to results.xprs files.
column : string
Column name of eXpress output to combine.
names : list of strings
Names to use for columns of output files. Overrides define_sample_name
if provided.
tg : string
File with transcript-to-gene mapping. Transcripts should be in first
column and genes in second column.
define_sample_name : function that takes string as input
Function mapping filename to sample name (or basename). For instance,
you may have the basename in the path and use a regex to extract it.
The basenames will be used as the column names. If this is not provided,
the columns will be named as the input files.
debug : boolean
Passing True will trigger any debugging statements.
"""
if names is not None:
assert len(names) == len(fnL)
if define_sample_name is None:
define_sample_name = lambda x: x
transcriptL = []
for i,fn in enumerate(fnL):
if names is not None:
bn = names[i]
else:
bn = define_sample_name(fn)
tDF = pd.read_table(fn, index_col=1, header=0)
se = tDF[column]
se.name = bn
transcriptL.append(se)
transcriptDF = pd.DataFrame(transcriptL).T
transcriptDF.index.name = 'transcript'
# There should not be any missing values.
if transcriptDF.shape != transcriptDF.dropna().shape:
sys.stderr.write('''Missing values in eXpress output. Check that the
same reference was used for all output files.\n''')
sys.exit(1)
if tg is not None:
tgDF = pd.read_table(tg,
index_col=0,
header=None,
names=['gene_id'])
import copy
geneDF = copy.deepcopy(transcriptDF)
geneDF['gene'] = tgDF.ix[geneDF.index]
geneDF = geneDF.groupby('gene').sum()
return transcriptDF, geneDF
else:
return transcriptDF, None
|
[
"def",
"combine_express_output",
"(",
"fnL",
",",
"column",
"=",
"'eff_counts'",
",",
"names",
"=",
"None",
",",
"tg",
"=",
"None",
",",
"define_sample_name",
"=",
"None",
",",
"debug",
"=",
"False",
")",
":",
"if",
"names",
"is",
"not",
"None",
":",
"assert",
"len",
"(",
"names",
")",
"==",
"len",
"(",
"fnL",
")",
"if",
"define_sample_name",
"is",
"None",
":",
"define_sample_name",
"=",
"lambda",
"x",
":",
"x",
"transcriptL",
"=",
"[",
"]",
"for",
"i",
",",
"fn",
"in",
"enumerate",
"(",
"fnL",
")",
":",
"if",
"names",
"is",
"not",
"None",
":",
"bn",
"=",
"names",
"[",
"i",
"]",
"else",
":",
"bn",
"=",
"define_sample_name",
"(",
"fn",
")",
"tDF",
"=",
"pd",
".",
"read_table",
"(",
"fn",
",",
"index_col",
"=",
"1",
",",
"header",
"=",
"0",
")",
"se",
"=",
"tDF",
"[",
"column",
"]",
"se",
".",
"name",
"=",
"bn",
"transcriptL",
".",
"append",
"(",
"se",
")",
"transcriptDF",
"=",
"pd",
".",
"DataFrame",
"(",
"transcriptL",
")",
".",
"T",
"transcriptDF",
".",
"index",
".",
"name",
"=",
"'transcript'",
"# There should not be any missing values.",
"if",
"transcriptDF",
".",
"shape",
"!=",
"transcriptDF",
".",
"dropna",
"(",
")",
".",
"shape",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"'''Missing values in eXpress output. Check that the\n same reference was used for all output files.\\n'''",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"tg",
"is",
"not",
"None",
":",
"tgDF",
"=",
"pd",
".",
"read_table",
"(",
"tg",
",",
"index_col",
"=",
"0",
",",
"header",
"=",
"None",
",",
"names",
"=",
"[",
"'gene_id'",
"]",
")",
"import",
"copy",
"geneDF",
"=",
"copy",
".",
"deepcopy",
"(",
"transcriptDF",
")",
"geneDF",
"[",
"'gene'",
"]",
"=",
"tgDF",
".",
"ix",
"[",
"geneDF",
".",
"index",
"]",
"geneDF",
"=",
"geneDF",
".",
"groupby",
"(",
"'gene'",
")",
".",
"sum",
"(",
")",
"return",
"transcriptDF",
",",
"geneDF",
"else",
":",
"return",
"transcriptDF",
",",
"None"
] |
Combine eXpress output files
Parameters:
-----------
fnL : list of strs of filenames
List of paths to results.xprs files.
column : string
Column name of eXpress output to combine.
names : list of strings
Names to use for columns of output files. Overrides define_sample_name
if provided.
tg : string
File with transcript-to-gene mapping. Transcripts should be in first
column and genes in second column.
define_sample_name : function that takes string as input
Function mapping filename to sample name (or basename). For instance,
you may have the basename in the path and use a regex to extract it.
The basenames will be used as the column names. If this is not provided,
the columns will be named as the input files.
debug : boolean
Passing True will trigger any debugging statements.
|
[
"Combine",
"eXpress",
"output",
"files"
] |
38efdf0e11d01bc00a135921cb91a19c03db5d5c
|
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/express.py#L5-L75
|
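The gene-level rollup in combine_express_output goes through DataFrame.ix, which was deprecated in pandas 0.20 and later removed; .loc (or Series.map) expresses the same transcript-to-gene join today. A minimal sketch on toy data:

import pandas as pd

transcriptDF = pd.DataFrame(
    {'sample1': [1.0, 2.0, 3.0]},
    index=pd.Index(['t1', 't2', 't3'], name='transcript'),
)
tgDF = pd.Series({'t1': 'geneA', 't2': 'geneA', 't3': 'geneB'},
                 name='gene_id')

geneDF = transcriptDF.copy()
geneDF['gene'] = tgDF.loc[geneDF.index].values  # .loc replaces .ix
geneDF = geneDF.groupby('gene').sum()
print(geneDF)  # geneA: 3.0, geneB: 3.0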
242,652
|
MacHu-GWU/angora-project
|
angora/dataIO/textfile.py
|
read
|
def read(path, encoding="utf-8"):
"""Auto-decoding string reader.
Usage::
>>> from angora.dataIO import textfile
or
>>> from angora.dataIO import *
>>> textfile.read("test.txt")
"""
with open(path, "rb") as f:
content = f.read()
try:
text = content.decode(encoding)
except:
res = chardet.detect(content)
text = content.decode(res["encoding"])
return text
|
python
|
def read(path, encoding="utf-8"):
"""Auto-decoding string reader.
Usage::
>>> from angora.dataIO import textfile
or
>>> from angora.dataIO import *
>>> textfile.read("test.txt")
"""
with open(path, "rb") as f:
content = f.read()
try:
text = content.decode(encoding)
except:
res = chardet.detect(content)
text = content.decode(res["encoding"])
return text
|
[
"def",
"read",
"(",
"path",
",",
"encoding",
"=",
"\"utf-8\"",
")",
":",
"with",
"open",
"(",
"path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"content",
"=",
"f",
".",
"read",
"(",
")",
"try",
":",
"text",
"=",
"content",
".",
"decode",
"(",
"encoding",
")",
"except",
":",
"res",
"=",
"chardet",
".",
"detect",
"(",
"content",
")",
"text",
"=",
"content",
".",
"decode",
"(",
"res",
"[",
"\"encoding\"",
"]",
")",
"return",
"text"
] |
Auto-decoding string reader.
Usage::
>>> from angora.dataIO import textfile
or
>>> from angora.dataIO import *
>>> textfile.read("test.txt")
|
[
"Auto",
"-",
"decoding",
"string",
"reader",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/dataIO/textfile.py#L35-L52
|
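The fallback in read is a sensible pattern, but the bare except: also swallows LookupError from a misspelled codec name (and even KeyboardInterrupt). A variant that narrows the handler to the failure it actually recovers from, with the same chardet fallback (read_text is a hypothetical name):

import chardet

def read_text(path, encoding='utf-8'):
    with open(path, 'rb') as f:
        content = f.read()
    try:
        return content.decode(encoding)
    except UnicodeDecodeError:
        # The preferred codec failed; detect and retry.
        guess = chardet.detect(content)
        return content.decode(guess['encoding'])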
242,653
|
MacHu-GWU/angora-project
|
angora/dataIO/textfile.py
|
write
|
def write(text, path):
"""Writer text to file with utf-8 encoding.
Usage::
>>> from angora.dataIO import textfile
or
>>> from angora.dataIO import *
>>> textfile.write("hello world!", "test.txt")
"""
with open(path, "wb") as f:
f.write(text.encode("utf-8"))
|
python
|
def write(text, path):
"""Writer text to file with utf-8 encoding.
Usage::
>>> from angora.dataIO import textfile
or
>>> from angora.dataIO import *
>>> textfile.write("hello world!", "test.txt")
"""
with open(path, "wb") as f:
f.write(text.encode("utf-8"))
|
[
"def",
"write",
"(",
"text",
",",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"\"wb\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"text",
".",
"encode",
"(",
"\"utf-8\"",
")",
")"
] |
Write text to file with utf-8 encoding.
Usage::
>>> from angora.dataIO import textfile
or
>>> from angora.dataIO import *
>>> textfile.write("hello world!", "test.txt")
|
[
"Writer",
"text",
"to",
"file",
"with",
"utf",
"-",
"8",
"encoding",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/dataIO/textfile.py#L54-L66
|
242,654
|
MacHu-GWU/angora-project
|
angora/bot/anjian.py
|
Script._delay
|
def _delay(self, ms):
"""Implement default delay mechanism.
"""
if ms:
self.Delay(ms)
else:
if self.default_delay:
self.Delay(self.default_delay)
|
python
|
def _delay(self, ms):
"""Implement default delay mechanism.
"""
if ms:
self.Delay(ms)
else:
if self.default_delay:
self.Delay(self.default_delay)
|
[
"def",
"_delay",
"(",
"self",
",",
"ms",
")",
":",
"if",
"ms",
":",
"self",
".",
"Delay",
"(",
"ms",
")",
"else",
":",
"if",
"self",
".",
"default_delay",
":",
"self",
".",
"Delay",
"(",
"self",
".",
"default_delay",
")"
] |
Implement default delay mechanism.
|
[
"Implement",
"default",
"delay",
"mechanism",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/bot/anjian.py#L91-L98
|
242,655
|
MacHu-GWU/angora-project
|
angora/bot/anjian.py
|
Script.AltTab
|
def AltTab(self, n=1, delay=0):
"""Press down Alt, then press n times Tab, then release Alt.
"""
self._delay(delay)
self.add(Command("KeyDown", 'KeyDown "%s", %s' % (BoardKey.Alt, 1)))
for i in range(n):
self.add(Command("KeyPress", 'KeyPress "%s", %s' % (BoardKey.Tab, 1)))
self.add(Command("KeyUp", 'KeyUp "%s", %s' % (BoardKey.Alt, 1)))
|
python
|
def AltTab(self, n=1, delay=0):
"""Press down Alt, then press n times Tab, then release Alt.
"""
self._delay(delay)
self.add(Command("KeyDown", 'KeyDown "%s", %s' % (BoardKey.Alt, 1)))
for i in range(n):
self.add(Command("KeyPress", 'KeyPress "%s", %s' % (BoardKey.Tab, 1)))
self.add(Command("KeyUp", 'KeyUp "%s", %s' % (BoardKey.Alt, 1)))
|
[
"def",
"AltTab",
"(",
"self",
",",
"n",
"=",
"1",
",",
"delay",
"=",
"0",
")",
":",
"self",
".",
"_delay",
"(",
"delay",
")",
"self",
".",
"add",
"(",
"Command",
"(",
"\"KeyDown\"",
",",
"'KeyDown \"%s\", %s'",
"%",
"(",
"BoardKey",
".",
"Alt",
",",
"1",
")",
")",
")",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"self",
".",
"add",
"(",
"Command",
"(",
"\"KeyPress\"",
",",
"'KeyPress \"%s\", %s'",
"%",
"(",
"BoardKey",
".",
"Tab",
",",
"1",
")",
")",
")",
"self",
".",
"add",
"(",
"Command",
"(",
"\"KeyUp\"",
",",
"'KeyUp \"%s\", %s'",
"%",
"(",
"BoardKey",
".",
"Alt",
",",
"1",
")",
")",
")"
] |
Press down Alt, then press n times Tab, then release Alt.
|
[
"Press",
"down",
"Alt",
"then",
"press",
"n",
"times",
"Tab",
"then",
"release",
"Alt",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/bot/anjian.py#L185-L192
|
242,656
|
MacHu-GWU/angora-project
|
angora/bot/anjian.py
|
Script.Ctrl_C
|
def Ctrl_C(self, delay=0):
"""Ctrl + C shortcut.
"""
self._delay(delay)
self.add(Command("KeyDown", 'KeyDown "%s", %s' % (BoardKey.Ctrl, 1)))
self.add(Command("KeyPress", 'KeyPress "%s", %s' % (BoardKey.C, 1)))
self.add(Command("KeyUp", 'KeyUp "%s", %s' % (BoardKey.Ctrl, 1)))
|
python
|
def Ctrl_C(self, delay=0):
"""Ctrl + C shortcut.
"""
self._delay(delay)
self.add(Command("KeyDown", 'KeyDown "%s", %s' % (BoardKey.Ctrl, 1)))
self.add(Command("KeyPress", 'KeyPress "%s", %s' % (BoardKey.C, 1)))
self.add(Command("KeyUp", 'KeyUp "%s", %s' % (BoardKey.Ctrl, 1)))
|
[
"def",
"Ctrl_C",
"(",
"self",
",",
"delay",
"=",
"0",
")",
":",
"self",
".",
"_delay",
"(",
"delay",
")",
"self",
".",
"add",
"(",
"Command",
"(",
"\"KeyDown\"",
",",
"'KeyDown \"%s\", %s'",
"%",
"(",
"BoardKey",
".",
"Ctrl",
",",
"1",
")",
")",
")",
"self",
".",
"add",
"(",
"Command",
"(",
"\"KeyPress\"",
",",
"'KeyPress \"%s\", %s'",
"%",
"(",
"BoardKey",
".",
"C",
",",
"1",
")",
")",
")",
"self",
".",
"add",
"(",
"Command",
"(",
"\"KeyUp\"",
",",
"'KeyUp \"%s\", %s'",
"%",
"(",
"BoardKey",
".",
"Ctrl",
",",
"1",
")",
")",
")"
] |
Ctrl + C shortcut.
|
[
"Ctrl",
"+",
"C",
"shortcut",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/bot/anjian.py#L194-L200
|
242,657
|
MacHu-GWU/angora-project
|
angora/bot/anjian.py
|
Script.Ctrl_V
|
def Ctrl_V(self, delay=0):
"""Ctrl + V shortcut.
"""
self._delay(delay)
self.add(Command("KeyDown", 'KeyDown "%s", %s' % (BoardKey.Ctrl, 1)))
self.add(Command("KeyPress", 'KeyPress "%s", %s' % (BoardKey.V, 1)))
self.add(Command("KeyUp", 'KeyUp "%s", %s' % (BoardKey.Ctrl, 1)))
|
python
|
def Ctrl_V(self, delay=0):
"""Ctrl + V shortcut.
"""
self._delay(delay)
self.add(Command("KeyDown", 'KeyDown "%s", %s' % (BoardKey.Ctrl, 1)))
self.add(Command("KeyPress", 'KeyPress "%s", %s' % (BoardKey.V, 1)))
self.add(Command("KeyUp", 'KeyUp "%s", %s' % (BoardKey.Ctrl, 1)))
|
[
"def",
"Ctrl_V",
"(",
"self",
",",
"delay",
"=",
"0",
")",
":",
"self",
".",
"_delay",
"(",
"delay",
")",
"self",
".",
"add",
"(",
"Command",
"(",
"\"KeyDown\"",
",",
"'KeyDown \"%s\", %s'",
"%",
"(",
"BoardKey",
".",
"Ctrl",
",",
"1",
")",
")",
")",
"self",
".",
"add",
"(",
"Command",
"(",
"\"KeyPress\"",
",",
"'KeyPress \"%s\", %s'",
"%",
"(",
"BoardKey",
".",
"V",
",",
"1",
")",
")",
")",
"self",
".",
"add",
"(",
"Command",
"(",
"\"KeyUp\"",
",",
"'KeyUp \"%s\", %s'",
"%",
"(",
"BoardKey",
".",
"Ctrl",
",",
"1",
")",
")",
")"
] |
Ctrl + V shortcut.
|
[
"Ctrl",
"+",
"V",
"shortcut",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/bot/anjian.py#L202-L208
|
242,658
|
MacHu-GWU/angora-project
|
angora/bot/anjian.py
|
Script.Ctrl_W
|
def Ctrl_W(self, delay=0):
"""Ctrl + W shortcut.
"""
self._delay(delay)
self.add(Command("KeyDown", 'KeyDown "%s", %s' % (BoardKey.Ctrl, 1)))
self.add(Command("KeyPress", 'KeyPress "%s", %s' % (BoardKey.W, 1)))
self.add(Command("KeyUp", 'KeyUp "%s", %s' % (BoardKey.Ctrl, 1)))
|
python
|
def Ctrl_W(self, delay=0):
"""Ctrl + W shortcut.
"""
self._delay(delay)
self.add(Command("KeyDown", 'KeyDown "%s", %s' % (BoardKey.Ctrl, 1)))
self.add(Command("KeyPress", 'KeyPress "%s", %s' % (BoardKey.W, 1)))
self.add(Command("KeyUp", 'KeyUp "%s", %s' % (BoardKey.Ctrl, 1)))
|
[
"def",
"Ctrl_W",
"(",
"self",
",",
"delay",
"=",
"0",
")",
":",
"self",
".",
"_delay",
"(",
"delay",
")",
"self",
".",
"add",
"(",
"Command",
"(",
"\"KeyDown\"",
",",
"'KeyDown \"%s\", %s'",
"%",
"(",
"BoardKey",
".",
"Ctrl",
",",
"1",
")",
")",
")",
"self",
".",
"add",
"(",
"Command",
"(",
"\"KeyPress\"",
",",
"'KeyPress \"%s\", %s'",
"%",
"(",
"BoardKey",
".",
"W",
",",
"1",
")",
")",
")",
"self",
".",
"add",
"(",
"Command",
"(",
"\"KeyUp\"",
",",
"'KeyUp \"%s\", %s'",
"%",
"(",
"BoardKey",
".",
"Ctrl",
",",
"1",
")",
")",
")"
] |
Ctrl + W shortcut.
|
[
"Ctrl",
"+",
"W",
"shortcut",
"."
] |
689a60da51cd88680ddbe26e28dbe81e6b01d275
|
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/bot/anjian.py#L210-L216
|
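Ctrl_C, Ctrl_V and Ctrl_W are clones that differ only in the pressed key, which invites a small factory. A standalone sketch of the idea; the command strings mirror the ones emitted above, while BoardKey here is a stand-in for the real constants:

class BoardKey:
    Ctrl, C, V, W = 'Ctrl', 'C', 'V', 'W'  # illustrative values

def ctrl_combo(emit, key):
    # One KeyDown/KeyPress/KeyUp triple instead of three cloned methods.
    emit('KeyDown "%s", %s' % (BoardKey.Ctrl, 1))
    emit('KeyPress "%s", %s' % (key, 1))
    emit('KeyUp "%s", %s' % (BoardKey.Ctrl, 1))

commands = []
ctrl_combo(commands.append, BoardKey.V)
print(commands)  # the Ctrl+V sequence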
242,659
|
jeffrimko/Auxly
|
lib/auxly/stringy.py
|
randomize
|
def randomize(length=6, choices=None):
"""Returns a random string of the given length."""
if type(choices) == str:
choices = list(choices)
choices = choices or ascii_lowercase
return "".join(choice(choices) for _ in range(length))
|
python
|
def randomize(length=6, choices=None):
"""Returns a random string of the given length."""
if type(choices) == str:
choices = list(choices)
choices = choices or ascii_lowercase
return "".join(choice(choices) for _ in range(length))
|
[
"def",
"randomize",
"(",
"length",
"=",
"6",
",",
"choices",
"=",
"None",
")",
":",
"if",
"type",
"(",
"choices",
")",
"==",
"str",
":",
"choices",
"=",
"list",
"(",
"choices",
")",
"choices",
"=",
"choices",
"or",
"ascii_lowercase",
"return",
"\"\"",
".",
"join",
"(",
"choice",
"(",
"choices",
")",
"for",
"_",
"in",
"range",
"(",
"length",
")",
")"
] |
Returns a random string of the given length.
|
[
"Returns",
"a",
"random",
"string",
"of",
"the",
"given",
"length",
"."
] |
5aae876bcb6ca117c81d904f9455764cdc78cd48
|
https://github.com/jeffrimko/Auxly/blob/5aae876bcb6ca117c81d904f9455764cdc78cd48/lib/auxly/stringy.py#L22-L27
|
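Two nits in randomize: type(choices) == str defeats str subclasses where isinstance would not, and random.choice works on strings directly, so the list() conversion is optional. The same function spelled out with explicit stdlib imports (a sketch):

import random
import string

def randomize(length=6, choices=None):
    if isinstance(choices, str):
        choices = list(choices)
    choices = choices or string.ascii_lowercase
    return ''.join(random.choice(choices) for _ in range(length))

print(randomize())         # e.g. 'qhzjme'
print(randomize(4, '01'))  # e.g. '0110'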
242,660
|
inveniosoftware-contrib/record-recommender
|
record_recommender/recommender.py
|
calc_scores_for_node
|
def calc_scores_for_node(G, node, depth_limit=22,
number_of_recommendations=None, impact_mode=10):
"""Calculate the score of multiple records."""
n, w, dep, _ = dfs_edges(G, node, depth_limit, "Record")
count_total_ways = len(n)
# print "Number of paths {}".format(len(n))
if impact_mode == 0:
impact_div = 12
elif impact_mode == 1:
impact_div = 1000
elif impact_mode == 2:
impact_div = 100
elif impact_mode == 10:
impact_div = count_total_ways
elif impact_mode == 11:
impact_div = count_total_ways/2
d_ = {'Nodes': n, 'Scores': w, 'Depth': dep}
d = pd.DataFrame(data=d_)
del n, w, dep, d_
n, w, dep = None, None, None
gc.collect()
nodes = array('I')
weight_high = array('f')
weight_new = array('f')
ways = array('I')
nodes_with_weight = d.groupby('Nodes')
del d
gc.collect()
# print "Number nodes {}".format(len(nodes_with_weight))
for node, end_nodes in nodes_with_weight:
nodes.append(node)
new_score, highest_score, number_of_paths = \
calc_weight_of_multiple_paths(end_nodes, impact_div)
weight_high.append(highest_score)
weight_new.append(new_score)
ways.append(number_of_paths)
new_weights_d = {'Node': nodes, 'Score_Highest': weight_high,
'Score': weight_new, 'Paths': ways}
new_weights = pd.DataFrame(data=new_weights_d)
del new_weights_d, nodes, weight_high, weight_new, ways
gc.collect()
# Numpy sort by score
new_weights = new_weights.sort_values(by='Score', ascending=False)
new_weights = new_weights[:number_of_recommendations]
return new_weights
|
python
|
def calc_scores_for_node(G, node, depth_limit=22,
number_of_recommendations=None, impact_mode=10):
"""Calculate the score of multiple records."""
n, w, dep, _ = dfs_edges(G, node, depth_limit, "Record")
count_total_ways = len(n)
# print "Number of paths {}".format(len(n))
if impact_mode == 0:
impact_div = 12
elif impact_mode == 1:
impact_div = 1000
elif impact_mode == 2:
impact_div = 100
elif impact_mode == 10:
impact_div = count_total_ways
elif impact_mode == 11:
impact_div = count_total_ways/2
d_ = {'Nodes': n, 'Scores': w, 'Depth': dep}
d = pd.DataFrame(data=d_)
del n, w, dep, d_
n, w, dep = None, None, None
gc.collect()
nodes = array('I')
weight_high = array('f')
weight_new = array('f')
ways = array('I')
nodes_with_weight = d.groupby('Nodes')
del d
gc.collect()
# print "Number nodes {}".format(len(nodes_with_weight))
for node, end_nodes in nodes_with_weight:
nodes.append(node)
new_score, highest_score, number_of_paths = \
calc_weight_of_multiple_paths(end_nodes, impact_div)
weight_high.append(highest_score)
weight_new.append(new_score)
ways.append(number_of_paths)
new_weights_d = {'Node': nodes, 'Score_Highest': weight_high,
'Score': weight_new, 'Paths': ways}
new_weights = pd.DataFrame(data=new_weights_d)
del new_weights_d, nodes, weight_high, weight_new, ways
gc.collect()
# Numpy sort by score
new_weights = new_weights.sort_values(by='Score', ascending=False)
new_weights = new_weights[:number_of_recommendations]
return new_weights
|
[
"def",
"calc_scores_for_node",
"(",
"G",
",",
"node",
",",
"depth_limit",
"=",
"22",
",",
"number_of_recommendations",
"=",
"None",
",",
"impact_mode",
"=",
"10",
")",
":",
"n",
",",
"w",
",",
"dep",
",",
"_",
"=",
"dfs_edges",
"(",
"G",
",",
"node",
",",
"depth_limit",
",",
"\"Record\"",
")",
"count_total_ways",
"=",
"len",
"(",
"n",
")",
"# print \"Number of paths {}\".format(len(n))",
"if",
"impact_mode",
"==",
"0",
":",
"impact_div",
"=",
"12",
"elif",
"impact_mode",
"==",
"1",
":",
"impact_div",
"=",
"1000",
"elif",
"impact_mode",
"==",
"2",
":",
"impact_div",
"=",
"100",
"elif",
"impact_mode",
"==",
"10",
":",
"impact_div",
"=",
"count_total_ways",
"elif",
"impact_mode",
"==",
"11",
":",
"impact_div",
"=",
"count_total_ways",
"/",
"2",
"d_",
"=",
"{",
"'Nodes'",
":",
"n",
",",
"'Scores'",
":",
"w",
",",
"'Depth'",
":",
"dep",
"}",
"d",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
"=",
"d_",
")",
"del",
"n",
",",
"w",
",",
"dep",
",",
"d_",
"n",
",",
"w",
",",
"dep",
"=",
"None",
",",
"None",
",",
"None",
"gc",
".",
"collect",
"(",
")",
"nodes",
"=",
"array",
"(",
"'I'",
")",
"weight_high",
"=",
"array",
"(",
"'f'",
")",
"weight_new",
"=",
"array",
"(",
"'f'",
")",
"ways",
"=",
"array",
"(",
"'I'",
")",
"nodes_with_weight",
"=",
"d",
".",
"groupby",
"(",
"'Nodes'",
")",
"del",
"d",
"gc",
".",
"collect",
"(",
")",
"# print \"Number nodes {}\".format(len(nodes_with_weight))",
"for",
"node",
",",
"end_nodes",
"in",
"nodes_with_weight",
":",
"nodes",
".",
"append",
"(",
"node",
")",
"new_score",
",",
"highest_score",
",",
"number_of_paths",
"=",
"calc_weight_of_multiple_paths",
"(",
"end_nodes",
",",
"impact_div",
")",
"weight_high",
".",
"append",
"(",
"highest_score",
")",
"weight_new",
".",
"append",
"(",
"new_score",
")",
"ways",
".",
"append",
"(",
"number_of_paths",
")",
"new_weights_d",
"=",
"{",
"'Node'",
":",
"nodes",
",",
"'Score_Highest'",
":",
"weight_high",
",",
"'Score'",
":",
"weight_new",
",",
"'Paths'",
":",
"ways",
"}",
"new_weights",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
"=",
"new_weights_d",
")",
"del",
"new_weights_d",
",",
"nodes",
",",
"weight_high",
",",
"weight_new",
",",
"ways",
"gc",
".",
"collect",
"(",
")",
"# Numpy sort by score",
"new_weights",
"=",
"new_weights",
".",
"sort_values",
"(",
"by",
"=",
"'Score'",
",",
"ascending",
"=",
"False",
")",
"new_weights",
"=",
"new_weights",
"[",
":",
"number_of_recommendations",
"]",
"return",
"new_weights"
] |
Calculate the score of multiple records.
|
[
"Calculate",
"the",
"score",
"of",
"multiple",
"records",
"."
] |
07f71e783369e6373218b5e6ba0bf15901e9251a
|
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/recommender.py#L88-L137
|
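The impact_mode ladder in calc_scores_for_node maps a flag to a divisor, and an unrecognised mode silently leaves impact_div unbound, which only surfaces later as a NameError. A table-driven equivalent fails fast (a sketch; values copied from the branches above):

def impact_divisor(impact_mode, count_total_ways):
    table = {
        0: 12,
        1: 1000,
        2: 100,
        10: count_total_ways,
        11: count_total_ways / 2,
    }
    try:
        return table[impact_mode]
    except KeyError:
        raise ValueError('unknown impact_mode: %r' % impact_mode)

print(impact_divisor(10, 40))  # 40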
242,661
|
inveniosoftware-contrib/record-recommender
|
record_recommender/recommender.py
|
dfs_edges
|
def dfs_edges(G, start, depth_limit=1, get_only=True, get_path=False):
"""Deepest first search."""
depth_limit = depth_limit - 1
# creates unsigned int array (2 Byte)
output_nodes = array('L')
output_depth = array('I')
# creates float array (4 Byte)
output_weights = array('f')
apath = []
if G.node.get(start) is None:
# raise KeyError('Start node not found')
print('Start node not found')
return output_nodes, output_weights, output_depth, apath
visited = set()
visited.add(start)
# Save the start node with its data to the stack
stack = [(start, G.edges_iter(start, data=True), 1.0)]
visited.add(start)
while stack:
if len(output_nodes) > 80100100:
print("To many nodes for: {}".format(start))
del output_nodes
del output_weights
del output_depth
output_nodes = array('L')
output_depth = array('I')
# creates float array (4 Byte)
output_weights = array('f')
gc.collect()
break
parent, children, weight = stack[-1]
try:
parent_, child, child_keys = next(children)
# print "child: {}, parent_data: {}".format(child, parent_data)
if child not in visited:
weight = child_keys.get('weight', 1.0) * weight
visited.add(child)
if len(stack) >= depth_limit or weight <= 0.00001:
visited.remove(child)
else:
stack.append((child, G.edges_iter(child, data=True),
weight))
                # if it's not a user.
if get_only and child > 100000000000:
# if get_only and G.node[child].get('Type') != get_only:
continue
output_nodes.append(child)
output_weights.append(weight)
output_depth.append(len(stack))
if get_path:
apath.append([step[0] for step in stack])
except StopIteration:
stack.pop()
visited.remove(parent)
# if data.get('Type') == "Node":
return output_nodes, output_weights, output_depth, apath
|
python
|
def dfs_edges(G, start, depth_limit=1, get_only=True, get_path=False):
"""Deepest first search."""
depth_limit = depth_limit - 1
# creates unsigned int array (2 Byte)
output_nodes = array('L')
output_depth = array('I')
# creates float array (4 Byte)
output_weights = array('f')
apath = []
if G.node.get(start) is None:
# raise KeyError('Start node not found')
print('Start node not found')
return output_nodes, output_weights, output_depth, apath
visited = set()
visited.add(start)
# Save the start node with its data to the stack
stack = [(start, G.edges_iter(start, data=True), 1.0)]
visited.add(start)
while stack:
if len(output_nodes) > 80100100:
print("To many nodes for: {}".format(start))
del output_nodes
del output_weights
del output_depth
output_nodes = array('L')
output_depth = array('I')
# creates float array (4 Byte)
output_weights = array('f')
gc.collect()
break
parent, children, weight = stack[-1]
try:
parent_, child, child_keys = next(children)
# print "child: {}, parent_data: {}".format(child, parent_data)
if child not in visited:
weight = child_keys.get('weight', 1.0) * weight
visited.add(child)
if len(stack) >= depth_limit or weight <= 0.00001:
visited.remove(child)
else:
stack.append((child, G.edges_iter(child, data=True),
weight))
                # if it's not a user.
if get_only and child > 100000000000:
# if get_only and G.node[child].get('Type') != get_only:
continue
output_nodes.append(child)
output_weights.append(weight)
output_depth.append(len(stack))
if get_path:
apath.append([step[0] for step in stack])
except StopIteration:
stack.pop()
visited.remove(parent)
# if data.get('Type') == "Node":
return output_nodes, output_weights, output_depth, apath
|
[
"def",
"dfs_edges",
"(",
"G",
",",
"start",
",",
"depth_limit",
"=",
"1",
",",
"get_only",
"=",
"True",
",",
"get_path",
"=",
"False",
")",
":",
"depth_limit",
"=",
"depth_limit",
"-",
"1",
"# creates unsigned int array (2 Byte)",
"output_nodes",
"=",
"array",
"(",
"'L'",
")",
"output_depth",
"=",
"array",
"(",
"'I'",
")",
"# creates float array (4 Byte)",
"output_weights",
"=",
"array",
"(",
"'f'",
")",
"apath",
"=",
"[",
"]",
"if",
"G",
".",
"node",
".",
"get",
"(",
"start",
")",
"is",
"None",
":",
"# raise KeyError('Start node not found')",
"print",
"(",
"'Start node not found'",
")",
"return",
"output_nodes",
",",
"output_weights",
",",
"output_depth",
",",
"apath",
"visited",
"=",
"set",
"(",
")",
"visited",
".",
"add",
"(",
"start",
")",
"# Save the start node with its data to the stack",
"stack",
"=",
"[",
"(",
"start",
",",
"G",
".",
"edges_iter",
"(",
"start",
",",
"data",
"=",
"True",
")",
",",
"1.0",
")",
"]",
"visited",
".",
"add",
"(",
"start",
")",
"while",
"stack",
":",
"if",
"len",
"(",
"output_nodes",
")",
">",
"80100100",
":",
"print",
"(",
"\"To many nodes for: {}\"",
".",
"format",
"(",
"start",
")",
")",
"del",
"output_nodes",
"del",
"output_weights",
"del",
"output_depth",
"output_nodes",
"=",
"array",
"(",
"'L'",
")",
"output_depth",
"=",
"array",
"(",
"'I'",
")",
"# creates float array (4 Byte)",
"output_weights",
"=",
"array",
"(",
"'f'",
")",
"gc",
".",
"collect",
"(",
")",
"break",
"parent",
",",
"children",
",",
"weight",
"=",
"stack",
"[",
"-",
"1",
"]",
"try",
":",
"parent_",
",",
"child",
",",
"child_keys",
"=",
"next",
"(",
"children",
")",
"# print \"child: {}, parent_data: {}\".format(child, parent_data)",
"if",
"child",
"not",
"in",
"visited",
":",
"weight",
"=",
"child_keys",
".",
"get",
"(",
"'weight'",
",",
"1.0",
")",
"*",
"weight",
"visited",
".",
"add",
"(",
"child",
")",
"if",
"len",
"(",
"stack",
")",
">=",
"depth_limit",
"or",
"weight",
"<=",
"0.00001",
":",
"visited",
".",
"remove",
"(",
"child",
")",
"else",
":",
"stack",
".",
"append",
"(",
"(",
"child",
",",
"G",
".",
"edges_iter",
"(",
"child",
",",
"data",
"=",
"True",
")",
",",
"weight",
")",
")",
"# if its not and user.",
"if",
"get_only",
"and",
"child",
">",
"100000000000",
":",
"# if get_only and G.node[child].get('Type') != get_only:",
"continue",
"output_nodes",
".",
"append",
"(",
"child",
")",
"output_weights",
".",
"append",
"(",
"weight",
")",
"output_depth",
".",
"append",
"(",
"len",
"(",
"stack",
")",
")",
"if",
"get_path",
":",
"apath",
".",
"append",
"(",
"[",
"step",
"[",
"0",
"]",
"for",
"step",
"in",
"stack",
"]",
")",
"except",
"StopIteration",
":",
"stack",
".",
"pop",
"(",
")",
"visited",
".",
"remove",
"(",
"parent",
")",
"# if data.get('Type') == \"Node\":",
"return",
"output_nodes",
",",
"output_weights",
",",
"output_depth",
",",
"apath"
] |
Depth-first search.
|
[
"Deepest",
"first",
"search",
"."
] |
07f71e783369e6373218b5e6ba0bf15901e9251a
|
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/recommender.py#L140-L203
|
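Despite the docstring, dfs_edges is an iterative depth-first traversal that enumerates paths rather than nodes: a node is removed from visited on backtrack, so it is re-emitted whenever it is reached along a different route. The core stack discipline in miniature, over a plain adjacency dict (a sketch, not the module's implementation):

def dfs_paths(graph, start, depth_limit):
    visited = {start}
    stack = [(start, iter(graph.get(start, ())))]
    while stack:
        parent, children = stack[-1]
        child = next(children, None)
        if child is None:
            stack.pop()              # exhausted: backtrack...
            visited.discard(parent)  # ...and reopen this node for other paths
        elif child not in visited and len(stack) < depth_limit:
            visited.add(child)
            yield child, len(stack)  # node and the depth it was found at
            stack.append((child, iter(graph.get(child, ()))))

g = {'a': ['b', 'c'], 'b': ['c']}
print(list(dfs_paths(g, 'a', depth_limit=3)))
# [('b', 1), ('c', 2), ('c', 1)]  ('c' appears once per distinct path)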
242,662
|
inveniosoftware-contrib/record-recommender
|
record_recommender/recommender.py
|
calc_weight_of_multiple_paths
|
def calc_weight_of_multiple_paths(path_scores, impact_div=12):
"""Caluculate the weight of multipe paths."""
number_of_paths = len(path_scores)
if number_of_paths > 1:
score_total = 0.0
highest_score = 0.0
for score in path_scores.Scores:
score_total += score
if highest_score < score:
highest_score = score
score_mean = score_total / number_of_paths
# print "score_total: {}".format(score_total)
# print "score_mean: {}".format(score_mean)
# print "number_of_paths: {}".format(number_of_paths)
# Calculate the weight depending on how many ways are found
weight_count_impact = number_of_paths / float(number_of_paths +
impact_div)
# print "weight_count_impact: {}".format(weight_count_impact)
new_score = highest_score + ((1 + weight_count_impact) * score_mean)
# print "new_score: {}".format(new_score)
return new_score, highest_score, number_of_paths
else:
return (path_scores.Scores.iloc[0], path_scores.Scores.iloc[0],
number_of_paths)
|
python
|
def calc_weight_of_multiple_paths(path_scores, impact_div=12):
"""Caluculate the weight of multipe paths."""
number_of_paths = len(path_scores)
if number_of_paths > 1:
score_total = 0.0
highest_score = 0.0
for score in path_scores.Scores:
score_total += score
if highest_score < score:
highest_score = score
score_mean = score_total / number_of_paths
# print "score_total: {}".format(score_total)
# print "score_mean: {}".format(score_mean)
# print "number_of_paths: {}".format(number_of_paths)
# Calculate the weight depending on how many ways are found
weight_count_impact = number_of_paths / float(number_of_paths +
impact_div)
# print "weight_count_impact: {}".format(weight_count_impact)
new_score = highest_score + ((1 + weight_count_impact) * score_mean)
# print "new_score: {}".format(new_score)
return new_score, highest_score, number_of_paths
else:
return (path_scores.Scores.iloc[0], path_scores.Scores.iloc[0],
number_of_paths)
|
[
"def",
"calc_weight_of_multiple_paths",
"(",
"path_scores",
",",
"impact_div",
"=",
"12",
")",
":",
"number_of_paths",
"=",
"len",
"(",
"path_scores",
")",
"if",
"number_of_paths",
">",
"1",
":",
"score_total",
"=",
"0.0",
"highest_score",
"=",
"0.0",
"for",
"score",
"in",
"path_scores",
".",
"Scores",
":",
"score_total",
"+=",
"score",
"if",
"highest_score",
"<",
"score",
":",
"highest_score",
"=",
"score",
"score_mean",
"=",
"score_total",
"/",
"number_of_paths",
"# print \"score_total: {}\".format(score_total)",
"# print \"score_mean: {}\".format(score_mean)",
"# print \"number_of_paths: {}\".format(number_of_paths)",
"# Calculate the weight depending on how many ways are found",
"weight_count_impact",
"=",
"number_of_paths",
"/",
"float",
"(",
"number_of_paths",
"+",
"impact_div",
")",
"# print \"weight_count_impact: {}\".format(weight_count_impact)",
"new_score",
"=",
"highest_score",
"+",
"(",
"(",
"1",
"+",
"weight_count_impact",
")",
"*",
"score_mean",
")",
"# print \"new_score: {}\".format(new_score)",
"return",
"new_score",
",",
"highest_score",
",",
"number_of_paths",
"else",
":",
"return",
"(",
"path_scores",
".",
"Scores",
".",
"iloc",
"[",
"0",
"]",
",",
"path_scores",
".",
"Scores",
".",
"iloc",
"[",
"0",
"]",
",",
"number_of_paths",
")"
] |
Calculate the weight of multiple paths.
|
[
"Caluculate",
"the",
"weight",
"of",
"multipe",
"paths",
"."
] |
07f71e783369e6373218b5e6ba0bf15901e9251a
|
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/recommender.py#L206-L234
|
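The blend computed above rewards nodes reachable along many paths: with n paths the mean score is scaled by 1 + n/(n + impact_div) and added to the best single-path score, so the path-count bonus saturates at twice the mean as n grows. A quick numeric check of the formula (blended_score is an illustrative stand-in taking a plain list):

def blended_score(scores, impact_div=12):
    n = len(scores)
    if n == 1:
        return scores[0]
    mean = sum(scores) / n
    bonus = n / (n + impact_div)  # tends to 1, so the factor tends to 2
    return max(scores) + (1 + bonus) * mean

print(blended_score([0.8]))                 # 0.8
print(round(blended_score([0.8, 0.4]), 3))  # 0.8 + (1 + 2/14) * 0.6 = 1.486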
242,663
|
inveniosoftware-contrib/record-recommender
|
record_recommender/recommender.py
|
GraphRecommender.recommend_for_record
|
def recommend_for_record(self, record_id, depth=4, num_reco=10):
"""Calculate recommendations for record."""
data = calc_scores_for_node(self._graph, record_id, depth, num_reco)
return data.Node.tolist(), data.Score.tolist()
|
python
|
def recommend_for_record(self, record_id, depth=4, num_reco=10):
"""Calculate recommendations for record."""
data = calc_scores_for_node(self._graph, record_id, depth, num_reco)
return data.Node.tolist(), data.Score.tolist()
|
[
"def",
"recommend_for_record",
"(",
"self",
",",
"record_id",
",",
"depth",
"=",
"4",
",",
"num_reco",
"=",
"10",
")",
":",
"data",
"=",
"calc_scores_for_node",
"(",
"self",
".",
"_graph",
",",
"record_id",
",",
"depth",
",",
"num_reco",
")",
"return",
"data",
".",
"Node",
".",
"tolist",
"(",
")",
",",
"data",
".",
"Score",
".",
"tolist",
"(",
")"
] |
Calculate recommendations for record.
|
[
"Calculate",
"recommendations",
"for",
"record",
"."
] |
07f71e783369e6373218b5e6ba0bf15901e9251a
|
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/recommender.py#L47-L50
|
242,664
|
inveniosoftware-contrib/record-recommender
|
record_recommender/recommender.py
|
GraphRecommender.load_profile
|
def load_profile(self, profile_name):
"""Load user profiles from file."""
data = self.storage.get_user_profiles(profile_name)
for x in data.get_user_views():
self._graph.add_edge(int(x[0]), int(x[1]), {'weight': float(x[2])})
self.all_records[int(x[1])] += 1
return self._graph
|
python
|
def load_profile(self, profile_name):
"""Load user profiles from file."""
data = self.storage.get_user_profiles(profile_name)
for x in data.get_user_views():
self._graph.add_edge(int(x[0]), int(x[1]), {'weight': float(x[2])})
self.all_records[int(x[1])] += 1
return self._graph
|
[
"def",
"load_profile",
"(",
"self",
",",
"profile_name",
")",
":",
"data",
"=",
"self",
".",
"storage",
".",
"get_user_profiles",
"(",
"profile_name",
")",
"for",
"x",
"in",
"data",
".",
"get_user_views",
"(",
")",
":",
"self",
".",
"_graph",
".",
"add_edge",
"(",
"int",
"(",
"x",
"[",
"0",
"]",
")",
",",
"int",
"(",
"x",
"[",
"1",
"]",
")",
",",
"{",
"'weight'",
":",
"float",
"(",
"x",
"[",
"2",
"]",
")",
"}",
")",
"self",
".",
"all_records",
"[",
"int",
"(",
"x",
"[",
"1",
"]",
")",
"]",
"+=",
"1",
"return",
"self",
".",
"_graph"
] |
Load user profiles from file.
|
[
"Load",
"user",
"profiles",
"from",
"file",
"."
] |
07f71e783369e6373218b5e6ba0bf15901e9251a
|
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/recommender.py#L52-L60
|
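load_profile passes the attribute dict positionally to add_edge, a networkx 1.x convention (edges_iter and G.node in the traversal above are likewise 1.x APIs). On networkx 2.x the same edge insert takes keyword attributes; a minimal sketch:

import networkx as nx

G = nx.Graph()
for user, record, weight in [('1', '2001', '0.5'), ('1', '2002', '1.0')]:
    # networkx 2.x: edge attributes go as keyword arguments.
    G.add_edge(int(user), int(record), weight=float(weight))

print(G.edges(data=True))  # [(1, 2001, {'weight': 0.5}), (1, 2002, {'weight': 1.0})]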
242,665
|
inveniosoftware-contrib/record-recommender
|
record_recommender/recommender.py
|
GraphRecommender.del_big_nodes
|
def del_big_nodes(self, grater_than=215):
"""Delete big nodes with many connections from the graph."""
G = self._graph
it = G.nodes_iter()
node_paths = []
node_names = []
del_nodes = []
summe = 1
count = 1
for node in it:
l = len(G[node])
if l > grater_than:
del_nodes.append(node)
continue
summe += l
node_names.append(node)
node_paths.append(l)
count += 1
for node in del_nodes:
G.remove_node(node)
if node > 1000000000:
self.valid_user.pop(node)
print("Nodes deleted: {}".format(len(del_nodes)))
|
python
|
def del_big_nodes(self, grater_than=215):
"""Delete big nodes with many connections from the graph."""
G = self._graph
it = G.nodes_iter()
node_paths = []
node_names = []
del_nodes = []
summe = 1
count = 1
for node in it:
l = len(G[node])
if l > grater_than:
del_nodes.append(node)
continue
summe += l
node_names.append(node)
node_paths.append(l)
count += 1
for node in del_nodes:
G.remove_node(node)
if node > 1000000000:
self.valid_user.pop(node)
print("Nodes deleted: {}".format(len(del_nodes)))
|
[
"def",
"del_big_nodes",
"(",
"self",
",",
"grater_than",
"=",
"215",
")",
":",
"G",
"=",
"self",
".",
"_graph",
"it",
"=",
"G",
".",
"nodes_iter",
"(",
")",
"node_paths",
"=",
"[",
"]",
"node_names",
"=",
"[",
"]",
"del_nodes",
"=",
"[",
"]",
"summe",
"=",
"1",
"count",
"=",
"1",
"for",
"node",
"in",
"it",
":",
"l",
"=",
"len",
"(",
"G",
"[",
"node",
"]",
")",
"if",
"l",
">",
"grater_than",
":",
"del_nodes",
".",
"append",
"(",
"node",
")",
"continue",
"summe",
"+=",
"l",
"node_names",
".",
"append",
"(",
"node",
")",
"node_paths",
".",
"append",
"(",
"l",
")",
"count",
"+=",
"1",
"for",
"node",
"in",
"del_nodes",
":",
"G",
".",
"remove_node",
"(",
"node",
")",
"if",
"node",
">",
"1000000000",
":",
"self",
".",
"valid_user",
".",
"pop",
"(",
"node",
")",
"print",
"(",
"\"Nodes deleted: {}\"",
".",
"format",
"(",
"len",
"(",
"del_nodes",
")",
")",
")"
] |
Delete big nodes with many connections from the graph.
|
[
"Delete",
"big",
"nodes",
"with",
"many",
"connections",
"from",
"the",
"graph",
"."
] |
07f71e783369e6373218b5e6ba0bf15901e9251a
|
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/recommender.py#L62-L85
|
242,666
|
Arvedui/picuplib
|
picuplib/upload.py
|
punify_filename
|
def punify_filename(filename):
"""
    small hackish workaround for unicode problems with the picflash api
"""
path, extension = splitext(filename)
return path.encode('punycode').decode('utf8') + extension
|
python
|
def punify_filename(filename):
"""
    small hackish workaround for unicode problems with the picflash api
"""
path, extension = splitext(filename)
return path.encode('punycode').decode('utf8') + extension
|
[
"def",
"punify_filename",
"(",
"filename",
")",
":",
"path",
",",
"extension",
"=",
"splitext",
"(",
"filename",
")",
"return",
"path",
".",
"encode",
"(",
"'punycode'",
")",
".",
"decode",
"(",
"'utf8'",
")",
"+",
"extension"
] |
small hackish workaround for unicode problems with the picflash api
|
[
"small",
"hackisch",
"workaround",
"for",
"unicode",
"problems",
"with",
"the",
"picflash",
"api"
] |
c8a5d1542dbd421e84afd5ee81fe76efec89fb95
|
https://github.com/Arvedui/picuplib/blob/c8a5d1542dbd421e84afd5ee81fe76efec89fb95/picuplib/upload.py#L173-L178
|
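The workaround above relies on the 'punycode' codec always emitting pure-ASCII bytes, so decoding them as UTF-8 cannot fail. A standalone round-trip sketch (an illustration, not part of the library):

from os.path import splitext

def punify(filename):
    stem, ext = splitext(filename)
    # punycode output is ASCII-only, so the utf8 decode below is always safe
    return stem.encode('punycode').decode('utf8') + ext

print(punify('grüße.jpg'))  # the stem becomes an ASCII-only punycode form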
242,667
|
Arvedui/picuplib
|
picuplib/upload.py
|
upload
|
def upload(apikey, picture, resize=None, rotation='00', noexif=False,
callback=None):
"""
prepares post for regular upload
    :param str apikey: API key needed for authentication on picflash.
    :param str/tuple/list picture: Path to the picture as str, or picture data. \
        If passing data, use a tuple or list with the file name as str \
        and the data as a bytes object, in that order.
    :param str resize: A resolution in the following format: \
        '80x80' (optional)
    :param str|degree rotation: The picture will be rotated by this value. \
        Allowed values are 00, 90, 180, 270. (optional)
    :param boolean noexif: set to True when exif data should be purged. \
        (optional)
    :param function callback: function which will be called after every read. \
        It needs to take one argument. You can use the len function to determine \
        the body length and call bytes_read().
"""
if isinstance(picture, str):
with open(picture, 'rb') as file_obj:
picture_name = picture
data = file_obj.read()
elif isinstance(picture, (tuple, list)):
picture_name = picture[0]
data = picture[1]
else:
raise TypeError("The second argument must be str or list/tuple. "
"Please refer to the documentation for details.")
check_rotation(rotation)
check_resize(resize)
check_callback(callback)
post_data = compose_post(apikey, resize, rotation, noexif)
post_data['Datei[]'] = (punify_filename(basename(picture_name)), data)
return do_upload(post_data, callback)
|
python
|
def upload(apikey, picture, resize=None, rotation='00', noexif=False,
callback=None):
"""
prepares post for regular upload
    :param str apikey: API key needed for authentication on picflash.
    :param str/tuple/list picture: Path to the picture as str, or picture data. \
        If passing data, use a tuple or list with the file name as str \
        and the data as a bytes object, in that order.
    :param str resize: A resolution in the following format: \
        '80x80' (optional)
    :param str|degree rotation: The picture will be rotated by this value. \
        Allowed values are 00, 90, 180, 270. (optional)
    :param boolean noexif: set to True when exif data should be purged. \
        (optional)
    :param function callback: function which will be called after every read. \
        It needs to take one argument. You can use the len function to determine \
        the body length and call bytes_read().
"""
if isinstance(picture, str):
with open(picture, 'rb') as file_obj:
picture_name = picture
data = file_obj.read()
elif isinstance(picture, (tuple, list)):
picture_name = picture[0]
data = picture[1]
else:
raise TypeError("The second argument must be str or list/tuple. "
"Please refer to the documentation for details.")
check_rotation(rotation)
check_resize(resize)
check_callback(callback)
post_data = compose_post(apikey, resize, rotation, noexif)
post_data['Datei[]'] = (punify_filename(basename(picture_name)), data)
return do_upload(post_data, callback)
|
[
"def",
"upload",
"(",
"apikey",
",",
"picture",
",",
"resize",
"=",
"None",
",",
"rotation",
"=",
"'00'",
",",
"noexif",
"=",
"False",
",",
"callback",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"picture",
",",
"str",
")",
":",
"with",
"open",
"(",
"picture",
",",
"'rb'",
")",
"as",
"file_obj",
":",
"picture_name",
"=",
"picture",
"data",
"=",
"file_obj",
".",
"read",
"(",
")",
"elif",
"isinstance",
"(",
"picture",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"picture_name",
"=",
"picture",
"[",
"0",
"]",
"data",
"=",
"picture",
"[",
"1",
"]",
"else",
":",
"raise",
"TypeError",
"(",
"\"The second argument must be str or list/tuple. \"",
"\"Please refer to the documentation for details.\"",
")",
"check_rotation",
"(",
"rotation",
")",
"check_resize",
"(",
"resize",
")",
"check_callback",
"(",
"callback",
")",
"post_data",
"=",
"compose_post",
"(",
"apikey",
",",
"resize",
",",
"rotation",
",",
"noexif",
")",
"post_data",
"[",
"'Datei[]'",
"]",
"=",
"(",
"punify_filename",
"(",
"basename",
"(",
"picture_name",
")",
")",
",",
"data",
")",
"return",
"do_upload",
"(",
"post_data",
",",
"callback",
")"
] |
prepares post for regular upload
:param str apikey: API key needed for authentication on picflash.
:param str/tuple/list picture: Path to the picture as str, or picture data. \
    If passing data, use a tuple or list with the file name as str \
    and the data as a bytes object, in that order.
:param str resize: A resolution in the following format: \
    '80x80' (optional)
:param str|degree rotation: The picture will be rotated by this value. \
    Allowed values are 00, 90, 180, 270. (optional)
:param boolean noexif: set to True when exif data should be purged. \
    (optional)
:param function callback: function which will be called after every read. \
    It needs to take one argument. You can use the len function to determine \
    the body length and call bytes_read().
|
[
"prepares",
"post",
"for",
"regular",
"upload"
] |
c8a5d1542dbd421e84afd5ee81fe76efec89fb95
|
https://github.com/Arvedui/picuplib/blob/c8a5d1542dbd421e84afd5ee81fe76efec89fb95/picuplib/upload.py#L183-L223
|
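A hedged usage sketch for the upload function above. The API key and file names are placeholders, and the callback is assumed to receive requests_toolbelt's MultipartEncoderMonitor (see do_upload further down), which exposes bytes_read and the wrapped encoder:

from picuplib.upload import upload  # import path taken from this record

def progress(monitor):
    # monitor is a requests_toolbelt MultipartEncoderMonitor
    print('%d of %d bytes sent' % (monitor.bytes_read, monitor.encoder.len))

# 1) pass a path on disk ...
result = upload('YOUR_APIKEY', 'holiday.jpg', resize='800x600',
                rotation='90', noexif=True, callback=progress)

# 2) ... or pass (filename, data) when the bytes are already in memory
with open('holiday.jpg', 'rb') as fh:
    result = upload('YOUR_APIKEY', ('holiday.jpg', fh.read()))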
242,668
|
Arvedui/picuplib
|
picuplib/upload.py
|
remote_upload
|
def remote_upload(apikey, picture_url, resize=None,
rotation='00', noexif=False):
"""
prepares post for remote upload
    :param str apikey: API key needed for authentication on picflash.
    :param str picture_url: URL to the picture. Allowed protocols are: ftp,
        http, https
    :param str resize: A resolution in the following format: \
        '80x80' (optional)
    :param str|degree rotation: The picture will be rotated by this value. \
        Allowed values are 00, 90, 180, 270. (optional)
    :param boolean noexif: set to True when exif data should be purged. \
        (optional)
"""
check_rotation(rotation)
check_resize(resize)
url = check_if_redirect(picture_url)
if url:
picture_url = resolve_redirect(url)
post_data = compose_post(apikey, resize, rotation, noexif)
post_data['url[]'] = ('', picture_url)
return do_upload(post_data)
|
python
|
def remote_upload(apikey, picture_url, resize=None,
rotation='00', noexif=False):
"""
prepares post for remote upload
    :param str apikey: API key needed for authentication on picflash.
    :param str picture_url: URL to the picture. Allowed protocols are: ftp,
        http, https
    :param str resize: A resolution in the following format: \
        '80x80' (optional)
    :param str|degree rotation: The picture will be rotated by this value. \
        Allowed values are 00, 90, 180, 270. (optional)
    :param boolean noexif: set to True when exif data should be purged. \
        (optional)
"""
check_rotation(rotation)
check_resize(resize)
url = check_if_redirect(picture_url)
if url:
picture_url = resolve_redirect(url)
post_data = compose_post(apikey, resize, rotation, noexif)
post_data['url[]'] = ('', picture_url)
return do_upload(post_data)
|
[
"def",
"remote_upload",
"(",
"apikey",
",",
"picture_url",
",",
"resize",
"=",
"None",
",",
"rotation",
"=",
"'00'",
",",
"noexif",
"=",
"False",
")",
":",
"check_rotation",
"(",
"rotation",
")",
"check_resize",
"(",
"resize",
")",
"url",
"=",
"check_if_redirect",
"(",
"picture_url",
")",
"if",
"url",
":",
"picture_url",
"=",
"resolve_redirect",
"(",
"url",
")",
"post_data",
"=",
"compose_post",
"(",
"apikey",
",",
"resize",
",",
"rotation",
",",
"noexif",
")",
"post_data",
"[",
"'url[]'",
"]",
"=",
"(",
"''",
",",
"picture_url",
")",
"return",
"do_upload",
"(",
"post_data",
")"
] |
prepares post for remote upload
:param str apikey: API key needed for authentication on picflash.
:param str picture_url: URL to the picture. Allowed protocols are: ftp,
    http, https
:param str resize: A resolution in the following format: \
    '80x80' (optional)
:param str|degree rotation: The picture will be rotated by this value. \
    Allowed values are 00, 90, 180, 270. (optional)
:param boolean noexif: set to True when exif data should be purged. \
    (optional)
|
[
"prepares",
"post",
"for",
"remote",
"upload"
] |
c8a5d1542dbd421e84afd5ee81fe76efec89fb95
|
https://github.com/Arvedui/picuplib/blob/c8a5d1542dbd421e84afd5ee81fe76efec89fb95/picuplib/upload.py#L227-L252
|
242,669
|
Arvedui/picuplib
|
picuplib/upload.py
|
compose_post
|
def compose_post(apikey, resize, rotation, noexif):
"""
composes basic post requests
"""
check_rotation(rotation)
check_resize(resize)
post_data = {
'formatliste': ('', 'og'),
'userdrehung': ('', rotation),
'apikey': ('', apikey)
}
if resize and 'x' in resize:
width, height = [ x.strip() for x in resize.split('x')]
post_data['udefb'] = ('', width)
post_data['udefh'] = ('', height)
elif resize and '%' in resize:
precentage = resize.strip().strip('%')
post_data['udefp'] = precentage
if noexif:
post_data['noexif'] = ('', '')
return post_data
|
python
|
def compose_post(apikey, resize, rotation, noexif):
"""
composes basic post requests
"""
check_rotation(rotation)
check_resize(resize)
post_data = {
'formatliste': ('', 'og'),
'userdrehung': ('', rotation),
'apikey': ('', apikey)
}
if resize and 'x' in resize:
width, height = [ x.strip() for x in resize.split('x')]
post_data['udefb'] = ('', width)
post_data['udefh'] = ('', height)
elif resize and '%' in resize:
precentage = resize.strip().strip('%')
post_data['udefp'] = precentage
if noexif:
post_data['noexif'] = ('', '')
return post_data
|
[
"def",
"compose_post",
"(",
"apikey",
",",
"resize",
",",
"rotation",
",",
"noexif",
")",
":",
"check_rotation",
"(",
"rotation",
")",
"check_resize",
"(",
"resize",
")",
"post_data",
"=",
"{",
"'formatliste'",
":",
"(",
"''",
",",
"'og'",
")",
",",
"'userdrehung'",
":",
"(",
"''",
",",
"rotation",
")",
",",
"'apikey'",
":",
"(",
"''",
",",
"apikey",
")",
"}",
"if",
"resize",
"and",
"'x'",
"in",
"resize",
":",
"width",
",",
"height",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"resize",
".",
"split",
"(",
"'x'",
")",
"]",
"post_data",
"[",
"'udefb'",
"]",
"=",
"(",
"''",
",",
"width",
")",
"post_data",
"[",
"'udefh'",
"]",
"=",
"(",
"''",
",",
"height",
")",
"elif",
"resize",
"and",
"'%'",
"in",
"resize",
":",
"precentage",
"=",
"resize",
".",
"strip",
"(",
")",
".",
"strip",
"(",
"'%'",
")",
"post_data",
"[",
"'udefp'",
"]",
"=",
"precentage",
"if",
"noexif",
":",
"post_data",
"[",
"'noexif'",
"]",
"=",
"(",
"''",
",",
"''",
")",
"return",
"post_data"
] |
composes basic post requests
|
[
"composes",
"basic",
"post",
"requests"
] |
c8a5d1542dbd421e84afd5ee81fe76efec89fb95
|
https://github.com/Arvedui/picuplib/blob/c8a5d1542dbd421e84afd5ee81fe76efec89fb95/picuplib/upload.py#L265-L289
|
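Two details worth noting in compose_post: resize accepts either a 'WIDTHxHEIGHT' or an 'N%' form, and the percentage branch stores a bare string where every other field is an ('', value) tuple, which looks like an inconsistency in the original (precentage is also a misspelt local name for percentage). A standalone sketch of just the resize parsing, with names of my own choosing:

def resize_fields(resize):
    fields = {}
    if resize and 'x' in resize:
        width, height = [part.strip() for part in resize.split('x')]
        fields['udefb'] = ('', width)    # user-defined width
        fields['udefh'] = ('', height)   # user-defined height
    elif resize and '%' in resize:
        fields['udefp'] = resize.strip().strip('%')  # user-defined percentage
    return fields

print(resize_fields('80x80'))  # {'udefb': ('', '80'), 'udefh': ('', '80')}
print(resize_fields('50%'))    # {'udefp': '50'}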
242,670
|
Arvedui/picuplib
|
picuplib/upload.py
|
do_upload
|
def do_upload(post_data, callback=None):
"""
    does the actual upload; also sets and generates the user agent string
"""
encoder = MultipartEncoder(post_data)
monitor = MultipartEncoderMonitor(encoder, callback)
headers = {'User-Agent': USER_AGENT, 'Content-Type': monitor.content_type}
response = post(API_URL, data=monitor, headers=headers)
check_response(response)
return response.json()[0]
|
python
|
def do_upload(post_data, callback=None):
"""
    does the actual upload; also sets and generates the user agent string
"""
encoder = MultipartEncoder(post_data)
monitor = MultipartEncoderMonitor(encoder, callback)
headers = {'User-Agent': USER_AGENT, 'Content-Type': monitor.content_type}
response = post(API_URL, data=monitor, headers=headers)
check_response(response)
return response.json()[0]
|
[
"def",
"do_upload",
"(",
"post_data",
",",
"callback",
"=",
"None",
")",
":",
"encoder",
"=",
"MultipartEncoder",
"(",
"post_data",
")",
"monitor",
"=",
"MultipartEncoderMonitor",
"(",
"encoder",
",",
"callback",
")",
"headers",
"=",
"{",
"'User-Agent'",
":",
"USER_AGENT",
",",
"'Content-Type'",
":",
"monitor",
".",
"content_type",
"}",
"response",
"=",
"post",
"(",
"API_URL",
",",
"data",
"=",
"monitor",
",",
"headers",
"=",
"headers",
")",
"check_response",
"(",
"response",
")",
"return",
"response",
".",
"json",
"(",
")",
"[",
"0",
"]"
] |
does the actual upload; also sets and generates the user agent string
|
[
"does",
"the",
"actual",
"upload",
"also",
"sets",
"and",
"generates",
"the",
"user",
"agent",
"string"
] |
c8a5d1542dbd421e84afd5ee81fe76efec89fb95
|
https://github.com/Arvedui/picuplib/blob/c8a5d1542dbd421e84afd5ee81fe76efec89fb95/picuplib/upload.py#L292-L304
|
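do_upload streams the request body with requests_toolbelt: MultipartEncoder assembles the multipart payload lazily and MultipartEncoderMonitor invokes the callback on every read. A minimal self-contained sketch of that mechanism, posting to httpbin.org purely for illustration:

from requests import post
from requests_toolbelt import MultipartEncoder, MultipartEncoderMonitor

encoder = MultipartEncoder({'field': ('', 'value')})

def on_read(monitor):
    # called once per chunk read from the encoder
    print('sent %d of %d bytes' % (monitor.bytes_read, encoder.len))

monitor = MultipartEncoderMonitor(encoder, on_read)
response = post('https://httpbin.org/post', data=monitor,
                headers={'Content-Type': monitor.content_type})
print(response.status_code)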
242,671
|
Arvedui/picuplib
|
picuplib/upload.py
|
Upload.upload
|
def upload(self, picture, resize=None, rotation=None, noexif=None,
callback=None):
"""
wraps upload function
    :param str/tuple/list picture: Path to the picture as str, or picture data. \
        If passing data, use a tuple or list with the file name as str \
        and the data as a bytes object, in that order.
    :param str resize: A resolution in the following format: \
        '80x80' (optional)
    :param str|degree rotation: The picture will be rotated by this value. \
        Allowed values are 00, 90, 180, 270. (optional)
    :param boolean noexif: set to True when exif data should be purged. \
        (optional)
    :param function callback: function which will be called after every read. \
        It needs to take one argument. You can use the len function to \
        determine the body length and call bytes_read().
"""
if not resize:
resize = self._resize
if not rotation:
rotation = self._rotation
if not noexif:
noexif = self._noexif
if not callback:
callback = self._callback
return upload(self._apikey, picture, resize,
rotation, noexif, callback)
|
python
|
def upload(self, picture, resize=None, rotation=None, noexif=None,
callback=None):
"""
wraps upload function
    :param str/tuple/list picture: Path to the picture as str, or picture data. \
        If passing data, use a tuple or list with the file name as str \
        and the data as a bytes object, in that order.
    :param str resize: A resolution in the following format: \
        '80x80' (optional)
    :param str|degree rotation: The picture will be rotated by this value. \
        Allowed values are 00, 90, 180, 270. (optional)
    :param boolean noexif: set to True when exif data should be purged. \
        (optional)
    :param function callback: function which will be called after every read. \
        It needs to take one argument. You can use the len function to \
        determine the body length and call bytes_read().
"""
if not resize:
resize = self._resize
if not rotation:
rotation = self._rotation
if not noexif:
noexif = self._noexif
if not callback:
callback = self._callback
return upload(self._apikey, picture, resize,
rotation, noexif, callback)
|
[
"def",
"upload",
"(",
"self",
",",
"picture",
",",
"resize",
"=",
"None",
",",
"rotation",
"=",
"None",
",",
"noexif",
"=",
"None",
",",
"callback",
"=",
"None",
")",
":",
"if",
"not",
"resize",
":",
"resize",
"=",
"self",
".",
"_resize",
"if",
"not",
"rotation",
":",
"rotation",
"=",
"self",
".",
"_rotation",
"if",
"not",
"noexif",
":",
"noexif",
"=",
"self",
".",
"_noexif",
"if",
"not",
"callback",
":",
"callback",
"=",
"self",
".",
"_callback",
"return",
"upload",
"(",
"self",
".",
"_apikey",
",",
"picture",
",",
"resize",
",",
"rotation",
",",
"noexif",
",",
"callback",
")"
] |
wraps upload function
:param str/tuple/list picture: Path to the picture as str, or picture data. \
    If passing data, use a tuple or list with the file name as str \
    and the data as a bytes object, in that order.
:param str resize: A resolution in the following format: \
    '80x80' (optional)
:param str|degree rotation: The picture will be rotated by this value. \
    Allowed values are 00, 90, 180, 270. (optional)
:param boolean noexif: set to True when exif data should be purged. \
    (optional)
:param function callback: function which will be called after every read. \
    It needs to take one argument. You can use the len function to \
    determine the body length and call bytes_read().
|
[
"wraps",
"upload",
"function"
] |
c8a5d1542dbd421e84afd5ee81fe76efec89fb95
|
https://github.com/Arvedui/picuplib/blob/c8a5d1542dbd421e84afd5ee81fe76efec89fb95/picuplib/upload.py#L115-L144
|
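The Upload class caches defaults given at construction time and falls back to them whenever a per-call argument is left at None. One caveat visible above: the fallback test is 'if not resize', so an explicitly passed falsy value such as '' also triggers the instance default. A hedged usage sketch, assuming the package exposes Upload at top level and that its constructor mirrors the wrapped function's keyword arguments:

from picuplib import Upload  # top-level export is an assumption

client = Upload('YOUR_APIKEY', resize='1024x768', noexif=True)
client.upload('a.jpg')                  # falls back to the instance defaults
client.upload('b.jpg', rotation='180')  # one-off override for this call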
242,672
|
Arvedui/picuplib
|
picuplib/upload.py
|
Upload.remote_upload
|
def remote_upload(self, picture_url, resize=None,
rotation=None, noexif=None):
"""
    wraps remote_upload function
    :param str picture_url: URL to the picture. Allowed protocols are: ftp,\
        http, https
    :param str resize: A resolution in the following format: \
        '80x80' (optional)
    :param str|degree rotation: The picture will be rotated by this value. \
        Allowed values are 00, 90, 180, 270. (optional)
    :param boolean noexif: set to True when exif data should be purged. \
        (optional)
"""
if not resize:
resize = self._resize
if not rotation:
rotation = self._rotation
if not noexif:
noexif = self._noexif
return remote_upload(self._apikey, picture_url,
resize, rotation, noexif)
|
python
|
def remote_upload(self, picture_url, resize=None,
rotation=None, noexif=None):
"""
    wraps remote_upload function
    :param str picture_url: URL to the picture. Allowed protocols are: ftp,\
        http, https
    :param str resize: A resolution in the following format: \
        '80x80' (optional)
    :param str|degree rotation: The picture will be rotated by this value. \
        Allowed values are 00, 90, 180, 270. (optional)
    :param boolean noexif: set to True when exif data should be purged. \
        (optional)
"""
if not resize:
resize = self._resize
if not rotation:
rotation = self._rotation
if not noexif:
noexif = self._noexif
return remote_upload(self._apikey, picture_url,
resize, rotation, noexif)
|
[
"def",
"remote_upload",
"(",
"self",
",",
"picture_url",
",",
"resize",
"=",
"None",
",",
"rotation",
"=",
"None",
",",
"noexif",
"=",
"None",
")",
":",
"if",
"not",
"resize",
":",
"resize",
"=",
"self",
".",
"_resize",
"if",
"not",
"rotation",
":",
"rotation",
"=",
"self",
".",
"_rotation",
"if",
"not",
"noexif",
":",
"noexif",
"=",
"self",
".",
"_noexif",
"return",
"remote_upload",
"(",
"self",
".",
"_apikey",
",",
"picture_url",
",",
"resize",
",",
"rotation",
",",
"noexif",
")"
] |
wraps remote_upload function
:param str picture_url: URL to the picture. Allowed protocols are: ftp,\
    http, https
:param str resize: A resolution in the following format: \
    '80x80' (optional)
:param str|degree rotation: The picture will be rotated by this value. \
    Allowed values are 00, 90, 180, 270. (optional)
:param boolean noexif: set to True when exif data should be purged. \
    (optional)
|
[
"wraps",
"remote_upload",
"funktion"
] |
c8a5d1542dbd421e84afd5ee81fe76efec89fb95
|
https://github.com/Arvedui/picuplib/blob/c8a5d1542dbd421e84afd5ee81fe76efec89fb95/picuplib/upload.py#L147-L170
|
242,673
|
rjw57/throw
|
throw/identity.py
|
load_identity
|
def load_identity(config = Config()):
"""Load the default identity from the configuration. If there is no default
identity, a KeyError is raised.
"""
return Identity(name = config.get('user', 'name'),
email_ = config.get('user', 'email'),
**config.get_section('smtp'))
|
python
|
def load_identity(config = Config()):
"""Load the default identity from the configuration. If there is no default
identity, a KeyError is raised.
"""
return Identity(name = config.get('user', 'name'),
email_ = config.get('user', 'email'),
**config.get_section('smtp'))
|
[
"def",
"load_identity",
"(",
"config",
"=",
"Config",
"(",
")",
")",
":",
"return",
"Identity",
"(",
"name",
"=",
"config",
".",
"get",
"(",
"'user'",
",",
"'name'",
")",
",",
"email_",
"=",
"config",
".",
"get",
"(",
"'user'",
",",
"'email'",
")",
",",
"*",
"*",
"config",
".",
"get_section",
"(",
"'smtp'",
")",
")"
] |
Load the default identity from the configuration. If there is no default
identity, a KeyError is raised.
|
[
"Load",
"the",
"default",
"identity",
"from",
"the",
"configuration",
".",
"If",
"there",
"is",
"no",
"default",
"identity",
"a",
"KeyError",
"is",
"raised",
"."
] |
74a7116362ba5b45635ab247472b25cfbdece4ee
|
https://github.com/rjw57/throw/blob/74a7116362ba5b45635ab247472b25cfbdece4ee/throw/identity.py#L29-L36
|
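load_identity reads name and email from a [user] section and forwards the whole [smtp] section to Identity as keyword arguments. A sketch of the configuration shape this implies, using the stdlib parser; the smtp key names below are illustrative assumptions, not the project's documented keys:

import configparser

config = configparser.ConfigParser()
config.read_string("""
[user]
name = Jane Doe
email = jane@example.com

[smtp]
hostname = smtp.example.com
use_tls = yes
""")
print(config.get('user', 'name'))   # -> Jane Doe
print(dict(config.items('smtp')))   # forwarded as **kwargs to Identity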
242,674
|
rjw57/throw
|
throw/identity.py
|
Identity._smtp_server
|
def _smtp_server(self):
"""Return a smtplib SMTP object correctly initialised and connected to
    an SMTP server suitable for sending email on behalf of the user."""
if self._use_ssl:
server = smtplib.SMTP_SSL(**self._smtp_vars)
else:
server = smtplib.SMTP(**self._smtp_vars)
if self._use_tls:
server.starttls()
if self._credentials is not None:
passwd = self._credentials[1]
if passwd is None:
passwd = self._interface.input( \
'Password for %s' % (self._credentials[0],), no_echo=True)
server.login(*self._credentials)
# if we succeeded, cache the password
self._credentials = (self._credentials[0], passwd)
return server
|
python
|
def _smtp_server(self):
"""Return a smtplib SMTP object correctly initialised and connected to
    an SMTP server suitable for sending email on behalf of the user."""
if self._use_ssl:
server = smtplib.SMTP_SSL(**self._smtp_vars)
else:
server = smtplib.SMTP(**self._smtp_vars)
if self._use_tls:
server.starttls()
if self._credentials is not None:
passwd = self._credentials[1]
if passwd is None:
passwd = self._interface.input( \
'Password for %s' % (self._credentials[0],), no_echo=True)
server.login(*self._credentials)
# if we succeeded, cache the password
self._credentials = (self._credentials[0], passwd)
return server
|
[
"def",
"_smtp_server",
"(",
"self",
")",
":",
"if",
"self",
".",
"_use_ssl",
":",
"server",
"=",
"smtplib",
".",
"SMTP_SSL",
"(",
"*",
"*",
"self",
".",
"_smtp_vars",
")",
"else",
":",
"server",
"=",
"smtplib",
".",
"SMTP",
"(",
"*",
"*",
"self",
".",
"_smtp_vars",
")",
"if",
"self",
".",
"_use_tls",
":",
"server",
".",
"starttls",
"(",
")",
"if",
"self",
".",
"_credentials",
"is",
"not",
"None",
":",
"passwd",
"=",
"self",
".",
"_credentials",
"[",
"1",
"]",
"if",
"passwd",
"is",
"None",
":",
"passwd",
"=",
"self",
".",
"_interface",
".",
"input",
"(",
"'Password for %s'",
"%",
"(",
"self",
".",
"_credentials",
"[",
"0",
"]",
",",
")",
",",
"no_echo",
"=",
"True",
")",
"server",
".",
"login",
"(",
"*",
"self",
".",
"_credentials",
")",
"# if we succeeded, cache the password",
"self",
".",
"_credentials",
"=",
"(",
"self",
".",
"_credentials",
"[",
"0",
"]",
",",
"passwd",
")",
"return",
"server"
] |
Return a smtplib SMTP object correctly initialised and connected to
an SMTP server suitable for sending email on behalf of the user.
|
[
"Return",
"a",
"smtplib",
"SMTP",
"object",
"correctly",
"initialised",
"and",
"connected",
"to",
"a",
"SMTP",
"server",
"suitable",
"for",
"sending",
"email",
"on",
"behalf",
"of",
"the",
"user",
"."
] |
74a7116362ba5b45635ab247472b25cfbdece4ee
|
https://github.com/rjw57/throw/blob/74a7116362ba5b45635ab247472b25cfbdece4ee/throw/identity.py#L234-L256
|
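The method follows the standard smtplib sequence: an implicit-TLS connection via SMTP_SSL, or a plain connection optionally upgraded with starttls, then an optional login. A standalone, stdlib-only sketch of that sequence (function and parameter names are mine):

import smtplib

def connect_smtp(host, port=0, use_ssl=False, use_tls=False, credentials=None):
    if use_ssl:
        server = smtplib.SMTP_SSL(host, port)  # TLS from the first byte
    else:
        server = smtplib.SMTP(host, port)
        if use_tls:
            server.starttls()                  # upgrade the plain connection
    if credentials is not None:
        server.login(*credentials)             # (username, password)
    return server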
242,675
|
mgbarrero/xbob.db.atvskeystroke
|
xbob/db/atvskeystroke/create.py
|
add_clients
|
def add_clients(session, verbose):
"""Add clients to the ATVS Keystroke database."""
for ctype in ['Genuine', 'Impostor']:
for cdid in userid_clients:
cid = ctype + '_%d' % cdid
if verbose>1: print(" Adding user '%s' of type '%s'..." % (cid, ctype))
session.add(Client(cid, ctype, cdid))
|
python
|
def add_clients(session, verbose):
"""Add clients to the ATVS Keystroke database."""
for ctype in ['Genuine', 'Impostor']:
for cdid in userid_clients:
cid = ctype + '_%d' % cdid
if verbose>1: print(" Adding user '%s' of type '%s'..." % (cid, ctype))
session.add(Client(cid, ctype, cdid))
|
[
"def",
"add_clients",
"(",
"session",
",",
"verbose",
")",
":",
"for",
"ctype",
"in",
"[",
"'Genuine'",
",",
"'Impostor'",
"]",
":",
"for",
"cdid",
"in",
"userid_clients",
":",
"cid",
"=",
"ctype",
"+",
"'_%d'",
"%",
"cdid",
"if",
"verbose",
">",
"1",
":",
"print",
"(",
"\" Adding user '%s' of type '%s'...\"",
"%",
"(",
"cid",
",",
"ctype",
")",
")",
"session",
".",
"add",
"(",
"Client",
"(",
"cid",
",",
"ctype",
",",
"cdid",
")",
")"
] |
Add clients to the ATVS Keystroke database.
|
[
"Add",
"clients",
"to",
"the",
"ATVS",
"Keystroke",
"database",
"."
] |
b7358a73e21757b43334df7c89ba057b377ca704
|
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/create.py#L30-L36
|
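The client-id scheme is plain string composition: every numeric id in userid_clients yields one 'Genuine_<n>' and one 'Impostor_<n>' entry. A sketch with a stand-in list (userid_clients is a module-level list in create.py that this record does not show):

userid_clients = [1, 2, 3]  # stand-in for the module-level list

ids = [ctype + '_%d' % cdid
       for ctype in ('Genuine', 'Impostor')
       for cdid in userid_clients]
print(ids)  # ['Genuine_1', 'Genuine_2', 'Genuine_3', 'Impostor_1', ...]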
242,676
|
mgbarrero/xbob.db.atvskeystroke
|
xbob/db/atvskeystroke/create.py
|
add_files
|
def add_files(session, imagedir, verbose):
"""Add files to the ATVS Keystroke database."""
def add_file(session, basename, userid, shotid, sessionid):
"""Parse a single filename and add it to the list."""
session.add(File(userid, basename, sessionid, shotid))
filenames = os.listdir(imagedir)
for filename in filenames:
basename, extension = os.path.splitext(filename)
if extension == db_file_extension:
if verbose>1: print(" Adding file '%s'..." % (basename))
parts = string.split(basename, "_")
ctype = parts[0]
shotid = int(parts[2])
userid = ctype + '_%d' % int(parts[1])
if parts[0] == "Impostor":
sessionid = 3
elif parts[0] == "Genuine" and shotid <= 6:
sessionid = 1
elif parts[0] == "Genuine" and shotid > 6:
sessionid = 2
shotid = shotid - 6
add_file(session, basename, userid, shotid, sessionid)
|
python
|
def add_files(session, imagedir, verbose):
"""Add files to the ATVS Keystroke database."""
def add_file(session, basename, userid, shotid, sessionid):
"""Parse a single filename and add it to the list."""
session.add(File(userid, basename, sessionid, shotid))
filenames = os.listdir(imagedir)
for filename in filenames:
basename, extension = os.path.splitext(filename)
if extension == db_file_extension:
if verbose>1: print(" Adding file '%s'..." % (basename))
parts = string.split(basename, "_")
ctype = parts[0]
shotid = int(parts[2])
userid = ctype + '_%d' % int(parts[1])
if parts[0] == "Impostor":
sessionid = 3
elif parts[0] == "Genuine" and shotid <= 6:
sessionid = 1
elif parts[0] == "Genuine" and shotid > 6:
sessionid = 2
shotid = shotid - 6
add_file(session, basename, userid, shotid, sessionid)
|
[
"def",
"add_files",
"(",
"session",
",",
"imagedir",
",",
"verbose",
")",
":",
"def",
"add_file",
"(",
"session",
",",
"basename",
",",
"userid",
",",
"shotid",
",",
"sessionid",
")",
":",
"\"\"\"Parse a single filename and add it to the list.\"\"\"",
"session",
".",
"add",
"(",
"File",
"(",
"userid",
",",
"basename",
",",
"sessionid",
",",
"shotid",
")",
")",
"filenames",
"=",
"os",
".",
"listdir",
"(",
"imagedir",
")",
"for",
"filename",
"in",
"filenames",
":",
"basename",
",",
"extension",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"if",
"extension",
"==",
"db_file_extension",
":",
"if",
"verbose",
">",
"1",
":",
"print",
"(",
"\" Adding file '%s'...\"",
"%",
"(",
"basename",
")",
")",
"parts",
"=",
"string",
".",
"split",
"(",
"basename",
",",
"\"_\"",
")",
"ctype",
"=",
"parts",
"[",
"0",
"]",
"shotid",
"=",
"int",
"(",
"parts",
"[",
"2",
"]",
")",
"userid",
"=",
"ctype",
"+",
"'_%d'",
"%",
"int",
"(",
"parts",
"[",
"1",
"]",
")",
"if",
"parts",
"[",
"0",
"]",
"==",
"\"Impostor\"",
":",
"sessionid",
"=",
"3",
"elif",
"parts",
"[",
"0",
"]",
"==",
"\"Genuine\"",
"and",
"shotid",
"<=",
"6",
":",
"sessionid",
"=",
"1",
"elif",
"parts",
"[",
"0",
"]",
"==",
"\"Genuine\"",
"and",
"shotid",
">",
"6",
":",
"sessionid",
"=",
"2",
"shotid",
"=",
"shotid",
"-",
"6",
"add_file",
"(",
"session",
",",
"basename",
",",
"userid",
",",
"shotid",
",",
"sessionid",
")"
] |
Add files to the ATVS Keystroke database.
|
[
"Add",
"files",
"to",
"the",
"ATVS",
"Keystroke",
"database",
"."
] |
b7358a73e21757b43334df7c89ba057b377ca704
|
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/create.py#L39-L62
|
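Filenames are parsed as '<type>_<userid>_<shotid><ext>'. Note that string.split(basename, "_") is the Python 2 idiom (the function is gone from the string module in Python 3) and that db_file_extension is a module-level constant not shown in this record. A Python 3 sketch of the same parsing and session mapping, with the extension as an assumed stand-in:

import os

def parse_basename(filename, expected_ext='.txt'):  # extension is an assumption
    basename, ext = os.path.splitext(filename)
    if ext != expected_ext:
        return None
    ctype, user, shot = basename.split('_')  # str.split replaces string.split
    shotid = int(shot)
    userid = '%s_%d' % (ctype, int(user))
    if ctype == 'Impostor':
        sessionid = 3
    elif shotid <= 6:          # Genuine, first session
        sessionid = 1
    else:                      # Genuine, second session: renumber shots 7-12
        sessionid, shotid = 2, shotid - 6
    return userid, shotid, sessionid

print(parse_basename('Genuine_7_9.txt'))  # ('Genuine_7', 3, 2)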
242,677
|
mgbarrero/xbob.db.atvskeystroke
|
xbob/db/atvskeystroke/create.py
|
create
|
def create(args):
"""Creates or re-creates this database"""
from bob.db.utils import session_try_nolock
dbfile = args.files[0]
if args.recreate:
if args.verbose and os.path.exists(dbfile):
print('unlinking %s...' % dbfile)
if os.path.exists(dbfile): os.unlink(dbfile)
if not os.path.exists(os.path.dirname(dbfile)):
os.makedirs(os.path.dirname(dbfile))
# the real work...
create_tables(args)
s = session_try_nolock(args.type, dbfile, echo=(args.verbose > 2))
add_clients(s, args.verbose)
add_files(s, args.imagedir, args.verbose)
add_protocols(s, args.verbose)
s.commit()
s.close()
|
python
|
def create(args):
"""Creates or re-creates this database"""
from bob.db.utils import session_try_nolock
dbfile = args.files[0]
if args.recreate:
if args.verbose and os.path.exists(dbfile):
print('unlinking %s...' % dbfile)
if os.path.exists(dbfile): os.unlink(dbfile)
if not os.path.exists(os.path.dirname(dbfile)):
os.makedirs(os.path.dirname(dbfile))
# the real work...
create_tables(args)
s = session_try_nolock(args.type, dbfile, echo=(args.verbose > 2))
add_clients(s, args.verbose)
add_files(s, args.imagedir, args.verbose)
add_protocols(s, args.verbose)
s.commit()
s.close()
|
[
"def",
"create",
"(",
"args",
")",
":",
"from",
"bob",
".",
"db",
".",
"utils",
"import",
"session_try_nolock",
"dbfile",
"=",
"args",
".",
"files",
"[",
"0",
"]",
"if",
"args",
".",
"recreate",
":",
"if",
"args",
".",
"verbose",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"dbfile",
")",
":",
"print",
"(",
"'unlinking %s...'",
"%",
"dbfile",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dbfile",
")",
":",
"os",
".",
"unlink",
"(",
"dbfile",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"dbfile",
")",
")",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"dbfile",
")",
")",
"# the real work...",
"create_tables",
"(",
"args",
")",
"s",
"=",
"session_try_nolock",
"(",
"args",
".",
"type",
",",
"dbfile",
",",
"echo",
"=",
"(",
"args",
".",
"verbose",
">",
"2",
")",
")",
"add_clients",
"(",
"s",
",",
"args",
".",
"verbose",
")",
"add_files",
"(",
"s",
",",
"args",
".",
"imagedir",
",",
"args",
".",
"verbose",
")",
"add_protocols",
"(",
"s",
",",
"args",
".",
"verbose",
")",
"s",
".",
"commit",
"(",
")",
"s",
".",
"close",
"(",
")"
] |
Creates or re-creates this database
|
[
"Creates",
"or",
"re",
"-",
"creates",
"this",
"database"
] |
b7358a73e21757b43334df7c89ba057b377ca704
|
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/create.py#L122-L144
|
242,678
|
mgbarrero/xbob.db.atvskeystroke
|
xbob/db/atvskeystroke/create.py
|
add_command
|
def add_command(subparsers):
"""Add specific subcommands that the action "create" can use"""
parser = subparsers.add_parser('create', help=create.__doc__)
parser.add_argument('-R', '--recreate', action='store_true', help="If set, I'll first erase the current database")
parser.add_argument('-v', '--verbose', action='count', help="Do SQL operations in a verbose way?")
parser.add_argument('-D', '--imagedir', metavar='DIR', default='/home/bob/BTAS_Keystroke_files_SingleFile', help="Change the relative path to the directory containing the images of the ATVS Keystroke database.")
parser.set_defaults(func=create)
|
python
|
def add_command(subparsers):
"""Add specific subcommands that the action "create" can use"""
parser = subparsers.add_parser('create', help=create.__doc__)
parser.add_argument('-R', '--recreate', action='store_true', help="If set, I'll first erase the current database")
parser.add_argument('-v', '--verbose', action='count', help="Do SQL operations in a verbose way?")
parser.add_argument('-D', '--imagedir', metavar='DIR', default='/home/bob/BTAS_Keystroke_files_SingleFile', help="Change the relative path to the directory containing the images of the ATVS Keystroke database.")
parser.set_defaults(func=create)
|
[
"def",
"add_command",
"(",
"subparsers",
")",
":",
"parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"'create'",
",",
"help",
"=",
"create",
".",
"__doc__",
")",
"parser",
".",
"add_argument",
"(",
"'-R'",
",",
"'--recreate'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"If set, I'll first erase the current database\"",
")",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--verbose'",
",",
"action",
"=",
"'count'",
",",
"help",
"=",
"\"Do SQL operations in a verbose way?\"",
")",
"parser",
".",
"add_argument",
"(",
"'-D'",
",",
"'--imagedir'",
",",
"metavar",
"=",
"'DIR'",
",",
"default",
"=",
"'/home/bob/BTAS_Keystroke_files_SingleFile'",
",",
"help",
"=",
"\"Change the relative path to the directory containing the images of the ATVS Keystroke database.\"",
")",
"parser",
".",
"set_defaults",
"(",
"func",
"=",
"create",
")"
] |
Add specific subcommands that the action "create" can use
|
[
"Add",
"specific",
"subcommands",
"that",
"the",
"action",
"create",
"can",
"use"
] |
b7358a73e21757b43334df7c89ba057b377ca704
|
https://github.com/mgbarrero/xbob.db.atvskeystroke/blob/b7358a73e21757b43334df7c89ba057b377ca704/xbob/db/atvskeystroke/create.py#L146-L155
|
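This is the standard argparse sub-command pattern: register one parser per action, bind the handler with set_defaults(func=...), and let the caller dispatch through args.func(args). A minimal self-contained sketch:

import argparse

def create(args):
    """Creates or re-creates this database"""
    print('recreate=%s' % args.recreate)

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
sub = subparsers.add_parser('create', help=create.__doc__)
sub.add_argument('-R', '--recreate', action='store_true')
sub.set_defaults(func=create)   # bind the handler to this sub-command

args = parser.parse_args(['create', '-R'])
args.func(args)                 # dispatch -> prints recreate=True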
242,679
|
litters/shrew
|
shrew/cli.py
|
CLI.__parse_args
|
def __parse_args(self, accept_unrecognized_args=False):
""" Invoke the argument parser. """
# If the user provided a description, use it. Otherwise grab the doc string.
if self.description:
self.argparser.description = self.description
elif getattr(sys.modules['__main__'], '__doc__', None):
self.argparser.description = getattr(sys.modules['__main__'], '__doc__')
else:
self.argparser.description = 'No documentation defined. Please add a doc string to %s' % sys.modules['__main__'].__file__
self.argparser.epilog = self.epilog
# Only if there aren't any other command line arguments.
if len(sys.argv) == 1 and self.argument_defaults:
self.argparser.set_defaults(**self.argument_defaults)
if accept_unrecognized_args:
self.args, self.unrecognized_args = self.argparser.parse_known_args()
else:
self.args = self.argparser.parse_args()
|
python
|
def __parse_args(self, accept_unrecognized_args=False):
""" Invoke the argument parser. """
# If the user provided a description, use it. Otherwise grab the doc string.
if self.description:
self.argparser.description = self.description
elif getattr(sys.modules['__main__'], '__doc__', None):
self.argparser.description = getattr(sys.modules['__main__'], '__doc__')
else:
self.argparser.description = 'No documentation defined. Please add a doc string to %s' % sys.modules['__main__'].__file__
self.argparser.epilog = self.epilog
# Only if there aren't any other command line arguments.
if len(sys.argv) == 1 and self.argument_defaults:
self.argparser.set_defaults(**self.argument_defaults)
if accept_unrecognized_args:
self.args, self.unrecognized_args = self.argparser.parse_known_args()
else:
self.args = self.argparser.parse_args()
|
[
"def",
"__parse_args",
"(",
"self",
",",
"accept_unrecognized_args",
"=",
"False",
")",
":",
"# If the user provided a description, use it. Otherwise grab the doc string.",
"if",
"self",
".",
"description",
":",
"self",
".",
"argparser",
".",
"description",
"=",
"self",
".",
"description",
"elif",
"getattr",
"(",
"sys",
".",
"modules",
"[",
"'__main__'",
"]",
",",
"'__doc__'",
",",
"None",
")",
":",
"self",
".",
"argparser",
".",
"description",
"=",
"getattr",
"(",
"sys",
".",
"modules",
"[",
"'__main__'",
"]",
",",
"'__doc__'",
")",
"else",
":",
"self",
".",
"argparser",
".",
"description",
"=",
"'No documentation defined. Please add a doc string to %s'",
"%",
"sys",
".",
"modules",
"[",
"'__main__'",
"]",
".",
"__file__",
"self",
".",
"argparser",
".",
"epilog",
"=",
"self",
".",
"epilog",
"# Only if there aren't any other command line arguments.",
"if",
"len",
"(",
"sys",
".",
"argv",
")",
"==",
"1",
"and",
"self",
".",
"argument_defaults",
":",
"self",
".",
"argparser",
".",
"set_defaults",
"(",
"*",
"*",
"self",
".",
"argument_defaults",
")",
"if",
"accept_unrecognized_args",
":",
"self",
".",
"args",
",",
"self",
".",
"unrecognized_args",
"=",
"self",
".",
"argparser",
".",
"parse_known_args",
"(",
")",
"else",
":",
"self",
".",
"args",
"=",
"self",
".",
"argparser",
".",
"parse_args",
"(",
")"
] |
Invoke the argument parser.
|
[
"Invoke",
"the",
"argument",
"parser",
"."
] |
ed4b1879321d858d6bc884d14fea7557372a4d41
|
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/cli.py#L245-L265
|
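The description resolution above is a three-step fallback: an explicit description wins, then the __main__ module's doc string, then a nag message naming the file. The same chain isolated as a sketch (names mine):

import sys

def pick_description(explicit=None):
    main = sys.modules['__main__']
    if explicit:
        return explicit                  # caller-supplied text wins
    if getattr(main, '__doc__', None):
        return main.__doc__              # fall back to the script's docstring
    return ('No documentation defined. '
            'Please add a doc string to %s' % main.__file__)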
242,680
|
litters/shrew
|
shrew/cli.py
|
CLI.__parse_config
|
def __parse_config(self):
""" Invoke the config file parser. """
if self.should_parse_config and (self.args.config or self.config_file):
self.config = ConfigParser.SafeConfigParser()
self.config.read(self.args.config or self.config_file)
|
python
|
def __parse_config(self):
""" Invoke the config file parser. """
if self.should_parse_config and (self.args.config or self.config_file):
self.config = ConfigParser.SafeConfigParser()
self.config.read(self.args.config or self.config_file)
|
[
"def",
"__parse_config",
"(",
"self",
")",
":",
"if",
"self",
".",
"should_parse_config",
"and",
"(",
"self",
".",
"args",
".",
"config",
"or",
"self",
".",
"config_file",
")",
":",
"self",
".",
"config",
"=",
"ConfigParser",
".",
"SafeConfigParser",
"(",
")",
"self",
".",
"config",
".",
"read",
"(",
"self",
".",
"args",
".",
"config",
"or",
"self",
".",
"config_file",
")"
] |
Invoke the config file parser.
|
[
"Invoke",
"the",
"config",
"file",
"parser",
"."
] |
ed4b1879321d858d6bc884d14fea7557372a4d41
|
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/cli.py#L267-L272
|
242,681
|
litters/shrew
|
shrew/cli.py
|
CLI.__process_username_password
|
def __process_username_password(self):
""" If indicated, process the username and password """
if self.use_username_password_store is not None:
if self.args.clear_store:
with load_config(sections=AUTH_SECTIONS) as config:
config.remove_option(AUTH_SECTION, 'username')
if not self.args.username:
self.args.username = get_username(use_store=self.use_username_password_store)
if self.args.clear_store:
remove_password(AUTH_SECTION, username=self.args.username)
if not self.args.password:
self.args.password = get_password(AUTH_SECTION, username=self.args.username)
if self.use_username_password_store:
save_password(AUTH_SECTION, self.args.password, self.args.username)
|
python
|
def __process_username_password(self):
""" If indicated, process the username and password """
if self.use_username_password_store is not None:
if self.args.clear_store:
with load_config(sections=AUTH_SECTIONS) as config:
config.remove_option(AUTH_SECTION, 'username')
if not self.args.username:
self.args.username = get_username(use_store=self.use_username_password_store)
if self.args.clear_store:
remove_password(AUTH_SECTION, username=self.args.username)
if not self.args.password:
self.args.password = get_password(AUTH_SECTION, username=self.args.username)
if self.use_username_password_store:
save_password(AUTH_SECTION, self.args.password, self.args.username)
|
[
"def",
"__process_username_password",
"(",
"self",
")",
":",
"if",
"self",
".",
"use_username_password_store",
"is",
"not",
"None",
":",
"if",
"self",
".",
"args",
".",
"clear_store",
":",
"with",
"load_config",
"(",
"sections",
"=",
"AUTH_SECTIONS",
")",
"as",
"config",
":",
"config",
".",
"remove_option",
"(",
"AUTH_SECTION",
",",
"'username'",
")",
"if",
"not",
"self",
".",
"args",
".",
"username",
":",
"self",
".",
"args",
".",
"username",
"=",
"get_username",
"(",
"use_store",
"=",
"self",
".",
"use_username_password_store",
")",
"if",
"self",
".",
"args",
".",
"clear_store",
":",
"remove_password",
"(",
"AUTH_SECTION",
",",
"username",
"=",
"self",
".",
"args",
".",
"username",
")",
"if",
"not",
"self",
".",
"args",
".",
"password",
":",
"self",
".",
"args",
".",
"password",
"=",
"get_password",
"(",
"AUTH_SECTION",
",",
"username",
"=",
"self",
".",
"args",
".",
"username",
")",
"if",
"self",
".",
"use_username_password_store",
":",
"save_password",
"(",
"AUTH_SECTION",
",",
"self",
".",
"args",
".",
"password",
",",
"self",
".",
"args",
".",
"username",
")"
] |
If indicated, process the username and password
|
[
"If",
"indicated",
"process",
"the",
"username",
"and",
"password"
] |
ed4b1879321d858d6bc884d14fea7557372a4d41
|
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/cli.py#L274-L289
|
242,682
|
litters/shrew
|
shrew/cli.py
|
CLI.__finish_initializing
|
def __finish_initializing(self):
""" Handle any initialization after arguments & config has been parsed. """
if self.args.debug or self.args.trace:
# Set the console (StreamHandler) to allow debug statements.
if self.args.debug:
self.console.setLevel(logging.DEBUG)
self.console.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
# Set the global level to debug.
if self.args.debug:
self.log.setLevel(logging.DEBUG)
if self.args.log or self.log_file:
# Allow the user to override the default log file handler.
try:
self.log_file_handler = sys.modules['__main__'].log_file_handler(self.args.log or self.log_file)
except Exception:
self.log_file_handler = logging.FileHandler(self.args.log or self.log_file)
self.log_file_handler.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
self.log_file_handler.setLevel(logging.DEBUG)
self.log.addHandler(self.log_file_handler)
# Allow cli.log, args & self to be accessed from __main__
if not hasattr(sys.modules['__main__'], 'log'):
sys.modules['__main__'].log = self.log
if not hasattr(sys.modules['__main__'], 'cli'):
sys.modules['__main__'].cli = self
if not hasattr(sys.modules['__main__'], 'args'):
sys.modules['__main__'].args = self.args
|
python
|
def __finish_initializing(self):
""" Handle any initialization after arguments & config has been parsed. """
if self.args.debug or self.args.trace:
# Set the console (StreamHandler) to allow debug statements.
if self.args.debug:
self.console.setLevel(logging.DEBUG)
self.console.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
# Set the global level to debug.
if self.args.debug:
self.log.setLevel(logging.DEBUG)
if self.args.log or self.log_file:
# Allow the user to override the default log file handler.
try:
self.log_file_handler = sys.modules['__main__'].log_file_handler(self.args.log or self.log_file)
except Exception:
self.log_file_handler = logging.FileHandler(self.args.log or self.log_file)
self.log_file_handler.setFormatter(logging.Formatter('[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
self.log_file_handler.setLevel(logging.DEBUG)
self.log.addHandler(self.log_file_handler)
# Allow cli.log, args & self to be accessed from __main__
if not hasattr(sys.modules['__main__'], 'log'):
sys.modules['__main__'].log = self.log
if not hasattr(sys.modules['__main__'], 'cli'):
sys.modules['__main__'].cli = self
if not hasattr(sys.modules['__main__'], 'args'):
sys.modules['__main__'].args = self.args
|
[
"def",
"__finish_initializing",
"(",
"self",
")",
":",
"if",
"self",
".",
"args",
".",
"debug",
"or",
"self",
".",
"args",
".",
"trace",
":",
"# Set the console (StreamHandler) to allow debug statements.",
"if",
"self",
".",
"args",
".",
"debug",
":",
"self",
".",
"console",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"self",
".",
"console",
".",
"setFormatter",
"(",
"logging",
".",
"Formatter",
"(",
"'[%(levelname)s] %(asctime)s %(name)s - %(message)s'",
")",
")",
"# Set the global level to debug.",
"if",
"self",
".",
"args",
".",
"debug",
":",
"self",
".",
"log",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"if",
"self",
".",
"args",
".",
"log",
"or",
"self",
".",
"log_file",
":",
"# Allow the user to override the default log file handler.",
"try",
":",
"self",
".",
"log_file_handler",
"=",
"sys",
".",
"modules",
"[",
"'__main__'",
"]",
".",
"log_file_handler",
"(",
"self",
".",
"args",
".",
"log",
"or",
"self",
".",
"log_file",
")",
"except",
"Exception",
":",
"self",
".",
"log_file_handler",
"=",
"logging",
".",
"FileHandler",
"(",
"self",
".",
"args",
".",
"log",
"or",
"self",
".",
"log_file",
")",
"self",
".",
"log_file_handler",
".",
"setFormatter",
"(",
"logging",
".",
"Formatter",
"(",
"'[%(levelname)s] %(asctime)s %(name)s - %(message)s'",
")",
")",
"self",
".",
"log_file_handler",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"self",
".",
"log",
".",
"addHandler",
"(",
"self",
".",
"log_file_handler",
")",
"# Allow cli.log, args & self to be accessed from __main__",
"if",
"not",
"hasattr",
"(",
"sys",
".",
"modules",
"[",
"'__main__'",
"]",
",",
"'log'",
")",
":",
"sys",
".",
"modules",
"[",
"'__main__'",
"]",
".",
"log",
"=",
"self",
".",
"log",
"if",
"not",
"hasattr",
"(",
"sys",
".",
"modules",
"[",
"'__main__'",
"]",
",",
"'cli'",
")",
":",
"sys",
".",
"modules",
"[",
"'__main__'",
"]",
".",
"cli",
"=",
"self",
"if",
"not",
"hasattr",
"(",
"sys",
".",
"modules",
"[",
"'__main__'",
"]",
",",
"'args'",
")",
":",
"sys",
".",
"modules",
"[",
"'__main__'",
"]",
".",
"args",
"=",
"self",
".",
"args"
] |
Handle any initialization after arguments & config have been parsed.
|
[
"Handle",
"any",
"initialization",
"after",
"arguments",
"&",
"config",
"has",
"been",
"parsed",
"."
] |
ed4b1879321d858d6bc884d14fea7557372a4d41
|
https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/cli.py#L291-L327
|
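The log-file branch contains an override hook: if __main__ defines a log_file_handler factory it is tried first, and any failure falls through to a plain logging.FileHandler. The hook isolated as a sketch:

import logging
import sys

def make_log_handler(path):
    try:
        # __main__ may supply its own factory; any failure falls through
        handler = sys.modules['__main__'].log_file_handler(path)
    except Exception:
        handler = logging.FileHandler(path)
    handler.setFormatter(logging.Formatter(
        '[%(levelname)s] %(asctime)s %(name)s - %(message)s'))
    handler.setLevel(logging.DEBUG)
    return handler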
242,683
|
rpcope1/HackerNewsAPI-Py
|
HackerNewsAPI/API.py
|
HackerNewsAPI.get_item
|
def get_item(self, item_number, raw=False):
"""
Get a dictionary or object with info about the given item number from the Hacker News API.
Item can be a poll, story, comment or possibly other entry.
    Will raise a requests.HTTPError if we got a non-200 response back. Will raise a ValueError
    if an item_number that cannot be converted to int was passed in, or the server has no
information for that item number.
(Possible) response parameters:
"id" -> The item's unique id. Required.
"deleted" -> true if the item is deleted.
"type" -> The type of item. One of "job", "story", "comment", "poll", or "pollopt".
"by" -> The username of the item's author.
"time" -> Creation date of the item, in Unix Time.
"text" -> The comment, Ask HN, or poll text. HTML.
"dead" -> true if the item is dead.
"parent" -> The item's parent. For comments, either another comment or the relevant story.
For pollopts, the relevant poll.
"kids" -> The ids of the item's comments, in ranked display order.
"url" -> The URL of the story.
"score" -> The story's score, or the votes for a pollopt.
"title" -> The title of the story or poll.
"parts" -> A list of related pollopts, in display order.
:param item_number: an integer number for the HN item requested
    :param raw: (optional): If True, return the raw decoded JSON dict; if False, return a nice object
        with keywords as attributes. Default is False.
:return: A dictionary with relevant info about the item, if successful.
"""
if not isinstance(item_number, int):
item_number = int(item_number)
suburl = "v0/item/{}.json".format(item_number)
try:
item_data = self._make_request(suburl)
except requests.HTTPError as e:
hn_logger.exception('Faulted on item request for item {}, with status {}'.format(item_number, e.errno))
raise e
if not item_data:
raise ValueError('Item id {} not found!'.format(item_number))
return item_data if raw else HackerNewsItem(**item_data)
|
python
|
def get_item(self, item_number, raw=False):
"""
Get a dictionary or object with info about the given item number from the Hacker News API.
Item can be a poll, story, comment or possibly other entry.
    Will raise a requests.HTTPError if we got a non-200 response back. Will raise a ValueError
    if an item_number that cannot be converted to int was passed in, or the server has no
information for that item number.
(Possible) response parameters:
"id" -> The item's unique id. Required.
"deleted" -> true if the item is deleted.
"type" -> The type of item. One of "job", "story", "comment", "poll", or "pollopt".
"by" -> The username of the item's author.
"time" -> Creation date of the item, in Unix Time.
"text" -> The comment, Ask HN, or poll text. HTML.
"dead" -> true if the item is dead.
"parent" -> The item's parent. For comments, either another comment or the relevant story.
For pollopts, the relevant poll.
"kids" -> The ids of the item's comments, in ranked display order.
"url" -> The URL of the story.
"score" -> The story's score, or the votes for a pollopt.
"title" -> The title of the story or poll.
"parts" -> A list of related pollopts, in display order.
:param item_number: an integer number for the HN item requested
    :param raw: (optional): If True, return the raw decoded JSON dict; if False, return a nice object
        with keywords as attributes. Default is False.
:return: A dictionary with relevant info about the item, if successful.
"""
if not isinstance(item_number, int):
item_number = int(item_number)
suburl = "v0/item/{}.json".format(item_number)
try:
item_data = self._make_request(suburl)
except requests.HTTPError as e:
hn_logger.exception('Faulted on item request for item {}, with status {}'.format(item_number, e.errno))
raise e
if not item_data:
raise ValueError('Item id {} not found!'.format(item_number))
return item_data if raw else HackerNewsItem(**item_data)
|
[
"def",
"get_item",
"(",
"self",
",",
"item_number",
",",
"raw",
"=",
"False",
")",
":",
"if",
"not",
"isinstance",
"(",
"item_number",
",",
"int",
")",
":",
"item_number",
"=",
"int",
"(",
"item_number",
")",
"suburl",
"=",
"\"v0/item/{}.json\"",
".",
"format",
"(",
"item_number",
")",
"try",
":",
"item_data",
"=",
"self",
".",
"_make_request",
"(",
"suburl",
")",
"except",
"requests",
".",
"HTTPError",
"as",
"e",
":",
"hn_logger",
".",
"exception",
"(",
"'Faulted on item request for item {}, with status {}'",
".",
"format",
"(",
"item_number",
",",
"e",
".",
"errno",
")",
")",
"raise",
"e",
"if",
"not",
"item_data",
":",
"raise",
"ValueError",
"(",
"'Item id {} not found!'",
".",
"format",
"(",
"item_number",
")",
")",
"return",
"item_data",
"if",
"raw",
"else",
"HackerNewsItem",
"(",
"*",
"*",
"item_data",
")"
] |
Get a dictionary or object with info about the given item number from the Hacker News API.
Item can be a poll, story, comment or possibly other entry.
Will raise a requests.HTTPError if we got a non-200 response back. Will raise a ValueError
if an item_number that cannot be converted to int was passed in, or the server has no
information for that item number.
(Possible) response parameters:
"id" -> The item's unique id. Required.
"deleted" -> true if the item is deleted.
"type" -> The type of item. One of "job", "story", "comment", "poll", or "pollopt".
"by" -> The username of the item's author.
"time" -> Creation date of the item, in Unix Time.
"text" -> The comment, Ask HN, or poll text. HTML.
"dead" -> true if the item is dead.
"parent" -> The item's parent. For comments, either another comment or the relevant story.
For pollopts, the relevant poll.
"kids" -> The ids of the item's comments, in ranked display order.
"url" -> The URL of the story.
"score" -> The story's score, or the votes for a pollopt.
"title" -> The title of the story or poll.
"parts" -> A list of related pollopts, in display order.
:param item_number: an integer number for the HN item requested
:param raw: (optional): If True, return the raw decoded JSON dict; if False, return a nice object
    with keywords as attributes. Default is False.
:return: A dictionary with relevant info about the item, if successful.
|
[
"Get",
"a",
"dictionary",
"or",
"object",
"with",
"info",
"about",
"the",
"given",
"item",
"number",
"from",
"the",
"Hacker",
"News",
"API",
".",
"Item",
"can",
"be",
"a",
"poll",
"story",
"comment",
"or",
"possibly",
"other",
"entry",
".",
"Will",
"raise",
"an",
"requests",
".",
"HTTPError",
"if",
"we",
"got",
"a",
"non",
"-",
"200",
"response",
"back",
".",
"Will",
"raise",
"a",
"ValueError",
"if",
"a",
"item_number",
"that",
"can",
"not",
"be",
"converted",
"to",
"int",
"was",
"passed",
"in",
"or",
"the",
"server",
"has",
"no",
"information",
"for",
"that",
"item",
"number",
"."
] |
b231aed24ec59fc32af320bbef27d48cc4b69914
|
https://github.com/rpcope1/HackerNewsAPI-Py/blob/b231aed24ec59fc32af320bbef27d48cc4b69914/HackerNewsAPI/API.py#L39-L78
|
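A hedged usage sketch against the public endpoint. It assumes HackerNewsAPI() needs no required constructor arguments and that HackerNewsItem exposes the JSON keys as attributes, which follows from the HackerNewsItem(**item_data) call above; item 8863 is a well-known story id:

from HackerNewsAPI.API import HackerNewsAPI  # module path taken from this record

hn = HackerNewsAPI()
raw = hn.get_item(8863, raw=True)   # plain dict decoded from JSON
item = hn.get_item(8863)            # wrapped HackerNewsItem object
print(raw['title'], item.score)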
242,684
|
rpcope1/HackerNewsAPI-Py
|
HackerNewsAPI/API.py
|
HackerNewsAPI.get_user
|
def get_user(self, user_name, raw=False):
"""
Get a dictionary or object with info about the given user from the Hacker News API.
    Will raise a requests.HTTPError if we got a non-200 response back.
    Response parameters:
    "id" -> The user's unique username. Case-sensitive. Required.
"delay" -> Delay in minutes between a comment's creation and its visibility to other users.
"created" -> Creation date of the user, in Unix Time.
"karma" -> The user's karma.
"about" -> The user's optional self-description. HTML.
"submitted" -> List of the user's stories, polls and comments.
:param user_name: the relevant user's name
    :param raw: (optional): If True, return the raw decoded JSON dict; if False, return a nice object
        with keywords as attributes. Default is False.
:return: A dictionary with relevant info about the user, if successful.
"""
suburl = "v0/user/{}.json".format(user_name)
try:
user_data = self._make_request(suburl)
except requests.HTTPError as e:
hn_logger.exception('Faulted on item request for user {}, with status {}'.format(user_name, e.errno))
raise e
if not user_data:
raise ValueError('User name {} not found, or no data!'.format(user_name))
return user_data if raw else HackerNewsUpdates(**user_data)
|
python
|
def get_user(self, user_name, raw=False):
"""
Get a dictionary or object with info about the given user from the Hacker News API.
    Will raise a requests.HTTPError if we got a non-200 response back.
    Response parameters:
    "id" -> The user's unique username. Case-sensitive. Required.
"delay" -> Delay in minutes between a comment's creation and its visibility to other users.
"created" -> Creation date of the user, in Unix Time.
"karma" -> The user's karma.
"about" -> The user's optional self-description. HTML.
"submitted" -> List of the user's stories, polls and comments.
:param user_name: the relevant user's name
    :param raw: (optional): If True, return the raw decoded JSON dict; if False, return a nice object
        with keywords as attributes. Default is False.
:return: A dictionary with relevant info about the user, if successful.
"""
suburl = "v0/user/{}.json".format(user_name)
try:
user_data = self._make_request(suburl)
except requests.HTTPError as e:
hn_logger.exception('Faulted on item request for user {}, with status {}'.format(user_name, e.errno))
raise e
if not user_data:
raise ValueError('User name {} not found, or no data!'.format(user_name))
return user_data if raw else HackerNewsUpdates(**user_data)
|
[
"def",
"get_user",
"(",
"self",
",",
"user_name",
",",
"raw",
"=",
"False",
")",
":",
"suburl",
"=",
"\"v0/user/{}.json\"",
".",
"format",
"(",
"user_name",
")",
"try",
":",
"user_data",
"=",
"self",
".",
"_make_request",
"(",
"suburl",
")",
"except",
"requests",
".",
"HTTPError",
"as",
"e",
":",
"hn_logger",
".",
"exception",
"(",
"'Faulted on item request for user {}, with status {}'",
".",
"format",
"(",
"user_name",
",",
"e",
".",
"errno",
")",
")",
"raise",
"e",
"if",
"not",
"user_data",
":",
"raise",
"ValueError",
"(",
"'User name {} not found, or no data!'",
".",
"format",
"(",
"user_name",
")",
")",
"return",
"user_data",
"if",
"raw",
"else",
"HackerNewsUpdates",
"(",
"*",
"*",
"user_data",
")"
] |
Get a dictionary or object with info about the given user from the Hacker News API.
Will raise a requests.HTTPError if we got a non-200 response back.
Response parameters:
"id" -> The user's unique username. Case-sensitive. Required.
"delay" -> Delay in minutes between a comment's creation and its visibility to other users.
"created" -> Creation date of the user, in Unix Time.
"karma" -> The user's karma.
"about" -> The user's optional self-description. HTML.
"submitted" -> List of the user's stories, polls and comments.
:param user_name: the relevant user's name
:param raw: (optional): If True, return the raw decoded JSON dict; if False, return a nice object
    with keywords as attributes. Default is False.
:return: A dictionary with relevant info about the user, if successful.
|
[
"Get",
"a",
"dictionary",
"or",
"object",
"with",
"info",
"about",
"the",
"given",
"user",
"from",
"the",
"Hacker",
"News",
"API",
".",
"Will",
"raise",
"an",
"requests",
".",
"HTTPError",
"if",
"we",
"got",
"a",
"non",
"-",
"200",
"response",
"back",
"."
] |
b231aed24ec59fc32af320bbef27d48cc4b69914
|
https://github.com/rpcope1/HackerNewsAPI-Py/blob/b231aed24ec59fc32af320bbef27d48cc4b69914/HackerNewsAPI/API.py#L80-L106
|
242,685
|
rpcope1/HackerNewsAPI-Py
|
HackerNewsAPI/API.py
|
HackerNewsAPI.get_recent_updates
|
def get_recent_updates(self, raw=True):
"""
Get the most recent updates on Hacker News
Response dictionary parameters:
"items" -> A list of the most recently update items by item number.
"profiles" -> A list of most recently updated user profiles by user name.
:param raw: (optional): If true, return the raw dictionary, if False, return a nice object with attrs for
keywords. Default is True.
:return: A dictionary with relevant info about recent updates.
"""
suburl = "v0/updates.json"
try:
updates_data = self._make_request(suburl)
except requests.HTTPError as e:
hn_logger.exception('Faulted on get max item, with status {}'.format(e.errno))
raise e
return updates_data if raw else HackerNewsUpdates(**updates_data)
|
python
|
def get_recent_updates(self, raw=True):
"""
Get the most recent updates on Hacker News
Response dictionary parameters:
"items" -> A list of the most recently update items by item number.
"profiles" -> A list of most recently updated user profiles by user name.
:param raw: (optional): If True, return the raw dictionary; if False, return a nice object with attrs for
keywords. Default is True.
:return: A dictionary with relevant info about recent updates.
"""
suburl = "v0/updates.json"
try:
updates_data = self._make_request(suburl)
except requests.HTTPError as e:
hn_logger.exception('Faulted on get max item, with status {}'.format(e.errno))
raise e
return updates_data if raw else HackerNewsUpdates(**updates_data)
|
[
"def",
"get_recent_updates",
"(",
"self",
",",
"raw",
"=",
"True",
")",
":",
"suburl",
"=",
"\"v0/updates.json\"",
"try",
":",
"updates_data",
"=",
"self",
".",
"_make_request",
"(",
"suburl",
")",
"except",
"requests",
".",
"HTTPError",
"as",
"e",
":",
"hn_logger",
".",
"exception",
"(",
"'Faulted on get max item, with status {}'",
".",
"format",
"(",
"e",
".",
"errno",
")",
")",
"raise",
"e",
"return",
"updates_data",
"if",
"raw",
"else",
"HackerNewsUpdates",
"(",
"*",
"*",
"updates_data",
")"
] |
Get the most recent updates on Hacker News
Response dictionary parameters:
"items" -> A list of the most recently update items by item number.
"profiles" -> A list of most recently updated user profiles by user name.
:param raw: (optional): If True, return the raw dictionary; if False, return a nice object with attrs for
keywords. Default is True.
:return: A dictionary with relevant info about recent updates.
|
[
"Get",
"the",
"most",
"recent",
"updates",
"on",
"Hacker",
"News"
] |
b231aed24ec59fc32af320bbef27d48cc4b69914
|
https://github.com/rpcope1/HackerNewsAPI-Py/blob/b231aed24ec59fc32af320bbef27d48cc4b69914/HackerNewsAPI/API.py#L135-L153
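A minimal sketch of the record above, under the same assumed client setup as the get_user sketch; raw defaults to True, so the result is the plain dict with "items" and "profiles" keys.

# Hypothetical usage; reuses the assumed HackerNewsAPI() client from above.
updates = hn.get_recent_updates()
print(updates['items'][:3])     # most recently changed item ids
print(updates['profiles'][:3])  # most recently changed user names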
|
242,686
|
mattupstate/cubric
|
cubric/providers/amazon.py
|
create_server
|
def create_server():
"""Creates an EC2 Server"""
try:
import boto
except ImportError:
sys.exit("boto library required for creating servers with Amazon.")
print(green("Creating EC2 server"))
conn = boto.connect_ec2(
get_or_prompt('ec2_key', 'API Key'),
get_or_prompt('ec2_secret', 'API Secret'))
reservation = conn.run_instances(
get_or_prompt(
'ec2_ami', 'AMI ID', 'ami-fd589594'),
instance_type=get_or_prompt(
'ec2_instancetype', 'Instance Type', 't1.micro'),
key_name=get_or_prompt(
'ec2_keypair', 'Key Pair'),
security_groups=get_or_prompt_list(
'ec2_secgroups', 'Security Groups'))
instance = reservation.instances[0]
time.sleep(3)
tag = get_or_prompt('ec2_tag', 'Instance Tag (blank for none)', '').strip()
if len(tag) > 0:
conn.create_tags([instance.id], {"Name": tag})
while instance.state != u'running':
print(yellow("Instance state: %s" % instance.state))
time.sleep(10)
instance.update()
print(green("Instance state: %s" % instance.state))
print(green("Public dns: %s" % instance.public_dns_name))
print(green("Waiting 30 seconds for server to boot"))
time.sleep(30)
return instance.public_dns_name
|
python
|
def create_server():
"""Creates an EC2 Server"""
try:
import boto
except ImportError:
sys.exit("boto library required for creating servers with Amazon.")
print(green("Creating EC2 server"))
conn = boto.connect_ec2(
get_or_prompt('ec2_key', 'API Key'),
get_or_prompt('ec2_secret', 'API Secret'))
reservation = conn.run_instances(
get_or_prompt(
'ec2_ami', 'AMI ID', 'ami-fd589594'),
instance_type=get_or_prompt(
'ec2_instancetype', 'Instance Type', 't1.micro'),
key_name=get_or_prompt(
'ec2_keypair', 'Key Pair'),
security_groups=get_or_prompt_list(
'ec2_secgroups', 'Security Groups'))
instance = reservation.instances[0]
time.sleep(3)
tag = get_or_prompt('ec2_tag', 'Instance Tag (blank for none)', '').strip()
if len(tag) > 0:
conn.create_tags([instance.id], {"Name": tag})
while instance.state != u'running':
print(yellow("Instance state: %s" % instance.state))
time.sleep(10)
instance.update()
print(green("Instance state: %s" % instance.state))
print(green("Public dns: %s" % instance.public_dns_name))
print(green("Waiting 30 seconds for server to boot"))
time.sleep(30)
return instance.public_dns_name
|
[
"def",
"create_server",
"(",
")",
":",
"try",
":",
"import",
"boto",
"except",
"ImportError",
":",
"sys",
".",
"exit",
"(",
"\"boto library required for creating servers with Amazon.\"",
")",
"print",
"(",
"green",
"(",
"\"Creating EC2 server\"",
")",
")",
"conn",
"=",
"boto",
".",
"connect_ec2",
"(",
"get_or_prompt",
"(",
"'ec2_key'",
",",
"'API Key'",
")",
",",
"get_or_prompt",
"(",
"'ec2_secret'",
",",
"'API Secret'",
")",
")",
"reservation",
"=",
"conn",
".",
"run_instances",
"(",
"get_or_prompt",
"(",
"'ec2_ami'",
",",
"'AMI ID'",
",",
"'ami-fd589594'",
")",
",",
"instance_type",
"=",
"get_or_prompt",
"(",
"'ec2_instancetype'",
",",
"'Instance Type'",
",",
"'t1.micro'",
")",
",",
"key_name",
"=",
"get_or_prompt",
"(",
"'ec2_keypair'",
",",
"'Key Pair'",
")",
",",
"security_groups",
"=",
"get_or_prompt_list",
"(",
"'ec2_secgroups'",
",",
"'Security Groups'",
")",
")",
"instance",
"=",
"reservation",
".",
"instances",
"[",
"0",
"]",
"time",
".",
"sleep",
"(",
"3",
")",
"tag",
"=",
"get_or_prompt",
"(",
"'ec2_tag'",
",",
"'Instance Tag (blank for none)'",
",",
"''",
")",
".",
"strip",
"(",
")",
"if",
"len",
"(",
"tag",
")",
">",
"0",
":",
"conn",
".",
"create_tags",
"(",
"[",
"instance",
".",
"id",
"]",
",",
"{",
"\"Name\"",
":",
"tag",
"}",
")",
"while",
"instance",
".",
"state",
"!=",
"u'running'",
":",
"print",
"(",
"yellow",
"(",
"\"Instance state: %s\"",
"%",
"instance",
".",
"state",
")",
")",
"time",
".",
"sleep",
"(",
"10",
")",
"instance",
".",
"update",
"(",
")",
"print",
"(",
"green",
"(",
"\"Instance state: %s\"",
"%",
"instance",
".",
"state",
")",
")",
"print",
"(",
"green",
"(",
"\"Public dns: %s\"",
"%",
"instance",
".",
"public_dns_name",
")",
")",
"print",
"(",
"green",
"(",
"\"Waiting 30 seconds for server to boot\"",
")",
")",
"time",
".",
"sleep",
"(",
"30",
")",
"return",
"instance",
".",
"public_dns_name"
] |
Creates an EC2 Server
|
[
"Creates",
"an",
"EC2",
"Server"
] |
a648ce00e4467cd14d71e754240ef6c1f87a34b5
|
https://github.com/mattupstate/cubric/blob/a648ce00e4467cd14d71e754240ef6c1f87a34b5/cubric/providers/amazon.py#L11-L55
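The wait-until-running idiom in create_server stands on its own; below is a minimal sketch of it against the boto 2 API, with placeholder credentials and key pair plus the same AMI/instance-type defaults the function prompts with. This isolates the polling pattern, not the author's full code path.

import time
import boto

# Placeholder credentials; in the function above they come from get_or_prompt().
conn = boto.connect_ec2('API_KEY', 'API_SECRET')
reservation = conn.run_instances('ami-fd589594', instance_type='t1.micro',
                                 key_name='my-keypair')
instance = reservation.instances[0]
while instance.state != u'running':  # poll EC2 until the instance is up
    time.sleep(10)
    instance.update()
print(instance.public_dns_name)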
|
242,687
|
jcalogovic/lightning
|
stormstats/downloader.py
|
return_time_elements
|
def return_time_elements(time_stamp):
"""Returns formatted strings of time stamps for HTML requests.
:parameters time_range: pandas.tslib.Timestamp
"""
yyyy = str(time_stamp.year)
mm = "%02d" % (time_stamp.month,)
dd = "%02d" % (time_stamp.day,)
hr = "%02d" % (time_stamp.hour,)
mins = "%02d" % (time_stamp.minute,)
return yyyy, mm, dd, hr, mins
|
python
|
def return_time_elements(time_stamp):
"""Returns formatted strings of time stamps for HTML requests.
:parameters time_range: pandas.tslib.Timestamp
"""
yyyy = str(time_stamp.year)
mm = "%02d" % (time_stamp.month,)
dd = "%02d" % (time_stamp.day,)
hr = "%02d" % (time_stamp.hour,)
mins = "%02d" % (time_stamp.minute,)
return yyyy, mm, dd, hr, mins
|
[
"def",
"return_time_elements",
"(",
"time_stamp",
")",
":",
"yyyy",
"=",
"str",
"(",
"time_stamp",
".",
"year",
")",
"mm",
"=",
"\"%02d\"",
"%",
"(",
"time_stamp",
".",
"month",
",",
")",
"dd",
"=",
"\"%02d\"",
"%",
"(",
"time_stamp",
".",
"day",
",",
")",
"hr",
"=",
"\"%02d\"",
"%",
"(",
"time_stamp",
".",
"hour",
",",
")",
"mins",
"=",
"\"%02d\"",
"%",
"(",
"time_stamp",
".",
"minute",
",",
")",
"return",
"yyyy",
",",
"mm",
",",
"dd",
",",
"hr",
",",
"mins"
] |
Returns formatted strings of time stamps for HTTP requests.
:param time_stamp: pandas.tslib.Timestamp
|
[
"Returns",
"formatted",
"strings",
"of",
"time",
"stamps",
"for",
"HTML",
"requests",
"."
] |
f9e52731c9dd40cb302295ec36a444e0377d0570
|
https://github.com/jcalogovic/lightning/blob/f9e52731c9dd40cb302295ec36a444e0377d0570/stormstats/downloader.py#L10-L20
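A usage sketch for the record above; the import path is inferred from the file listed in the record and is an assumption.

import pandas as pd
from stormstats.downloader import return_time_elements  # assumed import path

ts = pd.Timestamp('2017-03-05 07:09:00')
yyyy, mm, dd, hr, mins = return_time_elements(ts)
assert (yyyy, mm, dd, hr, mins) == ('2017', '03', '05', '07', '09')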
|
242,688
|
samfcmc/fenixedu-python-sdk
|
fenixedu/configuration.py
|
FenixEduConfiguration.fromConfigFile
|
def fromConfigFile(filename = DEFAULT_CONFIG_FILE):
""" Read settings from configuration file"""
parser = SafeConfigParser()
section = 'fenixedu'
parser.read(filename)
client_id = parser.get(section, 'client_id')
redirect_uri = parser.get(section, 'redirect_uri')
client_secret = parser.get(section, 'client_secret')
base_url = parser.get(section, 'base_url')
api_endpoint = parser.get(section, 'api_endpoint')
api_version = parser.get(section, 'api_version')
return FenixEduConfiguration(client_id = client_id,
redirect_uri = redirect_uri,
client_secret = client_secret,
base_url = base_url,
api_endpoint = api_endpoint,
api_version = api_version)
|
python
|
def fromConfigFile(filename = DEFAULT_CONFIG_FILE):
""" Read settings from configuration file"""
parser = SafeConfigParser()
section = 'fenixedu'
parser.read(filename)
client_id = parser.get(section, 'client_id')
redirect_uri = parser.get(section, 'redirect_uri')
client_secret = parser.get(section, 'client_secret')
base_url = parser.get(section, 'base_url')
api_endpoint = parser.get(section, 'api_endpoint')
api_version = parser.get(section, 'api_version')
return FenixEduConfiguration(client_id = client_id,
redirect_uri = redirect_uri,
client_secret = client_secret,
base_url = base_url,
api_endpoint = api_endpoint,
api_version = api_version)
|
[
"def",
"fromConfigFile",
"(",
"filename",
"=",
"DEFAULT_CONFIG_FILE",
")",
":",
"parser",
"=",
"SafeConfigParser",
"(",
")",
"section",
"=",
"'fenixedu'",
"parser",
".",
"read",
"(",
"filename",
")",
"client_id",
"=",
"parser",
".",
"get",
"(",
"section",
",",
"'client_id'",
")",
"redirect_uri",
"=",
"parser",
".",
"get",
"(",
"section",
",",
"'redirect_uri'",
")",
"client_secret",
"=",
"parser",
".",
"get",
"(",
"section",
",",
"'client_secret'",
")",
"base_url",
"=",
"parser",
".",
"get",
"(",
"section",
",",
"'base_url'",
")",
"api_endpoint",
"=",
"parser",
".",
"get",
"(",
"section",
",",
"'api_endpoint'",
")",
"api_version",
"=",
"parser",
".",
"get",
"(",
"section",
",",
"'api_version'",
")",
"return",
"FenixEduConfiguration",
"(",
"client_id",
"=",
"client_id",
",",
"redirect_uri",
"=",
"redirect_uri",
",",
"client_secret",
"=",
"client_secret",
",",
"base_url",
"=",
"base_url",
",",
"api_endpoint",
"=",
"api_endpoint",
",",
"api_version",
"=",
"api_version",
")"
] |
Read settings from configuration file
|
[
"Read",
"settings",
"from",
"configuration",
"file"
] |
b9a1366853b2e7a6e208c46c3b99a589062677a9
|
https://github.com/samfcmc/fenixedu-python-sdk/blob/b9a1366853b2e7a6e208c46c3b99a589062677a9/fenixedu/configuration.py#L17-L36
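The parser reads a [fenixedu] section; here is a hedged sketch of a matching config file and the call. The import path, the staticmethod-style call, and every ini value are assumptions for illustration.

# fenixedu.ini (hypothetical values):
#   [fenixedu]
#   client_id = my-client-id
#   client_secret = my-secret
#   redirect_uri = https://example.com/callback
#   base_url = https://fenix.tecnico.ulisboa.pt
#   api_endpoint = api/fenix
#   api_version = v1
from fenixedu.configuration import FenixEduConfiguration  # assumed path

config = FenixEduConfiguration.fromConfigFile('fenixedu.ini')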
|
242,689
|
mayfield/shellish
|
shellish/session.py
|
Session.map_subcommands
|
def map_subcommands(self, func):
""" Run `func` against all the subcommands attached to our root
command. """
def crawl(cmd):
for sc in cmd.subcommands.values():
yield from crawl(sc)
yield cmd
return map(func, crawl(self.root_command))
|
python
|
def map_subcommands(self, func):
""" Run `func` against all the subcommands attached to our root
command. """
def crawl(cmd):
for sc in cmd.subcommands.values():
yield from crawl(sc)
yield cmd
return map(func, crawl(self.root_command))
|
[
"def",
"map_subcommands",
"(",
"self",
",",
"func",
")",
":",
"def",
"crawl",
"(",
"cmd",
")",
":",
"for",
"sc",
"in",
"cmd",
".",
"subcommands",
".",
"values",
"(",
")",
":",
"yield",
"from",
"crawl",
"(",
"sc",
")",
"yield",
"cmd",
"return",
"map",
"(",
"func",
",",
"crawl",
"(",
"self",
".",
"root_command",
")",
")"
] |
Run `func` against all the subcommands attached to our root
command.
|
[
"Run",
"func",
"against",
"all",
"the",
"subcommands",
"attached",
"to",
"our",
"root",
"command",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/session.py#L89-L97
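Since crawl() is a generator fed to map(), the traversal is lazy; a sketch of forcing it, assuming (hypothetically) that each command object exposes a name attribute:

# list() forces the depth-first crawl; cmd.name is an assumed attribute.
names = list(session.map_subcommands(lambda cmd: cmd.name))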
|
242,690
|
mayfield/shellish
|
shellish/session.py
|
Session.handle_command_error
|
def handle_command_error(self, command, args, exc):
""" Depending on how the session is configured this will print
information about an unhandled command exception or possibly jump
to some other behavior like a debugger. """
verbosity = self.command_error_verbosity
if verbosity == 'traceback':
self.pretty_print_exc(command, exc, show_traceback=True)
elif verbosity == 'debug':
pdb.set_trace()
elif verbosity == 'raise':
raise exc
elif verbosity == 'pretty':
self.pretty_print_exc(command, exc)
else:
raise ValueError('Unexpected exception_verbosity: %s' %
verbosity)
|
python
|
def handle_command_error(self, command, args, exc):
""" Depending on how the session is configured this will print
information about an unhandled command exception or possibly jump
to some other behavior like a debugger. """
verbosity = self.command_error_verbosity
if verbosity == 'traceback':
self.pretty_print_exc(command, exc, show_traceback=True)
elif verbosity == 'debug':
pdb.set_trace()
elif verbosity == 'raise':
raise exc
elif verbosity == 'pretty':
self.pretty_print_exc(command, exc)
else:
raise ValueError('Unexpected exception_verbosity: %s' %
verbosity)
|
[
"def",
"handle_command_error",
"(",
"self",
",",
"command",
",",
"args",
",",
"exc",
")",
":",
"verbosity",
"=",
"self",
".",
"command_error_verbosity",
"if",
"verbosity",
"==",
"'traceback'",
":",
"self",
".",
"pretty_print_exc",
"(",
"command",
",",
"exc",
",",
"show_traceback",
"=",
"True",
")",
"elif",
"verbosity",
"==",
"'debug'",
":",
"pdb",
".",
"set_trace",
"(",
")",
"elif",
"verbosity",
"==",
"'raise'",
":",
"raise",
"exc",
"elif",
"verbosity",
"==",
"'pretty'",
":",
"self",
".",
"pretty_print_exc",
"(",
"command",
",",
"exc",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unexpected exception_verbosity: %s'",
"%",
"verbosity",
")"
] |
Depending on how the session is configured this will print
information about an unhandled command exception or possibly jump
to some other behavior like a debugger.
|
[
"Depending",
"on",
"how",
"the",
"session",
"is",
"configured",
"this",
"will",
"print",
"information",
"about",
"an",
"unhandled",
"command",
"exception",
"or",
"possibly",
"jump",
"to",
"some",
"other",
"behavior",
"like",
"a",
"debugger",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/session.py#L134-L149
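A sketch of driving the dispatch directly; command and argv are placeholders, and configuring command_error_verbosity by plain attribute assignment is an assumption rather than something the source shows.

session.command_error_verbosity = 'pretty'  # or 'traceback', 'debug', 'raise'
try:
    command(argv=argv)                      # placeholder command invocation
except Exception as exc:
    session.handle_command_error(command, argv, exc)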
|
242,691
|
mayfield/shellish
|
shellish/session.py
|
Session.complete_wrap
|
def complete_wrap(self, func, *args, **kwargs):
""" Readline eats exceptions raised by completer functions. """
# Workaround readline's one-time-read of terminal width.
termcols = shutil.get_terminal_size()[0]
readline.parse_and_bind('set completion-display-width %d' % termcols)
try:
return func(*args, **kwargs)
except:
traceback.print_exc()
raise
|
python
|
def complete_wrap(self, func, *args, **kwargs):
""" Readline eats exceptions raised by completer functions. """
# Workaround readline's one-time-read of terminal width.
termcols = shutil.get_terminal_size()[0]
readline.parse_and_bind('set completion-display-width %d' % termcols)
try:
return func(*args, **kwargs)
except:
traceback.print_exc()
raise
|
[
"def",
"complete_wrap",
"(",
"self",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Workaround readline's one-time-read of terminal width.",
"termcols",
"=",
"shutil",
".",
"get_terminal_size",
"(",
")",
"[",
"0",
"]",
"readline",
".",
"parse_and_bind",
"(",
"'set completion-display-width %d'",
"%",
"termcols",
")",
"try",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
":",
"traceback",
".",
"print_exc",
"(",
")",
"raise"
] |
Readline eats exceptions raised by completer functions.
|
[
"Readline",
"eats",
"exceptions",
"raised",
"by",
"completer",
"functions",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/session.py#L206-L215
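The terminal-width workaround is self-contained and worth isolating; this sketch re-reads the width so readline's completion display tracks terminal resizes (a Unix readline build is assumed):

import shutil
import readline

# readline samples the terminal width once at startup, so refresh it
# before each completion pass.
termcols = shutil.get_terminal_size()[0]
readline.parse_and_bind('set completion-display-width %d' % termcols)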
|
242,692
|
mayfield/shellish
|
shellish/session.py
|
Session.setup_readline
|
def setup_readline(self):
""" Configure our tab completion settings for a context and then
restore them to previous settings on exit. """
readline.parse_and_bind('tab: complete')
completer_save = readline.get_completer()
delims_save = readline.get_completer_delims()
delims = set(delims_save)
delims |= self.completer_delim_includes
delims -= self.completer_delim_excludes
readline.set_completer(self.completer_hook)
try:
readline.set_completer_delims(''.join(delims))
try:
yield
finally:
readline.set_completer_delims(delims_save)
finally:
readline.set_completer(completer_save)
|
python
|
def setup_readline(self):
""" Configure our tab completion settings for a context and then
restore them to previous settings on exit. """
readline.parse_and_bind('tab: complete')
completer_save = readline.get_completer()
delims_save = readline.get_completer_delims()
delims = set(delims_save)
delims |= self.completer_delim_includes
delims -= self.completer_delim_excludes
readline.set_completer(self.completer_hook)
try:
readline.set_completer_delims(''.join(delims))
try:
yield
finally:
readline.set_completer_delims(delims_save)
finally:
readline.set_completer(completer_save)
|
[
"def",
"setup_readline",
"(",
"self",
")",
":",
"readline",
".",
"parse_and_bind",
"(",
"'tab: complete'",
")",
"completer_save",
"=",
"readline",
".",
"get_completer",
"(",
")",
"delims_save",
"=",
"readline",
".",
"get_completer_delims",
"(",
")",
"delims",
"=",
"set",
"(",
"delims_save",
")",
"delims",
"|=",
"self",
".",
"completer_delim_includes",
"delims",
"-=",
"self",
".",
"completer_delim_excludes",
"readline",
".",
"set_completer",
"(",
"self",
".",
"completer_hook",
")",
"try",
":",
"readline",
".",
"set_completer_delims",
"(",
"''",
".",
"join",
"(",
"delims",
")",
")",
"try",
":",
"yield",
"finally",
":",
"readline",
".",
"set_completer_delims",
"(",
"delims_save",
")",
"finally",
":",
"readline",
".",
"set_completer",
"(",
"completer_save",
")"
] |
Configure our tab completion settings for a context and then
restore them to previous settings on exit.
|
[
"Configure",
"our",
"tab",
"completion",
"settings",
"for",
"a",
"context",
"and",
"then",
"restore",
"them",
"to",
"previous",
"settings",
"on",
"exit",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/session.py#L222-L239
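The bare yield implies the method is used as a context manager (presumably decorated with contextlib.contextmanager elsewhere in the file); usage would then look like:

# Assumes setup_readline is wrapped with @contextlib.contextmanager.
with session.setup_readline():
    line = input(session.prompt)  # tab completion active inside the block
# completer and delimiter settings are restored on exit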
|
242,693
|
mayfield/shellish
|
shellish/session.py
|
Session.run_loop
|
def run_loop(self):
""" Main entry point for running in interactive mode. """
self.root_command.prog = ''
history_file = self.load_history()
rendering.vtmlprint(self.intro)
try:
self.loop()
finally:
readline.write_history_file(history_file)
|
python
|
def run_loop(self):
""" Main entry point for running in interactive mode. """
self.root_command.prog = ''
history_file = self.load_history()
rendering.vtmlprint(self.intro)
try:
self.loop()
finally:
readline.write_history_file(history_file)
|
[
"def",
"run_loop",
"(",
"self",
")",
":",
"self",
".",
"root_command",
".",
"prog",
"=",
"''",
"history_file",
"=",
"self",
".",
"load_history",
"(",
")",
"rendering",
".",
"vtmlprint",
"(",
"self",
".",
"intro",
")",
"try",
":",
"self",
".",
"loop",
"(",
")",
"finally",
":",
"readline",
".",
"write_history_file",
"(",
"history_file",
")"
] |
Main entry point for running in interactive mode.
|
[
"Main",
"entry",
"point",
"for",
"running",
"in",
"interactive",
"mode",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/session.py#L241-L249
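A hedged bootstrap sketch; the Session constructor signature is an assumption:

# Hypothetical: Session(root_command) is an assumed signature.
session = Session(root_command)
session.run_loop()  # prints the intro, loops, writes history on exit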
|
242,694
|
mayfield/shellish
|
shellish/session.py
|
Session.loop
|
def loop(self):
""" Inner loop for interactive mode. Do not call directly. """
while True:
with self.setup_readline():
try:
line = input(self.prompt)
except EOFError:
_vprinterr('^D')
break
except KeyboardInterrupt:
_vprinterr('^C')
continue
if not line.strip():
continue
try:
cmd, args = self.cmd_split(line)
except KeyError as e:
_vprinterr('<red>Invalid command: %s</red>' % e)
continue
try:
cmd(argv=args)
except SessionExit:
break
except SystemExit as e:
pass
|
python
|
def loop(self):
""" Inner loop for interactive mode. Do not call directly. """
while True:
with self.setup_readline():
try:
line = input(self.prompt)
except EOFError:
_vprinterr('^D')
break
except KeyboardInterrupt:
_vprinterr('^C')
continue
if not line.strip():
continue
try:
cmd, args = self.cmd_split(line)
except KeyError as e:
_vprinterr('<red>Invalid command: %s</red>' % e)
continue
try:
cmd(argv=args)
except SessionExit:
break
except SystemExit as e:
pass
|
[
"def",
"loop",
"(",
"self",
")",
":",
"while",
"True",
":",
"with",
"self",
".",
"setup_readline",
"(",
")",
":",
"try",
":",
"line",
"=",
"input",
"(",
"self",
".",
"prompt",
")",
"except",
"EOFError",
":",
"_vprinterr",
"(",
"'^D'",
")",
"break",
"except",
"KeyboardInterrupt",
":",
"_vprinterr",
"(",
"'^C'",
")",
"continue",
"if",
"not",
"line",
".",
"strip",
"(",
")",
":",
"continue",
"try",
":",
"cmd",
",",
"args",
"=",
"self",
".",
"cmd_split",
"(",
"line",
")",
"except",
"KeyError",
"as",
"e",
":",
"_vprinterr",
"(",
"'<red>Invalid command: %s</red>'",
"%",
"e",
")",
"continue",
"try",
":",
"cmd",
"(",
"argv",
"=",
"args",
")",
"except",
"SessionExit",
":",
"break",
"except",
"SystemExit",
"as",
"e",
":",
"pass"
] |
Inner loop for interactive mode. Do not call directly.
|
[
"Inner",
"loop",
"for",
"interactive",
"mode",
".",
"Do",
"not",
"call",
"directly",
"."
] |
df0f0e4612d138c34d8cb99b66ab5b8e47f1414a
|
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/session.py#L251-L275
|
242,695
|
langloisjp/tstore
|
tstore/pgtablestorage.py
|
sqldelete
|
def sqldelete(table, where):
"""Generates SQL delete from ... where ...
>>> sqldelete('t', {'id': 5})
('delete from t where id=%s', [5])
"""
validate_name(table)
(whereclause, wherevalues) = sqlwhere(where)
sql = "delete from {}".format(table)
if whereclause:
sql += " where " + whereclause
return (sql, wherevalues)
|
python
|
def sqldelete(table, where):
"""Generates SQL delete from ... where ...
>>> sqldelete('t', {'id': 5})
('delete from t where id=%s', [5])
"""
validate_name(table)
(whereclause, wherevalues) = sqlwhere(where)
sql = "delete from {}".format(table)
if whereclause:
sql += " where " + whereclause
return (sql, wherevalues)
|
[
"def",
"sqldelete",
"(",
"table",
",",
"where",
")",
":",
"validate_name",
"(",
"table",
")",
"(",
"whereclause",
",",
"wherevalues",
")",
"=",
"sqlwhere",
"(",
"where",
")",
"sql",
"=",
"\"delete from {}\"",
".",
"format",
"(",
"table",
")",
"if",
"whereclause",
":",
"sql",
"+=",
"\" where \"",
"+",
"whereclause",
"return",
"(",
"sql",
",",
"wherevalues",
")"
] |
Generates SQL delete from ... where ...
>>> sqldelete('t', {'id': 5})
('delete from t where id=%s', [5])
|
[
"Generates",
"SQL",
"delete",
"from",
"...",
"where",
"..."
] |
b438f8aaf09117bf6f922ba06ae5cf46b7b97a57
|
https://github.com/langloisjp/tstore/blob/b438f8aaf09117bf6f922ba06ae5cf46b7b97a57/tstore/pgtablestorage.py#L398-L409
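One branch the doctest does not cover: with a falsy where clause, no WHERE is emitted. This assumes sqlwhere({}) returns an empty clause, which the source above does not show:

sql, values = sqldelete('t', {})
# expected, under that assumption: ('delete from t', [])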
|
242,696
|
langloisjp/tstore
|
tstore/pgtablestorage.py
|
DB.select
|
def select(self, table, fields=['*'], where=None, orderby=None,
limit=None, offset=None):
"""
Query and return list of records.
>>> import getpass
>>> s = DB(dbname='test', user=getpass.getuser(), host='localhost',
... password='')
>>> s.execute('drop table if exists t2')
>>> s.execute('create table t2 (id int, name text)')
>>> s.insert('t2', {'id': 1, 'name': 'Toto'})
>>> rows = s.select('t2')
>>> len(rows)
1
>>> row = rows[0]
>>> row
{'id': 1, 'name': 'Toto'}
"""
(sql, values) = sqlselect(table, fields, where, orderby, limit, offset)
self.execute(sql, values)
return self.fetchall()
|
python
|
def select(self, table, fields=['*'], where=None, orderby=None,
limit=None, offset=None):
"""
Query and return list of records.
>>> import getpass
>>> s = DB(dbname='test', user=getpass.getuser(), host='localhost',
... password='')
>>> s.execute('drop table if exists t2')
>>> s.execute('create table t2 (id int, name text)')
>>> s.insert('t2', {'id': 1, 'name': 'Toto'})
>>> rows = s.select('t2')
>>> len(rows)
1
>>> row = rows[0]
>>> row
{'id': 1, 'name': 'Toto'}
"""
(sql, values) = sqlselect(table, fields, where, orderby, limit, offset)
self.execute(sql, values)
return self.fetchall()
|
[
"def",
"select",
"(",
"self",
",",
"table",
",",
"fields",
"=",
"[",
"'*'",
"]",
",",
"where",
"=",
"None",
",",
"orderby",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"offset",
"=",
"None",
")",
":",
"(",
"sql",
",",
"values",
")",
"=",
"sqlselect",
"(",
"table",
",",
"fields",
",",
"where",
",",
"orderby",
",",
"limit",
",",
"offset",
")",
"self",
".",
"execute",
"(",
"sql",
",",
"values",
")",
"return",
"self",
".",
"fetchall",
"(",
")"
] |
Query and return list of records.
>>> import getpass
>>> s = DB(dbname='test', user=getpass.getuser(), host='localhost',
... password='')
>>> s.execute('drop table if exists t2')
>>> s.execute('create table t2 (id int, name text)')
>>> s.insert('t2', {'id': 1, 'name': 'Toto'})
>>> rows = s.select('t2')
>>> len(rows)
1
>>> row = rows[0]
>>> row
{'id': 1, 'name': 'Toto'}
|
[
"Query",
"and",
"return",
"list",
"of",
"records",
"."
] |
b438f8aaf09117bf6f922ba06ae5cf46b7b97a57
|
https://github.com/langloisjp/tstore/blob/b438f8aaf09117bf6f922ba06ae5cf46b7b97a57/tstore/pgtablestorage.py#L98-L118
|
242,697
|
langloisjp/tstore
|
tstore/pgtablestorage.py
|
DB.insert
|
def insert(self, table, row):
"""
Add new row. Row must be a dict or implement the mapping interface.
>>> import getpass
>>> s = DB(dbname='test', user=getpass.getuser(), host='localhost',
... password='')
>>> s.execute('drop table if exists t2')
>>> s.execute('create table t2 (id int, name text)')
>>> s.insert('t2', {'id': 1, 'name': 'Toto'})
>>> rows = s.select('t2')
>>> len(rows)
1
>>> row = rows[0]
>>> row
{'id': 1, 'name': 'Toto'}
"""
(sql, values) = sqlinsert(table, row)
self.execute(sql, values)
|
python
|
def insert(self, table, row):
"""
Add new row. Row must be a dict or implement the mapping interface.
>>> import getpass
>>> s = DB(dbname='test', user=getpass.getuser(), host='localhost',
... password='')
>>> s.execute('drop table if exists t2')
>>> s.execute('create table t2 (id int, name text)')
>>> s.insert('t2', {'id': 1, 'name': 'Toto'})
>>> rows = s.select('t2')
>>> len(rows)
1
>>> row = rows[0]
>>> row
{'id': 1, 'name': 'Toto'}
"""
(sql, values) = sqlinsert(table, row)
self.execute(sql, values)
|
[
"def",
"insert",
"(",
"self",
",",
"table",
",",
"row",
")",
":",
"(",
"sql",
",",
"values",
")",
"=",
"sqlinsert",
"(",
"table",
",",
"row",
")",
"self",
".",
"execute",
"(",
"sql",
",",
"values",
")"
] |
Add new row. Row must be a dict or implement the mapping interface.
>>> import getpass
>>> s = DB(dbname='test', user=getpass.getuser(), host='localhost',
... password='')
>>> s.execute('drop table if exists t2')
>>> s.execute('create table t2 (id int, name text)')
>>> s.insert('t2', {'id': 1, 'name': 'Toto'})
>>> rows = s.select('t2')
>>> len(rows)
1
>>> row = rows[0]
>>> row
{'id': 1, 'name': 'Toto'}
|
[
"Add",
"new",
"row",
".",
"Row",
"must",
"be",
"a",
"dict",
"or",
"implement",
"the",
"mapping",
"interface",
"."
] |
b438f8aaf09117bf6f922ba06ae5cf46b7b97a57
|
https://github.com/langloisjp/tstore/blob/b438f8aaf09117bf6f922ba06ae5cf46b7b97a57/tstore/pgtablestorage.py#L120-L138
|
242,698
|
mgaitan/one
|
one.py
|
one
|
def one(iterable, cmp=None):
"""
Return the object in the given iterable that evaluates to True.
If the given iterable has more than one object that evaluates to True,
or if there is no object that fulfills such condition, return False.
If a callable ``cmp`` is given, it's used to evaluate each element.
>>> one((True, False, False))
True
>>> one((True, False, True))
False
>>> one((0, 0, 'a'))
'a'
>>> one((0, False, None))
False
>>> one((True, True))
False
>>> bool(one(('', 1)))
True
>>> one((10, 20, 30, 42), lambda i: i > 40)
42
"""
the_one = False
for i in iterable:
if cmp(i) if cmp else i:
if the_one:
return False
the_one = i
return the_one
|
python
|
def one(iterable, cmp=None):
"""
Return the object in the given iterable that evaluates to True.
If the given iterable has more than one object that evaluates to True,
or if there is no object that fulfills such condition, return False.
If a callable ``cmp`` is given, it's used to evaluate each element.
>>> one((True, False, False))
True
>>> one((True, False, True))
False
>>> one((0, 0, 'a'))
'a'
>>> one((0, False, None))
False
>>> one((True, True))
False
>>> bool(one(('', 1)))
True
>>> one((10, 20, 30, 42), lambda i: i > 40)
42
"""
the_one = False
for i in iterable:
if cmp(i) if cmp else i:
if the_one:
return False
the_one = i
return the_one
|
[
"def",
"one",
"(",
"iterable",
",",
"cmp",
"=",
"None",
")",
":",
"the_one",
"=",
"False",
"for",
"i",
"in",
"iterable",
":",
"if",
"cmp",
"(",
"i",
")",
"if",
"cmp",
"else",
"i",
":",
"if",
"the_one",
":",
"return",
"False",
"the_one",
"=",
"i",
"return",
"the_one"
] |
Return the object in the given iterable that evaluates to True.
If the given iterable has more than one object that evaluates to True,
or if there is no object that fulfills such condition, return False.
If a callable ``cmp`` is given, it's used to evaluate each element.
>>> one((True, False, False))
True
>>> one((True, False, True))
False
>>> one((0, 0, 'a'))
'a'
>>> one((0, False, None))
False
>>> one((True, True))
False
>>> bool(one(('', 1)))
True
>>> one((10, 20, 30, 42), lambda i: i > 40)
42
|
[
"Return",
"the",
"object",
"in",
"the",
"given",
"iterable",
"that",
"evaluates",
"to",
"True",
"."
] |
c6639cd7f31c0541df5f8512561a2bb0feea194c
|
https://github.com/mgaitan/one/blob/c6639cd7f31c0541df5f8512561a2bb0feea194c/one.py#L4-L35
|
242,699
|
DasIch/argvard
|
argvard/__init__.py
|
ExecutableBase.register_command
|
def register_command(self, name, command):
"""
Registers the `command` with the given `name`.
If the `name` has already been used to register a command, a
:exc:`RuntimeError` will be raised.
"""
if name in self.commands:
raise RuntimeError('%s is already defined' % name)
self.commands[name] = command
|
python
|
def register_command(self, name, command):
"""
Registers the `command` with the given `name`.
If the `name` has already been used to register a command, a
:exc:`RuntimeError` will be raised.
"""
if name in self.commands:
raise RuntimeError('%s is already defined' % name)
self.commands[name] = command
|
[
"def",
"register_command",
"(",
"self",
",",
"name",
",",
"command",
")",
":",
"if",
"name",
"in",
"self",
".",
"commands",
":",
"raise",
"RuntimeError",
"(",
"'%s is already defined'",
"%",
"name",
")",
"self",
".",
"commands",
"[",
"name",
"]",
"=",
"command"
] |
Registers the `command` with the given `name`.
If the `name` has already been used to register a command, a
:exc:`RuntimeError` will be raised.
|
[
"Registers",
"the",
"command",
"with",
"the",
"given",
"name",
"."
] |
2603e323a995e0915ce41fcf49e2a82519556195
|
https://github.com/DasIch/argvard/blob/2603e323a995e0915ce41fcf49e2a82519556195/argvard/__init__.py#L108-L117
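A sketch of the duplicate-name guard; app and sync_command are hypothetical placeholders:

app.register_command('sync', sync_command)
app.register_command('sync', sync_command)  # RuntimeError: sync is already defined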
|