| id_within_dataset (int64, 1-55.5k) | snippet (string, lengths 19-14.2k) | tokens (list, lengths 6-1.63k) | nl (string, lengths 6-352) | split_within_dataset (1 class) | is_duplicated (bool, 2 classes) |
|---|---|---|---|---|---|
15,315 | def ParseFullPath(full_path):
(bucket, _, path) = full_path.partition('/')
if ((not bucket) or (not IsBucket(bucket))):
return None
else:
return (bucket, path)
| [
"def",
"ParseFullPath",
"(",
"full_path",
")",
":",
"(",
"bucket",
",",
"_",
",",
"path",
")",
"=",
"full_path",
".",
"partition",
"(",
"'/'",
")",
"if",
"(",
"(",
"not",
"bucket",
")",
"or",
"(",
"not",
"IsBucket",
"(",
"bucket",
")",
")",
")",
":",
"return",
"None",
"else",
":",
"return",
"(",
"bucket",
",",
"path",
")"
] | parse a full path and return a (bucket, path) tuple . | train | false |
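A minimal usage sketch for the snippet above; `IsBucket` is not included in the row, so a trivial stand-in is assumed here:

```python
def IsBucket(name):
    # hypothetical stand-in; the real validator is not shown in this row
    return bool(name)

print(ParseFullPath('my-bucket/some/key'))  # ('my-bucket', 'some/key')
print(ParseFullPath('/no-bucket'))          # None -- empty bucket name
```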
15,316 | def has_c():
return _USE_C
| [
"def",
"has_c",
"(",
")",
":",
"return",
"_USE_C"
] | is the c extension installed? . | train | false |
15,317 | def resolve_patterns_path(patterns_path):
if os.path.isabs(patterns_path):
if os.path.exists(patterns_path):
return patterns_path
else:
raise InvalidPatternsPathError('Absolute path does not exist.')
else:
patterns_path = os.path.join(MONITORDIR, patterns_path)
if os.path.exists(patterns_path):
return patterns_path
else:
raise InvalidPatternsPathError('Relative path does not exist.')
| [
"def",
"resolve_patterns_path",
"(",
"patterns_path",
")",
":",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"patterns_path",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"patterns_path",
")",
":",
"return",
"patterns_path",
"else",
":",
"raise",
"InvalidPatternsPathError",
"(",
"'Absolute path does not exist.'",
")",
"else",
":",
"patterns_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"MONITORDIR",
",",
"patterns_path",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"patterns_path",
")",
":",
"return",
"patterns_path",
"else",
":",
"raise",
"InvalidPatternsPathError",
"(",
"'Relative path does not exist.'",
")"
] | resolve patterns_path to existing absolute local path or raise . | train | false |
15,319 | def get_pm2_5_info():
url = ('http://www.pm25.in/api/querys/pm2_5.json?' + ('city=dongguan&token=%s&stations=no' % app.config['PM2_5_TOKEN']))
res = requests.get(url, timeout=7)
return res.json()
| [
"def",
"get_pm2_5_info",
"(",
")",
":",
"url",
"=",
"(",
"'http://www.pm25.in/api/querys/pm2_5.json?'",
"+",
"(",
"'city=dongguan&token=%s&stations=no'",
"%",
"app",
".",
"config",
"[",
"'PM2_5_TOKEN'",
"]",
")",
")",
"res",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"timeout",
"=",
"7",
")",
"return",
"res",
".",
"json",
"(",
")"
] | api details: URL . | train | false |
15,320 | def get_output(layer_or_layers, inputs=None, **kwargs):
from .input import InputLayer
from .base import MergeLayer
accepted_kwargs = {'deterministic'}
treat_as_input = (inputs.keys() if isinstance(inputs, dict) else [])
all_layers = get_all_layers(layer_or_layers, treat_as_input)
all_outputs = dict(((layer, layer.input_var) for layer in all_layers if (isinstance(layer, InputLayer) and (layer not in treat_as_input))))
if isinstance(inputs, dict):
all_outputs.update(((layer, utils.as_theano_expression(expr)) for (layer, expr) in inputs.items()))
elif (inputs is not None):
if (len(all_outputs) > 1):
raise ValueError('get_output() was called with a single input expression on a network with multiple input layers. Please call it with a dictionary of input expressions instead.')
for input_layer in all_outputs:
all_outputs[input_layer] = utils.as_theano_expression(inputs)
for layer in all_layers:
if (layer not in all_outputs):
try:
if isinstance(layer, MergeLayer):
layer_inputs = [all_outputs[input_layer] for input_layer in layer.input_layers]
else:
layer_inputs = all_outputs[layer.input_layer]
except KeyError:
raise ValueError(('get_output() was called without giving an input expression for the free-floating layer %r. Please call it with a dictionary mapping this layer to an input expression.' % layer))
all_outputs[layer] = layer.get_output_for(layer_inputs, **kwargs)
try:
(names, _, _, defaults) = getargspec(layer.get_output_for)
except TypeError:
pass
else:
if (defaults is not None):
accepted_kwargs |= set(names[(- len(defaults)):])
accepted_kwargs |= set(layer.get_output_kwargs)
unused_kwargs = (set(kwargs.keys()) - accepted_kwargs)
if unused_kwargs:
suggestions = []
for kwarg in unused_kwargs:
suggestion = get_close_matches(kwarg, accepted_kwargs)
if suggestion:
suggestions.append(('%s (perhaps you meant %s)' % (kwarg, suggestion[0])))
else:
suggestions.append(kwarg)
warn(('get_output() was called with unused kwargs:\n DCTB %s' % '\n DCTB '.join(suggestions)))
try:
return [all_outputs[layer] for layer in layer_or_layers]
except TypeError:
return all_outputs[layer_or_layers]
| [
"def",
"get_output",
"(",
"layer_or_layers",
",",
"inputs",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"from",
".",
"input",
"import",
"InputLayer",
"from",
".",
"base",
"import",
"MergeLayer",
"accepted_kwargs",
"=",
"{",
"'deterministic'",
"}",
"treat_as_input",
"=",
"(",
"inputs",
".",
"keys",
"(",
")",
"if",
"isinstance",
"(",
"inputs",
",",
"dict",
")",
"else",
"[",
"]",
")",
"all_layers",
"=",
"get_all_layers",
"(",
"layer_or_layers",
",",
"treat_as_input",
")",
"all_outputs",
"=",
"dict",
"(",
"(",
"(",
"layer",
",",
"layer",
".",
"input_var",
")",
"for",
"layer",
"in",
"all_layers",
"if",
"(",
"isinstance",
"(",
"layer",
",",
"InputLayer",
")",
"and",
"(",
"layer",
"not",
"in",
"treat_as_input",
")",
")",
")",
")",
"if",
"isinstance",
"(",
"inputs",
",",
"dict",
")",
":",
"all_outputs",
".",
"update",
"(",
"(",
"(",
"layer",
",",
"utils",
".",
"as_theano_expression",
"(",
"expr",
")",
")",
"for",
"(",
"layer",
",",
"expr",
")",
"in",
"inputs",
".",
"items",
"(",
")",
")",
")",
"elif",
"(",
"inputs",
"is",
"not",
"None",
")",
":",
"if",
"(",
"len",
"(",
"all_outputs",
")",
">",
"1",
")",
":",
"raise",
"ValueError",
"(",
"'get_output() was called with a single input expression on a network with multiple input layers. Please call it with a dictionary of input expressions instead.'",
")",
"for",
"input_layer",
"in",
"all_outputs",
":",
"all_outputs",
"[",
"input_layer",
"]",
"=",
"utils",
".",
"as_theano_expression",
"(",
"inputs",
")",
"for",
"layer",
"in",
"all_layers",
":",
"if",
"(",
"layer",
"not",
"in",
"all_outputs",
")",
":",
"try",
":",
"if",
"isinstance",
"(",
"layer",
",",
"MergeLayer",
")",
":",
"layer_inputs",
"=",
"[",
"all_outputs",
"[",
"input_layer",
"]",
"for",
"input_layer",
"in",
"layer",
".",
"input_layers",
"]",
"else",
":",
"layer_inputs",
"=",
"all_outputs",
"[",
"layer",
".",
"input_layer",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"(",
"'get_output() was called without giving an input expression for the free-floating layer %r. Please call it with a dictionary mapping this layer to an input expression.'",
"%",
"layer",
")",
")",
"all_outputs",
"[",
"layer",
"]",
"=",
"layer",
".",
"get_output_for",
"(",
"layer_inputs",
",",
"**",
"kwargs",
")",
"try",
":",
"(",
"names",
",",
"_",
",",
"_",
",",
"defaults",
")",
"=",
"getargspec",
"(",
"layer",
".",
"get_output_for",
")",
"except",
"TypeError",
":",
"pass",
"else",
":",
"if",
"(",
"defaults",
"is",
"not",
"None",
")",
":",
"accepted_kwargs",
"|=",
"set",
"(",
"names",
"[",
"(",
"-",
"len",
"(",
"defaults",
")",
")",
":",
"]",
")",
"accepted_kwargs",
"|=",
"set",
"(",
"layer",
".",
"get_output_kwargs",
")",
"unused_kwargs",
"=",
"(",
"set",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
"-",
"accepted_kwargs",
")",
"if",
"unused_kwargs",
":",
"suggestions",
"=",
"[",
"]",
"for",
"kwarg",
"in",
"unused_kwargs",
":",
"suggestion",
"=",
"get_close_matches",
"(",
"kwarg",
",",
"accepted_kwargs",
")",
"if",
"suggestion",
":",
"suggestions",
".",
"append",
"(",
"(",
"'%s (perhaps you meant %s)'",
"%",
"(",
"kwarg",
",",
"suggestion",
"[",
"0",
"]",
")",
")",
")",
"else",
":",
"suggestions",
".",
"append",
"(",
"kwarg",
")",
"warn",
"(",
"(",
"'get_output() was called with unused kwargs:\\n DCTB %s'",
"%",
"'\\n DCTB '",
".",
"join",
"(",
"suggestions",
")",
")",
")",
"try",
":",
"return",
"[",
"all_outputs",
"[",
"layer",
"]",
"for",
"layer",
"in",
"layer_or_layers",
"]",
"except",
"TypeError",
":",
"return",
"all_outputs",
"[",
"layer_or_layers",
"]"
] | compute the output expressions of one or more layers of a network . | train | false |
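This snippet is Lasagne's `lasagne.layers.get_output`; a typical symbolic call, assuming Theano and Lasagne are installed, looks like:

```python
import lasagne

l_in = lasagne.layers.InputLayer(shape=(None, 10))
l_out = lasagne.layers.DenseLayer(l_in, num_units=2)
# Build the symbolic output expression; 'deterministic' is forwarded to
# each layer's get_output_for (e.g. to disable dropout at test time).
y = lasagne.layers.get_output(l_out, deterministic=True)
```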
15,323 | def get_paginated_list(klass, url=None, args={}, **kwargs):
if ('event_id' in kwargs):
get_object_or_404(EventModel, kwargs['event_id'])
if (url is None):
url = request.base_url
start = args['start']
limit = args['limit']
results = get_object_list(klass, **kwargs)
count = len(results)
if (count < start):
raise NotFoundError(message="Start position '{}' out of bound".format(start))
obj = {}
obj['start'] = start
obj['limit'] = limit
obj['count'] = count
args_copy = args.copy()
if (start == 1):
obj['previous'] = ''
else:
args_copy['start'] = max(1, (start - limit))
args_copy['limit'] = (start - 1)
obj['previous'] = (url + _make_url_query(args_copy))
args_copy = args.copy()
if ((start + limit) > count):
obj['next'] = ''
else:
args_copy['start'] = (start + limit)
obj['next'] = (url + _make_url_query(args_copy))
obj['results'] = results[(start - 1):((start - 1) + limit)]
return obj
| [
"def",
"get_paginated_list",
"(",
"klass",
",",
"url",
"=",
"None",
",",
"args",
"=",
"{",
"}",
",",
"**",
"kwargs",
")",
":",
"if",
"(",
"'event_id'",
"in",
"kwargs",
")",
":",
"get_object_or_404",
"(",
"EventModel",
",",
"kwargs",
"[",
"'event_id'",
"]",
")",
"if",
"(",
"url",
"is",
"None",
")",
":",
"url",
"=",
"request",
".",
"base_url",
"start",
"=",
"args",
"[",
"'start'",
"]",
"limit",
"=",
"args",
"[",
"'limit'",
"]",
"results",
"=",
"get_object_list",
"(",
"klass",
",",
"**",
"kwargs",
")",
"count",
"=",
"len",
"(",
"results",
")",
"if",
"(",
"count",
"<",
"start",
")",
":",
"raise",
"NotFoundError",
"(",
"message",
"=",
"\"Start position '{}' out of bound\"",
".",
"format",
"(",
"start",
")",
")",
"obj",
"=",
"{",
"}",
"obj",
"[",
"'start'",
"]",
"=",
"start",
"obj",
"[",
"'limit'",
"]",
"=",
"limit",
"obj",
"[",
"'count'",
"]",
"=",
"count",
"args_copy",
"=",
"args",
".",
"copy",
"(",
")",
"if",
"(",
"start",
"==",
"1",
")",
":",
"obj",
"[",
"'previous'",
"]",
"=",
"''",
"else",
":",
"args_copy",
"[",
"'start'",
"]",
"=",
"max",
"(",
"1",
",",
"(",
"start",
"-",
"limit",
")",
")",
"args_copy",
"[",
"'limit'",
"]",
"=",
"(",
"start",
"-",
"1",
")",
"obj",
"[",
"'previous'",
"]",
"=",
"(",
"url",
"+",
"_make_url_query",
"(",
"args_copy",
")",
")",
"args_copy",
"=",
"args",
".",
"copy",
"(",
")",
"if",
"(",
"(",
"start",
"+",
"limit",
")",
">",
"count",
")",
":",
"obj",
"[",
"'next'",
"]",
"=",
"''",
"else",
":",
"args_copy",
"[",
"'start'",
"]",
"=",
"(",
"start",
"+",
"limit",
")",
"obj",
"[",
"'next'",
"]",
"=",
"(",
"url",
"+",
"_make_url_query",
"(",
"args_copy",
")",
")",
"obj",
"[",
"'results'",
"]",
"=",
"results",
"[",
"(",
"start",
"-",
"1",
")",
":",
"(",
"(",
"start",
"-",
"1",
")",
"+",
"limit",
")",
"]",
"return",
"obj"
] | returns a paginated response object ; klass - model class to query from ; url - url of the request ; args - args passed to the request as query parameters ; kwargs - filters for query on the klass model . | train | false |
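The pagination arithmetic above is easiest to check with concrete numbers; a standalone sketch (not tied to the Flask helpers the snippet uses) for `start=11`, `limit=10`, `count=25`:

```python
start, limit, count = 11, 10, 25
prev_start, prev_limit = max(1, start - limit), start - 1  # 1, 10
next_start = start + limit                                 # 21 (21 <= 25, so a next page exists)
page = list(range(count))[start - 1:start - 1 + limit]     # items 10..19 (0-based)
```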
15,324 | def pattern_matches(pattern, target):
if (not isinstance(pattern, Pattern)):
pattern = _compile_pattern(pattern)
return pattern.matches(target)
| [
"def",
"pattern_matches",
"(",
"pattern",
",",
"target",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"pattern",
",",
"Pattern",
")",
")",
":",
"pattern",
"=",
"_compile_pattern",
"(",
"pattern",
")",
"return",
"pattern",
".",
"matches",
"(",
"target",
")"
] | verify that a target string matches the given pattern . | train | false |
15,325 | def extract_provider_location(provider_location, key):
if (not provider_location):
return None
kvps = provider_location.split('|')
for kvp in kvps:
fields = kvp.split('^')
if ((len(fields) == 2) and (fields[0] == key)):
return fields[1]
| [
"def",
"extract_provider_location",
"(",
"provider_location",
",",
"key",
")",
":",
"if",
"(",
"not",
"provider_location",
")",
":",
"return",
"None",
"kvps",
"=",
"provider_location",
".",
"split",
"(",
"'|'",
")",
"for",
"kvp",
"in",
"kvps",
":",
"fields",
"=",
"kvp",
".",
"split",
"(",
"'^'",
")",
"if",
"(",
"(",
"len",
"(",
"fields",
")",
"==",
"2",
")",
"and",
"(",
"fields",
"[",
"0",
"]",
"==",
"key",
")",
")",
":",
"return",
"fields",
"[",
"1",
"]"
] | extracts value of the specified field from provider_location string . | train | false |
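`provider_location` here is a `'|'`-separated list of `key^value` pairs, so calls behave like this (the values are illustrative):

```python
loc = 'ip^10.0.0.1|port^3260'
print(extract_provider_location(loc, 'port'))  # '3260'
print(extract_provider_location(loc, 'lun'))   # None -- no matching key
print(extract_provider_location('', 'ip'))     # None -- empty location
```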
15,326 | def perform_update(request, obj):
return execute_locked(request, obj, _('All repositories were updated.'), obj.do_update, request, method=request.GET.get('method', None))
| [
"def",
"perform_update",
"(",
"request",
",",
"obj",
")",
":",
"return",
"execute_locked",
"(",
"request",
",",
"obj",
",",
"_",
"(",
"'All repositories were updated.'",
")",
",",
"obj",
".",
"do_update",
",",
"request",
",",
"method",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'method'",
",",
"None",
")",
")"
] | triggers update of given object . | train | false |
15,329 | def deploy_snmp(snmp, host=None, admin_username=None, admin_password=None, module=None):
return __execute_cmd('deploy -v SNMPv2 {0} ro'.format(snmp), host=host, admin_username=admin_username, admin_password=admin_password, module=module)
| [
"def",
"deploy_snmp",
"(",
"snmp",
",",
"host",
"=",
"None",
",",
"admin_username",
"=",
"None",
",",
"admin_password",
"=",
"None",
",",
"module",
"=",
"None",
")",
":",
"return",
"__execute_cmd",
"(",
"'deploy -v SNMPv2 {0} ro'",
".",
"format",
"(",
"snmp",
")",
",",
"host",
"=",
"host",
",",
"admin_username",
"=",
"admin_username",
",",
"admin_password",
"=",
"admin_password",
",",
"module",
"=",
"module",
")"
] | change the quickdeploy snmp community string . | train | true |
15,330 | def _find_appropriate_compiler(_config_vars):
if ('CC' in os.environ):
return _config_vars
cc = oldcc = _config_vars['CC'].split()[0]
if (not _find_executable(cc)):
cc = _find_build_tool('clang')
elif os.path.basename(cc).startswith('gcc'):
data = _read_output(("'%s' --version" % (cc.replace("'", '\'"\'"\''),)))
if (data and ('llvm-gcc' in data)):
cc = _find_build_tool('clang')
if (not cc):
raise SystemError('Cannot locate working compiler')
if (cc != oldcc):
for cv in _COMPILER_CONFIG_VARS:
if ((cv in _config_vars) and (cv not in os.environ)):
cv_split = _config_vars[cv].split()
cv_split[0] = (cc if (cv != 'CXX') else (cc + '++'))
_save_modified_value(_config_vars, cv, ' '.join(cv_split))
return _config_vars
| [
"def",
"_find_appropriate_compiler",
"(",
"_config_vars",
")",
":",
"if",
"(",
"'CC'",
"in",
"os",
".",
"environ",
")",
":",
"return",
"_config_vars",
"cc",
"=",
"oldcc",
"=",
"_config_vars",
"[",
"'CC'",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
"if",
"(",
"not",
"_find_executable",
"(",
"cc",
")",
")",
":",
"cc",
"=",
"_find_build_tool",
"(",
"'clang'",
")",
"elif",
"os",
".",
"path",
".",
"basename",
"(",
"cc",
")",
".",
"startswith",
"(",
"'gcc'",
")",
":",
"data",
"=",
"_read_output",
"(",
"(",
"\"'%s' --version\"",
"%",
"(",
"cc",
".",
"replace",
"(",
"\"'\"",
",",
"'\\'\"\\'\"\\''",
")",
",",
")",
")",
")",
"if",
"(",
"data",
"and",
"(",
"'llvm-gcc'",
"in",
"data",
")",
")",
":",
"cc",
"=",
"_find_build_tool",
"(",
"'clang'",
")",
"if",
"(",
"not",
"cc",
")",
":",
"raise",
"SystemError",
"(",
"'Cannot locate working compiler'",
")",
"if",
"(",
"cc",
"!=",
"oldcc",
")",
":",
"for",
"cv",
"in",
"_COMPILER_CONFIG_VARS",
":",
"if",
"(",
"(",
"cv",
"in",
"_config_vars",
")",
"and",
"(",
"cv",
"not",
"in",
"os",
".",
"environ",
")",
")",
":",
"cv_split",
"=",
"_config_vars",
"[",
"cv",
"]",
".",
"split",
"(",
")",
"cv_split",
"[",
"0",
"]",
"=",
"(",
"cc",
"if",
"(",
"cv",
"!=",
"'CXX'",
")",
"else",
"(",
"cc",
"+",
"'++'",
")",
")",
"_save_modified_value",
"(",
"_config_vars",
",",
"cv",
",",
"' '",
".",
"join",
"(",
"cv_split",
")",
")",
"return",
"_config_vars"
] | find appropriate c compiler for extension module builds . | train | false |
15,331 | def test_converter_with_tuples():
class TwoValueConverter(r.BaseConverter, ):
def __init__(self, *args, **kwargs):
super(TwoValueConverter, self).__init__(*args, **kwargs)
self.regex = '(\\w\\w+)/(\\w\\w+)'
def to_python(self, two_values):
(one, two) = two_values.split('/')
return (one, two)
def to_url(self, values):
return ('%s/%s' % (values[0], values[1]))
map = r.Map([r.Rule('/<two:foo>/', endpoint='handler')], converters={'two': TwoValueConverter})
a = map.bind('example.org', '/')
(route, kwargs) = a.match('/qwert/yuiop/')
assert (kwargs['foo'] == ('qwert', 'yuiop'))
| [
"def",
"test_converter_with_tuples",
"(",
")",
":",
"class",
"TwoValueConverter",
"(",
"r",
".",
"BaseConverter",
",",
")",
":",
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"super",
"(",
"TwoValueConverter",
",",
"self",
")",
".",
"__init__",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"self",
".",
"regex",
"=",
"'(\\\\w\\\\w+)/(\\\\w\\\\w+)'",
"def",
"to_python",
"(",
"self",
",",
"two_values",
")",
":",
"(",
"one",
",",
"two",
")",
"=",
"two_values",
".",
"split",
"(",
"'/'",
")",
"return",
"(",
"one",
",",
"two",
")",
"def",
"to_url",
"(",
"self",
",",
"values",
")",
":",
"return",
"(",
"'%s/%s'",
"%",
"(",
"values",
"[",
"0",
"]",
",",
"values",
"[",
"1",
"]",
")",
")",
"map",
"=",
"r",
".",
"Map",
"(",
"[",
"r",
".",
"Rule",
"(",
"'/<two:foo>/'",
",",
"endpoint",
"=",
"'handler'",
")",
"]",
",",
"converters",
"=",
"{",
"'two'",
":",
"TwoValueConverter",
"}",
")",
"a",
"=",
"map",
".",
"bind",
"(",
"'example.org'",
",",
"'/'",
")",
"(",
"route",
",",
"kwargs",
")",
"=",
"a",
".",
"match",
"(",
"'/qwert/yuiop/'",
")",
"assert",
"(",
"kwargs",
"[",
"'foo'",
"]",
"==",
"(",
"'qwert'",
",",
"'yuiop'",
")",
")"
] | regression test for URL . | train | false |
15,332 | def _attachment_sequence(attachments):
if ((len(attachments) == 2) and isinstance(attachments[0], basestring)):
return (attachments,)
return attachments
| [
"def",
"_attachment_sequence",
"(",
"attachments",
")",
":",
"if",
"(",
"(",
"len",
"(",
"attachments",
")",
"==",
"2",
")",
"and",
"isinstance",
"(",
"attachments",
"[",
"0",
"]",
",",
"basestring",
")",
")",
":",
"return",
"(",
"attachments",
",",
")",
"return",
"attachments"
] | forces attachments to be sequenceable type . | train | false |
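A quick illustration (note the snippet is Python 2 code -- it relies on `basestring`):

```python
single = ('notes.txt', 'file body')          # one (name, data) pair
many = [('a.txt', 'aaa'), ('b.txt', 'bbb')]  # already a sequence of pairs
_attachment_sequence(single)  # (('notes.txt', 'file body'),) -- wrapped
_attachment_sequence(many)    # returned unchanged
```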
15,334 | def _rename_node(graph, node_id, copy_id):
graph._add_node_silent(copy_id)
graph.node[copy_id] = graph.node[node_id]
for nbr in graph.neighbors(node_id):
wt = graph[node_id][nbr]['weight']
graph.add_edge(nbr, copy_id, {'weight': wt})
graph.remove_node(node_id)
| [
"def",
"_rename_node",
"(",
"graph",
",",
"node_id",
",",
"copy_id",
")",
":",
"graph",
".",
"_add_node_silent",
"(",
"copy_id",
")",
"graph",
".",
"node",
"[",
"copy_id",
"]",
"=",
"graph",
".",
"node",
"[",
"node_id",
"]",
"for",
"nbr",
"in",
"graph",
".",
"neighbors",
"(",
"node_id",
")",
":",
"wt",
"=",
"graph",
"[",
"node_id",
"]",
"[",
"nbr",
"]",
"[",
"'weight'",
"]",
"graph",
".",
"add_edge",
"(",
"nbr",
",",
"copy_id",
",",
"{",
"'weight'",
":",
"wt",
"}",
")",
"graph",
".",
"remove_node",
"(",
"node_id",
")"
] | rename node_id in graph to copy_id . | train | false |
15,336 | @contextlib.contextmanager
def captured_cuda_stdout():
sys.stdout.flush()
if config.ENABLE_CUDASIM:
with captured_stdout() as stream:
(yield PythonTextCapture(stream))
else:
from numba import cuda
fd = sys.__stdout__.fileno()
with redirect_fd(fd) as stream:
(yield CUDATextCapture(stream))
cuda.synchronize()
| [
"@",
"contextlib",
".",
"contextmanager",
"def",
"captured_cuda_stdout",
"(",
")",
":",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"if",
"config",
".",
"ENABLE_CUDASIM",
":",
"with",
"captured_stdout",
"(",
")",
"as",
"stream",
":",
"(",
"yield",
"PythonTextCapture",
"(",
"stream",
")",
")",
"else",
":",
"from",
"numba",
"import",
"cuda",
"fd",
"=",
"sys",
".",
"__stdout__",
".",
"fileno",
"(",
")",
"with",
"redirect_fd",
"(",
"fd",
")",
"as",
"stream",
":",
"(",
"yield",
"CUDATextCapture",
"(",
"stream",
")",
")",
"cuda",
".",
"synchronize",
"(",
")"
] | return a minimal stream-like object capturing the text output of either cuda or the simulator . | train | false |
15,337 | def document_fromstring(html, guess_charset=True, parser=None):
if (not isinstance(html, _strings)):
raise TypeError('string required')
if (parser is None):
parser = html_parser
return parser.parse(html, useChardet=guess_charset).getroot()
| [
"def",
"document_fromstring",
"(",
"html",
",",
"guess_charset",
"=",
"True",
",",
"parser",
"=",
"None",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"html",
",",
"_strings",
")",
")",
":",
"raise",
"TypeError",
"(",
"'string required'",
")",
"if",
"(",
"parser",
"is",
"None",
")",
":",
"parser",
"=",
"html_parser",
"return",
"parser",
".",
"parse",
"(",
"html",
",",
"useChardet",
"=",
"guess_charset",
")",
".",
"getroot",
"(",
")"
] | parse a whole document from a string . | train | true |
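This matches `lxml.html.html5parser.document_fromstring`; a minimal call, assuming lxml and html5lib are available:

```python
root = document_fromstring('<p>hello</p>')
# root is the parsed document's <html> element (the tag may carry the
# XHTML namespace depending on parser settings)
print(root.tag)
```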
15,339 | def p_shift_expression_2(t):
pass
| [
"def",
"p_shift_expression_2",
"(",
"t",
")",
":",
"pass"
] | shift_expression : shift_expression lshift additive_expression . | train | false |
15,340 | def test_str():
mlp = MLP(nvis=2, layers=[Linear(2, 'h0', irange=0), Linear(2, 'h1', irange=0)])
s = str(mlp)
assert isinstance(s, six.string_types)
| [
"def",
"test_str",
"(",
")",
":",
"mlp",
"=",
"MLP",
"(",
"nvis",
"=",
"2",
",",
"layers",
"=",
"[",
"Linear",
"(",
"2",
",",
"'h0'",
",",
"irange",
"=",
"0",
")",
",",
"Linear",
"(",
"2",
",",
"'h1'",
",",
"irange",
"=",
"0",
")",
"]",
")",
"s",
"=",
"str",
"(",
"mlp",
")",
"assert",
"isinstance",
"(",
"s",
",",
"six",
".",
"string_types",
")"
] | make sure the __str__ method returns a string . | train | false |
15,341 | def bitstobytes(bits):
ret = []
nextbyte = 0
nextbit = 7
for bit in bits:
if bit:
nextbyte = (nextbyte | (1 << nextbit))
if nextbit:
nextbit = (nextbit - 1)
else:
ret.append(nextbyte)
nextbit = 7
nextbyte = 0
if (nextbit < 7):
ret.append(nextbyte)
return ret
| [
"def",
"bitstobytes",
"(",
"bits",
")",
":",
"ret",
"=",
"[",
"]",
"nextbyte",
"=",
"0",
"nextbit",
"=",
"7",
"for",
"bit",
"in",
"bits",
":",
"if",
"bit",
":",
"nextbyte",
"=",
"(",
"nextbyte",
"|",
"(",
"1",
"<<",
"nextbit",
")",
")",
"if",
"nextbit",
":",
"nextbit",
"=",
"(",
"nextbit",
"-",
"1",
")",
"else",
":",
"ret",
".",
"append",
"(",
"nextbyte",
")",
"nextbit",
"=",
"7",
"nextbyte",
"=",
"0",
"if",
"(",
"nextbit",
"<",
"7",
")",
":",
"ret",
".",
"append",
"(",
"nextbyte",
")",
"return",
"ret"
] | interprets an indexable list of booleans as bits . | train | false |
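Two worked calls make the MSB-first packing concrete:

```python
print(bitstobytes([1, 0, 0, 0, 0, 0, 0, 1]))  # [129] -- 0b10000001
print(bitstobytes([1, 1]))                    # [192] -- 0b11000000, partial byte padded with zeros
```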
15,342 | def _get_branch_head(branch):
return ('%s@head' % branch)
| [
"def",
"_get_branch_head",
"(",
"branch",
")",
":",
"return",
"(",
"'%s@head'",
"%",
"branch",
")"
] | get the latest @head specification for a branch . | train | false |
15,343 | def _context_factory_and_credential(path, host, port):
ca = Certificate.loadPEM(path.child('cluster.crt').getContent())
node_credential = NodeCredential.from_path(path, 'node')
policy = ControlServicePolicy(ca_certificate=ca, client_credential=node_credential.credential)
return _TLSContext(context_factory=policy.creatorForNetloc(host, port), node_credential=node_credential)
| [
"def",
"_context_factory_and_credential",
"(",
"path",
",",
"host",
",",
"port",
")",
":",
"ca",
"=",
"Certificate",
".",
"loadPEM",
"(",
"path",
".",
"child",
"(",
"'cluster.crt'",
")",
".",
"getContent",
"(",
")",
")",
"node_credential",
"=",
"NodeCredential",
".",
"from_path",
"(",
"path",
",",
"'node'",
")",
"policy",
"=",
"ControlServicePolicy",
"(",
"ca_certificate",
"=",
"ca",
",",
"client_credential",
"=",
"node_credential",
".",
"credential",
")",
"return",
"_TLSContext",
"(",
"context_factory",
"=",
"policy",
".",
"creatorForNetloc",
"(",
"host",
",",
"port",
")",
",",
"node_credential",
"=",
"node_credential",
")"
] | load a tls context factory for the amp client from the path where configuration and certificates live . | train | false |
15,344 | def _concatenate2(arrays, axes=[]):
if isinstance(arrays, Iterator):
arrays = list(arrays)
if (not isinstance(arrays, (list, tuple))):
return arrays
if (len(axes) > 1):
arrays = [_concatenate2(a, axes=axes[1:]) for a in arrays]
return np.concatenate(arrays, axis=axes[0])
| [
"def",
"_concatenate2",
"(",
"arrays",
",",
"axes",
"=",
"[",
"]",
")",
":",
"if",
"isinstance",
"(",
"arrays",
",",
"Iterator",
")",
":",
"arrays",
"=",
"list",
"(",
"arrays",
")",
"if",
"(",
"not",
"isinstance",
"(",
"arrays",
",",
"(",
"list",
",",
"tuple",
")",
")",
")",
":",
"return",
"arrays",
"if",
"(",
"len",
"(",
"axes",
")",
">",
"1",
")",
":",
"arrays",
"=",
"[",
"_concatenate2",
"(",
"a",
",",
"axes",
"=",
"axes",
"[",
"1",
":",
"]",
")",
"for",
"a",
"in",
"arrays",
"]",
"return",
"np",
".",
"concatenate",
"(",
"arrays",
",",
"axis",
"=",
"axes",
"[",
"0",
"]",
")"
] | recursively concatenate nested lists of arrays along axes ; each entry in axes corresponds to a level of the nested list . | train | false |
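For example, four 2x2 blocks arranged in a nested list are stitched into one 4x4 array (assuming the snippet's `np` and `Iterator` names -- `numpy` and `collections.Iterator` in the dask source -- are in scope):

```python
import numpy as np
a, b = np.ones((2, 2)), np.zeros((2, 2))
out = _concatenate2([[a, b], [b, a]], axes=[0, 1])
print(out.shape)  # (4, 4): inner lists joined along axis 1, outer along axis 0
```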
15,348 | def str2markup(sourcestring, colors=None, title='', markup='xhtml', header=None, footer=None, linenumbers=0, form=None):
if (markup.lower() == 'html'):
return (None, str2html(sourcestring, colors=colors, title=title, header=header, footer=footer, markup=markup, linenumbers=linenumbers, form=form))
else:
return str2css(sourcestring, colors=colors, title=title, header=header, footer=footer, markup=markup, linenumbers=linenumbers, form=form)
| [
"def",
"str2markup",
"(",
"sourcestring",
",",
"colors",
"=",
"None",
",",
"title",
"=",
"''",
",",
"markup",
"=",
"'xhtml'",
",",
"header",
"=",
"None",
",",
"footer",
"=",
"None",
",",
"linenumbers",
"=",
"0",
",",
"form",
"=",
"None",
")",
":",
"if",
"(",
"markup",
".",
"lower",
"(",
")",
"==",
"'html'",
")",
":",
"return",
"(",
"None",
",",
"str2html",
"(",
"sourcestring",
",",
"colors",
"=",
"colors",
",",
"title",
"=",
"title",
",",
"header",
"=",
"header",
",",
"footer",
"=",
"footer",
",",
"markup",
"=",
"markup",
",",
"linenumbers",
"=",
"linenumbers",
",",
"form",
"=",
"form",
")",
")",
"else",
":",
"return",
"str2css",
"(",
"sourcestring",
",",
"colors",
"=",
"colors",
",",
"title",
"=",
"title",
",",
"header",
"=",
"header",
",",
"footer",
"=",
"footer",
",",
"markup",
"=",
"markup",
",",
"linenumbers",
"=",
"linenumbers",
",",
"form",
"=",
"form",
")"
] | convert code strings into . | train | false |
15,350 | def assert_rpm_content(test_case, expected_paths, package_path):
output = check_output(['rpm', '--query', '--list', '--package', package_path.path])
actual_paths = set(map(FilePath, output.splitlines()))
test_case.assertEqual(expected_paths, actual_paths)
| [
"def",
"assert_rpm_content",
"(",
"test_case",
",",
"expected_paths",
",",
"package_path",
")",
":",
"output",
"=",
"check_output",
"(",
"[",
"'rpm'",
",",
"'--query'",
",",
"'--list'",
",",
"'--package'",
",",
"package_path",
".",
"path",
"]",
")",
"actual_paths",
"=",
"set",
"(",
"map",
"(",
"FilePath",
",",
"output",
".",
"splitlines",
"(",
")",
")",
")",
"test_case",
".",
"assertEqual",
"(",
"expected_paths",
",",
"actual_paths",
")"
] | fail unless the rpm file at rpm_path contains all the expected_paths . | train | false |
15,351 | def record_summary(record, indent=' '):
if (record.id == record.name):
answer = ("%sID and Name='%s',\n%sSeq='" % (indent, record.id, indent))
else:
answer = ("%sID = '%s', Name='%s',\n%sSeq='" % (indent, record.id, record.name, indent))
if (record.seq is None):
answer += 'None'
else:
if (len(record.seq) > 50):
answer += ((str(record.seq[:40]) + '...') + str(record.seq[(-7):]))
else:
answer += str(record.seq)
answer += ("', length=%i" % len(record.seq))
return answer
| [
"def",
"record_summary",
"(",
"record",
",",
"indent",
"=",
"' '",
")",
":",
"if",
"(",
"record",
".",
"id",
"==",
"record",
".",
"name",
")",
":",
"answer",
"=",
"(",
"\"%sID and Name='%s',\\n%sSeq='\"",
"%",
"(",
"indent",
",",
"record",
".",
"id",
",",
"indent",
")",
")",
"else",
":",
"answer",
"=",
"(",
"\"%sID = '%s', Name='%s',\\n%sSeq='\"",
"%",
"(",
"indent",
",",
"record",
".",
"id",
",",
"record",
".",
"name",
",",
"indent",
")",
")",
"if",
"(",
"record",
".",
"seq",
"is",
"None",
")",
":",
"answer",
"+=",
"'None'",
"else",
":",
"if",
"(",
"len",
"(",
"record",
".",
"seq",
")",
">",
"50",
")",
":",
"answer",
"+=",
"(",
"(",
"str",
"(",
"record",
".",
"seq",
"[",
":",
"40",
"]",
")",
"+",
"'...'",
")",
"+",
"str",
"(",
"record",
".",
"seq",
"[",
"(",
"-",
"7",
")",
":",
"]",
")",
")",
"else",
":",
"answer",
"+=",
"str",
"(",
"record",
".",
"seq",
")",
"answer",
"+=",
"(",
"\"', length=%i\"",
"%",
"len",
"(",
"record",
".",
"seq",
")",
")",
"return",
"answer"
] | returns a concise summary of a seqrecord object as a string . | train | false |
15,352 | def console_get_all_by_instance(context, instance_uuid, columns_to_join=None):
return IMPL.console_get_all_by_instance(context, instance_uuid, columns_to_join)
| [
"def",
"console_get_all_by_instance",
"(",
"context",
",",
"instance_uuid",
",",
"columns_to_join",
"=",
"None",
")",
":",
"return",
"IMPL",
".",
"console_get_all_by_instance",
"(",
"context",
",",
"instance_uuid",
",",
"columns_to_join",
")"
] | get consoles for a given instance . | train | false |
15,353 | @with_setup(prepare_stdout)
def test_output_when_could_not_find_features_verbosity_level_2():
path = fs.relpath(join(abspath(dirname(__file__)), 'no_features', 'unexistent-folder'))
runner = Runner(path, verbosity=2)
runner.run()
assert_stdout_lines(('Oops!\ncould not find features at ./%s\n' % path))
| [
"@",
"with_setup",
"(",
"prepare_stdout",
")",
"def",
"test_output_when_could_not_find_features_verbosity_level_2",
"(",
")",
":",
"path",
"=",
"fs",
".",
"relpath",
"(",
"join",
"(",
"abspath",
"(",
"dirname",
"(",
"__file__",
")",
")",
",",
"'no_features'",
",",
"'unexistent-folder'",
")",
")",
"runner",
"=",
"Runner",
"(",
"path",
",",
"verbosity",
"=",
"2",
")",
"runner",
".",
"run",
"(",
")",
"assert_stdout_lines",
"(",
"(",
"'Oops!\\ncould not find features at ./%s\\n'",
"%",
"path",
")",
")"
] | testing the output when features could not be found at verbosity level 2 . | train | false |
15,354 | def as_even(iterator_cls):
assert issubclass(iterator_cls, SubsetIterator)
dct = ForcedEvenIterator.__dict__.copy()
dct['_base_iterator_cls'] = iterator_cls
dct['fancy'] = iterator_cls.fancy
dct['stochastic'] = iterator_cls.stochastic
NewForcedEvenClass = type(('ForcedEven%s' % iterator_cls.__name__), ForcedEvenIterator.__bases__, dct)
return NewForcedEvenClass
| [
"def",
"as_even",
"(",
"iterator_cls",
")",
":",
"assert",
"issubclass",
"(",
"iterator_cls",
",",
"SubsetIterator",
")",
"dct",
"=",
"ForcedEvenIterator",
".",
"__dict__",
".",
"copy",
"(",
")",
"dct",
"[",
"'_base_iterator_cls'",
"]",
"=",
"iterator_cls",
"dct",
"[",
"'fancy'",
"]",
"=",
"iterator_cls",
".",
"fancy",
"dct",
"[",
"'stochastic'",
"]",
"=",
"iterator_cls",
".",
"stochastic",
"NewForcedEvenClass",
"=",
"type",
"(",
"(",
"'ForcedEven%s'",
"%",
"iterator_cls",
".",
"__name__",
")",
",",
"ForcedEvenIterator",
".",
"__bases__",
",",
"dct",
")",
"return",
"NewForcedEvenClass"
] | returns a class wrapping iterator_cls that forces equal batch size . | train | false |
15,355 | def read_double_matrix(fid, rows, cols):
return _unpack_matrix(fid, rows, cols, dtype='>f8', out_dtype=np.float64)
| [
"def",
"read_double_matrix",
"(",
"fid",
",",
"rows",
",",
"cols",
")",
":",
"return",
"_unpack_matrix",
"(",
"fid",
",",
"rows",
",",
"cols",
",",
"dtype",
"=",
"'>f8'",
",",
"out_dtype",
"=",
"np",
".",
"float64",
")"
] | read 64bit float matrix from bti file . | train | false |
15,356 | def collect_hosts(hosts, randomize=True):
(host_ports, chroot) = hosts.partition('/')[::2]
chroot = (('/' + chroot) if chroot else None)
result = []
for host_port in host_ports.split(','):
res = urlsplit(('xxx://' + host_port))
host = res.hostname
port = (int(res.port) if res.port else 2181)
result.append((host.strip(), port))
if randomize:
random.shuffle(result)
return (result, chroot)
| [
"def",
"collect_hosts",
"(",
"hosts",
",",
"randomize",
"=",
"True",
")",
":",
"(",
"host_ports",
",",
"chroot",
")",
"=",
"hosts",
".",
"partition",
"(",
"'/'",
")",
"[",
":",
":",
"2",
"]",
"chroot",
"=",
"(",
"(",
"'/'",
"+",
"chroot",
")",
"if",
"chroot",
"else",
"None",
")",
"result",
"=",
"[",
"]",
"for",
"host_port",
"in",
"host_ports",
".",
"split",
"(",
"','",
")",
":",
"res",
"=",
"urlsplit",
"(",
"(",
"'xxx://'",
"+",
"host_port",
")",
")",
"host",
"=",
"res",
".",
"hostname",
"port",
"=",
"(",
"int",
"(",
"res",
".",
"port",
")",
"if",
"res",
".",
"port",
"else",
"2181",
")",
"result",
".",
"append",
"(",
"(",
"host",
".",
"strip",
"(",
")",
",",
"port",
")",
")",
"if",
"randomize",
":",
"random",
".",
"shuffle",
"(",
"result",
")",
"return",
"(",
"result",
",",
"chroot",
")"
] | collect a set of hosts and an optional chroot from a string . | train | false |
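A worked call (with `randomize=False` so the result is deterministic):

```python
hosts, chroot = collect_hosts('zk1:2181,zk2/app', randomize=False)
print(hosts)   # [('zk1', 2181), ('zk2', 2181)] -- default port 2181 filled in
print(chroot)  # '/app'
```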
15,357 | def indentXML(elem, level=0):
i = (u'\n' + (level * u' '))
if len(elem):
if ((not elem.text) or (not elem.text.strip())):
elem.text = (i + u' ')
if ((not elem.tail) or (not elem.tail.strip())):
elem.tail = i
for elem in elem:
indentXML(elem, (level + 1))
if ((not elem.tail) or (not elem.tail.strip())):
elem.tail = i
elif (level and ((not elem.tail) or (not elem.tail.strip()))):
elem.tail = i
| [
"def",
"indentXML",
"(",
"elem",
",",
"level",
"=",
"0",
")",
":",
"i",
"=",
"(",
"u'\\n'",
"+",
"(",
"level",
"*",
"u' '",
")",
")",
"if",
"len",
"(",
"elem",
")",
":",
"if",
"(",
"(",
"not",
"elem",
".",
"text",
")",
"or",
"(",
"not",
"elem",
".",
"text",
".",
"strip",
"(",
")",
")",
")",
":",
"elem",
".",
"text",
"=",
"(",
"i",
"+",
"u' '",
")",
"if",
"(",
"(",
"not",
"elem",
".",
"tail",
")",
"or",
"(",
"not",
"elem",
".",
"tail",
".",
"strip",
"(",
")",
")",
")",
":",
"elem",
".",
"tail",
"=",
"i",
"for",
"elem",
"in",
"elem",
":",
"indentXML",
"(",
"elem",
",",
"(",
"level",
"+",
"1",
")",
")",
"if",
"(",
"(",
"not",
"elem",
".",
"tail",
")",
"or",
"(",
"not",
"elem",
".",
"tail",
".",
"strip",
"(",
")",
")",
")",
":",
"elem",
".",
"tail",
"=",
"i",
"elif",
"(",
"level",
"and",
"(",
"(",
"not",
"elem",
".",
"tail",
")",
"or",
"(",
"not",
"elem",
".",
"tail",
".",
"strip",
"(",
")",
")",
")",
")",
":",
"elem",
".",
"tail",
"=",
"i"
] | does our pretty printing . | train | false |
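The function mutates the element tree in place; with the standard library's `ElementTree`:

```python
import xml.etree.ElementTree as ET

root = ET.fromstring('<shows><show/><show/></shows>')
indentXML(root)
print(ET.tostring(root).decode())
# <shows>
#   <show />
#   <show />
# </shows>
```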
15,358 | def delphi_solution_votes(row):
if hasattr(row, 'delphi_solution'):
row = row.delphi_solution
try:
solution_id = row.id
problem_id = row.problem_id
except AttributeError:
return None
vtable = current.s3db.delphi_vote
query = (vtable.solution_id == solution_id)
votes = current.db(query).count()
url = URL(c='delphi', f='problem', args=[problem_id, 'results'])
return A(votes, _href=url)
| [
"def",
"delphi_solution_votes",
"(",
"row",
")",
":",
"if",
"hasattr",
"(",
"row",
",",
"'delphi_solution'",
")",
":",
"row",
"=",
"row",
".",
"delphi_solution",
"try",
":",
"solution_id",
"=",
"row",
".",
"id",
"problem_id",
"=",
"row",
".",
"problem_id",
"except",
"AttributeError",
":",
"return",
"None",
"vtable",
"=",
"current",
".",
"s3db",
".",
"delphi_vote",
"query",
"=",
"(",
"vtable",
".",
"solution_id",
"==",
"solution_id",
")",
"votes",
"=",
"current",
".",
"db",
"(",
"query",
")",
".",
"count",
"(",
")",
"url",
"=",
"URL",
"(",
"c",
"=",
"'delphi'",
",",
"f",
"=",
"'problem'",
",",
"args",
"=",
"[",
"problem_id",
",",
"'results'",
"]",
")",
"return",
"A",
"(",
"votes",
",",
"_href",
"=",
"url",
")"
] | clickable number of votes for a solution . | train | false |
15,359 | def get_sorcery_ver(module):
cmd_sorcery = ('%s --version' % SORCERY['sorcery'])
(rc, stdout, stderr) = module.run_command(cmd_sorcery)
if ((rc != 0) or (not stdout)):
module.fail_json(msg='unable to get Sorcery version')
return stdout.strip()
| [
"def",
"get_sorcery_ver",
"(",
"module",
")",
":",
"cmd_sorcery",
"=",
"(",
"'%s --version'",
"%",
"SORCERY",
"[",
"'sorcery'",
"]",
")",
"(",
"rc",
",",
"stdout",
",",
"stderr",
")",
"=",
"module",
".",
"run_command",
"(",
"cmd_sorcery",
")",
"if",
"(",
"(",
"rc",
"!=",
"0",
")",
"or",
"(",
"not",
"stdout",
")",
")",
":",
"module",
".",
"fail_json",
"(",
"msg",
"=",
"'unable to get Sorcery version'",
")",
"return",
"stdout",
".",
"strip",
"(",
")"
] | get sorcery version . | train | false |
15,362 | @only_ci
def generate_requirements_txt(failed_deps):
req_file = file(REQUIREMENTS_TXT, 'w')
if failed_deps:
for pkg in failed_deps:
if pkg.is_git:
req_file.write(('%s\n' % pkg.git_src))
else:
req_file.write(('%s==%s\n' % (pkg.package_name, pkg.package_version)))
req_file.close()
return REQUIREMENTS_TXT
| [
"@",
"only_ci",
"def",
"generate_requirements_txt",
"(",
"failed_deps",
")",
":",
"req_file",
"=",
"file",
"(",
"REQUIREMENTS_TXT",
",",
"'w'",
")",
"if",
"failed_deps",
":",
"for",
"pkg",
"in",
"failed_deps",
":",
"if",
"pkg",
".",
"is_git",
":",
"req_file",
".",
"write",
"(",
"(",
"'%s\\n'",
"%",
"pkg",
".",
"git_src",
")",
")",
"else",
":",
"req_file",
".",
"write",
"(",
"(",
"'%s==%s\\n'",
"%",
"(",
"pkg",
".",
"package_name",
",",
"pkg",
".",
"package_version",
")",
")",
")",
"req_file",
".",
"close",
"(",
")",
"return",
"REQUIREMENTS_TXT"
] | we want to generate a requirements.txt file . | train | false |
15,363 | def _str2int(num):
if (not num):
return None
if num.isdigit():
return int(num)
if (not re.match('\\w\\w:\\w\\w:\\w\\w', num)):
return None
try:
return int(num.replace(':', ''), 16)
except ValueError:
return None
| [
"def",
"_str2int",
"(",
"num",
")",
":",
"if",
"(",
"not",
"num",
")",
":",
"return",
"None",
"if",
"num",
".",
"isdigit",
"(",
")",
":",
"return",
"int",
"(",
"num",
")",
"if",
"(",
"not",
"re",
".",
"match",
"(",
"'\\\\w\\\\w:\\\\w\\\\w:\\\\w\\\\w'",
",",
"num",
")",
")",
":",
"return",
"None",
"try",
":",
"return",
"int",
"(",
"num",
".",
"replace",
"(",
"':'",
",",
"''",
")",
",",
"16",
")",
"except",
"ValueError",
":",
"return",
"None"
] | convert a string into an integer . | train | false |
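Three worked calls (the snippet needs `re` in scope):

```python
print(_str2int('42'))        # 42
print(_str2int('aa:bb:cc'))  # 11189196 -- parsed as hex 0xAABBCC
print(_str2int('oops'))      # None -- neither decimal nor xx:xx:xx hex
```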
15,364 | def binary_closing(input, structure=None, iterations=1, output=None, origin=0):
input = numpy.asarray(input)
if (structure is None):
rank = input.ndim
structure = generate_binary_structure(rank, 1)
tmp = binary_dilation(input, structure, iterations, None, None, 0, origin)
return binary_erosion(tmp, structure, iterations, None, output, 0, origin)
| [
"def",
"binary_closing",
"(",
"input",
",",
"structure",
"=",
"None",
",",
"iterations",
"=",
"1",
",",
"output",
"=",
"None",
",",
"origin",
"=",
"0",
")",
":",
"input",
"=",
"numpy",
".",
"asarray",
"(",
"input",
")",
"if",
"(",
"structure",
"is",
"None",
")",
":",
"rank",
"=",
"input",
".",
"ndim",
"structure",
"=",
"generate_binary_structure",
"(",
"rank",
",",
"1",
")",
"tmp",
"=",
"binary_dilation",
"(",
"input",
",",
"structure",
",",
"iterations",
",",
"None",
",",
"None",
",",
"0",
",",
"origin",
")",
"return",
"binary_erosion",
"(",
"tmp",
",",
"structure",
",",
"iterations",
",",
"None",
",",
"output",
",",
"0",
",",
"origin",
")"
] | return fast binary morphological closing of an image . | train | false |
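This mirrors `scipy.ndimage.binary_closing`: closing a filled square that has a one-pixel hole fills the hole (a sketch assuming `binary_dilation`, `binary_erosion` and `generate_binary_structure` are in scope, as in SciPy itself):

```python
import numpy
a = numpy.zeros((5, 5), dtype=int)
a[1:-1, 1:-1] = 1
a[2, 2] = 0                 # punch a hole in the square
closed = binary_closing(a)
print(int(closed[2, 2]))    # 1 -- the hole is filled by dilation-then-erosion
```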
15,365 | @requires_duration
@apply_to_mask
@apply_to_audio
def loop(self, n=None, duration=None):
result = self.fl_time((lambda t: (t % self.duration)))
if n:
duration = (n * self.duration)
if duration:
result = result.set_duration(duration)
return result
| [
"@",
"requires_duration",
"@",
"apply_to_mask",
"@",
"apply_to_audio",
"def",
"loop",
"(",
"self",
",",
"n",
"=",
"None",
",",
"duration",
"=",
"None",
")",
":",
"result",
"=",
"self",
".",
"fl_time",
"(",
"(",
"lambda",
"t",
":",
"(",
"t",
"%",
"self",
".",
"duration",
")",
")",
")",
"if",
"n",
":",
"duration",
"=",
"(",
"n",
"*",
"self",
".",
"duration",
")",
"if",
"duration",
":",
"result",
"=",
"result",
".",
"set_duration",
"(",
"duration",
")",
"return",
"result"
] | loop a clip so that it plays n times or for a given duration . | train | false |
15,366 | def connect_to_autoscale(region=None):
return _create_client(ep_name='autoscale', region=region)
| [
"def",
"connect_to_autoscale",
"(",
"region",
"=",
"None",
")",
":",
"return",
"_create_client",
"(",
"ep_name",
"=",
"'autoscale'",
",",
"region",
"=",
"region",
")"
] | creates a client for working with autoscale . | train | false |
15,367 | def _sigma_est_kpss(resids, nobs, lags):
s_hat = sum((resids ** 2))
for i in range(1, (lags + 1)):
resids_prod = np.dot(resids[i:], resids[:(nobs - i)])
s_hat += ((2 * resids_prod) * (1.0 - (i / (lags + 1.0))))
return (s_hat / nobs)
| [
"def",
"_sigma_est_kpss",
"(",
"resids",
",",
"nobs",
",",
"lags",
")",
":",
"s_hat",
"=",
"sum",
"(",
"(",
"resids",
"**",
"2",
")",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"(",
"lags",
"+",
"1",
")",
")",
":",
"resids_prod",
"=",
"np",
".",
"dot",
"(",
"resids",
"[",
"i",
":",
"]",
",",
"resids",
"[",
":",
"(",
"nobs",
"-",
"i",
")",
"]",
")",
"s_hat",
"+=",
"(",
"(",
"2",
"*",
"resids_prod",
")",
"*",
"(",
"1.0",
"-",
"(",
"i",
"/",
"(",
"lags",
"+",
"1.0",
")",
")",
")",
")",
"return",
"(",
"s_hat",
"/",
"nobs",
")"
] | computes equation 10 . | train | false |
15,368 | def test_can_create_all_resources():
session = create_session()
for service_name in session.get_available_resources():
(yield (_test_create_resource, session, service_name))
| [
"def",
"test_can_create_all_resources",
"(",
")",
":",
"session",
"=",
"create_session",
"(",
")",
"for",
"service_name",
"in",
"session",
".",
"get_available_resources",
"(",
")",
":",
"(",
"yield",
"(",
"_test_create_resource",
",",
"session",
",",
"service_name",
")",
")"
] | verify we can create all existing resources . | train | false |
15,370 | def test_multiline_is_part_of_previous_step():
lines = strings.get_stripped_lines(MULTI_LINE)
steps = Step.many_from_lines(lines)
print steps
assert_equals(len(steps), 1)
assert isinstance(steps[0], Step)
assert_equals(steps[0].sentence, 'I have a string like so:')
| [
"def",
"test_multiline_is_part_of_previous_step",
"(",
")",
":",
"lines",
"=",
"strings",
".",
"get_stripped_lines",
"(",
"MULTI_LINE",
")",
"steps",
"=",
"Step",
".",
"many_from_lines",
"(",
"lines",
")",
"print",
"steps",
"assert_equals",
"(",
"len",
"(",
"steps",
")",
",",
"1",
")",
"assert",
"isinstance",
"(",
"steps",
"[",
"0",
"]",
",",
"Step",
")",
"assert_equals",
"(",
"steps",
"[",
"0",
"]",
".",
"sentence",
",",
"'I have a string like so:'",
")"
] | it should correctly parse a multi-line string as part of the preceding step . | train | false |
15,372 | def gmmStatus():
a = TpPd(pd=3)
b = MessageType(mesType=32)
c = GmmCause()
packet = ((a / b) / c)
return packet
| [
"def",
"gmmStatus",
"(",
")",
":",
"a",
"=",
"TpPd",
"(",
"pd",
"=",
"3",
")",
"b",
"=",
"MessageType",
"(",
"mesType",
"=",
"32",
")",
"c",
"=",
"GmmCause",
"(",
")",
"packet",
"=",
"(",
"(",
"a",
"/",
"b",
")",
"/",
"c",
")",
"return",
"packet"
] | gmm status section 9 . | train | true |
15,373 | def _normalize_html(data):
try:
data = lxml.html.tostring(lxml.html.fromstring(data), encoding='unicode')
except:
pass
return ('<!DOCTYPE html>\n' + data)
| [
"def",
"_normalize_html",
"(",
"data",
")",
":",
"try",
":",
"data",
"=",
"lxml",
".",
"html",
".",
"tostring",
"(",
"lxml",
".",
"html",
".",
"fromstring",
"(",
"data",
")",
",",
"encoding",
"=",
"'unicode'",
")",
"except",
":",
"pass",
"return",
"(",
"'<!DOCTYPE html>\\n'",
"+",
"data",
")"
] | pass html through lxml to clean it up . | train | false |
15,374 | @receiver(COURSE_CERT_AWARDED, sender=GeneratedCertificate)
def create_course_group_badge(sender, user, course_key, status, **kwargs):
course_group_check(user, course_key)
| [
"@",
"receiver",
"(",
"COURSE_CERT_AWARDED",
",",
"sender",
"=",
"GeneratedCertificate",
")",
"def",
"create_course_group_badge",
"(",
"sender",
",",
"user",
",",
"course_key",
",",
"status",
",",
"**",
"kwargs",
")",
":",
"course_group_check",
"(",
"user",
",",
"course_key",
")"
] | standard signal hook to create badges when a user has completed a prespecified set of courses . | train | false |
15,375 | def generate_fake_facilities(names=('Wilson Elementary',)):
facilities = []
for name in names:
found_facilities = Facility.objects.filter(name=name)
if found_facilities:
facility = found_facilities[0]
logging.info(("Retrieved facility '%s'" % name))
else:
facility = Facility(name=name)
facility.save()
logging.info(("Created facility '%s'" % name))
facilities.append(facility)
return facilities
| [
"def",
"generate_fake_facilities",
"(",
"names",
"=",
"(",
"'Wilson Elementary'",
",",
")",
")",
":",
"facilities",
"=",
"[",
"]",
"for",
"name",
"in",
"names",
":",
"found_facilities",
"=",
"Facility",
".",
"objects",
".",
"filter",
"(",
"name",
"=",
"name",
")",
"if",
"found_facilities",
":",
"facility",
"=",
"found_facilities",
"[",
"0",
"]",
"logging",
".",
"info",
"(",
"(",
"\"Retrieved facility '%s'\"",
"%",
"name",
")",
")",
"else",
":",
"facility",
"=",
"Facility",
"(",
"name",
"=",
"name",
")",
"facility",
".",
"save",
"(",
")",
"logging",
".",
"info",
"(",
"(",
"\"Created facility '%s'\"",
"%",
"name",
")",
")",
"facilities",
".",
"append",
"(",
"facility",
")",
"return",
"facilities"
] | add the given fake facilities . | train | false |
15,376 | def file_autocomplete(request, project_slug):
if ('term' in request.GET):
term = request.GET['term']
else:
raise Http404
queryset = ImportedFile.objects.filter(project__slug=project_slug, path__icontains=term)[:20]
ret_list = []
for filename in queryset:
ret_list.append({'label': filename.path, 'value': filename.path})
json_response = json.dumps(ret_list)
return HttpResponse(json_response, content_type='text/javascript')
| [
"def",
"file_autocomplete",
"(",
"request",
",",
"project_slug",
")",
":",
"if",
"(",
"'term'",
"in",
"request",
".",
"GET",
")",
":",
"term",
"=",
"request",
".",
"GET",
"[",
"'term'",
"]",
"else",
":",
"raise",
"Http404",
"queryset",
"=",
"ImportedFile",
".",
"objects",
".",
"filter",
"(",
"project__slug",
"=",
"project_slug",
",",
"path__icontains",
"=",
"term",
")",
"[",
":",
"20",
"]",
"ret_list",
"=",
"[",
"]",
"for",
"filename",
"in",
"queryset",
":",
"ret_list",
".",
"append",
"(",
"{",
"'label'",
":",
"filename",
".",
"path",
",",
"'value'",
":",
"filename",
".",
"path",
"}",
")",
"json_response",
"=",
"json",
".",
"dumps",
"(",
"ret_list",
")",
"return",
"HttpResponse",
"(",
"json_response",
",",
"content_type",
"=",
"'text/javascript'",
")"
] | return a json list of file names . | train | false |
15,377 | @command('u\\s?([\\d]{1,4})')
def user_more(num):
if (g.browse_mode != 'normal'):
g.message = 'User uploads must refer to a specific video item'
g.message = ((c.y + g.message) + c.w)
g.content = content.generate_songlist_display()
return
g.current_page = 0
item = g.model[(int(num) - 1)]
if (item.ytid in g.meta):
channel_id = g.meta.get(item.ytid, {}).get('uploader')
user = g.meta.get(item.ytid, {}).get('uploaderName')
else:
paf = util.get_pafy(item)
(user, channel_id) = channelfromname(paf.author)
usersearch_id(user, channel_id, '')
| [
"@",
"command",
"(",
"'u\\\\s?([\\\\d]{1,4})'",
")",
"def",
"user_more",
"(",
"num",
")",
":",
"if",
"(",
"g",
".",
"browse_mode",
"!=",
"'normal'",
")",
":",
"g",
".",
"message",
"=",
"'User uploads must refer to a specific video item'",
"g",
".",
"message",
"=",
"(",
"(",
"c",
".",
"y",
"+",
"g",
".",
"message",
")",
"+",
"c",
".",
"w",
")",
"g",
".",
"content",
"=",
"content",
".",
"generate_songlist_display",
"(",
")",
"return",
"g",
".",
"current_page",
"=",
"0",
"item",
"=",
"g",
".",
"model",
"[",
"(",
"int",
"(",
"num",
")",
"-",
"1",
")",
"]",
"if",
"(",
"item",
".",
"ytid",
"in",
"g",
".",
"meta",
")",
":",
"channel_id",
"=",
"g",
".",
"meta",
".",
"get",
"(",
"item",
".",
"ytid",
",",
"{",
"}",
")",
".",
"get",
"(",
"'uploader'",
")",
"user",
"=",
"g",
".",
"meta",
".",
"get",
"(",
"item",
".",
"ytid",
",",
"{",
"}",
")",
".",
"get",
"(",
"'uploaderName'",
")",
"else",
":",
"paf",
"=",
"util",
".",
"get_pafy",
"(",
"item",
")",
"(",
"user",
",",
"channel_id",
")",
"=",
"channelfromname",
"(",
"paf",
".",
"author",
")",
"usersearch_id",
"(",
"user",
",",
"channel_id",
",",
"''",
")"
] | show more videos from user of vid num . | train | false |
15,378 | def aao_art(album):
if (not album.asin):
return
try:
resp = requests_session.get(AAO_URL, params={'asin': album.asin})
log.debug(u'fetchart: scraped art URL: {0}'.format(resp.url))
except requests.RequestException:
log.debug(u'fetchart: error scraping art page')
return
m = re.search(AAO_PAT, resp.text)
if m:
image_url = m.group(1)
(yield image_url)
else:
log.debug(u'fetchart: no image found on page')
| [
"def",
"aao_art",
"(",
"album",
")",
":",
"if",
"(",
"not",
"album",
".",
"asin",
")",
":",
"return",
"try",
":",
"resp",
"=",
"requests_session",
".",
"get",
"(",
"AAO_URL",
",",
"params",
"=",
"{",
"'asin'",
":",
"album",
".",
"asin",
"}",
")",
"log",
".",
"debug",
"(",
"u'fetchart: scraped art URL: {0}'",
".",
"format",
"(",
"resp",
".",
"url",
")",
")",
"except",
"requests",
".",
"RequestException",
":",
"log",
".",
"debug",
"(",
"u'fetchart: error scraping art page'",
")",
"return",
"m",
"=",
"re",
".",
"search",
"(",
"AAO_PAT",
",",
"resp",
".",
"text",
")",
"if",
"m",
":",
"image_url",
"=",
"m",
".",
"group",
"(",
"1",
")",
"(",
"yield",
"image_url",
")",
"else",
":",
"log",
".",
"debug",
"(",
"u'fetchart: no image found on page'",
")"
] | return art url from albumart . | train | false |
15,380 | def repeat(x, repeats, axis=None):
repeats = tensor.as_tensor_variable(repeats)
if (repeats.ndim > 1):
raise ValueError('The dimension of repeats should not exceed 1.')
if ((repeats.ndim == 1) and (not repeats.broadcastable[0])):
return RepeatOp(axis=axis)(x, repeats)
else:
if (repeats.ndim == 1):
repeats = repeats[0]
if (x.dtype == 'uint64'):
raise TypeError("theano.tensor.repeat don't support dtype uint64")
if (axis is None):
axis = 0
x = x.flatten()
else:
if (axis >= x.ndim):
raise ValueError('Axis should not exceed x.ndim-1.')
if (axis < 0):
axis = (x.ndim + axis)
shape = [x.shape[i] for i in xrange(x.ndim)]
shape_ = shape[:]
shape_.insert((axis + 1), repeats)
shape[axis] = (shape[axis] * repeats)
dims_ = list(numpy.arange(x.ndim))
dims_.insert((axis + 1), 'x')
z = tensor.alloc(x.dimshuffle(*dims_), *shape_).reshape(shape)
return z
| [
"def",
"repeat",
"(",
"x",
",",
"repeats",
",",
"axis",
"=",
"None",
")",
":",
"repeats",
"=",
"tensor",
".",
"as_tensor_variable",
"(",
"repeats",
")",
"if",
"(",
"repeats",
".",
"ndim",
">",
"1",
")",
":",
"raise",
"ValueError",
"(",
"'The dimension of repeats should not exceed 1.'",
")",
"if",
"(",
"(",
"repeats",
".",
"ndim",
"==",
"1",
")",
"and",
"(",
"not",
"repeats",
".",
"broadcastable",
"[",
"0",
"]",
")",
")",
":",
"return",
"RepeatOp",
"(",
"axis",
"=",
"axis",
")",
"(",
"x",
",",
"repeats",
")",
"else",
":",
"if",
"(",
"repeats",
".",
"ndim",
"==",
"1",
")",
":",
"repeats",
"=",
"repeats",
"[",
"0",
"]",
"if",
"(",
"x",
".",
"dtype",
"==",
"'uint64'",
")",
":",
"raise",
"TypeError",
"(",
"\"theano.tensor.repeat don't support dtype uint64\"",
")",
"if",
"(",
"axis",
"is",
"None",
")",
":",
"axis",
"=",
"0",
"x",
"=",
"x",
".",
"flatten",
"(",
")",
"else",
":",
"if",
"(",
"axis",
">=",
"x",
".",
"ndim",
")",
":",
"raise",
"ValueError",
"(",
"'Axis should not exceed x.ndim-1.'",
")",
"if",
"(",
"axis",
"<",
"0",
")",
":",
"axis",
"=",
"(",
"x",
".",
"ndim",
"+",
"axis",
")",
"shape",
"=",
"[",
"x",
".",
"shape",
"[",
"i",
"]",
"for",
"i",
"in",
"xrange",
"(",
"x",
".",
"ndim",
")",
"]",
"shape_",
"=",
"shape",
"[",
":",
"]",
"shape_",
".",
"insert",
"(",
"(",
"axis",
"+",
"1",
")",
",",
"repeats",
")",
"shape",
"[",
"axis",
"]",
"=",
"(",
"shape",
"[",
"axis",
"]",
"*",
"repeats",
")",
"dims_",
"=",
"list",
"(",
"numpy",
".",
"arange",
"(",
"x",
".",
"ndim",
")",
")",
"dims_",
".",
"insert",
"(",
"(",
"axis",
"+",
"1",
")",
",",
"'x'",
")",
"z",
"=",
"tensor",
".",
"alloc",
"(",
"x",
".",
"dimshuffle",
"(",
"*",
"dims_",
")",
",",
"*",
"shape_",
")",
".",
"reshape",
"(",
"shape",
")",
"return",
"z"
] | repeat elements of a tensor along an axis . | train | false |
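This is Theano's `theano.tensor.extra_ops.repeat`, mirroring `numpy.repeat`; a symbolic usage sketch (assumes the default float64 `floatX`):

```python
import numpy
import theano
import theano.tensor as T

x = T.matrix('x')
f = theano.function([x], repeat(x, 2, axis=1))
print(f(numpy.arange(4.0).reshape(2, 2)))
# [[0. 0. 1. 1.]
#  [2. 2. 3. 3.]]  -- same result as numpy.repeat
```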
15,381 | def tearDown():
conn = get_conn()
conn.indices.delete_index_if_exists('test-pindex')
| [
"def",
"tearDown",
"(",
")",
":",
"conn",
"=",
"get_conn",
"(",
")",
"conn",
".",
"indices",
".",
"delete_index_if_exists",
"(",
"'test-pindex'",
")"
] | delete the test index created during setup . | train | false |
15,382 | @register.inclusion_tag(engine.get_template('inclusion.html'))
def inclusion_one_default_from_template(one, two='hi'):
return {'result': ('inclusion_one_default_from_template - Expected result: %s, %s' % (one, two))}
| [
"@",
"register",
".",
"inclusion_tag",
"(",
"engine",
".",
"get_template",
"(",
"'inclusion.html'",
")",
")",
"def",
"inclusion_one_default_from_template",
"(",
"one",
",",
"two",
"=",
"'hi'",
")",
":",
"return",
"{",
"'result'",
":",
"(",
"'inclusion_one_default_from_template - Expected result: %s, %s'",
"%",
"(",
"one",
",",
"two",
")",
")",
"}"
] | expected inclusion_one_default_from_template __doc__ . | train | false |
15,383 | def newDerSequence(*der_objs):
der = DerSequence()
for obj in der_objs:
if isinstance(obj, DerObject):
der += obj.encode()
else:
der += obj
return der
| [
"def",
"newDerSequence",
"(",
"*",
"der_objs",
")",
":",
"der",
"=",
"DerSequence",
"(",
")",
"for",
"obj",
"in",
"der_objs",
":",
"if",
"isinstance",
"(",
"obj",
",",
"DerObject",
")",
":",
"der",
"+=",
"obj",
".",
"encode",
"(",
")",
"else",
":",
"der",
"+=",
"obj",
"return",
"der"
] | create a dersequence object . | train | false |
15,384 | @with_config(DEBUG=True, ASSETS_DEBUG='merge')
def test_debug_merge_only():
sub = Bundle('s3', filters=[css], output='bar')
b = Bundle('s1', 's2', sub, output='foo', filters=[js])
jl = bundle_to_joblist(b)
assert (len(jl) == 1)
assert (jl['foo'][0][0] == [])
assert (jl['foo'][1][0] == [])
sub.debug = False
jl = bundle_to_joblist(b)
assert (len(jl) == 1)
assert (jl['foo'][0][0] == [])
assert (jl['foo'][1][0] == [css])
sub.debug = True
jl = bundle_to_joblist(b)
assert (len(jl) == 2)
assert (jl['foo'][0][0] == [])
| [
"@",
"with_config",
"(",
"DEBUG",
"=",
"True",
",",
"ASSETS_DEBUG",
"=",
"'merge'",
")",
"def",
"test_debug_merge_only",
"(",
")",
":",
"sub",
"=",
"Bundle",
"(",
"'s3'",
",",
"filters",
"=",
"[",
"css",
"]",
",",
"output",
"=",
"'bar'",
")",
"b",
"=",
"Bundle",
"(",
"'s1'",
",",
"'s2'",
",",
"sub",
",",
"output",
"=",
"'foo'",
",",
"filters",
"=",
"[",
"js",
"]",
")",
"jl",
"=",
"bundle_to_joblist",
"(",
"b",
")",
"assert",
"(",
"len",
"(",
"jl",
")",
"==",
"1",
")",
"assert",
"(",
"jl",
"[",
"'foo'",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"[",
"]",
")",
"assert",
"(",
"jl",
"[",
"'foo'",
"]",
"[",
"1",
"]",
"[",
"0",
"]",
"==",
"[",
"]",
")",
"sub",
".",
"debug",
"=",
"False",
"jl",
"=",
"bundle_to_joblist",
"(",
"b",
")",
"assert",
"(",
"len",
"(",
"jl",
")",
"==",
"1",
")",
"assert",
"(",
"jl",
"[",
"'foo'",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"[",
"]",
")",
"assert",
"(",
"jl",
"[",
"'foo'",
"]",
"[",
"1",
"]",
"[",
"0",
"]",
"==",
"[",
"css",
"]",
")",
"sub",
".",
"debug",
"=",
"True",
"jl",
"=",
"bundle_to_joblist",
"(",
"b",
")",
"assert",
"(",
"len",
"(",
"jl",
")",
"==",
"2",
")",
"assert",
"(",
"jl",
"[",
"'foo'",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"[",
"]",
")"
] | test the merge only debug option . | train | false |
15,385 | def get_max_workspace_size():
return _max_workspace_size
| [
"def",
"get_max_workspace_size",
"(",
")",
":",
"return",
"_max_workspace_size"
] | gets the maximum workspace size for cudnn . | train | false |
15,386 | def generate_proto_go_source(target, source, env):
source = source[0]
global proto_import_re
import_protos = proto_import_re.findall(source.get_text_contents())
parameters = ('import_prefix=%s/' % env['PROTOBUFGOPATH'])
if import_protos:
proto_mappings = []
for proto in import_protos:
dir = os.path.dirname(proto)
name = os.path.basename(proto)
proto_mappings.append(('M%s=%s' % (proto, os.path.join(dir, name.replace('.', '_')))))
parameters += (',%s' % ','.join(proto_mappings))
cmd = ('%s --proto_path=. --plugin=protoc-gen-go=%s -I. %s -I=%s --go_out=%s:%s %s' % (env['PROTOC'], env['PROTOCGOPLUGIN'], env['PROTOBUFINCS'], os.path.dirname(str(source)), parameters, env['BUILDDIR'], source))
return echospawn(args=[cmd], env=os.environ, sh=None, cmd=None, escape=None)
| [
"def",
"generate_proto_go_source",
"(",
"target",
",",
"source",
",",
"env",
")",
":",
"source",
"=",
"source",
"[",
"0",
"]",
"global",
"proto_import_re",
"import_protos",
"=",
"proto_import_re",
".",
"findall",
"(",
"source",
".",
"get_text_contents",
"(",
")",
")",
"parameters",
"=",
"(",
"'import_prefix=%s/'",
"%",
"env",
"[",
"'PROTOBUFGOPATH'",
"]",
")",
"if",
"import_protos",
":",
"proto_mappings",
"=",
"[",
"]",
"for",
"proto",
"in",
"import_protos",
":",
"dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"proto",
")",
"name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"proto",
")",
"proto_mappings",
".",
"append",
"(",
"(",
"'M%s=%s'",
"%",
"(",
"proto",
",",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"name",
".",
"replace",
"(",
"'.'",
",",
"'_'",
")",
")",
")",
")",
")",
"parameters",
"+=",
"(",
"',%s'",
"%",
"','",
".",
"join",
"(",
"proto_mappings",
")",
")",
"cmd",
"=",
"(",
"'%s --proto_path=. --plugin=protoc-gen-go=%s -I. %s -I=%s --go_out=%s:%s %s'",
"%",
"(",
"env",
"[",
"'PROTOC'",
"]",
",",
"env",
"[",
"'PROTOCGOPLUGIN'",
"]",
",",
"env",
"[",
"'PROTOBUFINCS'",
"]",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"str",
"(",
"source",
")",
")",
",",
"parameters",
",",
"env",
"[",
"'BUILDDIR'",
"]",
",",
"source",
")",
")",
"return",
"echospawn",
"(",
"args",
"=",
"[",
"cmd",
"]",
",",
"env",
"=",
"os",
".",
"environ",
",",
"sh",
"=",
"None",
",",
"cmd",
"=",
"None",
",",
"escape",
"=",
"None",
")"
] | generate go source file by invoking protobuf compiler . | train | false |
15,387 | def is_repo_url(value):
return bool(REPO_REGEX.match(value))
| [
"def",
"is_repo_url",
"(",
"value",
")",
":",
"return",
"bool",
"(",
"REPO_REGEX",
".",
"match",
"(",
"value",
")",
")"
] | return true if value is a repository url . | train | false |
15,388 | @pytest.fixture
def en_tutorial_ts(english_tutorial, ts_directory):
from pootle_format.models import Format
english_tutorial.project.filetypes.add(Format.objects.get(name='ts'))
return store._require_store(english_tutorial, ts_directory, 'tutorial.ts')
| [
"@",
"pytest",
".",
"fixture",
"def",
"en_tutorial_ts",
"(",
"english_tutorial",
",",
"ts_directory",
")",
":",
"from",
"pootle_format",
".",
"models",
"import",
"Format",
"english_tutorial",
".",
"project",
".",
"filetypes",
".",
"add",
"(",
"Format",
".",
"objects",
".",
"get",
"(",
"name",
"=",
"'ts'",
")",
")",
"return",
"store",
".",
"_require_store",
"(",
"english_tutorial",
",",
"ts_directory",
",",
"'tutorial.ts'",
")"
] | require the en/tutorial/tutorial . | train | false |
15,389 | def _attr_get_(obj, attr):
try:
return getattr(obj, attr)
except AttributeError:
return None
| [
"def",
"_attr_get_",
"(",
"obj",
",",
"attr",
")",
":",
"try",
":",
"return",
"getattr",
"(",
"obj",
",",
"attr",
")",
"except",
"AttributeError",
":",
"return",
"None"
] | returns an attributes value . | train | false |
15,390 | def get_iscsi_initiator(execute=None):
root_helper = utils.get_root_helper()
if (not execute):
execute = putils.execute
iscsi = connector.ISCSIConnector(root_helper=root_helper, execute=execute)
return iscsi.get_initiator()
| [
"def",
"get_iscsi_initiator",
"(",
"execute",
"=",
"None",
")",
":",
"root_helper",
"=",
"utils",
".",
"get_root_helper",
"(",
")",
"if",
"(",
"not",
"execute",
")",
":",
"execute",
"=",
"putils",
".",
"execute",
"iscsi",
"=",
"connector",
".",
"ISCSIConnector",
"(",
"root_helper",
"=",
"root_helper",
",",
"execute",
"=",
"execute",
")",
"return",
"iscsi",
".",
"get_initiator",
"(",
")"
] | get iscsi initiator name for this machine . | train | false |
15,391 | def get_cuda_memory():
if (not _cuda_capable):
warn('CUDA not enabled, returning zero for memory')
mem = 0
else:
from pycuda.driver import mem_get_info
mem = mem_get_info()[0]
return sizeof_fmt(mem)
| [
"def",
"get_cuda_memory",
"(",
")",
":",
"if",
"(",
"not",
"_cuda_capable",
")",
":",
"warn",
"(",
"'CUDA not enabled, returning zero for memory'",
")",
"mem",
"=",
"0",
"else",
":",
"from",
"pycuda",
".",
"driver",
"import",
"mem_get_info",
"mem",
"=",
"mem_get_info",
"(",
")",
"[",
"0",
"]",
"return",
"sizeof_fmt",
"(",
"mem",
")"
] | get the amount of free memory for cuda operations . | train | false |
15,392 | def libvlc_media_player_release(p_mi):
f = (_Cfunctions.get('libvlc_media_player_release', None) or _Cfunction('libvlc_media_player_release', ((1,),), None, None, MediaPlayer))
return f(p_mi)
| [
"def",
"libvlc_media_player_release",
"(",
"p_mi",
")",
":",
"f",
"=",
"(",
"_Cfunctions",
".",
"get",
"(",
"'libvlc_media_player_release'",
",",
"None",
")",
"or",
"_Cfunction",
"(",
"'libvlc_media_player_release'",
",",
"(",
"(",
"1",
",",
")",
",",
")",
",",
"None",
",",
"None",
",",
"MediaPlayer",
")",
")",
"return",
"f",
"(",
"p_mi",
")"
] | release a media_player after use decrement the reference count of a media player object . | train | false |
15,393 | def edginess_sobel(image):
edges = mh.sobel(image, just_filter=True)
edges = edges.ravel()
return np.sqrt(np.dot(edges, edges))
| [
"def",
"edginess_sobel",
"(",
"image",
")",
":",
"edges",
"=",
"mh",
".",
"sobel",
"(",
"image",
",",
"just_filter",
"=",
"True",
")",
"edges",
"=",
"edges",
".",
"ravel",
"(",
")",
"return",
"np",
".",
"sqrt",
"(",
"np",
".",
"dot",
"(",
"edges",
",",
"edges",
")",
")"
] | measure the "edginess" of an image image should be a 2d numpy array returns a floating point value which is higher the "edgier" the image is . | train | false |
15,395 | def serviced(method):
@wraps(method)
def decorator(cls, request, *args, **kwargs):
service = atcdClient()
if (service is None):
raise BadGateway()
return method(cls, request, service, *args, **kwargs)
return decorator
| [
"def",
"serviced",
"(",
"method",
")",
":",
"@",
"wraps",
"(",
"method",
")",
"def",
"decorator",
"(",
"cls",
",",
"request",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"service",
"=",
"atcdClient",
"(",
")",
"if",
"(",
"service",
"is",
"None",
")",
":",
"raise",
"BadGateway",
"(",
")",
"return",
"method",
"(",
"cls",
",",
"request",
",",
"service",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
"return",
"decorator"
] | a decorator to check if the service is available or not . | train | false |
15,396 | def _checkKnownRunOrder(order):
if (order not in _runOrders):
raise usage.UsageError(('--order must be one of: %s. See --help-orders for details' % (', '.join((repr(order) for order in _runOrders)),)))
return order
| [
"def",
"_checkKnownRunOrder",
"(",
"order",
")",
":",
"if",
"(",
"order",
"not",
"in",
"_runOrders",
")",
":",
"raise",
"usage",
".",
"UsageError",
"(",
"(",
"'--order must be one of: %s. See --help-orders for details'",
"%",
"(",
"', '",
".",
"join",
"(",
"(",
"repr",
"(",
"order",
")",
"for",
"order",
"in",
"_runOrders",
")",
")",
",",
")",
")",
")",
"return",
"order"
] | check that the given order is a known test running order . | train | false |
15,399 | def property_name(property, index):
statuses = ['', '', 'Open', 'Pending', 'Resolved', 'Closed', 'Waiting on Customer', 'Job Application', 'Monthly']
priorities = ['', 'Low', 'Medium', 'High', 'Urgent']
if (property == 'status'):
return (statuses[index] if (index < len(statuses)) else str(index))
elif (property == 'priority'):
return (priorities[index] if (index < len(priorities)) else str(index))
else:
raise ValueError('Unknown property')
| [
"def",
"property_name",
"(",
"property",
",",
"index",
")",
":",
"statuses",
"=",
"[",
"''",
",",
"''",
",",
"'Open'",
",",
"'Pending'",
",",
"'Resolved'",
",",
"'Closed'",
",",
"'Waiting on Customer'",
",",
"'Job Application'",
",",
"'Monthly'",
"]",
"priorities",
"=",
"[",
"''",
",",
"'Low'",
",",
"'Medium'",
",",
"'High'",
",",
"'Urgent'",
"]",
"if",
"(",
"property",
"==",
"'status'",
")",
":",
"return",
"(",
"statuses",
"[",
"index",
"]",
"if",
"(",
"index",
"<",
"len",
"(",
"statuses",
")",
")",
"else",
"str",
"(",
"index",
")",
")",
"elif",
"(",
"property",
"==",
"'priority'",
")",
":",
"return",
"(",
"priorities",
"[",
"index",
"]",
"if",
"(",
"index",
"<",
"len",
"(",
"priorities",
")",
")",
"else",
"str",
"(",
"index",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown property'",
")"
] | the freshdesk api is currently pretty broken: statuses are customizable but the api will only tell you the number associated with the status . | train | false |
15,400 | def setPaths():
paths.SQLMAP_EXTRAS_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, 'extra')
paths.SQLMAP_PROCS_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, 'procs')
paths.SQLMAP_SHELL_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, 'shell')
paths.SQLMAP_TAMPER_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, 'tamper')
paths.SQLMAP_WAF_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, 'waf')
paths.SQLMAP_TXT_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, 'txt')
paths.SQLMAP_UDF_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, 'udf')
paths.SQLMAP_XML_PATH = os.path.join(paths.SQLMAP_ROOT_PATH, 'xml')
paths.SQLMAP_XML_BANNER_PATH = os.path.join(paths.SQLMAP_XML_PATH, 'banner')
paths.SQLMAP_XML_PAYLOADS_PATH = os.path.join(paths.SQLMAP_XML_PATH, 'payloads')
_ = os.path.join(os.path.expandvars(os.path.expanduser('~')), '.sqlmap')
paths.SQLMAP_OUTPUT_PATH = getUnicode(paths.get('SQLMAP_OUTPUT_PATH', os.path.join(_, 'output')), encoding=sys.getfilesystemencoding())
paths.SQLMAP_DUMP_PATH = os.path.join(paths.SQLMAP_OUTPUT_PATH, '%s', 'dump')
paths.SQLMAP_FILES_PATH = os.path.join(paths.SQLMAP_OUTPUT_PATH, '%s', 'files')
paths.OS_SHELL_HISTORY = os.path.join(_, 'os.hst')
paths.SQL_SHELL_HISTORY = os.path.join(_, 'sql.hst')
paths.SQLMAP_SHELL_HISTORY = os.path.join(_, 'sqlmap.hst')
paths.GITHUB_HISTORY = os.path.join(_, 'github.hst')
paths.COMMON_COLUMNS = os.path.join(paths.SQLMAP_TXT_PATH, 'common-columns.txt')
paths.COMMON_TABLES = os.path.join(paths.SQLMAP_TXT_PATH, 'common-tables.txt')
paths.COMMON_OUTPUTS = os.path.join(paths.SQLMAP_TXT_PATH, 'common-outputs.txt')
paths.SQL_KEYWORDS = os.path.join(paths.SQLMAP_TXT_PATH, 'keywords.txt')
paths.SMALL_DICT = os.path.join(paths.SQLMAP_TXT_PATH, 'smalldict.txt')
paths.USER_AGENTS = os.path.join(paths.SQLMAP_TXT_PATH, 'user-agents.txt')
paths.WORDLIST = os.path.join(paths.SQLMAP_TXT_PATH, 'wordlist.zip')
paths.ERRORS_XML = os.path.join(paths.SQLMAP_XML_PATH, 'errors.xml')
paths.BOUNDARIES_XML = os.path.join(paths.SQLMAP_XML_PATH, 'boundaries.xml')
paths.LIVE_TESTS_XML = os.path.join(paths.SQLMAP_XML_PATH, 'livetests.xml')
paths.QUERIES_XML = os.path.join(paths.SQLMAP_XML_PATH, 'queries.xml')
paths.GENERIC_XML = os.path.join(paths.SQLMAP_XML_BANNER_PATH, 'generic.xml')
paths.MSSQL_XML = os.path.join(paths.SQLMAP_XML_BANNER_PATH, 'mssql.xml')
paths.MYSQL_XML = os.path.join(paths.SQLMAP_XML_BANNER_PATH, 'mysql.xml')
paths.ORACLE_XML = os.path.join(paths.SQLMAP_XML_BANNER_PATH, 'oracle.xml')
paths.PGSQL_XML = os.path.join(paths.SQLMAP_XML_BANNER_PATH, 'postgresql.xml')
for path in paths.values():
if any((path.endswith(_) for _ in ('.txt', '.xml', '.zip'))):
checkFile(path)
| [
"def",
"setPaths",
"(",
")",
":",
"paths",
".",
"SQLMAP_EXTRAS_PATH",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_ROOT_PATH",
",",
"'extra'",
")",
"paths",
".",
"SQLMAP_PROCS_PATH",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_ROOT_PATH",
",",
"'procs'",
")",
"paths",
".",
"SQLMAP_SHELL_PATH",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_ROOT_PATH",
",",
"'shell'",
")",
"paths",
".",
"SQLMAP_TAMPER_PATH",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_ROOT_PATH",
",",
"'tamper'",
")",
"paths",
".",
"SQLMAP_WAF_PATH",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_ROOT_PATH",
",",
"'waf'",
")",
"paths",
".",
"SQLMAP_TXT_PATH",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_ROOT_PATH",
",",
"'txt'",
")",
"paths",
".",
"SQLMAP_UDF_PATH",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_ROOT_PATH",
",",
"'udf'",
")",
"paths",
".",
"SQLMAP_XML_PATH",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_ROOT_PATH",
",",
"'xml'",
")",
"paths",
".",
"SQLMAP_XML_BANNER_PATH",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_XML_PATH",
",",
"'banner'",
")",
"paths",
".",
"SQLMAP_XML_PAYLOADS_PATH",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_XML_PATH",
",",
"'payloads'",
")",
"_",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"expandvars",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~'",
")",
")",
",",
"'.sqlmap'",
")",
"paths",
".",
"SQLMAP_OUTPUT_PATH",
"=",
"getUnicode",
"(",
"paths",
".",
"get",
"(",
"'SQLMAP_OUTPUT_PATH'",
",",
"os",
".",
"path",
".",
"join",
"(",
"_",
",",
"'output'",
")",
")",
",",
"encoding",
"=",
"sys",
".",
"getfilesystemencoding",
"(",
")",
")",
"paths",
".",
"SQLMAP_DUMP_PATH",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_OUTPUT_PATH",
",",
"'%s'",
",",
"'dump'",
")",
"paths",
".",
"SQLMAP_FILES_PATH",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_OUTPUT_PATH",
",",
"'%s'",
",",
"'files'",
")",
"paths",
".",
"OS_SHELL_HISTORY",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_",
",",
"'os.hst'",
")",
"paths",
".",
"SQL_SHELL_HISTORY",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_",
",",
"'sql.hst'",
")",
"paths",
".",
"SQLMAP_SHELL_HISTORY",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_",
",",
"'sqlmap.hst'",
")",
"paths",
".",
"GITHUB_HISTORY",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_",
",",
"'github.hst'",
")",
"paths",
".",
"COMMON_COLUMNS",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_TXT_PATH",
",",
"'common-columns.txt'",
")",
"paths",
".",
"COMMON_TABLES",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_TXT_PATH",
",",
"'common-tables.txt'",
")",
"paths",
".",
"COMMON_OUTPUTS",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_TXT_PATH",
",",
"'common-outputs.txt'",
")",
"paths",
".",
"SQL_KEYWORDS",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_TXT_PATH",
",",
"'keywords.txt'",
")",
"paths",
".",
"SMALL_DICT",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_TXT_PATH",
",",
"'smalldict.txt'",
")",
"paths",
".",
"USER_AGENTS",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_TXT_PATH",
",",
"'user-agents.txt'",
")",
"paths",
".",
"WORDLIST",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_TXT_PATH",
",",
"'wordlist.zip'",
")",
"paths",
".",
"ERRORS_XML",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_XML_PATH",
",",
"'errors.xml'",
")",
"paths",
".",
"BOUNDARIES_XML",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_XML_PATH",
",",
"'boundaries.xml'",
")",
"paths",
".",
"LIVE_TESTS_XML",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_XML_PATH",
",",
"'livetests.xml'",
")",
"paths",
".",
"QUERIES_XML",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_XML_PATH",
",",
"'queries.xml'",
")",
"paths",
".",
"GENERIC_XML",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_XML_BANNER_PATH",
",",
"'generic.xml'",
")",
"paths",
".",
"MSSQL_XML",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_XML_BANNER_PATH",
",",
"'mssql.xml'",
")",
"paths",
".",
"MYSQL_XML",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_XML_BANNER_PATH",
",",
"'mysql.xml'",
")",
"paths",
".",
"ORACLE_XML",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_XML_BANNER_PATH",
",",
"'oracle.xml'",
")",
"paths",
".",
"PGSQL_XML",
"=",
"os",
".",
"path",
".",
"join",
"(",
"paths",
".",
"SQLMAP_XML_BANNER_PATH",
",",
"'postgresql.xml'",
")",
"for",
"path",
"in",
"paths",
".",
"values",
"(",
")",
":",
"if",
"any",
"(",
"(",
"path",
".",
"endswith",
"(",
"_",
")",
"for",
"_",
"in",
"(",
"'.txt'",
",",
"'.xml'",
",",
"'.zip'",
")",
")",
")",
":",
"checkFile",
"(",
"path",
")"
] | sets absolute paths for project directories and files . | train | false |
15,401 | def create_some_learner_data():
user = CreateStudentMixin.create_student()
attempt_states = (('not started', 0, 0), ('completed', 100, 15), ('attempted', 50, 10), ('struggling', 30, 25))
exercises = get_random_content(kinds=['Exercise'], limit=len(attempt_states))
for state in attempt_states:
exercise = exercises.pop()
(log, created) = ExerciseLog.objects.get_or_create(exercise_id=exercise.get('id'), user=user)
if ('not started' != state[0]):
(log.streak_progress, log.attempts) = state[1:]
for i in range(0, log.attempts):
AttemptLog.objects.get_or_create(exercise_id=exercise.get('id'), user=user, seed=i, timestamp=datetime.datetime.now())
log.latest_activity_timestamp = datetime.datetime.now()
log.save()
| [
"def",
"create_some_learner_data",
"(",
")",
":",
"user",
"=",
"CreateStudentMixin",
".",
"create_student",
"(",
")",
"attempt_states",
"=",
"(",
"(",
"'not started'",
",",
"0",
",",
"0",
")",
",",
"(",
"'completed'",
",",
"100",
",",
"15",
")",
",",
"(",
"'attempted'",
",",
"50",
",",
"10",
")",
",",
"(",
"'struggling'",
",",
"30",
",",
"25",
")",
")",
"exercises",
"=",
"get_random_content",
"(",
"kinds",
"=",
"[",
"'Exercise'",
"]",
",",
"limit",
"=",
"len",
"(",
"attempt_states",
")",
")",
"for",
"state",
"in",
"attempt_states",
":",
"exercise",
"=",
"exercises",
".",
"pop",
"(",
")",
"(",
"log",
",",
"created",
")",
"=",
"ExerciseLog",
".",
"objects",
".",
"get_or_create",
"(",
"exercise_id",
"=",
"exercise",
".",
"get",
"(",
"'id'",
")",
",",
"user",
"=",
"user",
")",
"if",
"(",
"'not started'",
"!=",
"state",
"[",
"0",
"]",
")",
":",
"(",
"log",
".",
"streak_progress",
",",
"log",
".",
"attempts",
")",
"=",
"state",
"[",
"1",
":",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"log",
".",
"attempts",
")",
":",
"AttemptLog",
".",
"objects",
".",
"get_or_create",
"(",
"exercise_id",
"=",
"exercise",
".",
"get",
"(",
"'id'",
")",
",",
"user",
"=",
"user",
",",
"seed",
"=",
"i",
",",
"timestamp",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
")",
"log",
".",
"latest_activity_timestamp",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"log",
".",
"save",
"(",
")"
] | just create a lil bit-o-data of each type . | train | false |
15,403 | def send_birthday_reminders():
if int((frappe.db.get_single_value(u'HR Settings', u'stop_birthday_reminders') or 0)):
return
from frappe.utils.user import get_enabled_system_users
users = None
birthdays = get_employees_who_are_born_today()
if birthdays:
if (not users):
users = [(u.email_id or u.name) for u in get_enabled_system_users()]
for e in birthdays:
frappe.sendmail(recipients=filter((lambda u: (u not in (e.company_email, e.personal_email, e.user_id))), users), subject=_(u'Birthday Reminder for {0}').format(e.employee_name), message=_(u"Today is {0}'s birthday!").format(e.employee_name), reply_to=(e.company_email or e.personal_email or e.user_id))
| [
"def",
"send_birthday_reminders",
"(",
")",
":",
"if",
"int",
"(",
"(",
"frappe",
".",
"db",
".",
"get_single_value",
"(",
"u'HR Settings'",
",",
"u'stop_birthday_reminders'",
")",
"or",
"0",
")",
")",
":",
"return",
"from",
"frappe",
".",
"utils",
".",
"user",
"import",
"get_enabled_system_users",
"users",
"=",
"None",
"birthdays",
"=",
"get_employees_who_are_born_today",
"(",
")",
"if",
"birthdays",
":",
"if",
"(",
"not",
"users",
")",
":",
"users",
"=",
"[",
"(",
"u",
".",
"email_id",
"or",
"u",
".",
"name",
")",
"for",
"u",
"in",
"get_enabled_system_users",
"(",
")",
"]",
"for",
"e",
"in",
"birthdays",
":",
"frappe",
".",
"sendmail",
"(",
"recipients",
"=",
"filter",
"(",
"(",
"lambda",
"u",
":",
"(",
"u",
"not",
"in",
"(",
"e",
".",
"company_email",
",",
"e",
".",
"personal_email",
",",
"e",
".",
"user_id",
")",
")",
")",
",",
"users",
")",
",",
"subject",
"=",
"_",
"(",
"u'Birthday Reminder for {0}'",
")",
".",
"format",
"(",
"e",
".",
"employee_name",
")",
",",
"message",
"=",
"_",
"(",
"u\"Today is {0}'s birthday!\"",
")",
".",
"format",
"(",
"e",
".",
"employee_name",
")",
",",
"reply_to",
"=",
"(",
"e",
".",
"company_email",
"or",
"e",
".",
"personal_email",
"or",
"e",
".",
"user_id",
")",
")"
] | send employee birthday reminders if no stop birthday reminders is not set . | train | false |
15,404 | def getReceivers(sender=Any, signal=Any):
existing = connections.get(id(sender))
if (existing is not None):
return existing.get(signal, [])
return []
| [
"def",
"getReceivers",
"(",
"sender",
"=",
"Any",
",",
"signal",
"=",
"Any",
")",
":",
"existing",
"=",
"connections",
".",
"get",
"(",
"id",
"(",
"sender",
")",
")",
"if",
"(",
"existing",
"is",
"not",
"None",
")",
":",
"return",
"existing",
".",
"get",
"(",
"signal",
",",
"[",
"]",
")",
"return",
"[",
"]"
] | get list of receivers from global tables this utility function allows you to retrieve the raw list of receivers from the connections table for the given sender and signal pair . | train | false |
15,405 | def get_chimeras_from_Nast_aligned(seqs_fp, ref_db_aligned_fp=None, ref_db_fasta_fp=None, HALT_EXEC=False, min_div_ratio=None, keep_intermediates=False):
files_to_remove = []
seqs_fp = str(seqs_fp)
seqs_fp = seqs_fp.rstrip('"')
seqs_fp = seqs_fp.lstrip('"')
(seqs_dir, new_seqs_fp) = split(seqs_fp)
if (seqs_dir == ''):
seqs_dir = './'
params = {'--query_NAST': new_seqs_fp, '--exec_dir': seqs_dir}
if ((ref_db_aligned_fp is None) and (ref_db_fasta_fp is None)):
pass
else:
if (not ref_db_fasta_fp):
ref_db_fasta_fp = write_degapped_fasta_to_file(parse_fasta(open(ref_db_aligned_fp)))
files_to_remove.append(ref_db_fasta_fp)
params.update({'--db_NAST': abspath(ref_db_aligned_fp), '--db_FASTA': abspath(ref_db_fasta_fp)})
if (min_div_ratio is not None):
params.update({'-R': min_div_ratio})
app = ChimeraSlayer(params=params, HALT_EXEC=HALT_EXEC)
app_results = app()
chimeras = parse_CPS_file(app_results['CPS'])
if (not keep_intermediates):
app.remove_intermediate_files()
remove_files(files_to_remove)
return chimeras
| [
"def",
"get_chimeras_from_Nast_aligned",
"(",
"seqs_fp",
",",
"ref_db_aligned_fp",
"=",
"None",
",",
"ref_db_fasta_fp",
"=",
"None",
",",
"HALT_EXEC",
"=",
"False",
",",
"min_div_ratio",
"=",
"None",
",",
"keep_intermediates",
"=",
"False",
")",
":",
"files_to_remove",
"=",
"[",
"]",
"seqs_fp",
"=",
"str",
"(",
"seqs_fp",
")",
"seqs_fp",
"=",
"seqs_fp",
".",
"rstrip",
"(",
"'\"'",
")",
"seqs_fp",
"=",
"seqs_fp",
".",
"lstrip",
"(",
"'\"'",
")",
"(",
"seqs_dir",
",",
"new_seqs_fp",
")",
"=",
"split",
"(",
"seqs_fp",
")",
"if",
"(",
"seqs_dir",
"==",
"''",
")",
":",
"seqs_dir",
"=",
"'./'",
"params",
"=",
"{",
"'--query_NAST'",
":",
"new_seqs_fp",
",",
"'--exec_dir'",
":",
"seqs_dir",
"}",
"if",
"(",
"(",
"ref_db_aligned_fp",
"is",
"None",
")",
"and",
"(",
"ref_db_fasta_fp",
"is",
"None",
")",
")",
":",
"pass",
"else",
":",
"if",
"(",
"not",
"ref_db_fasta_fp",
")",
":",
"ref_db_fasta_fp",
"=",
"write_degapped_fasta_to_file",
"(",
"parse_fasta",
"(",
"open",
"(",
"ref_db_aligned_fp",
")",
")",
")",
"files_to_remove",
".",
"append",
"(",
"ref_db_fasta_fp",
")",
"params",
".",
"update",
"(",
"{",
"'--db_NAST'",
":",
"abspath",
"(",
"ref_db_aligned_fp",
")",
",",
"'--db_FASTA'",
":",
"abspath",
"(",
"ref_db_fasta_fp",
")",
"}",
")",
"if",
"(",
"min_div_ratio",
"is",
"not",
"None",
")",
":",
"params",
".",
"update",
"(",
"{",
"'-R'",
":",
"min_div_ratio",
"}",
")",
"app",
"=",
"ChimeraSlayer",
"(",
"params",
"=",
"params",
",",
"HALT_EXEC",
"=",
"HALT_EXEC",
")",
"app_results",
"=",
"app",
"(",
")",
"chimeras",
"=",
"parse_CPS_file",
"(",
"app_results",
"[",
"'CPS'",
"]",
")",
"if",
"(",
"not",
"keep_intermediates",
")",
":",
"app",
".",
"remove_intermediate_files",
"(",
")",
"remove_files",
"(",
"files_to_remove",
")",
"return",
"chimeras"
] | remove chimeras from seqs_fp using chimeraslayer . | train | false |
15,406 | def join_nonshared_inputs(xs, vars, shared, make_shared=False):
joined = tt.concatenate([var.ravel() for var in vars])
if (not make_shared):
tensor_type = joined.type
inarray = tensor_type('inarray')
else:
inarray = theano.shared(joined.tag.test_value, 'inarray')
ordering = ArrayOrdering(vars)
inarray.tag.test_value = joined.tag.test_value
get_var = {var.name: var for var in vars}
replace = {get_var[var]: reshape_t(inarray[slc], shp).astype(dtyp) for (var, slc, shp, dtyp) in ordering.vmap}
replace.update(shared)
xs_special = [theano.clone(x, replace, strict=False) for x in xs]
return (xs_special, inarray)
| [
"def",
"join_nonshared_inputs",
"(",
"xs",
",",
"vars",
",",
"shared",
",",
"make_shared",
"=",
"False",
")",
":",
"joined",
"=",
"tt",
".",
"concatenate",
"(",
"[",
"var",
".",
"ravel",
"(",
")",
"for",
"var",
"in",
"vars",
"]",
")",
"if",
"(",
"not",
"make_shared",
")",
":",
"tensor_type",
"=",
"joined",
".",
"type",
"inarray",
"=",
"tensor_type",
"(",
"'inarray'",
")",
"else",
":",
"inarray",
"=",
"theano",
".",
"shared",
"(",
"joined",
".",
"tag",
".",
"test_value",
",",
"'inarray'",
")",
"ordering",
"=",
"ArrayOrdering",
"(",
"vars",
")",
"inarray",
".",
"tag",
".",
"test_value",
"=",
"joined",
".",
"tag",
".",
"test_value",
"get_var",
"=",
"{",
"var",
".",
"name",
":",
"var",
"for",
"var",
"in",
"vars",
"}",
"replace",
"=",
"{",
"get_var",
"[",
"var",
"]",
":",
"reshape_t",
"(",
"inarray",
"[",
"slc",
"]",
",",
"shp",
")",
".",
"astype",
"(",
"dtyp",
")",
"for",
"(",
"var",
",",
"slc",
",",
"shp",
",",
"dtyp",
")",
"in",
"ordering",
".",
"vmap",
"}",
"replace",
".",
"update",
"(",
"shared",
")",
"xs_special",
"=",
"[",
"theano",
".",
"clone",
"(",
"x",
",",
"replace",
",",
"strict",
"=",
"False",
")",
"for",
"x",
"in",
"xs",
"]",
"return",
"(",
"xs_special",
",",
"inarray",
")"
] | takes a list of theano variables and joins their non shared inputs into a single input . | train | false |
15,407 | @pytest.mark.skipif('sys.version_info[0] < 3')
def test_simple_annotations():
source = dedent(" def annot(a:3):\n return a\n\n annot('')")
assert ([d.name for d in jedi.Script(source).goto_definitions()] == ['str'])
source = dedent("\n def annot_ret(a:3) -> 3:\n return a\n\n annot_ret('')")
assert ([d.name for d in jedi.Script(source).goto_definitions()] == ['str'])
source = dedent(" def annot(a:int):\n return a\n\n annot('')")
assert ([d.name for d in jedi.Script(source).goto_definitions()] == ['int'])
| [
"@",
"pytest",
".",
"mark",
".",
"skipif",
"(",
"'sys.version_info[0] < 3'",
")",
"def",
"test_simple_annotations",
"(",
")",
":",
"source",
"=",
"dedent",
"(",
"\" def annot(a:3):\\n return a\\n\\n annot('')\"",
")",
"assert",
"(",
"[",
"d",
".",
"name",
"for",
"d",
"in",
"jedi",
".",
"Script",
"(",
"source",
")",
".",
"goto_definitions",
"(",
")",
"]",
"==",
"[",
"'str'",
"]",
")",
"source",
"=",
"dedent",
"(",
"\"\\n def annot_ret(a:3) -> 3:\\n return a\\n\\n annot_ret('')\"",
")",
"assert",
"(",
"[",
"d",
".",
"name",
"for",
"d",
"in",
"jedi",
".",
"Script",
"(",
"source",
")",
".",
"goto_definitions",
"(",
")",
"]",
"==",
"[",
"'str'",
"]",
")",
"source",
"=",
"dedent",
"(",
"\" def annot(a:int):\\n return a\\n\\n annot('')\"",
")",
"assert",
"(",
"[",
"d",
".",
"name",
"for",
"d",
"in",
"jedi",
".",
"Script",
"(",
"source",
")",
".",
"goto_definitions",
"(",
")",
"]",
"==",
"[",
"'int'",
"]",
")"
] | annotations only exist in python 3 . | train | false |
15,408 | def machines():
return [name for (name, state) in _status()]
| [
"def",
"machines",
"(",
")",
":",
"return",
"[",
"name",
"for",
"(",
"name",
",",
"state",
")",
"in",
"_status",
"(",
")",
"]"
] | get the list of vagrant machines . | train | false |
15,409 | def get_all_bears():
from coalib.settings.Section import Section
printer = LogPrinter(NullPrinter())
(local_bears, global_bears) = collect_bears(Section('').bear_dirs(), ['**'], [BEAR_KIND.LOCAL, BEAR_KIND.GLOBAL], printer, warn_if_unused_glob=False)
return list(itertools.chain(local_bears, global_bears))
| [
"def",
"get_all_bears",
"(",
")",
":",
"from",
"coalib",
".",
"settings",
".",
"Section",
"import",
"Section",
"printer",
"=",
"LogPrinter",
"(",
"NullPrinter",
"(",
")",
")",
"(",
"local_bears",
",",
"global_bears",
")",
"=",
"collect_bears",
"(",
"Section",
"(",
"''",
")",
".",
"bear_dirs",
"(",
")",
",",
"[",
"'**'",
"]",
",",
"[",
"BEAR_KIND",
".",
"LOCAL",
",",
"BEAR_KIND",
".",
"GLOBAL",
"]",
",",
"printer",
",",
"warn_if_unused_glob",
"=",
"False",
")",
"return",
"list",
"(",
"itertools",
".",
"chain",
"(",
"local_bears",
",",
"global_bears",
")",
")"
] | get a list of all available bears . | train | false |
15,411 | def _header_int_property(header):
def getter(self):
val = self.headers.get(header, None)
if (val is not None):
val = int(val)
return val
def setter(self, value):
self.headers[header] = value
return property(getter, setter, doc=('Retrieve and set the %s header as an int' % header))
| [
"def",
"_header_int_property",
"(",
"header",
")",
":",
"def",
"getter",
"(",
"self",
")",
":",
"val",
"=",
"self",
".",
"headers",
".",
"get",
"(",
"header",
",",
"None",
")",
"if",
"(",
"val",
"is",
"not",
"None",
")",
":",
"val",
"=",
"int",
"(",
"val",
")",
"return",
"val",
"def",
"setter",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"headers",
"[",
"header",
"]",
"=",
"value",
"return",
"property",
"(",
"getter",
",",
"setter",
",",
"doc",
"=",
"(",
"'Retrieve and set the %s header as an int'",
"%",
"header",
")",
")"
] | set and retrieve the value of self . | train | false |
15,412 | def render_exception(error, context=None, request=None, user_locale=None):
error_message = error.args[0]
message = oslo_i18n.translate(error_message, desired_locale=user_locale)
if (message is error_message):
message = six.text_type(message)
body = {'error': {'code': error.code, 'title': error.title, 'message': message}}
headers = []
if isinstance(error, exception.AuthPluginException):
body['error']['identity'] = error.authentication
elif isinstance(error, exception.Unauthorized):
local_context = {}
if request:
local_context = {'environment': request.environ}
elif (context and ('environment' in context)):
local_context = {'environment': context['environment']}
url = Application.base_url(local_context, 'public')
headers.append(('WWW-Authenticate', ('Keystone uri="%s"' % url)))
return render_response(status=(error.code, error.title), body=body, headers=headers)
| [
"def",
"render_exception",
"(",
"error",
",",
"context",
"=",
"None",
",",
"request",
"=",
"None",
",",
"user_locale",
"=",
"None",
")",
":",
"error_message",
"=",
"error",
".",
"args",
"[",
"0",
"]",
"message",
"=",
"oslo_i18n",
".",
"translate",
"(",
"error_message",
",",
"desired_locale",
"=",
"user_locale",
")",
"if",
"(",
"message",
"is",
"error_message",
")",
":",
"message",
"=",
"six",
".",
"text_type",
"(",
"message",
")",
"body",
"=",
"{",
"'error'",
":",
"{",
"'code'",
":",
"error",
".",
"code",
",",
"'title'",
":",
"error",
".",
"title",
",",
"'message'",
":",
"message",
"}",
"}",
"headers",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"error",
",",
"exception",
".",
"AuthPluginException",
")",
":",
"body",
"[",
"'error'",
"]",
"[",
"'identity'",
"]",
"=",
"error",
".",
"authentication",
"elif",
"isinstance",
"(",
"error",
",",
"exception",
".",
"Unauthorized",
")",
":",
"local_context",
"=",
"{",
"}",
"if",
"request",
":",
"local_context",
"=",
"{",
"'environment'",
":",
"request",
".",
"environ",
"}",
"elif",
"(",
"context",
"and",
"(",
"'environment'",
"in",
"context",
")",
")",
":",
"local_context",
"=",
"{",
"'environment'",
":",
"context",
"[",
"'environment'",
"]",
"}",
"url",
"=",
"Application",
".",
"base_url",
"(",
"local_context",
",",
"'public'",
")",
"headers",
".",
"append",
"(",
"(",
"'WWW-Authenticate'",
",",
"(",
"'Keystone uri=\"%s\"'",
"%",
"url",
")",
")",
")",
"return",
"render_response",
"(",
"status",
"=",
"(",
"error",
".",
"code",
",",
"error",
".",
"title",
")",
",",
"body",
"=",
"body",
",",
"headers",
"=",
"headers",
")"
] | forms a wsgi response based on the current error . | train | false |
15,413 | def set_dags_paused_state(is_paused):
session = settings.Session()
dms = session.query(DagModel).filter(DagModel.dag_id.in_(DAG_IDS))
for dm in dms:
logging.info('Setting DAG :: {} is_paused={}'.format(dm, is_paused))
dm.is_paused = is_paused
session.commit()
| [
"def",
"set_dags_paused_state",
"(",
"is_paused",
")",
":",
"session",
"=",
"settings",
".",
"Session",
"(",
")",
"dms",
"=",
"session",
".",
"query",
"(",
"DagModel",
")",
".",
"filter",
"(",
"DagModel",
".",
"dag_id",
".",
"in_",
"(",
"DAG_IDS",
")",
")",
"for",
"dm",
"in",
"dms",
":",
"logging",
".",
"info",
"(",
"'Setting DAG :: {} is_paused={}'",
".",
"format",
"(",
"dm",
",",
"is_paused",
")",
")",
"dm",
".",
"is_paused",
"=",
"is_paused",
"session",
".",
"commit",
"(",
")"
] | toggle the pause state of the dags in the test . | train | true |
15,414 | def getPathByList(vertexList):
if (len(vertexList) < 1):
return Vector3()
if (vertexList[0].__class__ != list):
vertexList = [vertexList]
path = []
for floatList in vertexList:
vector3 = getVector3ByFloatList(floatList, Vector3())
path.append(vector3)
return path
| [
"def",
"getPathByList",
"(",
"vertexList",
")",
":",
"if",
"(",
"len",
"(",
"vertexList",
")",
"<",
"1",
")",
":",
"return",
"Vector3",
"(",
")",
"if",
"(",
"vertexList",
"[",
"0",
"]",
".",
"__class__",
"!=",
"list",
")",
":",
"vertexList",
"=",
"[",
"vertexList",
"]",
"path",
"=",
"[",
"]",
"for",
"floatList",
"in",
"vertexList",
":",
"vector3",
"=",
"getVector3ByFloatList",
"(",
"floatList",
",",
"Vector3",
"(",
")",
")",
"path",
".",
"append",
"(",
"vector3",
")",
"return",
"path"
] | get the paths by list . | train | false |
15,415 | def traceback_thread(thread_id):
if (not hasattr(sys, '_current_frames')):
return None
frames = sys._current_frames()
if (not (thread_id in frames)):
return None
frame = frames[thread_id]
out = StringIO()
traceback.print_stack(frame, file=out)
return out.getvalue()
| [
"def",
"traceback_thread",
"(",
"thread_id",
")",
":",
"if",
"(",
"not",
"hasattr",
"(",
"sys",
",",
"'_current_frames'",
")",
")",
":",
"return",
"None",
"frames",
"=",
"sys",
".",
"_current_frames",
"(",
")",
"if",
"(",
"not",
"(",
"thread_id",
"in",
"frames",
")",
")",
":",
"return",
"None",
"frame",
"=",
"frames",
"[",
"thread_id",
"]",
"out",
"=",
"StringIO",
"(",
")",
"traceback",
".",
"print_stack",
"(",
"frame",
",",
"file",
"=",
"out",
")",
"return",
"out",
".",
"getvalue",
"(",
")"
] | returns a plain-text traceback of the given thread . | train | false |
15,416 | def SpearmanCorr(xs, ys):
xranks = pandas.Series(xs).rank()
yranks = pandas.Series(ys).rank()
return Corr(xranks, yranks)
| [
"def",
"SpearmanCorr",
"(",
"xs",
",",
"ys",
")",
":",
"xranks",
"=",
"pandas",
".",
"Series",
"(",
"xs",
")",
".",
"rank",
"(",
")",
"yranks",
"=",
"pandas",
".",
"Series",
"(",
"ys",
")",
".",
"rank",
"(",
")",
"return",
"Corr",
"(",
"xranks",
",",
"yranks",
")"
] | computes spearmans rank correlation . | train | false |
15,417 | def get_nginx_configurator(config_path, config_dir, work_dir, version=(1, 6, 2)):
backups = os.path.join(work_dir, 'backups')
with mock.patch('certbot_nginx.configurator.NginxConfigurator.config_test'):
with mock.patch('certbot_nginx.configurator.util.exe_exists') as mock_exe_exists:
mock_exe_exists.return_value = True
config = configurator.NginxConfigurator(config=mock.MagicMock(nginx_server_root=config_path, le_vhost_ext='-le-ssl.conf', config_dir=config_dir, work_dir=work_dir, backup_dir=backups, temp_checkpoint_dir=os.path.join(work_dir, 'temp_checkpoints'), in_progress_dir=os.path.join(backups, 'IN_PROGRESS'), server='https://acme-server.org:443/new', tls_sni_01_port=5001), name='nginx', version=version)
config.prepare()
nsconfig = configuration.NamespaceConfig(config.config)
zope.component.provideUtility(nsconfig)
return config
| [
"def",
"get_nginx_configurator",
"(",
"config_path",
",",
"config_dir",
",",
"work_dir",
",",
"version",
"=",
"(",
"1",
",",
"6",
",",
"2",
")",
")",
":",
"backups",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'backups'",
")",
"with",
"mock",
".",
"patch",
"(",
"'certbot_nginx.configurator.NginxConfigurator.config_test'",
")",
":",
"with",
"mock",
".",
"patch",
"(",
"'certbot_nginx.configurator.util.exe_exists'",
")",
"as",
"mock_exe_exists",
":",
"mock_exe_exists",
".",
"return_value",
"=",
"True",
"config",
"=",
"configurator",
".",
"NginxConfigurator",
"(",
"config",
"=",
"mock",
".",
"MagicMock",
"(",
"nginx_server_root",
"=",
"config_path",
",",
"le_vhost_ext",
"=",
"'-le-ssl.conf'",
",",
"config_dir",
"=",
"config_dir",
",",
"work_dir",
"=",
"work_dir",
",",
"backup_dir",
"=",
"backups",
",",
"temp_checkpoint_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"work_dir",
",",
"'temp_checkpoints'",
")",
",",
"in_progress_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"backups",
",",
"'IN_PROGRESS'",
")",
",",
"server",
"=",
"'https://acme-server.org:443/new'",
",",
"tls_sni_01_port",
"=",
"5001",
")",
",",
"name",
"=",
"'nginx'",
",",
"version",
"=",
"version",
")",
"config",
".",
"prepare",
"(",
")",
"nsconfig",
"=",
"configuration",
".",
"NamespaceConfig",
"(",
"config",
".",
"config",
")",
"zope",
".",
"component",
".",
"provideUtility",
"(",
"nsconfig",
")",
"return",
"config"
] | create an nginx configurator with the specified options . | train | false |
15,418 | def webapp_add_wsgi_middleware(app):
from google.appengine.ext.appstats import recording
def save(self):
t0 = time.time()
with self._lock:
num_pending = len(self.pending)
if num_pending:
logging.warn('Found %d RPC request(s) without matching response (presumably due to timeouts or other errors)', num_pending)
self.dump()
try:
(key, len_part, len_full) = self._save()
except Exception:
logging.exception('Recorder.save() failed')
return
t1 = time.time()
link = ('http://%s%s/details?time=%s' % (self.env.get('HTTP_HOST', ''), recording.config.stats_url, int((self.start_timestamp * 1000))))
logging.debug('Saved; key: %s, part: %s bytes, full: %s bytes, overhead: %.3f + %.3f; link: %s', key, len_part, len_full, self.overhead, (t1 - t0), link)
recording.Recorder.save = save
app = recording.appstats_wsgi_middleware(app)
return app
| [
"def",
"webapp_add_wsgi_middleware",
"(",
"app",
")",
":",
"from",
"google",
".",
"appengine",
".",
"ext",
".",
"appstats",
"import",
"recording",
"def",
"save",
"(",
"self",
")",
":",
"t0",
"=",
"time",
".",
"time",
"(",
")",
"with",
"self",
".",
"_lock",
":",
"num_pending",
"=",
"len",
"(",
"self",
".",
"pending",
")",
"if",
"num_pending",
":",
"logging",
".",
"warn",
"(",
"'Found %d RPC request(s) without matching response (presumably due to timeouts or other errors)'",
",",
"num_pending",
")",
"self",
".",
"dump",
"(",
")",
"try",
":",
"(",
"key",
",",
"len_part",
",",
"len_full",
")",
"=",
"self",
".",
"_save",
"(",
")",
"except",
"Exception",
":",
"logging",
".",
"exception",
"(",
"'Recorder.save() failed'",
")",
"return",
"t1",
"=",
"time",
".",
"time",
"(",
")",
"link",
"=",
"(",
"'http://%s%s/details?time=%s'",
"%",
"(",
"self",
".",
"env",
".",
"get",
"(",
"'HTTP_HOST'",
",",
"''",
")",
",",
"recording",
".",
"config",
".",
"stats_url",
",",
"int",
"(",
"(",
"self",
".",
"start_timestamp",
"*",
"1000",
")",
")",
")",
")",
"logging",
".",
"debug",
"(",
"'Saved; key: %s, part: %s bytes, full: %s bytes, overhead: %.3f + %.3f; link: %s'",
",",
"key",
",",
"len_part",
",",
"len_full",
",",
"self",
".",
"overhead",
",",
"(",
"t1",
"-",
"t0",
")",
",",
"link",
")",
"recording",
".",
"Recorder",
".",
"save",
"=",
"save",
"app",
"=",
"recording",
".",
"appstats_wsgi_middleware",
"(",
"app",
")",
"return",
"app"
] | applying the i18nmiddleware to our helloworld app . | train | false |
15,420 | def uuid4():
if _uuid_generate_random:
_buffer = ctypes.create_string_buffer(16)
_uuid_generate_random(_buffer)
return UUID(bytes=_buffer.raw)
try:
import os
return UUID(bytes=os.urandom(16), version=4)
except:
import random
bytes = [chr(random.randrange(256)) for i in range(16)]
return UUID(bytes=bytes, version=4)
| [
"def",
"uuid4",
"(",
")",
":",
"if",
"_uuid_generate_random",
":",
"_buffer",
"=",
"ctypes",
".",
"create_string_buffer",
"(",
"16",
")",
"_uuid_generate_random",
"(",
"_buffer",
")",
"return",
"UUID",
"(",
"bytes",
"=",
"_buffer",
".",
"raw",
")",
"try",
":",
"import",
"os",
"return",
"UUID",
"(",
"bytes",
"=",
"os",
".",
"urandom",
"(",
"16",
")",
",",
"version",
"=",
"4",
")",
"except",
":",
"import",
"random",
"bytes",
"=",
"[",
"chr",
"(",
"random",
".",
"randrange",
"(",
"256",
")",
")",
"for",
"i",
"in",
"range",
"(",
"16",
")",
"]",
"return",
"UUID",
"(",
"bytes",
"=",
"bytes",
",",
"version",
"=",
"4",
")"
] | generate a random uuid . | train | true |
15,421 | def getProtocolId(packet):
if isinstance(packet, list):
return unpack('>H', pack('BB', *packet[2:4]))[0]
else:
return unpack('>H', packet[2:4])[0]
| [
"def",
"getProtocolId",
"(",
"packet",
")",
":",
"if",
"isinstance",
"(",
"packet",
",",
"list",
")",
":",
"return",
"unpack",
"(",
"'>H'",
",",
"pack",
"(",
"'BB'",
",",
"*",
"packet",
"[",
"2",
":",
"4",
"]",
")",
")",
"[",
"0",
"]",
"else",
":",
"return",
"unpack",
"(",
"'>H'",
",",
"packet",
"[",
"2",
":",
"4",
"]",
")",
"[",
"0",
"]"
] | pulls out the transaction id of the packet . | train | false |
15,422 | def init_backends():
global _BACKENDS, _ACTIVE_BACKENDS
try:
from .cffi_backend import CFFIBackend
except ImportError:
pass
else:
_BACKENDS.append(CFFIBackend)
from .ctypes_backend import CTypesBackend
from .null_backend import NullBackend
_BACKENDS.append(CTypesBackend)
_ACTIVE_BACKENDS = _BACKENDS[:]
_BACKENDS.append(NullBackend)
| [
"def",
"init_backends",
"(",
")",
":",
"global",
"_BACKENDS",
",",
"_ACTIVE_BACKENDS",
"try",
":",
"from",
".",
"cffi_backend",
"import",
"CFFIBackend",
"except",
"ImportError",
":",
"pass",
"else",
":",
"_BACKENDS",
".",
"append",
"(",
"CFFIBackend",
")",
"from",
".",
"ctypes_backend",
"import",
"CTypesBackend",
"from",
".",
"null_backend",
"import",
"NullBackend",
"_BACKENDS",
".",
"append",
"(",
"CTypesBackend",
")",
"_ACTIVE_BACKENDS",
"=",
"_BACKENDS",
"[",
":",
"]",
"_BACKENDS",
".",
"append",
"(",
"NullBackend",
")"
] | loads all backends . | train | true |
15,424 | def text_2d_to_3d(obj, z=0, zdir=u'z'):
obj.__class__ = Text3D
obj.set_3d_properties(z, zdir)
| [
"def",
"text_2d_to_3d",
"(",
"obj",
",",
"z",
"=",
"0",
",",
"zdir",
"=",
"u'z'",
")",
":",
"obj",
".",
"__class__",
"=",
"Text3D",
"obj",
".",
"set_3d_properties",
"(",
"z",
",",
"zdir",
")"
] | convert a text to a text3d object . | train | false |
15,425 | def format_span(fmt, yearfrom, yearto, fromnchars, tonchars):
args = str(yearfrom)[(- fromnchars):]
if tonchars:
args = (str(yearfrom)[(- fromnchars):], str(yearto)[(- tonchars):])
return (fmt % args)
| [
"def",
"format_span",
"(",
"fmt",
",",
"yearfrom",
",",
"yearto",
",",
"fromnchars",
",",
"tonchars",
")",
":",
"args",
"=",
"str",
"(",
"yearfrom",
")",
"[",
"(",
"-",
"fromnchars",
")",
":",
"]",
"if",
"tonchars",
":",
"args",
"=",
"(",
"str",
"(",
"yearfrom",
")",
"[",
"(",
"-",
"fromnchars",
")",
":",
"]",
",",
"str",
"(",
"yearto",
")",
"[",
"(",
"-",
"tonchars",
")",
":",
"]",
")",
"return",
"(",
"fmt",
"%",
"args",
")"
] | return a span string representation . | train | false |
15,426 | def drop(x, keep=0.5):
if (len(x.shape) == 3):
if (x.shape[(-1)] == 3):
img_size = x.shape
mask = np.random.binomial(n=1, p=keep, size=x.shape[:(-1)])
for i in range(3):
x[:, :, i] = np.multiply(x[:, :, i], mask)
elif (x.shape[(-1)] == 1):
img_size = x.shape
x = np.multiply(x, np.random.binomial(n=1, p=keep, size=img_size))
else:
raise Exception('Unsupported shape {}'.format(x.shape))
elif ((len(x.shape) == 2) or 1):
img_size = x.shape
x = np.multiply(x, np.random.binomial(n=1, p=keep, size=img_size))
else:
raise Exception('Unsupported shape {}'.format(x.shape))
return x
| [
"def",
"drop",
"(",
"x",
",",
"keep",
"=",
"0.5",
")",
":",
"if",
"(",
"len",
"(",
"x",
".",
"shape",
")",
"==",
"3",
")",
":",
"if",
"(",
"x",
".",
"shape",
"[",
"(",
"-",
"1",
")",
"]",
"==",
"3",
")",
":",
"img_size",
"=",
"x",
".",
"shape",
"mask",
"=",
"np",
".",
"random",
".",
"binomial",
"(",
"n",
"=",
"1",
",",
"p",
"=",
"keep",
",",
"size",
"=",
"x",
".",
"shape",
"[",
":",
"(",
"-",
"1",
")",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"x",
"[",
":",
",",
":",
",",
"i",
"]",
"=",
"np",
".",
"multiply",
"(",
"x",
"[",
":",
",",
":",
",",
"i",
"]",
",",
"mask",
")",
"elif",
"(",
"x",
".",
"shape",
"[",
"(",
"-",
"1",
")",
"]",
"==",
"1",
")",
":",
"img_size",
"=",
"x",
".",
"shape",
"x",
"=",
"np",
".",
"multiply",
"(",
"x",
",",
"np",
".",
"random",
".",
"binomial",
"(",
"n",
"=",
"1",
",",
"p",
"=",
"keep",
",",
"size",
"=",
"img_size",
")",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Unsupported shape {}'",
".",
"format",
"(",
"x",
".",
"shape",
")",
")",
"elif",
"(",
"(",
"len",
"(",
"x",
".",
"shape",
")",
"==",
"2",
")",
"or",
"1",
")",
":",
"img_size",
"=",
"x",
".",
"shape",
"x",
"=",
"np",
".",
"multiply",
"(",
"x",
",",
"np",
".",
"random",
".",
"binomial",
"(",
"n",
"=",
"1",
",",
"p",
"=",
"keep",
",",
"size",
"=",
"img_size",
")",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Unsupported shape {}'",
".",
"format",
"(",
"x",
".",
"shape",
")",
")",
"return",
"x"
] | the sequence following the first n elements . | train | true |
15,428 | def mtFeatureExtraction(signal, Fs, mtWin, mtStep, stWin, stStep):
mtWinRatio = int(round((mtWin / stStep)))
mtStepRatio = int(round((mtStep / stStep)))
mtFeatures = []
stFeatures = stFeatureExtraction(signal, Fs, stWin, stStep)
numOfFeatures = len(stFeatures)
numOfStatistics = 2
mtFeatures = []
for i in range((numOfStatistics * numOfFeatures)):
mtFeatures.append([])
for i in range(numOfFeatures):
curPos = 0
N = len(stFeatures[i])
while (curPos < N):
N1 = curPos
N2 = (curPos + mtWinRatio)
if (N2 > N):
N2 = N
curStFeatures = stFeatures[i][N1:N2]
mtFeatures[i].append(numpy.mean(curStFeatures))
mtFeatures[(i + numOfFeatures)].append(numpy.std(curStFeatures))
curPos += mtStepRatio
return (numpy.array(mtFeatures), stFeatures)
| [
"def",
"mtFeatureExtraction",
"(",
"signal",
",",
"Fs",
",",
"mtWin",
",",
"mtStep",
",",
"stWin",
",",
"stStep",
")",
":",
"mtWinRatio",
"=",
"int",
"(",
"round",
"(",
"(",
"mtWin",
"/",
"stStep",
")",
")",
")",
"mtStepRatio",
"=",
"int",
"(",
"round",
"(",
"(",
"mtStep",
"/",
"stStep",
")",
")",
")",
"mtFeatures",
"=",
"[",
"]",
"stFeatures",
"=",
"stFeatureExtraction",
"(",
"signal",
",",
"Fs",
",",
"stWin",
",",
"stStep",
")",
"numOfFeatures",
"=",
"len",
"(",
"stFeatures",
")",
"numOfStatistics",
"=",
"2",
"mtFeatures",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"(",
"numOfStatistics",
"*",
"numOfFeatures",
")",
")",
":",
"mtFeatures",
".",
"append",
"(",
"[",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"numOfFeatures",
")",
":",
"curPos",
"=",
"0",
"N",
"=",
"len",
"(",
"stFeatures",
"[",
"i",
"]",
")",
"while",
"(",
"curPos",
"<",
"N",
")",
":",
"N1",
"=",
"curPos",
"N2",
"=",
"(",
"curPos",
"+",
"mtWinRatio",
")",
"if",
"(",
"N2",
">",
"N",
")",
":",
"N2",
"=",
"N",
"curStFeatures",
"=",
"stFeatures",
"[",
"i",
"]",
"[",
"N1",
":",
"N2",
"]",
"mtFeatures",
"[",
"i",
"]",
".",
"append",
"(",
"numpy",
".",
"mean",
"(",
"curStFeatures",
")",
")",
"mtFeatures",
"[",
"(",
"i",
"+",
"numOfFeatures",
")",
"]",
".",
"append",
"(",
"numpy",
".",
"std",
"(",
"curStFeatures",
")",
")",
"curPos",
"+=",
"mtStepRatio",
"return",
"(",
"numpy",
".",
"array",
"(",
"mtFeatures",
")",
",",
"stFeatures",
")"
] | mid-term feature extraction . | train | false |
15,429 | @decorators.memoize
def _check_mdata_list():
return salt.utils.which('mdata-list')
| [
"@",
"decorators",
".",
"memoize",
"def",
"_check_mdata_list",
"(",
")",
":",
"return",
"salt",
".",
"utils",
".",
"which",
"(",
"'mdata-list'",
")"
] | looks to see if mdata-list is present on the system . | train | false |
15,430 | def get_doctypes_for_user_permissions():
return list(set([p.parent for p in get_valid_perms() if p.set_user_permissions]))
| [
"def",
"get_doctypes_for_user_permissions",
"(",
")",
":",
"return",
"list",
"(",
"set",
"(",
"[",
"p",
".",
"parent",
"for",
"p",
"in",
"get_valid_perms",
"(",
")",
"if",
"p",
".",
"set_user_permissions",
"]",
")",
")"
] | get doctypes for the current user where user permissions are applicable . | train | false |
15,432 | @contextlib.contextmanager
def MockVimBuffers(buffers, current_buffer, cursor_position=(1, 1)):
if (current_buffer not in buffers):
raise RuntimeError(u'Current buffer must be part of the buffers list.')
with patch(u'vim.buffers', buffers):
with patch(u'vim.current.buffer', current_buffer):
with patch(u'vim.current.window.cursor', cursor_position):
(yield)
| [
"@",
"contextlib",
".",
"contextmanager",
"def",
"MockVimBuffers",
"(",
"buffers",
",",
"current_buffer",
",",
"cursor_position",
"=",
"(",
"1",
",",
"1",
")",
")",
":",
"if",
"(",
"current_buffer",
"not",
"in",
"buffers",
")",
":",
"raise",
"RuntimeError",
"(",
"u'Current buffer must be part of the buffers list.'",
")",
"with",
"patch",
"(",
"u'vim.buffers'",
",",
"buffers",
")",
":",
"with",
"patch",
"(",
"u'vim.current.buffer'",
",",
"current_buffer",
")",
":",
"with",
"patch",
"(",
"u'vim.current.window.cursor'",
",",
"cursor_position",
")",
":",
"(",
"yield",
")"
] | simulates the vim buffers list |buffers| where |current_buffer| is the buffer displayed in the current window and |cursor_position| is the current cursor position . | train | false |
15,433 | def _list_resources(source=None, user=None, project=None):
rq = flask.request
q_ts = _get_query_timestamps(rq.args)
resources = rq.storage_conn.get_resources(source=source, user=user, project=project, start_timestamp=q_ts['start_timestamp'], end_timestamp=q_ts['end_timestamp'], metaquery=_get_metaquery(rq.args))
return flask.jsonify(resources=[r.as_dict() for r in resources])
| [
"def",
"_list_resources",
"(",
"source",
"=",
"None",
",",
"user",
"=",
"None",
",",
"project",
"=",
"None",
")",
":",
"rq",
"=",
"flask",
".",
"request",
"q_ts",
"=",
"_get_query_timestamps",
"(",
"rq",
".",
"args",
")",
"resources",
"=",
"rq",
".",
"storage_conn",
".",
"get_resources",
"(",
"source",
"=",
"source",
",",
"user",
"=",
"user",
",",
"project",
"=",
"project",
",",
"start_timestamp",
"=",
"q_ts",
"[",
"'start_timestamp'",
"]",
",",
"end_timestamp",
"=",
"q_ts",
"[",
"'end_timestamp'",
"]",
",",
"metaquery",
"=",
"_get_metaquery",
"(",
"rq",
".",
"args",
")",
")",
"return",
"flask",
".",
"jsonify",
"(",
"resources",
"=",
"[",
"r",
".",
"as_dict",
"(",
")",
"for",
"r",
"in",
"resources",
"]",
")"
] | return a list of resource identifiers . | train | false |
15,434 | def image_description(shape, colormaped=False, **metadata):
if colormaped:
shape = (shape + (3,))
metadata.update({'shape': shape})
return json.dumps(metadata).encode('utf-8')
| [
"def",
"image_description",
"(",
"shape",
",",
"colormaped",
"=",
"False",
",",
"**",
"metadata",
")",
":",
"if",
"colormaped",
":",
"shape",
"=",
"(",
"shape",
"+",
"(",
"3",
",",
")",
")",
"metadata",
".",
"update",
"(",
"{",
"'shape'",
":",
"shape",
"}",
")",
"return",
"json",
".",
"dumps",
"(",
"metadata",
")",
".",
"encode",
"(",
"'utf-8'",
")"
] | return image description from data shape and meta data . | train | false |
15,435 | def test_start_detached_error(fake_proc, message_mock, caplog):
argv = ['foo', 'bar']
fake_proc._proc.startDetached.return_value = (False, 0)
fake_proc._proc.error.return_value = 'Error message'
with caplog.at_level(logging.ERROR):
fake_proc.start_detached(*argv)
msg = message_mock.getmsg(usertypes.MessageLevel.error)
assert (msg.text == 'Error while spawning testprocess: Error message.')
| [
"def",
"test_start_detached_error",
"(",
"fake_proc",
",",
"message_mock",
",",
"caplog",
")",
":",
"argv",
"=",
"[",
"'foo'",
",",
"'bar'",
"]",
"fake_proc",
".",
"_proc",
".",
"startDetached",
".",
"return_value",
"=",
"(",
"False",
",",
"0",
")",
"fake_proc",
".",
"_proc",
".",
"error",
".",
"return_value",
"=",
"'Error message'",
"with",
"caplog",
".",
"at_level",
"(",
"logging",
".",
"ERROR",
")",
":",
"fake_proc",
".",
"start_detached",
"(",
"*",
"argv",
")",
"msg",
"=",
"message_mock",
".",
"getmsg",
"(",
"usertypes",
".",
"MessageLevel",
".",
"error",
")",
"assert",
"(",
"msg",
".",
"text",
"==",
"'Error while spawning testprocess: Error message.'",
")"
] | test starting a detached process with ok=false . | train | false |
15,436 | @register(u'end-of-history')
def end_of_history(event):
event.current_buffer.history_forward(count=(10 ** 100))
buff = event.current_buffer
buff.go_to_history((len(buff._working_lines) - 1))
| [
"@",
"register",
"(",
"u'end-of-history'",
")",
"def",
"end_of_history",
"(",
"event",
")",
":",
"event",
".",
"current_buffer",
".",
"history_forward",
"(",
"count",
"=",
"(",
"10",
"**",
"100",
")",
")",
"buff",
"=",
"event",
".",
"current_buffer",
"buff",
".",
"go_to_history",
"(",
"(",
"len",
"(",
"buff",
".",
"_working_lines",
")",
"-",
"1",
")",
")"
] | move to the end of the input history . | train | true |
15,437 | def append_hook(target, key, value):
if isinstance(value, dict):
target.setdefault(key, {})
for inkey in value:
append_hook(target[key], inkey, value[inkey])
else:
target.setdefault(key, [])
if (not isinstance(value, list)):
value = [value]
target[key].extend(value)
| [
"def",
"append_hook",
"(",
"target",
",",
"key",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"target",
".",
"setdefault",
"(",
"key",
",",
"{",
"}",
")",
"for",
"inkey",
"in",
"value",
":",
"append_hook",
"(",
"target",
"[",
"key",
"]",
",",
"inkey",
",",
"value",
"[",
"inkey",
"]",
")",
"else",
":",
"target",
".",
"setdefault",
"(",
"key",
",",
"[",
"]",
")",
"if",
"(",
"not",
"isinstance",
"(",
"value",
",",
"list",
")",
")",
":",
"value",
"=",
"[",
"value",
"]",
"target",
"[",
"key",
"]",
".",
"extend",
"(",
"value",
")"
] | appends a hook to the the target dict . | train | false |
15,442 | def _api_queue_pause(output, value, kwargs):
if value:
items = value.split(',')
handled = NzbQueue.do.pause_multiple_nzo(items)
return report(output, keyword='', data={'status': bool(handled), 'nzo_ids': handled})
| [
"def",
"_api_queue_pause",
"(",
"output",
",",
"value",
",",
"kwargs",
")",
":",
"if",
"value",
":",
"items",
"=",
"value",
".",
"split",
"(",
"','",
")",
"handled",
"=",
"NzbQueue",
".",
"do",
".",
"pause_multiple_nzo",
"(",
"items",
")",
"return",
"report",
"(",
"output",
",",
"keyword",
"=",
"''",
",",
"data",
"=",
"{",
"'status'",
":",
"bool",
"(",
"handled",
")",
",",
"'nzo_ids'",
":",
"handled",
"}",
")"
] | api: accepts output . | train | false |
15,443 | def _lcs_ic(synset1, synset2, ic, verbose=False):
if (synset1._pos != synset2._pos):
raise WordNetError((u'Computing the least common subsumer requires ' + (u'%s and %s to have the same part of speech.' % (synset1, synset2))))
ic1 = information_content(synset1, ic)
ic2 = information_content(synset2, ic)
subsumers = synset1.common_hypernyms(synset2)
if (len(subsumers) == 0):
subsumer_ic = 0
else:
subsumer_ic = max((information_content(s, ic) for s in subsumers))
if verbose:
print(u'> LCS Subsumer by content:', subsumer_ic)
return (ic1, ic2, subsumer_ic)
| [
"def",
"_lcs_ic",
"(",
"synset1",
",",
"synset2",
",",
"ic",
",",
"verbose",
"=",
"False",
")",
":",
"if",
"(",
"synset1",
".",
"_pos",
"!=",
"synset2",
".",
"_pos",
")",
":",
"raise",
"WordNetError",
"(",
"(",
"u'Computing the least common subsumer requires '",
"+",
"(",
"u'%s and %s to have the same part of speech.'",
"%",
"(",
"synset1",
",",
"synset2",
")",
")",
")",
")",
"ic1",
"=",
"information_content",
"(",
"synset1",
",",
"ic",
")",
"ic2",
"=",
"information_content",
"(",
"synset2",
",",
"ic",
")",
"subsumers",
"=",
"synset1",
".",
"common_hypernyms",
"(",
"synset2",
")",
"if",
"(",
"len",
"(",
"subsumers",
")",
"==",
"0",
")",
":",
"subsumer_ic",
"=",
"0",
"else",
":",
"subsumer_ic",
"=",
"max",
"(",
"(",
"information_content",
"(",
"s",
",",
"ic",
")",
"for",
"s",
"in",
"subsumers",
")",
")",
"if",
"verbose",
":",
"print",
"(",
"u'> LCS Subsumer by content:'",
",",
"subsumer_ic",
")",
"return",
"(",
"ic1",
",",
"ic2",
",",
"subsumer_ic",
")"
] | get the information content of the least common subsumer that has the highest information content value . | train | false |
15,444 | def subclass_exception(name, parents, module, attached_to=None):
class_dict = {u'__module__': module}
if (attached_to is not None):
def __reduce__(self):
return (unpickle_inner_exception, (attached_to, name), self.args)
def __setstate__(self, args):
self.args = args
class_dict[u'__reduce__'] = __reduce__
class_dict[u'__setstate__'] = __setstate__
return type(name, parents, class_dict)
| [
"def",
"subclass_exception",
"(",
"name",
",",
"parents",
",",
"module",
",",
"attached_to",
"=",
"None",
")",
":",
"class_dict",
"=",
"{",
"u'__module__'",
":",
"module",
"}",
"if",
"(",
"attached_to",
"is",
"not",
"None",
")",
":",
"def",
"__reduce__",
"(",
"self",
")",
":",
"return",
"(",
"unpickle_inner_exception",
",",
"(",
"attached_to",
",",
"name",
")",
",",
"self",
".",
"args",
")",
"def",
"__setstate__",
"(",
"self",
",",
"args",
")",
":",
"self",
".",
"args",
"=",
"args",
"class_dict",
"[",
"u'__reduce__'",
"]",
"=",
"__reduce__",
"class_dict",
"[",
"u'__setstate__'",
"]",
"=",
"__setstate__",
"return",
"type",
"(",
"name",
",",
"parents",
",",
"class_dict",
")"
] | create new exception class . | train | true |