id_within_dataset (int64, 1–55.5k) | snippet (string, 19–14.2k chars) | tokens (list, 6–1.63k items) | nl (string, 6–352 chars) | split_within_dataset (1 class) | is_duplicated (bool, 2 classes) |
|---|---|---|---|---|---|
37,756 | def build_path(graph, node1, node2, path=None):
if (path is None):
path = []
if (node1 is node2):
return path
path.append(node2)
for pred in graph.all_preds(node2):
if (pred in path):
continue
build_path(graph, node1, pred, path)
return path
| [
"def",
"build_path",
"(",
"graph",
",",
"node1",
",",
"node2",
",",
"path",
"=",
"None",
")",
":",
"if",
"(",
"path",
"is",
"None",
")",
":",
"path",
"=",
"[",
"]",
"if",
"(",
"node1",
"is",
"node2",
")",
":",
"return",
"path",
"path",
".",
"append",
"(",
"node2",
")",
"for",
"pred",
"in",
"graph",
".",
"all_preds",
"(",
"node2",
")",
":",
"if",
"(",
"pred",
"in",
"path",
")",
":",
"continue",
"build_path",
"(",
"graph",
",",
"node1",
",",
"pred",
",",
"path",
")",
"return",
"path"
] | build the path from node1 to node2 . | train | true |
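A minimal usage sketch for the build_path snippet above, assuming that function is in scope; the Graph class here is a stand-in assumption, since the snippet only requires an object exposing all_preds(node):

class Graph(object):
    # stand-in: maps each node to a list of its predecessor nodes
    def __init__(self, preds):
        self.preds = preds

    def all_preds(self, node):
        return self.preds.get(node, [])

g = Graph({'c': ['b'], 'b': ['a']})
print(build_path(g, 'a', 'c'))  # ['c', 'b'] - walked backwards from node2 toward node1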
37,757 | def safezip(*args):
Nx = len(args[0])
for (i, arg) in enumerate(args[1:]):
if (len(arg) != Nx):
raise ValueError((_safezip_msg % (Nx, (i + 1), len(arg))))
return list(zip(*args))
| [
"def",
"safezip",
"(",
"*",
"args",
")",
":",
"Nx",
"=",
"len",
"(",
"args",
"[",
"0",
"]",
")",
"for",
"(",
"i",
",",
"arg",
")",
"in",
"enumerate",
"(",
"args",
"[",
"1",
":",
"]",
")",
":",
"if",
"(",
"len",
"(",
"arg",
")",
"!=",
"Nx",
")",
":",
"raise",
"ValueError",
"(",
"(",
"_safezip_msg",
"%",
"(",
"Nx",
",",
"(",
"i",
"+",
"1",
")",
",",
"len",
"(",
"arg",
")",
")",
")",
")",
"return",
"list",
"(",
"zip",
"(",
"*",
"args",
")",
")"
] | make sure *args* are of equal length before zipping . | train | false |
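The snippet references a module-level _safezip_msg format string; a plausible definition (the library's actual wording may differ) and a usage sketch, with safezip as defined above:

_safezip_msg = 'In safezip, len(args[0])=%d but len(args[%d])=%d'  # assumed template

print(safezip([1, 2], ['a', 'b']))  # [(1, 'a'), (2, 'b')]
safezip([1, 2], ['a'])              # ValueError: In safezip, len(args[0])=2 but len(args[1])=1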
37,759 | @pytest.fixture(scope=u'session')
def celery_includes():
return ()
| [
"@",
"pytest",
".",
"fixture",
"(",
"scope",
"=",
"u'session'",
")",
"def",
"celery_includes",
"(",
")",
":",
"return",
"(",
")"
] | you can override this to include modules when a worker starts . | train | false |
37,760 | def secure_filename(filename):
if isinstance(filename, unicode):
from unicodedata import normalize
filename = normalize('NFKD', filename).encode('ascii', 'ignore')
for sep in (os.path.sep, os.path.altsep):
if sep:
filename = filename.replace(sep, ' ')
filename = str(_filename_ascii_strip_re.sub('', '_'.join(filename.split()))).strip('._')
if ((os.name == 'nt') and filename and (filename.split('.')[0].upper() in _windows_device_files)):
filename = ('_' + filename)
return filename
| [
"def",
"secure_filename",
"(",
"filename",
")",
":",
"if",
"isinstance",
"(",
"filename",
",",
"unicode",
")",
":",
"from",
"unicodedata",
"import",
"normalize",
"filename",
"=",
"normalize",
"(",
"'NFKD'",
",",
"filename",
")",
".",
"encode",
"(",
"'ascii'",
",",
"'ignore'",
")",
"for",
"sep",
"in",
"(",
"os",
".",
"path",
".",
"sep",
",",
"os",
".",
"path",
".",
"altsep",
")",
":",
"if",
"sep",
":",
"filename",
"=",
"filename",
".",
"replace",
"(",
"sep",
",",
"' '",
")",
"filename",
"=",
"str",
"(",
"_filename_ascii_strip_re",
".",
"sub",
"(",
"''",
",",
"'_'",
".",
"join",
"(",
"filename",
".",
"split",
"(",
")",
")",
")",
")",
".",
"strip",
"(",
"'._'",
")",
"if",
"(",
"(",
"os",
".",
"name",
"==",
"'nt'",
")",
"and",
"filename",
"and",
"(",
"filename",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"in",
"_windows_device_files",
")",
")",
":",
"filename",
"=",
"(",
"'_'",
"+",
"filename",
")",
"return",
"filename"
] | pass it a filename and it will return a secure version of it . | train | true |
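The snippet matches Werkzeug's secure_filename (Python 2 era, note the unicode check); a usage sketch with the outputs Werkzeug documents for these inputs:

from werkzeug.utils import secure_filename

print(secure_filename('My cool movie.mov'))                  # My_cool_movie.mov
print(secure_filename('../../../etc/passwd'))                # etc_passwd
print(secure_filename(u'i contain cool \xfcml\xe4uts.txt'))  # i_contain_cool_umlauts.txt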
37,761 | def build_ns_name(prefix, identifier):
return (prefix + identifier)
| [
"def",
"build_ns_name",
"(",
"prefix",
",",
"identifier",
")",
":",
"return",
"(",
"prefix",
"+",
"identifier",
")"
] | builds a namespace name from the given prefix and identifier . | train | false |
37,762 | def single_source_bellman_ford_path_length(G, source, cutoff=None, weight='weight'):
weight = _weight_function(G, weight)
return iter(_bellman_ford(G, [source], weight, cutoff=cutoff).items())
| [
"def",
"single_source_bellman_ford_path_length",
"(",
"G",
",",
"source",
",",
"cutoff",
"=",
"None",
",",
"weight",
"=",
"'weight'",
")",
":",
"weight",
"=",
"_weight_function",
"(",
"G",
",",
"weight",
")",
"return",
"iter",
"(",
"_bellman_ford",
"(",
"G",
",",
"[",
"source",
"]",
",",
"weight",
",",
"cutoff",
"=",
"cutoff",
")",
".",
"items",
"(",
")",
")"
] | compute the shortest path length between source and all other reachable nodes for a weighted graph . | train | false |
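This is NetworkX's public API; a usage sketch on a 4-node path graph (edges without a weight attribute fall back to weight 1):

import networkx as nx

G = nx.path_graph(4)  # 0-1-2-3
lengths = dict(nx.single_source_bellman_ford_path_length(G, 0))
print(lengths)  # {0: 0, 1: 1, 2: 2, 3: 3}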
37,763 | def obliquity(jd, algorithm=2006):
T = ((jd - jd2000) / 36525.0)
if (algorithm == 2006):
p = ((-4.34e-08), (-5.76e-07), 0.0020034, (-0.0001831), (-46.836769), 84381.406)
corr = 0
elif (algorithm == 2000):
p = (0.001813, (-0.00059), (-46.815), 84381.448)
corr = ((-0.02524) * T)
elif (algorithm == 1980):
p = (0.001813, (-0.00059), (-46.815), 84381.448)
corr = 0
else:
raise ValueError(u'invalid algorithm year for computing obliquity')
return ((np.polyval(p, T) + corr) / 3600.0)
| [
"def",
"obliquity",
"(",
"jd",
",",
"algorithm",
"=",
"2006",
")",
":",
"T",
"=",
"(",
"(",
"jd",
"-",
"jd2000",
")",
"/",
"36525.0",
")",
"if",
"(",
"algorithm",
"==",
"2006",
")",
":",
"p",
"=",
"(",
"(",
"-",
"4.34e-08",
")",
",",
"(",
"-",
"5.76e-07",
")",
",",
"0.0020034",
",",
"(",
"-",
"0.0001831",
")",
",",
"(",
"-",
"46.836769",
")",
",",
"84381.406",
")",
"corr",
"=",
"0",
"elif",
"(",
"algorithm",
"==",
"2000",
")",
":",
"p",
"=",
"(",
"0.001813",
",",
"(",
"-",
"0.00059",
")",
",",
"(",
"-",
"46.815",
")",
",",
"84381.448",
")",
"corr",
"=",
"(",
"(",
"-",
"0.02524",
")",
"*",
"T",
")",
"elif",
"(",
"algorithm",
"==",
"1980",
")",
":",
"p",
"=",
"(",
"0.001813",
",",
"(",
"-",
"0.00059",
")",
",",
"(",
"-",
"46.815",
")",
",",
"84381.448",
")",
"corr",
"=",
"0",
"else",
":",
"raise",
"ValueError",
"(",
"u'invalid algorithm year for computing obliquity'",
")",
"return",
"(",
"(",
"np",
".",
"polyval",
"(",
"p",
",",
"T",
")",
"+",
"corr",
")",
"/",
"3600.0",
")"
] | computes the obliquity of the earth at the requested julian date . | train | false |
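A quick numeric check of the 2006 branch: at jd = jd2000 the polynomial argument T is 0, so np.polyval returns the constant term 84381.406 arcsec, the standard J2000 obliquity (jd2000 is assumed to be the J2000.0 epoch, 2451545.0):

import numpy as np

jd2000 = 2451545.0  # assumed module constant (J2000.0 epoch)
T = (2451545.0 - jd2000) / 36525.0
p = (-4.34e-08, -5.76e-07, 0.0020034, -0.0001831, -46.836769, 84381.406)
print(np.polyval(p, T) / 3600.0)  # 23.439279... degrees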
37,766 | def set_configuration(env, use_distutils):
dist_cfg = {'PYEXTCC': ("sysconfig.get_config_var('CC')", False), 'PYEXTCFLAGS': ("sysconfig.get_config_var('CFLAGS')", True), 'PYEXTCCSHARED': ("sysconfig.get_config_var('CCSHARED')", False), 'PYEXTLINKFLAGS': ("sysconfig.get_config_var('LDFLAGS')", True), 'PYEXTLINK': ("sysconfig.get_config_var('LDSHARED')", False), 'PYEXTINCPATH': ('sysconfig.get_python_inc()', False), 'PYEXTSUFFIX': ("sysconfig.get_config_var('SO')", False)}
from distutils import sysconfig
ifnotset(env, 'PYEXTINCPATH', sysconfig.get_python_inc())
if use_distutils:
for (k, (v, should_split)) in dist_cfg.items():
val = eval(v)
if should_split:
val = val.split()
ifnotset(env, k, val)
else:
_set_configuration_nodistutils(env)
| [
"def",
"set_configuration",
"(",
"env",
",",
"use_distutils",
")",
":",
"dist_cfg",
"=",
"{",
"'PYEXTCC'",
":",
"(",
"\"sysconfig.get_config_var('CC')\"",
",",
"False",
")",
",",
"'PYEXTCFLAGS'",
":",
"(",
"\"sysconfig.get_config_var('CFLAGS')\"",
",",
"True",
")",
",",
"'PYEXTCCSHARED'",
":",
"(",
"\"sysconfig.get_config_var('CCSHARED')\"",
",",
"False",
")",
",",
"'PYEXTLINKFLAGS'",
":",
"(",
"\"sysconfig.get_config_var('LDFLAGS')\"",
",",
"True",
")",
",",
"'PYEXTLINK'",
":",
"(",
"\"sysconfig.get_config_var('LDSHARED')\"",
",",
"False",
")",
",",
"'PYEXTINCPATH'",
":",
"(",
"'sysconfig.get_python_inc()'",
",",
"False",
")",
",",
"'PYEXTSUFFIX'",
":",
"(",
"\"sysconfig.get_config_var('SO')\"",
",",
"False",
")",
"}",
"from",
"distutils",
"import",
"sysconfig",
"ifnotset",
"(",
"env",
",",
"'PYEXTINCPATH'",
",",
"sysconfig",
".",
"get_python_inc",
"(",
")",
")",
"if",
"use_distutils",
":",
"for",
"(",
"k",
",",
"(",
"v",
",",
"should_split",
")",
")",
"in",
"dist_cfg",
".",
"items",
"(",
")",
":",
"val",
"=",
"eval",
"(",
"v",
")",
"if",
"should_split",
":",
"val",
"=",
"val",
".",
"split",
"(",
")",
"ifnotset",
"(",
"env",
",",
"k",
",",
"val",
")",
"else",
":",
"_set_configuration_nodistutils",
"(",
"env",
")"
] | set the current device configuration . | train | false |
37,767 | def should_calculate_taxes_automatically():
if (not settings.SHUUP_CALCULATE_TAXES_AUTOMATICALLY_IF_POSSIBLE):
return False
return get_tax_module().calculating_is_cheap
| [
"def",
"should_calculate_taxes_automatically",
"(",
")",
":",
"if",
"(",
"not",
"settings",
".",
"SHUUP_CALCULATE_TAXES_AUTOMATICALLY_IF_POSSIBLE",
")",
":",
"return",
"False",
"return",
"get_tax_module",
"(",
")",
".",
"calculating_is_cheap"
] | returns whether taxes should be calculated automatically based on settings . | train | false |
37,768 | def _describe_certs(config, parsed_certs, parse_failures):
out = []
notify = out.append
if ((not parsed_certs) and (not parse_failures)):
notify('No certs found.')
else:
if parsed_certs:
match = ('matching ' if (config.certname or config.domains) else '')
notify('Found the following {0}certs:'.format(match))
notify(_report_human_readable(config, parsed_certs))
if parse_failures:
notify('\nThe following renewal configuration files were invalid:')
notify(_report_lines(parse_failures))
disp = zope.component.getUtility(interfaces.IDisplay)
disp.notification('\n'.join(out), pause=False, wrap=False)
| [
"def",
"_describe_certs",
"(",
"config",
",",
"parsed_certs",
",",
"parse_failures",
")",
":",
"out",
"=",
"[",
"]",
"notify",
"=",
"out",
".",
"append",
"if",
"(",
"(",
"not",
"parsed_certs",
")",
"and",
"(",
"not",
"parse_failures",
")",
")",
":",
"notify",
"(",
"'No certs found.'",
")",
"else",
":",
"if",
"parsed_certs",
":",
"match",
"=",
"(",
"'matching '",
"if",
"(",
"config",
".",
"certname",
"or",
"config",
".",
"domains",
")",
"else",
"''",
")",
"notify",
"(",
"'Found the following {0}certs:'",
".",
"format",
"(",
"match",
")",
")",
"notify",
"(",
"_report_human_readable",
"(",
"config",
",",
"parsed_certs",
")",
")",
"if",
"parse_failures",
":",
"notify",
"(",
"'\\nThe following renewal configuration files were invalid:'",
")",
"notify",
"(",
"_report_lines",
"(",
"parse_failures",
")",
")",
"disp",
"=",
"zope",
".",
"component",
".",
"getUtility",
"(",
"interfaces",
".",
"IDisplay",
")",
"disp",
".",
"notification",
"(",
"'\\n'",
".",
"join",
"(",
"out",
")",
",",
"pause",
"=",
"False",
",",
"wrap",
"=",
"False",
")"
] | print information about the certs we know about . | train | false |
37,770 | def get_statistics(prefix=''):
stats = []
keys = options.messages.keys()
keys.sort()
for key in keys:
if key.startswith(prefix):
stats.append(('%-7s %s %s' % (options.counters[key], key, options.messages[key])))
return stats
| [
"def",
"get_statistics",
"(",
"prefix",
"=",
"''",
")",
":",
"stats",
"=",
"[",
"]",
"keys",
"=",
"options",
".",
"messages",
".",
"keys",
"(",
")",
"keys",
".",
"sort",
"(",
")",
"for",
"key",
"in",
"keys",
":",
"if",
"key",
".",
"startswith",
"(",
"prefix",
")",
":",
"stats",
".",
"append",
"(",
"(",
"'%-7s %s %s'",
"%",
"(",
"options",
".",
"counters",
"[",
"key",
"]",
",",
"key",
",",
"options",
".",
"messages",
"[",
"key",
"]",
")",
")",
")",
"return",
"stats"
] | get statistics for message codes that start with the prefix . | train | true |
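Note the snippet is Python 2 only (dict.keys() returning a sortable list); an equivalent Python 3 sketch, with options passed explicitly to keep it self-contained:

def get_statistics_py3(options, prefix=''):
    # options.counters / options.messages assumed dict-like, as in the snippet
    return ['%-7s %s %s' % (options.counters[key], key, options.messages[key])
            for key in sorted(options.messages) if key.startswith(prefix)]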
37,772 | @pytest.mark.models
def test_issue514(EN):
text = [u'This', u'is', u'a', u'sentence', u'about', u'pasta', u'.']
vocab = EN.entity.vocab
doc = get_doc(vocab, text)
EN.entity.add_label(u'Food')
EN.entity(doc)
label_id = vocab.strings[u'Food']
doc.ents = [(label_id, 5, 6)]
assert ([(ent.label_, ent.text) for ent in doc.ents] == [(u'Food', u'pasta')])
doc2 = get_doc(EN.entity.vocab).from_bytes(doc.to_bytes())
assert ([(ent.label_, ent.text) for ent in doc2.ents] == [(u'Food', u'pasta')])
| [
"@",
"pytest",
".",
"mark",
".",
"models",
"def",
"test_issue514",
"(",
"EN",
")",
":",
"text",
"=",
"[",
"u'This'",
",",
"u'is'",
",",
"u'a'",
",",
"u'sentence'",
",",
"u'about'",
",",
"u'pasta'",
",",
"u'.'",
"]",
"vocab",
"=",
"EN",
".",
"entity",
".",
"vocab",
"doc",
"=",
"get_doc",
"(",
"vocab",
",",
"text",
")",
"EN",
".",
"entity",
".",
"add_label",
"(",
"u'Food'",
")",
"EN",
".",
"entity",
"(",
"doc",
")",
"label_id",
"=",
"vocab",
".",
"strings",
"[",
"u'Food'",
"]",
"doc",
".",
"ents",
"=",
"[",
"(",
"label_id",
",",
"5",
",",
"6",
")",
"]",
"assert",
"(",
"[",
"(",
"ent",
".",
"label_",
",",
"ent",
".",
"text",
")",
"for",
"ent",
"in",
"doc",
".",
"ents",
"]",
"==",
"[",
"(",
"u'Food'",
",",
"u'pasta'",
")",
"]",
")",
"doc2",
"=",
"get_doc",
"(",
"EN",
".",
"entity",
".",
"vocab",
")",
".",
"from_bytes",
"(",
"doc",
".",
"to_bytes",
"(",
")",
")",
"assert",
"(",
"[",
"(",
"ent",
".",
"label_",
",",
"ent",
".",
"text",
")",
"for",
"ent",
"in",
"doc2",
".",
"ents",
"]",
"==",
"[",
"(",
"u'Food'",
",",
"u'pasta'",
")",
"]",
")"
] | test serializing after adding entity . | train | false |
37,773 | def _parse_typed_parameter(param):
global _current_parameter_value
(type_, value) = _expand_one_key_dictionary(param)
_current_parameter.type = type_
if (_is_simple_type(value) and (value != '')):
_current_parameter_value = SimpleParameterValue(value)
_current_parameter.add_value(_current_parameter_value)
elif isinstance(value, list):
for i in value:
if _is_simple_type(i):
_current_parameter_value = SimpleParameterValue(i)
_current_parameter.add_value(_current_parameter_value)
elif isinstance(i, dict):
_current_parameter_value = TypedParameterValue()
_parse_typed_parameter_typed_value(i)
_current_parameter.add_value(_current_parameter_value)
| [
"def",
"_parse_typed_parameter",
"(",
"param",
")",
":",
"global",
"_current_parameter_value",
"(",
"type_",
",",
"value",
")",
"=",
"_expand_one_key_dictionary",
"(",
"param",
")",
"_current_parameter",
".",
"type",
"=",
"type_",
"if",
"(",
"_is_simple_type",
"(",
"value",
")",
"and",
"(",
"value",
"!=",
"''",
")",
")",
":",
"_current_parameter_value",
"=",
"SimpleParameterValue",
"(",
"value",
")",
"_current_parameter",
".",
"add_value",
"(",
"_current_parameter_value",
")",
"elif",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"for",
"i",
"in",
"value",
":",
"if",
"_is_simple_type",
"(",
"i",
")",
":",
"_current_parameter_value",
"=",
"SimpleParameterValue",
"(",
"i",
")",
"_current_parameter",
".",
"add_value",
"(",
"_current_parameter_value",
")",
"elif",
"isinstance",
"(",
"i",
",",
"dict",
")",
":",
"_current_parameter_value",
"=",
"TypedParameterValue",
"(",
")",
"_parse_typed_parameter_typed_value",
"(",
"i",
")",
"_current_parameter",
".",
"add_value",
"(",
"_current_parameter_value",
")"
] | parses a typedparameter and fills it with values . | train | true |
37,775 | def compare_dicts(dict1, dict2, dict1_name, dict2_name):
dict1_keyset = set(dict1.keys())
dict2_keyset = set(dict2.keys())
print_key_diff((dict1_keyset - dict2_keyset), dict1_name, dict2_name)
print_key_diff((dict2_keyset - dict1_keyset), dict2_name, dict1_name)
print 'Value differences:'
has_value_differences = False
for key in (dict1_keyset & dict2_keyset):
if (dict1[key] != dict2[key]):
print (' %s:' % (key,))
print (' %s: %s' % (dict1_name, dict1[key]))
print (' %s: %s' % (dict2_name, dict2[key]))
print
has_value_differences = True
if (not has_value_differences):
print ' None'
| [
"def",
"compare_dicts",
"(",
"dict1",
",",
"dict2",
",",
"dict1_name",
",",
"dict2_name",
")",
":",
"dict1_keyset",
"=",
"set",
"(",
"dict1",
".",
"keys",
"(",
")",
")",
"dict2_keyset",
"=",
"set",
"(",
"dict2",
".",
"keys",
"(",
")",
")",
"print_key_diff",
"(",
"(",
"dict1_keyset",
"-",
"dict2_keyset",
")",
",",
"dict1_name",
",",
"dict2_name",
")",
"print_key_diff",
"(",
"(",
"dict2_keyset",
"-",
"dict1_keyset",
")",
",",
"dict2_name",
",",
"dict1_name",
")",
"print",
"'Value differences:'",
"has_value_differences",
"=",
"False",
"for",
"key",
"in",
"(",
"dict1_keyset",
"&",
"dict2_keyset",
")",
":",
"if",
"(",
"dict1",
"[",
"key",
"]",
"!=",
"dict2",
"[",
"key",
"]",
")",
":",
"print",
"(",
"' %s:'",
"%",
"(",
"key",
",",
")",
")",
"print",
"(",
"' %s: %s'",
"%",
"(",
"dict1_name",
",",
"dict1",
"[",
"key",
"]",
")",
")",
"print",
"(",
"' %s: %s'",
"%",
"(",
"dict2_name",
",",
"dict2",
"[",
"key",
"]",
")",
")",
"print",
"has_value_differences",
"=",
"True",
"if",
"(",
"not",
"has_value_differences",
")",
":",
"print",
"' None'"
] | compare the two dictionaries . | train | false |
37,776 | def get_remote_data(data, settings, mode, more_excluded_names=None):
supported_types = get_supported_types()
assert (mode in list(supported_types.keys()))
excluded_names = settings['excluded_names']
if (more_excluded_names is not None):
excluded_names += more_excluded_names
return globalsfilter(data, check_all=settings['check_all'], filters=tuple(supported_types[mode]), exclude_private=settings['exclude_private'], exclude_uppercase=settings['exclude_uppercase'], exclude_capitalized=settings['exclude_capitalized'], exclude_unsupported=settings['exclude_unsupported'], excluded_names=excluded_names)
| [
"def",
"get_remote_data",
"(",
"data",
",",
"settings",
",",
"mode",
",",
"more_excluded_names",
"=",
"None",
")",
":",
"supported_types",
"=",
"get_supported_types",
"(",
")",
"assert",
"(",
"mode",
"in",
"list",
"(",
"supported_types",
".",
"keys",
"(",
")",
")",
")",
"excluded_names",
"=",
"settings",
"[",
"'excluded_names'",
"]",
"if",
"(",
"more_excluded_names",
"is",
"not",
"None",
")",
":",
"excluded_names",
"+=",
"more_excluded_names",
"return",
"globalsfilter",
"(",
"data",
",",
"check_all",
"=",
"settings",
"[",
"'check_all'",
"]",
",",
"filters",
"=",
"tuple",
"(",
"supported_types",
"[",
"mode",
"]",
")",
",",
"exclude_private",
"=",
"settings",
"[",
"'exclude_private'",
"]",
",",
"exclude_uppercase",
"=",
"settings",
"[",
"'exclude_uppercase'",
"]",
",",
"exclude_capitalized",
"=",
"settings",
"[",
"'exclude_capitalized'",
"]",
",",
"exclude_unsupported",
"=",
"settings",
"[",
"'exclude_unsupported'",
"]",
",",
"excluded_names",
"=",
"excluded_names",
")"
] | return globals according to filter described in *settings*: * data: data to be filtered * settings: variable explorer settings * mode : editable or picklable * more_excluded_names: additional excluded names . | train | true |
37,777 | def can_users_receive_thread_email(recipient_ids, exploration_id, has_suggestion):
users_global_prefs = user_services.get_users_email_preferences(recipient_ids)
users_exploration_prefs = user_services.get_users_email_preferences_for_exploration(recipient_ids, exploration_id)
zipped_preferences = zip(users_global_prefs, users_exploration_prefs)
result = []
if has_suggestion:
for (user_global_prefs, user_exploration_prefs) in zipped_preferences:
result.append((user_global_prefs.can_receive_feedback_message_email and (not user_exploration_prefs.mute_suggestion_notifications)))
else:
for (user_global_prefs, user_exploration_prefs) in zipped_preferences:
result.append((user_global_prefs.can_receive_feedback_message_email and (not user_exploration_prefs.mute_feedback_notifications)))
return result
| [
"def",
"can_users_receive_thread_email",
"(",
"recipient_ids",
",",
"exploration_id",
",",
"has_suggestion",
")",
":",
"users_global_prefs",
"=",
"user_services",
".",
"get_users_email_preferences",
"(",
"recipient_ids",
")",
"users_exploration_prefs",
"=",
"user_services",
".",
"get_users_email_preferences_for_exploration",
"(",
"recipient_ids",
",",
"exploration_id",
")",
"zipped_preferences",
"=",
"zip",
"(",
"users_global_prefs",
",",
"users_exploration_prefs",
")",
"result",
"=",
"[",
"]",
"if",
"has_suggestion",
":",
"for",
"(",
"user_global_prefs",
",",
"user_exploration_prefs",
")",
"in",
"zipped_preferences",
":",
"result",
".",
"append",
"(",
"(",
"user_global_prefs",
".",
"can_receive_feedback_message_email",
"and",
"(",
"not",
"user_exploration_prefs",
".",
"mute_suggestion_notifications",
")",
")",
")",
"else",
":",
"for",
"(",
"user_global_prefs",
",",
"user_exploration_prefs",
")",
"in",
"zipped_preferences",
":",
"result",
".",
"append",
"(",
"(",
"user_global_prefs",
".",
"can_receive_feedback_message_email",
"and",
"(",
"not",
"user_exploration_prefs",
".",
"mute_feedback_notifications",
")",
")",
")",
"return",
"result"
] | returns whether users can receive email . | train | false |
37,779 | def test_ast_good_cut():
can_compile(u'(cut x)')
can_compile(u'(cut x y)')
can_compile(u'(cut x y z)')
can_compile(u'(cut x y z t)')
| [
"def",
"test_ast_good_cut",
"(",
")",
":",
"can_compile",
"(",
"u'(cut x)'",
")",
"can_compile",
"(",
"u'(cut x y)'",
")",
"can_compile",
"(",
"u'(cut x y z)'",
")",
"can_compile",
"(",
"u'(cut x y z t)'",
")"
] | make sure ast can compile valid cut . | train | false |
37,780 | def check_str_arg(result, func, cargs):
dbl = result
ptr = cargs[(-1)]._obj
return (dbl, ptr.value.decode())
| [
"def",
"check_str_arg",
"(",
"result",
",",
"func",
",",
"cargs",
")",
":",
"dbl",
"=",
"result",
"ptr",
"=",
"cargs",
"[",
"(",
"-",
"1",
")",
"]",
".",
"_obj",
"return",
"(",
"dbl",
",",
"ptr",
".",
"value",
".",
"decode",
"(",
")",
")"
] | this is for the OSRGet[Angular|Linear]Units functions . | train | false |
37,781 | def resolve_future_dependencies(__provider_name=None):
new_providers = dict()
if __provider_name:
targets = _future_dependencies.pop(__provider_name, [])
for target in targets:
setattr(target, __provider_name, get_provider(__provider_name))
return
try:
for (dependency, targets) in _future_dependencies.copy().items():
if (dependency not in _REGISTRY):
factory = _factories.get(dependency)
if factory:
provider = factory()
new_providers[dependency] = provider
else:
raise UnresolvableDependencyException(dependency, targets)
for target in targets:
setattr(target, dependency, get_provider(dependency))
finally:
_future_dependencies.clear()
return new_providers
| [
"def",
"resolve_future_dependencies",
"(",
"__provider_name",
"=",
"None",
")",
":",
"new_providers",
"=",
"dict",
"(",
")",
"if",
"__provider_name",
":",
"targets",
"=",
"_future_dependencies",
".",
"pop",
"(",
"__provider_name",
",",
"[",
"]",
")",
"for",
"target",
"in",
"targets",
":",
"setattr",
"(",
"target",
",",
"__provider_name",
",",
"get_provider",
"(",
"__provider_name",
")",
")",
"return",
"try",
":",
"for",
"(",
"dependency",
",",
"targets",
")",
"in",
"_future_dependencies",
".",
"copy",
"(",
")",
".",
"items",
"(",
")",
":",
"if",
"(",
"dependency",
"not",
"in",
"_REGISTRY",
")",
":",
"factory",
"=",
"_factories",
".",
"get",
"(",
"dependency",
")",
"if",
"factory",
":",
"provider",
"=",
"factory",
"(",
")",
"new_providers",
"[",
"dependency",
"]",
"=",
"provider",
"else",
":",
"raise",
"UnresolvableDependencyException",
"(",
"dependency",
",",
"targets",
")",
"for",
"target",
"in",
"targets",
":",
"setattr",
"(",
"target",
",",
"dependency",
",",
"get_provider",
"(",
"dependency",
")",
")",
"finally",
":",
"_future_dependencies",
".",
"clear",
"(",
")",
"return",
"new_providers"
] | force injection of all dependencies . | train | false |
37,782 | def install_ruby(ruby, runas=None):
if (runas and (runas != 'root')):
_rvm(['autolibs', 'disable', ruby], runas=runas)
return _rvm(['install', '--disable-binary', ruby], runas=runas)
else:
return _rvm(['install', ruby], runas=runas)
| [
"def",
"install_ruby",
"(",
"ruby",
",",
"runas",
"=",
"None",
")",
":",
"if",
"(",
"runas",
"and",
"(",
"runas",
"!=",
"'root'",
")",
")",
":",
"_rvm",
"(",
"[",
"'autolibs'",
",",
"'disable'",
",",
"ruby",
"]",
",",
"runas",
"=",
"runas",
")",
"return",
"_rvm",
"(",
"[",
"'install'",
",",
"'--disable-binary'",
",",
"ruby",
"]",
",",
"runas",
"=",
"runas",
")",
"else",
":",
"return",
"_rvm",
"(",
"[",
"'install'",
",",
"ruby",
"]",
",",
"runas",
"=",
"runas",
")"
] | install a ruby implementation . | train | false |
37,783 | def getMinimumZ(geometryObject):
booleanGeometry = BooleanGeometry()
booleanGeometry.archivableObjects = geometryObject.archivableObjects
booleanGeometry.importRadius = setting.getImportRadius(geometryObject.elementNode)
booleanGeometry.layerThickness = setting.getLayerThickness(geometryObject.elementNode)
archivableMinimumZ = booleanGeometry.getMinimumZ()
geometryMinimumZ = geometryObject.getMinimumZ()
if (archivableMinimumZ == None):
return geometryMinimumZ
if (geometryMinimumZ == None):
return archivableMinimumZ
return min(archivableMinimumZ, geometryMinimumZ)
| [
"def",
"getMinimumZ",
"(",
"geometryObject",
")",
":",
"booleanGeometry",
"=",
"BooleanGeometry",
"(",
")",
"booleanGeometry",
".",
"archivableObjects",
"=",
"geometryObject",
".",
"archivableObjects",
"booleanGeometry",
".",
"importRadius",
"=",
"setting",
".",
"getImportRadius",
"(",
"geometryObject",
".",
"elementNode",
")",
"booleanGeometry",
".",
"layerThickness",
"=",
"setting",
".",
"getLayerThickness",
"(",
"geometryObject",
".",
"elementNode",
")",
"archivableMinimumZ",
"=",
"booleanGeometry",
".",
"getMinimumZ",
"(",
")",
"geometryMinimumZ",
"=",
"geometryObject",
".",
"getMinimumZ",
"(",
")",
"if",
"(",
"archivableMinimumZ",
"==",
"None",
")",
":",
"return",
"geometryMinimumZ",
"if",
"(",
"geometryMinimumZ",
"==",
"None",
")",
":",
"return",
"archivableMinimumZ",
"return",
"min",
"(",
"archivableMinimumZ",
",",
"geometryMinimumZ",
")"
] | get the minimum of the minimum z of the archivableobjects and the object . | train | false |
37,784 | def post_init(cr, registry):
from odoo import api, SUPERUSER_ID
from odoo.addons.base.ir.ir_config_parameter import _default_parameters
env = api.Environment(cr, SUPERUSER_ID, {})
ICP = env['ir.config_parameter']
for (key, func) in _default_parameters.iteritems():
val = ICP.get_param(key)
(_, groups) = func()
ICP.set_param(key, val, groups)
| [
"def",
"post_init",
"(",
"cr",
",",
"registry",
")",
":",
"from",
"odoo",
"import",
"api",
",",
"SUPERUSER_ID",
"from",
"odoo",
".",
"addons",
".",
"base",
".",
"ir",
".",
"ir_config_parameter",
"import",
"_default_parameters",
"env",
"=",
"api",
".",
"Environment",
"(",
"cr",
",",
"SUPERUSER_ID",
",",
"{",
"}",
")",
"ICP",
"=",
"env",
"[",
"'ir.config_parameter'",
"]",
"for",
"(",
"key",
",",
"func",
")",
"in",
"_default_parameters",
".",
"iteritems",
"(",
")",
":",
"val",
"=",
"ICP",
".",
"get_param",
"(",
"key",
")",
"(",
"_",
",",
"groups",
")",
"=",
"func",
"(",
")",
"ICP",
".",
"set_param",
"(",
"key",
",",
"val",
",",
"groups",
")"
] | rewrite icps to force groups . | train | false |
37,786 | @csrf_exempt
def handle_xblock_callback_noauth(request, course_id, usage_id, handler, suffix=None):
request.user.known = False
course_key = CourseKey.from_string(course_id)
with modulestore().bulk_operations(course_key):
course = modulestore().get_course(course_key, depth=0)
return _invoke_xblock_handler(request, course_id, usage_id, handler, suffix, course=course)
| [
"@",
"csrf_exempt",
"def",
"handle_xblock_callback_noauth",
"(",
"request",
",",
"course_id",
",",
"usage_id",
",",
"handler",
",",
"suffix",
"=",
"None",
")",
":",
"request",
".",
"user",
".",
"known",
"=",
"False",
"course_key",
"=",
"CourseKey",
".",
"from_string",
"(",
"course_id",
")",
"with",
"modulestore",
"(",
")",
".",
"bulk_operations",
"(",
"course_key",
")",
":",
"course",
"=",
"modulestore",
"(",
")",
".",
"get_course",
"(",
"course_key",
",",
"depth",
"=",
"0",
")",
"return",
"_invoke_xblock_handler",
"(",
"request",
",",
"course_id",
",",
"usage_id",
",",
"handler",
",",
"suffix",
",",
"course",
"=",
"course",
")"
] | entry point for unauthenticated xblock handlers . | train | false |
37,787 | def _onpick(event, params):
if ((event.mouseevent.button != 2) or (not params['butterfly'])):
return
lidx = np.where([(l is event.artist) for l in params['lines']])[0][0]
text = params['text']
text.set_x(event.mouseevent.xdata)
text.set_y(event.mouseevent.ydata)
text.set_text(params['ch_names'][lidx])
text.set_visible(True)
| [
"def",
"_onpick",
"(",
"event",
",",
"params",
")",
":",
"if",
"(",
"(",
"event",
".",
"mouseevent",
".",
"button",
"!=",
"2",
")",
"or",
"(",
"not",
"params",
"[",
"'butterfly'",
"]",
")",
")",
":",
"return",
"lidx",
"=",
"np",
".",
"where",
"(",
"[",
"(",
"l",
"is",
"event",
".",
"artist",
")",
"for",
"l",
"in",
"params",
"[",
"'lines'",
"]",
"]",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"text",
"=",
"params",
"[",
"'text'",
"]",
"text",
".",
"set_x",
"(",
"event",
".",
"mouseevent",
".",
"xdata",
")",
"text",
".",
"set_y",
"(",
"event",
".",
"mouseevent",
".",
"ydata",
")",
"text",
".",
"set_text",
"(",
"params",
"[",
"'ch_names'",
"]",
"[",
"lidx",
"]",
")",
"text",
".",
"set_visible",
"(",
"True",
")"
] | add a channel name on click . | train | false |
37,788 | def template_get(name=None, host=None, templateids=None, **connection_args):
conn_args = _login(**connection_args)
try:
if conn_args:
method = 'template.get'
params = {'output': 'extend', 'filter': {}}
if name:
params['filter'].setdefault('name', name)
if host:
params['filter'].setdefault('host', host)
if templateids:
params.setdefault('templateids', templateids)
params = _params_extend(params, **connection_args)
ret = _query(method, params, conn_args['url'], conn_args['auth'])
return (ret['result'] if (len(ret['result']) > 0) else False)
else:
raise KeyError
except KeyError:
return False
| [
"def",
"template_get",
"(",
"name",
"=",
"None",
",",
"host",
"=",
"None",
",",
"templateids",
"=",
"None",
",",
"**",
"connection_args",
")",
":",
"conn_args",
"=",
"_login",
"(",
"**",
"connection_args",
")",
"try",
":",
"if",
"conn_args",
":",
"method",
"=",
"'template.get'",
"params",
"=",
"{",
"'output'",
":",
"'extend'",
",",
"'filter'",
":",
"{",
"}",
"}",
"if",
"name",
":",
"params",
"[",
"'filter'",
"]",
".",
"setdefault",
"(",
"'name'",
",",
"name",
")",
"if",
"host",
":",
"params",
"[",
"'filter'",
"]",
".",
"setdefault",
"(",
"'host'",
",",
"host",
")",
"if",
"templateids",
":",
"params",
".",
"setdefault",
"(",
"'templateids'",
",",
"templateids",
")",
"params",
"=",
"_params_extend",
"(",
"params",
",",
"**",
"connection_args",
")",
"ret",
"=",
"_query",
"(",
"method",
",",
"params",
",",
"conn_args",
"[",
"'url'",
"]",
",",
"conn_args",
"[",
"'auth'",
"]",
")",
"return",
"(",
"ret",
"[",
"'result'",
"]",
"if",
"(",
"len",
"(",
"ret",
"[",
"'result'",
"]",
")",
">",
"0",
")",
"else",
"False",
")",
"else",
":",
"raise",
"KeyError",
"except",
"KeyError",
":",
"return",
"False"
] | retrieve templates according to the given parameters . | train | true |
37,789 | def catchLogs(testCase, logPublisher=globalLogPublisher):
logs = []
logPublisher.addObserver(logs.append)
testCase.addCleanup((lambda : logPublisher.removeObserver(logs.append)))
return (lambda : [formatEvent(event) for event in logs])
| [
"def",
"catchLogs",
"(",
"testCase",
",",
"logPublisher",
"=",
"globalLogPublisher",
")",
":",
"logs",
"=",
"[",
"]",
"logPublisher",
".",
"addObserver",
"(",
"logs",
".",
"append",
")",
"testCase",
".",
"addCleanup",
"(",
"(",
"lambda",
":",
"logPublisher",
".",
"removeObserver",
"(",
"logs",
".",
"append",
")",
")",
")",
"return",
"(",
"lambda",
":",
"[",
"formatEvent",
"(",
"event",
")",
"for",
"event",
"in",
"logs",
"]",
")"
] | catch the global log stream . | train | false |
37,790 | def from_func(func, shape, dtype=None, name=None, args=(), kwargs={}):
name = (name or ('from_func-' + tokenize(func, shape, dtype, args, kwargs)))
if (args or kwargs):
func = partial(func, *args, **kwargs)
dsk = {((name,) + ((0,) * len(shape))): (func,)}
chunks = tuple(((i,) for i in shape))
return Array(dsk, name, chunks, dtype)
| [
"def",
"from_func",
"(",
"func",
",",
"shape",
",",
"dtype",
"=",
"None",
",",
"name",
"=",
"None",
",",
"args",
"=",
"(",
")",
",",
"kwargs",
"=",
"{",
"}",
")",
":",
"name",
"=",
"(",
"name",
"or",
"(",
"'from_func-'",
"+",
"tokenize",
"(",
"func",
",",
"shape",
",",
"dtype",
",",
"args",
",",
"kwargs",
")",
")",
")",
"if",
"(",
"args",
"or",
"kwargs",
")",
":",
"func",
"=",
"partial",
"(",
"func",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
"dsk",
"=",
"{",
"(",
"(",
"name",
",",
")",
"+",
"(",
"(",
"0",
",",
")",
"*",
"len",
"(",
"shape",
")",
")",
")",
":",
"(",
"func",
",",
")",
"}",
"chunks",
"=",
"tuple",
"(",
"(",
"(",
"i",
",",
")",
"for",
"i",
"in",
"shape",
")",
")",
"return",
"Array",
"(",
"dsk",
",",
"name",
",",
"chunks",
",",
"dtype",
")"
] | create dask array in a single block by calling a function . calling the provided function with func should return a numpy array of the indicated shape and dtype . | train | false |
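A usage sketch; because the task tuple is (func,), func is called with no arguments unless args/kwargs are given, so np.ones needs its shape passed through args. Assumes a dask version that exports from_func from dask.array, as the snippet's source does:

import numpy as np
import dask.array as da

a = da.from_func(np.ones, shape=(3, 3), dtype=float, args=((3, 3),))
print(a.chunks)           # ((3,), (3,)) - the whole array is a single block
print(a.compute().sum())  # 9.0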
37,791 | def print_totals_for_all_results(results):
print('Total Metrics For All Results:')
print(('This query returned %s rows.' % len(results.get('rows'))))
print(('But the query matched %s total results.' % results.get('totalResults')))
print('Here are the metric totals for the matched total results.')
totals = results.get('totalsForAllResults')
for (metric_name, metric_total) in totals.iteritems():
print(('Metric Name = %s' % metric_name))
print(('Metric Total = %s' % metric_total))
print()
| [
"def",
"print_totals_for_all_results",
"(",
"results",
")",
":",
"print",
"(",
"'Total Metrics For All Results:'",
")",
"print",
"(",
"(",
"'This query returned %s rows.'",
"%",
"len",
"(",
"results",
".",
"get",
"(",
"'rows'",
")",
")",
")",
")",
"print",
"(",
"(",
"'But the query matched %s total results.'",
"%",
"results",
".",
"get",
"(",
"'totalResults'",
")",
")",
")",
"print",
"(",
"'Here are the metric totals for the matched total results.'",
")",
"totals",
"=",
"results",
".",
"get",
"(",
"'totalsForAllResults'",
")",
"for",
"(",
"metric_name",
",",
"metric_total",
")",
"in",
"totals",
".",
"iteritems",
"(",
")",
":",
"print",
"(",
"(",
"'Metric Name = %s'",
"%",
"metric_name",
")",
")",
"print",
"(",
"(",
"'Metric Total = %s'",
"%",
"metric_total",
")",
")",
"print",
"(",
")"
] | prints the total metric value for all pages the query matched . | train | false |
37,792 | def lang(mode='extract'):
if (mode == 'compile'):
local('pybabel compile -f -d ./locale')
else:
local('pybabel extract -F ./locale/babel.cfg -o ./locale/messages.pot ./ --sort-output --no-location --omit-header')
local('pybabel update -l cs_CZ -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete')
local('pybabel update -l de_DE -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete')
local('pybabel update -l en_US -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete')
local('pybabel update -l es_ES -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete')
local('pybabel update -l fr_FR -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete')
local('pybabel update -l id_ID -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete')
local('pybabel update -l it_IT -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete')
local('pybabel update -l nl_NL -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete')
local('pybabel update -l pt_BR -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete')
local('pybabel update -l ru_RU -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete')
local('pybabel update -l vi_VN -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete')
local('pybabel update -l zh_CN -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete')
local('pybabel update -l ko_KR -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete')
| [
"def",
"lang",
"(",
"mode",
"=",
"'extract'",
")",
":",
"if",
"(",
"mode",
"==",
"'compile'",
")",
":",
"local",
"(",
"'pybabel compile -f -d ./locale'",
")",
"else",
":",
"local",
"(",
"'pybabel extract -F ./locale/babel.cfg -o ./locale/messages.pot ./ --sort-output --no-location --omit-header'",
")",
"local",
"(",
"'pybabel update -l cs_CZ -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete'",
")",
"local",
"(",
"'pybabel update -l de_DE -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete'",
")",
"local",
"(",
"'pybabel update -l en_US -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete'",
")",
"local",
"(",
"'pybabel update -l es_ES -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete'",
")",
"local",
"(",
"'pybabel update -l fr_FR -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete'",
")",
"local",
"(",
"'pybabel update -l id_ID -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete'",
")",
"local",
"(",
"'pybabel update -l it_IT -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete'",
")",
"local",
"(",
"'pybabel update -l nl_NL -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete'",
")",
"local",
"(",
"'pybabel update -l pt_BR -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete'",
")",
"local",
"(",
"'pybabel update -l ru_RU -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete'",
")",
"local",
"(",
"'pybabel update -l vi_VN -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete'",
")",
"local",
"(",
"'pybabel update -l zh_CN -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete'",
")",
"local",
"(",
"'pybabel update -l ko_KR -d ./locale -i ./locale/messages.pot --previous --ignore-obsolete'",
")"
] | return the language code for the current locale , e.g. en . | train | false |
37,793 | def filter_hits_by_distance(hits, source_text, min_similarity=DEFAULT_MIN_SIMILARITY):
if ((min_similarity <= 0) or (min_similarity >= 1)):
min_similarity = DEFAULT_MIN_SIMILARITY
filtered_hits = []
for hit in hits:
hit_source_text = hit['_source']['source']
distance = Levenshtein.distance(source_text, hit_source_text)
similarity = (1 - (distance / float(max(len(source_text), len(hit_source_text)))))
logger.debug('Similarity: %.2f (distance: %d)\nOriginal: DCTB %s\nComparing with: DCTB %s', similarity, distance, source_text, hit_source_text)
if (similarity < min_similarity):
break
filtered_hits.append(hit)
return filtered_hits
| [
"def",
"filter_hits_by_distance",
"(",
"hits",
",",
"source_text",
",",
"min_similarity",
"=",
"DEFAULT_MIN_SIMILARITY",
")",
":",
"if",
"(",
"(",
"min_similarity",
"<=",
"0",
")",
"or",
"(",
"min_similarity",
">=",
"1",
")",
")",
":",
"min_similarity",
"=",
"DEFAULT_MIN_SIMILARITY",
"filtered_hits",
"=",
"[",
"]",
"for",
"hit",
"in",
"hits",
":",
"hit_source_text",
"=",
"hit",
"[",
"'_source'",
"]",
"[",
"'source'",
"]",
"distance",
"=",
"Levenshtein",
".",
"distance",
"(",
"source_text",
",",
"hit_source_text",
")",
"similarity",
"=",
"(",
"1",
"-",
"(",
"distance",
"/",
"float",
"(",
"max",
"(",
"len",
"(",
"source_text",
")",
",",
"len",
"(",
"hit_source_text",
")",
")",
")",
")",
")",
"logger",
".",
"debug",
"(",
"'Similarity: %.2f (distance: %d)\\nOriginal: DCTB %s\\nComparing with: DCTB %s'",
",",
"similarity",
",",
"distance",
",",
"source_text",
",",
"hit_source_text",
")",
"if",
"(",
"similarity",
"<",
"min_similarity",
")",
":",
"break",
"filtered_hits",
".",
"append",
"(",
"hit",
")",
"return",
"filtered_hits"
] | returns es hits filtered according to their levenshtein distance to the source_text . | train | false |
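The similarity metric in isolation - Levenshtein distance normalized against the longer string (requires the python-Levenshtein package):

import Levenshtein

source, hit = 'color of the sky', 'colour of the sky'
distance = Levenshtein.distance(source, hit)
similarity = 1 - distance / float(max(len(source), len(hit)))
print(distance, round(similarity, 2))  # 1 0.94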
37,794 | def get_hwclock():
return 'localtime'
| [
"def",
"get_hwclock",
"(",
")",
":",
"return",
"'localtime'"
] | get current hardware clock setting cli example: . | train | false |
37,795 | def addOpenEventSupport(root, flist):
def doOpenFile(*args):
for fn in args:
flist.open(fn)
root.createcommand('::tk::mac::OpenDocument', doOpenFile)
| [
"def",
"addOpenEventSupport",
"(",
"root",
",",
"flist",
")",
":",
"def",
"doOpenFile",
"(",
"*",
"args",
")",
":",
"for",
"fn",
"in",
"args",
":",
"flist",
".",
"open",
"(",
"fn",
")",
"root",
".",
"createcommand",
"(",
"'::tk::mac::OpenDocument'",
",",
"doOpenFile",
")"
] | this ensures that the application will respond to open appleevents . | train | false |
37,796 | def Target(filename):
return (os.path.splitext(filename)[0] + '.o')
| [
"def",
"Target",
"(",
"filename",
")",
":",
"return",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"0",
"]",
"+",
"'.o'",
")"
] | translate a compilable filename to its .o target . | train | false |
37,798 | @socketio.on('connect', namespace='/jobs')
def on_connect_jobs():
pass
| [
"@",
"socketio",
".",
"on",
"(",
"'connect'",
",",
"namespace",
"=",
"'/jobs'",
")",
"def",
"on_connect_jobs",
"(",
")",
":",
"pass"
] | somebody connected to a jobs page . | train | false |
37,800 | def encode_rfc2822_address_header(header_text):
def encode_addr(addr):
(name, email) = addr
if (not try_coerce_ascii(name)):
name = str(Header(name, 'utf-8'))
return formataddr((name, email))
addresses = getaddresses([ustr(header_text).encode('utf-8')])
return COMMASPACE.join(map(encode_addr, addresses))
| [
"def",
"encode_rfc2822_address_header",
"(",
"header_text",
")",
":",
"def",
"encode_addr",
"(",
"addr",
")",
":",
"(",
"name",
",",
"email",
")",
"=",
"addr",
"if",
"(",
"not",
"try_coerce_ascii",
"(",
"name",
")",
")",
":",
"name",
"=",
"str",
"(",
"Header",
"(",
"name",
",",
"'utf-8'",
")",
")",
"return",
"formataddr",
"(",
"(",
"name",
",",
"email",
")",
")",
"addresses",
"=",
"getaddresses",
"(",
"[",
"ustr",
"(",
"header_text",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"]",
")",
"return",
"COMMASPACE",
".",
"join",
"(",
"map",
"(",
"encode_addr",
",",
"addresses",
")",
")"
] | if header_text contains non-ascii characters , the address names are encoded as utf-8 email headers . | train | false |
37,801 | def test_interval(timer):
assert (timer.interval() == 0)
timer.setInterval(1000)
assert (timer.interval() == 1000)
| [
"def",
"test_interval",
"(",
"timer",
")",
":",
"assert",
"(",
"timer",
".",
"interval",
"(",
")",
"==",
"0",
")",
"timer",
".",
"setInterval",
"(",
"1000",
")",
"assert",
"(",
"timer",
".",
"interval",
"(",
")",
"==",
"1000",
")"
] | test setting an interval . | train | false |
37,802 | def _parse_localectl():
ret = {}
localectl_out = __salt__['cmd.run']('localectl')
reading_locale = False
for line in localectl_out.splitlines():
if ('System Locale:' in line):
line = line.replace('System Locale:', '')
reading_locale = True
if (not reading_locale):
continue
match = re.match('^([A-Z_]+)=(.*)$', line.strip())
if (not match):
break
ret[match.group(1)] = match.group(2).replace('"', '')
else:
raise CommandExecutionError('Could not find system locale - could not parse localectl output\n{0}'.format(localectl_out))
return ret
| [
"def",
"_parse_localectl",
"(",
")",
":",
"ret",
"=",
"{",
"}",
"localectl_out",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'localectl'",
")",
"reading_locale",
"=",
"False",
"for",
"line",
"in",
"localectl_out",
".",
"splitlines",
"(",
")",
":",
"if",
"(",
"'System Locale:'",
"in",
"line",
")",
":",
"line",
"=",
"line",
".",
"replace",
"(",
"'System Locale:'",
",",
"''",
")",
"reading_locale",
"=",
"True",
"if",
"(",
"not",
"reading_locale",
")",
":",
"continue",
"match",
"=",
"re",
".",
"match",
"(",
"'^([A-Z_]+)=(.*)$'",
",",
"line",
".",
"strip",
"(",
")",
")",
"if",
"(",
"not",
"match",
")",
":",
"break",
"ret",
"[",
"match",
".",
"group",
"(",
"1",
")",
"]",
"=",
"match",
".",
"group",
"(",
"2",
")",
".",
"replace",
"(",
"'\"'",
",",
"''",
")",
"else",
":",
"raise",
"CommandExecutionError",
"(",
"'Could not find system locale - could not parse localectl output\\n{0}'",
".",
"format",
"(",
"localectl_out",
")",
")",
"return",
"ret"
] | get the system locale parameters from localectl . | train | false |
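The parsing logic in isolation, run against sample localectl output (output format assumed from a typical systemd install):

import re

sample = '''   System Locale: LANG=en_US.UTF-8
                  LC_NUMERIC=de_DE.UTF-8
       VC Keymap: us'''

ret, reading = {}, False
for line in sample.splitlines():
    if 'System Locale:' in line:
        line = line.replace('System Locale:', '')
        reading = True
    if not reading:
        continue
    m = re.match(r'^([A-Z_]+)=(.*)$', line.strip())
    if not m:
        break  # left the locale block
    ret[m.group(1)] = m.group(2).replace('"', '')
print(ret)  # {'LANG': 'en_US.UTF-8', 'LC_NUMERIC': 'de_DE.UTF-8'}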
37,805 | def cleanup_packing_list(doc, parent_items):
delete_list = []
for d in doc.get(u'packed_items'):
if ([d.parent_item, d.parent_detail_docname] not in parent_items):
delete_list.append(d)
if (not delete_list):
return doc
packed_items = doc.get(u'packed_items')
doc.set(u'packed_items', [])
for d in packed_items:
if (d not in delete_list):
doc.append(u'packed_items', d)
| [
"def",
"cleanup_packing_list",
"(",
"doc",
",",
"parent_items",
")",
":",
"delete_list",
"=",
"[",
"]",
"for",
"d",
"in",
"doc",
".",
"get",
"(",
"u'packed_items'",
")",
":",
"if",
"(",
"[",
"d",
".",
"parent_item",
",",
"d",
".",
"parent_detail_docname",
"]",
"not",
"in",
"parent_items",
")",
":",
"delete_list",
".",
"append",
"(",
"d",
")",
"if",
"(",
"not",
"delete_list",
")",
":",
"return",
"doc",
"packed_items",
"=",
"doc",
".",
"get",
"(",
"u'packed_items'",
")",
"doc",
".",
"set",
"(",
"u'packed_items'",
",",
"[",
"]",
")",
"for",
"d",
"in",
"packed_items",
":",
"if",
"(",
"d",
"not",
"in",
"delete_list",
")",
":",
"doc",
".",
"append",
"(",
"u'packed_items'",
",",
"d",
")"
] | remove all those child items which are no longer present in main item table . | train | false |
37,807 | def ANOVA_one_way(a):
group_means = []
group_variances = []
num_cases = 0
all_vals = []
for i in a:
num_cases += len(i)
group_means.append(mean(i))
group_variances.append((i.var(ddof=1) * (len(i) - 1)))
all_vals.extend(i)
dfd = (num_cases - len(group_means))
within_Groups = (sum(group_variances) / dfd)
if (within_Groups == 0.0):
return (nan, nan)
all_vals = array(all_vals)
grand_mean = all_vals.mean()
between_Groups = 0
for i in a:
diff = (i.mean() - grand_mean)
diff_sq = (diff * diff)
x = (diff_sq * len(i))
between_Groups += x
dfn = (len(group_means) - 1)
between_Groups = (between_Groups / dfn)
F = (between_Groups / within_Groups)
return (F, fprob(F, dfn, dfd, direction='high'))
| [
"def",
"ANOVA_one_way",
"(",
"a",
")",
":",
"group_means",
"=",
"[",
"]",
"group_variances",
"=",
"[",
"]",
"num_cases",
"=",
"0",
"all_vals",
"=",
"[",
"]",
"for",
"i",
"in",
"a",
":",
"num_cases",
"+=",
"len",
"(",
"i",
")",
"group_means",
".",
"append",
"(",
"mean",
"(",
"i",
")",
")",
"group_variances",
".",
"append",
"(",
"(",
"i",
".",
"var",
"(",
"ddof",
"=",
"1",
")",
"*",
"(",
"len",
"(",
"i",
")",
"-",
"1",
")",
")",
")",
"all_vals",
".",
"extend",
"(",
"i",
")",
"dfd",
"=",
"(",
"num_cases",
"-",
"len",
"(",
"group_means",
")",
")",
"within_Groups",
"=",
"(",
"sum",
"(",
"group_variances",
")",
"/",
"dfd",
")",
"if",
"(",
"within_Groups",
"==",
"0.0",
")",
":",
"return",
"(",
"nan",
",",
"nan",
")",
"all_vals",
"=",
"array",
"(",
"all_vals",
")",
"grand_mean",
"=",
"all_vals",
".",
"mean",
"(",
")",
"between_Groups",
"=",
"0",
"for",
"i",
"in",
"a",
":",
"diff",
"=",
"(",
"i",
".",
"mean",
"(",
")",
"-",
"grand_mean",
")",
"diff_sq",
"=",
"(",
"diff",
"*",
"diff",
")",
"x",
"=",
"(",
"diff_sq",
"*",
"len",
"(",
"i",
")",
")",
"between_Groups",
"+=",
"x",
"dfn",
"=",
"(",
"len",
"(",
"group_means",
")",
"-",
"1",
")",
"between_Groups",
"=",
"(",
"between_Groups",
"/",
"dfn",
")",
"F",
"=",
"(",
"between_Groups",
"/",
"within_Groups",
")",
"return",
"(",
"F",
",",
"fprob",
"(",
"F",
",",
"dfn",
",",
"dfd",
",",
"direction",
"=",
"'high'",
")",
")"
] | performs a one-way analysis of variance . a is a list of lists of observed values . | train | false |
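A cross-check with SciPy's f_oneway, which computes the same F statistic (the snippet's .var(ddof=1) calls imply the groups are numpy arrays):

import numpy as np
from scipy.stats import f_oneway

g1 = np.array([1.0, 2.0, 3.0])
g2 = np.array([2.0, 3.0, 4.0])
F, p = f_oneway(g1, g2)
print(round(F, 3), round(p, 3))  # 1.5 0.288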
37,808 | def test_EngFormatter_formatting():
unitless = mticker.EngFormatter()
assert (unitless(0.1) == u'100 m')
assert (unitless(1) == u'1')
assert (unitless(999.9) == u'999.9')
assert (unitless(1001) == u'1.001 k')
with_unit = mticker.EngFormatter(unit=u's')
assert (with_unit(0.1) == u'100 ms')
assert (with_unit(1) == u'1 s')
assert (with_unit(999.9) == u'999.9 s')
assert (with_unit(1001) == u'1.001 ks')
| [
"def",
"test_EngFormatter_formatting",
"(",
")",
":",
"unitless",
"=",
"mticker",
".",
"EngFormatter",
"(",
")",
"assert",
"(",
"unitless",
"(",
"0.1",
")",
"==",
"u'100 m'",
")",
"assert",
"(",
"unitless",
"(",
"1",
")",
"==",
"u'1'",
")",
"assert",
"(",
"unitless",
"(",
"999.9",
")",
"==",
"u'999.9'",
")",
"assert",
"(",
"unitless",
"(",
"1001",
")",
"==",
"u'1.001 k'",
")",
"with_unit",
"=",
"mticker",
".",
"EngFormatter",
"(",
"unit",
"=",
"u's'",
")",
"assert",
"(",
"with_unit",
"(",
"0.1",
")",
"==",
"u'100 ms'",
")",
"assert",
"(",
"with_unit",
"(",
"1",
")",
"==",
"u'1 s'",
")",
"assert",
"(",
"with_unit",
"(",
"999.9",
")",
"==",
"u'999.9 s'",
")",
"assert",
"(",
"with_unit",
"(",
"1001",
")",
"==",
"u'1.001 ks'",
")"
] | create two instances of engformatter with default parameters . | train | false |
37,809 | def parse_network_vlan_range(network_vlan_range):
entry = network_vlan_range.strip()
if (':' in entry):
if (entry.count(':') != 2):
raise n_exc.NetworkVlanRangeError(vlan_range=entry, error=_('Need exactly two values for VLAN range'))
(network, vlan_min, vlan_max) = entry.split(':')
if (not network):
raise n_exc.PhysicalNetworkNameError()
try:
vlan_min = int(vlan_min)
except ValueError:
raise_invalid_tag(vlan_min, entry)
try:
vlan_max = int(vlan_max)
except ValueError:
raise_invalid_tag(vlan_max, entry)
vlan_range = (vlan_min, vlan_max)
verify_vlan_range(vlan_range)
return (network, vlan_range)
else:
return (entry, None)
| [
"def",
"parse_network_vlan_range",
"(",
"network_vlan_range",
")",
":",
"entry",
"=",
"network_vlan_range",
".",
"strip",
"(",
")",
"if",
"(",
"':'",
"in",
"entry",
")",
":",
"if",
"(",
"entry",
".",
"count",
"(",
"':'",
")",
"!=",
"2",
")",
":",
"raise",
"n_exc",
".",
"NetworkVlanRangeError",
"(",
"vlan_range",
"=",
"entry",
",",
"error",
"=",
"_",
"(",
"'Need exactly two values for VLAN range'",
")",
")",
"(",
"network",
",",
"vlan_min",
",",
"vlan_max",
")",
"=",
"entry",
".",
"split",
"(",
"':'",
")",
"if",
"(",
"not",
"network",
")",
":",
"raise",
"n_exc",
".",
"PhysicalNetworkNameError",
"(",
")",
"try",
":",
"vlan_min",
"=",
"int",
"(",
"vlan_min",
")",
"except",
"ValueError",
":",
"raise_invalid_tag",
"(",
"vlan_min",
",",
"entry",
")",
"try",
":",
"vlan_max",
"=",
"int",
"(",
"vlan_max",
")",
"except",
"ValueError",
":",
"raise_invalid_tag",
"(",
"vlan_max",
",",
"entry",
")",
"vlan_range",
"=",
"(",
"vlan_min",
",",
"vlan_max",
")",
"verify_vlan_range",
"(",
"vlan_range",
")",
"return",
"(",
"network",
",",
"vlan_range",
")",
"else",
":",
"return",
"(",
"entry",
",",
"None",
")"
] | interpret a string as network[:vlan_begin:vlan_end] . | train | false |
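A trimmed, runnable stand-in for the happy paths (ValueError replaces Neutron's n_exc types, and the tag/range validation helpers are omitted):

def parse_range(entry):
    entry = entry.strip()
    if ':' not in entry:
        return (entry, None)
    if entry.count(':') != 2:
        raise ValueError('Need exactly two values for VLAN range')
    network, vlan_min, vlan_max = entry.split(':')
    return (network, (int(vlan_min), int(vlan_max)))

print(parse_range('physnet1:100:199'))  # ('physnet1', (100, 199))
print(parse_range('physnet1'))          # ('physnet1', None)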
37,810 | def is_not_scalar_zero(builder, value):
return _scalar_pred_against_zero(builder, value, functools.partial(builder.fcmp_unordered, '!='), '!=')
| [
"def",
"is_not_scalar_zero",
"(",
"builder",
",",
"value",
")",
":",
"return",
"_scalar_pred_against_zero",
"(",
"builder",
",",
"value",
",",
"functools",
".",
"partial",
"(",
"builder",
".",
"fcmp_unordered",
",",
"'!='",
")",
",",
"'!='",
")"
] | return a predicate representing whether a *value* is not equal to zero . | train | false |
37,811 | def dup_lcm(f, g, K):
if K.has_Field:
return dup_ff_lcm(f, g, K)
else:
return dup_rr_lcm(f, g, K)
| [
"def",
"dup_lcm",
"(",
"f",
",",
"g",
",",
"K",
")",
":",
"if",
"K",
".",
"has_Field",
":",
"return",
"dup_ff_lcm",
"(",
"f",
",",
"g",
",",
"K",
")",
"else",
":",
"return",
"dup_rr_lcm",
"(",
"f",
",",
"g",
",",
"K",
")"
] | computes polynomial lcm of f and g in k[x] . | train | false |
37,812 | def server_hardreset(host=None, admin_username=None, admin_password=None, module=None):
return __execute_cmd('serveraction hardreset', host=host, admin_username=admin_username, admin_password=admin_password, module=module)
| [
"def",
"server_hardreset",
"(",
"host",
"=",
"None",
",",
"admin_username",
"=",
"None",
",",
"admin_password",
"=",
"None",
",",
"module",
"=",
"None",
")",
":",
"return",
"__execute_cmd",
"(",
"'serveraction hardreset'",
",",
"host",
"=",
"host",
",",
"admin_username",
"=",
"admin_username",
",",
"admin_password",
"=",
"admin_password",
",",
"module",
"=",
"module",
")"
] | performs a reset operation on the managed server . | train | true |
37,813 | def do_cli(manager, options):
if (irc_bot is None):
console(u'irc_bot is not installed. install using `pip install irc_bot`.')
return
if (hasattr(options, u'table_type') and (options.table_type == u'porcelain')):
disable_all_colors()
action_map = {u'status': action_status, u'restart': action_restart, u'stop': action_stop}
from flexget.plugins.daemon.irc import irc_manager
if (irc_manager is None):
console(u'IRC daemon does not appear to be running.')
return
action_map[options.irc_action](options, irc_manager)
| [
"def",
"do_cli",
"(",
"manager",
",",
"options",
")",
":",
"if",
"(",
"irc_bot",
"is",
"None",
")",
":",
"console",
"(",
"u'irc_bot is not installed. install using `pip install irc_bot`.'",
")",
"return",
"if",
"(",
"hasattr",
"(",
"options",
",",
"u'table_type'",
")",
"and",
"(",
"options",
".",
"table_type",
"==",
"u'porcelain'",
")",
")",
":",
"disable_all_colors",
"(",
")",
"action_map",
"=",
"{",
"u'status'",
":",
"action_status",
",",
"u'restart'",
":",
"action_restart",
",",
"u'stop'",
":",
"action_stop",
"}",
"from",
"flexget",
".",
"plugins",
".",
"daemon",
".",
"irc",
"import",
"irc_manager",
"if",
"(",
"irc_manager",
"is",
"None",
")",
":",
"console",
"(",
"u'IRC daemon does not appear to be running.'",
")",
"return",
"action_map",
"[",
"options",
".",
"irc_action",
"]",
"(",
"options",
",",
"irc_manager",
")"
] | handle movie-list subcommand . | train | false |
37,814 | def marker_sorted(markers, matches):
return sorted(markers, key=cmp_to_key(marker_comparator(matches, markers)))
| [
"def",
"marker_sorted",
"(",
"markers",
",",
"matches",
")",
":",
"return",
"sorted",
"(",
"markers",
",",
"key",
"=",
"cmp_to_key",
"(",
"marker_comparator",
"(",
"matches",
",",
"markers",
")",
")",
")"
] | sort markers from matches . | train | false |
37,815 | def WinFileTimeToDateTime(filetime):
return (NULL_FILETIME + datetime.timedelta(microseconds=(filetime / 10)))
| [
"def",
"WinFileTimeToDateTime",
"(",
"filetime",
")",
":",
"return",
"(",
"NULL_FILETIME",
"+",
"datetime",
".",
"timedelta",
"(",
"microseconds",
"=",
"(",
"filetime",
"/",
"10",
")",
")",
")"
] | take a windows filetime as integer and convert to datetime . | train | false |
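A self-contained check; NULL_FILETIME is assumed to be the FILETIME epoch, 1601-01-01:

import datetime

NULL_FILETIME = datetime.datetime(1601, 1, 1)  # assumed module constant

def WinFileTimeToDateTime(filetime):
    # FILETIME counts 100-ns intervals since 1601-01-01
    return NULL_FILETIME + datetime.timedelta(microseconds=(filetime / 10))

# 116444736000000000 is the well-known FILETIME value of the Unix epoch:
print(WinFileTimeToDateTime(116444736000000000))  # 1970-01-01 00:00:00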
37,820 | def gen_random_resource_name(resource='', timestamp=True):
fields = ['horizon']
if resource:
fields.append(resource)
if timestamp:
tstamp = time.strftime('%d-%m-%H-%M-%S')
fields.append(tstamp)
fields.append(uuidutils.generate_uuid(dashed=False))
return '_'.join(fields)
| [
"def",
"gen_random_resource_name",
"(",
"resource",
"=",
"''",
",",
"timestamp",
"=",
"True",
")",
":",
"fields",
"=",
"[",
"'horizon'",
"]",
"if",
"resource",
":",
"fields",
".",
"append",
"(",
"resource",
")",
"if",
"timestamp",
":",
"tstamp",
"=",
"time",
".",
"strftime",
"(",
"'%d-%m-%H-%M-%S'",
")",
"fields",
".",
"append",
"(",
"tstamp",
")",
"fields",
".",
"append",
"(",
"uuidutils",
".",
"generate_uuid",
"(",
"dashed",
"=",
"False",
")",
")",
"return",
"'_'",
".",
"join",
"(",
"fields",
")"
] | generate random resource name using uuid and timestamp . | train | false |
37,822 | def getwhere(x):
(y_prepool, y_postpool) = x
return K.gradients(K.sum(y_postpool), y_prepool)
| [
"def",
"getwhere",
"(",
"x",
")",
":",
"(",
"y_prepool",
",",
"y_postpool",
")",
"=",
"x",
"return",
"K",
".",
"gradients",
"(",
"K",
".",
"sum",
"(",
"y_postpool",
")",
",",
"y_prepool",
")"
] | calculate the "where" mask that contains switches indicating which index contained the max value when maxpool2d was applied . | train | false |
37,823 | def _safe_find_iso_sr(session):
sr_ref = _find_iso_sr(session)
if (sr_ref is None):
raise exception.NotFound(_('Cannot find SR of content-type ISO'))
return sr_ref
| [
"def",
"_safe_find_iso_sr",
"(",
"session",
")",
":",
"sr_ref",
"=",
"_find_iso_sr",
"(",
"session",
")",
"if",
"(",
"sr_ref",
"is",
"None",
")",
":",
"raise",
"exception",
".",
"NotFound",
"(",
"_",
"(",
"'Cannot find SR of content-type ISO'",
")",
")",
"return",
"sr_ref"
] | same as _find_iso_sr except raises a notfound exception if sr cannot be determined . | train | false |
37,824 | def abs_url(path):
if (not path):
return
if (path.startswith(u'http://') or path.startswith(u'https://')):
return path
if (not path.startswith(u'/')):
path = (u'/' + path)
return path
| [
"def",
"abs_url",
"(",
"path",
")",
":",
"if",
"(",
"not",
"path",
")",
":",
"return",
"if",
"(",
"path",
".",
"startswith",
"(",
"u'http://'",
")",
"or",
"path",
".",
"startswith",
"(",
"u'https://'",
")",
")",
":",
"return",
"path",
"if",
"(",
"not",
"path",
".",
"startswith",
"(",
"u'/'",
")",
")",
":",
"path",
"=",
"(",
"u'/'",
"+",
"path",
")",
"return",
"path"
] | deconstructs and reconstructs a url into an absolute url or a url relative from root / . | train | false |
37,828 | @utils.arg('name', metavar='<name>', help=_('Name of aggregate.'))
@utils.arg('availability_zone', metavar='<availability-zone>', default=None, nargs='?', help=_('The availability zone of the aggregate (optional).'))
def do_aggregate_create(cs, args):
aggregate = cs.aggregates.create(args.name, args.availability_zone)
_print_aggregate_details(cs, aggregate)
| [
"@",
"utils",
".",
"arg",
"(",
"'name'",
",",
"metavar",
"=",
"'<name>'",
",",
"help",
"=",
"_",
"(",
"'Name of aggregate.'",
")",
")",
"@",
"utils",
".",
"arg",
"(",
"'availability_zone'",
",",
"metavar",
"=",
"'<availability-zone>'",
",",
"default",
"=",
"None",
",",
"nargs",
"=",
"'?'",
",",
"help",
"=",
"_",
"(",
"'The availability zone of the aggregate (optional).'",
")",
")",
"def",
"do_aggregate_create",
"(",
"cs",
",",
"args",
")",
":",
"aggregate",
"=",
"cs",
".",
"aggregates",
".",
"create",
"(",
"args",
".",
"name",
",",
"args",
".",
"availability_zone",
")",
"_print_aggregate_details",
"(",
"cs",
",",
"aggregate",
")"
] | create a new aggregate with the specified details . | train | false |
37,829 | def test_clean_info_bads():
raw_file = op.join(op.dirname(__file__), 'io', 'tests', 'data', 'test_raw.fif')
raw = read_raw_fif(raw_file)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
idx_eeg_bad_ch = picks_eeg[[1, 5, 14]]
eeg_bad_ch = [raw.info['ch_names'][k] for k in idx_eeg_bad_ch]
picks_meg = pick_types(raw.info, meg=True, eeg=False)
idx_meg_bad_ch = picks_meg[[0, 15, 34]]
meg_bad_ch = [raw.info['ch_names'][k] for k in idx_meg_bad_ch]
raw.info['bads'] = (eeg_bad_ch + meg_bad_ch)
info_eeg = pick_info(raw.info, picks_eeg)
info_meg = pick_info(raw.info, picks_meg)
assert_equal(info_eeg['bads'], eeg_bad_ch)
assert_equal(info_meg['bads'], meg_bad_ch)
info = pick_info(raw.info, picks_meg)
info._check_consistency()
info['bads'] += ['EEG 053']
assert_raises(RuntimeError, info._check_consistency)
| [
"def",
"test_clean_info_bads",
"(",
")",
":",
"raw_file",
"=",
"op",
".",
"join",
"(",
"op",
".",
"dirname",
"(",
"__file__",
")",
",",
"'io'",
",",
"'tests'",
",",
"'data'",
",",
"'test_raw.fif'",
")",
"raw",
"=",
"read_raw_fif",
"(",
"raw_file",
")",
"picks_eeg",
"=",
"pick_types",
"(",
"raw",
".",
"info",
",",
"meg",
"=",
"False",
",",
"eeg",
"=",
"True",
")",
"idx_eeg_bad_ch",
"=",
"picks_eeg",
"[",
"[",
"1",
",",
"5",
",",
"14",
"]",
"]",
"eeg_bad_ch",
"=",
"[",
"raw",
".",
"info",
"[",
"'ch_names'",
"]",
"[",
"k",
"]",
"for",
"k",
"in",
"idx_eeg_bad_ch",
"]",
"picks_meg",
"=",
"pick_types",
"(",
"raw",
".",
"info",
",",
"meg",
"=",
"True",
",",
"eeg",
"=",
"False",
")",
"idx_meg_bad_ch",
"=",
"picks_meg",
"[",
"[",
"0",
",",
"15",
",",
"34",
"]",
"]",
"meg_bad_ch",
"=",
"[",
"raw",
".",
"info",
"[",
"'ch_names'",
"]",
"[",
"k",
"]",
"for",
"k",
"in",
"idx_meg_bad_ch",
"]",
"raw",
".",
"info",
"[",
"'bads'",
"]",
"=",
"(",
"eeg_bad_ch",
"+",
"meg_bad_ch",
")",
"info_eeg",
"=",
"pick_info",
"(",
"raw",
".",
"info",
",",
"picks_eeg",
")",
"info_meg",
"=",
"pick_info",
"(",
"raw",
".",
"info",
",",
"picks_meg",
")",
"assert_equal",
"(",
"info_eeg",
"[",
"'bads'",
"]",
",",
"eeg_bad_ch",
")",
"assert_equal",
"(",
"info_meg",
"[",
"'bads'",
"]",
",",
"meg_bad_ch",
")",
"info",
"=",
"pick_info",
"(",
"raw",
".",
"info",
",",
"picks_meg",
")",
"info",
".",
"_check_consistency",
"(",
")",
"info",
"[",
"'bads'",
"]",
"+=",
"[",
"'EEG 053'",
"]",
"assert_raises",
"(",
"RuntimeError",
",",
"info",
".",
"_check_consistency",
")"
] | test cleaning info[bads] when bad_channels are excluded . | train | false |
37,830 | def validate_token(token_string):
token = Token.get(token_string)
if (token.expiry <= date_utils.get_datetime_utc_now()):
LOG.audit(('Token with id "%s" has expired.' % token.id))
raise exceptions.TokenExpiredError('Token has expired.')
LOG.audit(('Token with id "%s" is validated.' % token.id))
return token
| [
"def",
"validate_token",
"(",
"token_string",
")",
":",
"token",
"=",
"Token",
".",
"get",
"(",
"token_string",
")",
"if",
"(",
"token",
".",
"expiry",
"<=",
"date_utils",
".",
"get_datetime_utc_now",
"(",
")",
")",
":",
"LOG",
".",
"audit",
"(",
"(",
"'Token with id \"%s\" has expired.'",
"%",
"token",
".",
"id",
")",
")",
"raise",
"exceptions",
".",
"TokenExpiredError",
"(",
"'Token has expired.'",
")",
"LOG",
".",
"audit",
"(",
"(",
"'Token with id \"%s\" is validated.'",
"%",
"token",
".",
"id",
")",
")",
"return",
"token"
] | validate the provided authentication token . | train | false |
37,831 | def strip_handshake(capfile):
output_file = capfile
if program_exists('pyrit'):
cmd = ['pyrit', '-r', capfile, '-o', output_file, 'strip']
call(cmd, stdout=DN, stderr=DN)
elif program_exists('tshark'):
cmd = ['tshark', '-r', capfile, '-R', 'eapol || wlan_mgt.tag.interpretation', '-w', (capfile + '.temp')]
proc_strip = call(cmd, stdout=DN, stderr=DN)
rename((capfile + '.temp'), output_file)
else:
print ((((R + ' [!]') + O) + ' unable to strip .cap file: neither pyrit nor tshark were found') + W)
| [
"def",
"strip_handshake",
"(",
"capfile",
")",
":",
"output_file",
"=",
"capfile",
"if",
"program_exists",
"(",
"'pyrit'",
")",
":",
"cmd",
"=",
"[",
"'pyrit'",
",",
"'-r'",
",",
"capfile",
",",
"'-o'",
",",
"output_file",
",",
"'strip'",
"]",
"call",
"(",
"cmd",
",",
"stdout",
"=",
"DN",
",",
"stderr",
"=",
"DN",
")",
"elif",
"program_exists",
"(",
"'tshark'",
")",
":",
"cmd",
"=",
"[",
"'tshark'",
",",
"'-r'",
",",
"capfile",
",",
"'-R'",
",",
"'eapol || wlan_mgt.tag.interpretation'",
",",
"'-w'",
",",
"(",
"capfile",
"+",
"'.temp'",
")",
"]",
"proc_strip",
"=",
"call",
"(",
"cmd",
",",
"stdout",
"=",
"DN",
",",
"stderr",
"=",
"DN",
")",
"rename",
"(",
"(",
"capfile",
"+",
"'.temp'",
")",
",",
"output_file",
")",
"else",
":",
"print",
"(",
"(",
"(",
"(",
"R",
"+",
"' [!]'",
")",
"+",
"O",
")",
"+",
"' unable to strip .cap file: neither pyrit nor tshark were found'",
")",
"+",
"W",
")"
] | uses tshark or pyrit to strip all non-handshake packets from a .cap file . | train | false |
37,832 | def _import_module_with_version_check(module_name, minimum_version, install_info=None):
from distutils.version import LooseVersion
try:
module = __import__(module_name)
except ImportError as exc:
user_friendly_info = 'Module "{0}" could not be found. {1}'.format(module_name, (install_info or 'Please install it properly to use imbalanced-learn.'))
exc.args += (user_friendly_info,)
raise
module_version = getattr(module, '__version__', '0.0.0')
version_too_old = (not (LooseVersion(module_version) >= LooseVersion(minimum_version)))
if version_too_old:
message = 'A {module_name} version of at least {minimum_version} is required to use imbalanced-learn. {module_version} was found. Please upgrade {module_name}'.format(module_name=module_name, minimum_version=minimum_version, module_version=module_version)
raise ImportError(message)
return module
| [
"def",
"_import_module_with_version_check",
"(",
"module_name",
",",
"minimum_version",
",",
"install_info",
"=",
"None",
")",
":",
"from",
"distutils",
".",
"version",
"import",
"LooseVersion",
"try",
":",
"module",
"=",
"__import__",
"(",
"module_name",
")",
"except",
"ImportError",
"as",
"exc",
":",
"user_friendly_info",
"=",
"'Module \"{0}\" could not be found. {1}'",
".",
"format",
"(",
"module_name",
",",
"(",
"install_info",
"or",
"'Please install it properly to use imbalanced-learn.'",
")",
")",
"exc",
".",
"args",
"+=",
"(",
"user_friendly_info",
",",
")",
"raise",
"module_version",
"=",
"getattr",
"(",
"module",
",",
"'__version__'",
",",
"'0.0.0'",
")",
"version_too_old",
"=",
"(",
"not",
"(",
"LooseVersion",
"(",
"module_version",
")",
">=",
"LooseVersion",
"(",
"minimum_version",
")",
")",
")",
"if",
"version_too_old",
":",
"message",
"=",
"'A {module_name} version of at least {minimum_version} is required to use imbalanced-learn. {module_version} was found. Please upgrade {module_name}'",
".",
"format",
"(",
"module_name",
"=",
"module_name",
",",
"minimum_version",
"=",
"minimum_version",
",",
"module_version",
"=",
"module_version",
")",
"raise",
"ImportError",
"(",
"message",
")",
"return",
"module"
] | check that module is installed with a recent enough version . | train | false |
37,834 | def build_full_cache(items, id_key='id', ids=None):
return dict(((item['id'], item) for item in items))
| [
"def",
"build_full_cache",
"(",
"items",
",",
"id_key",
"=",
"'id'",
",",
"ids",
"=",
"None",
")",
":",
"return",
"dict",
"(",
"(",
"(",
"item",
"[",
"'id'",
"]",
",",
"item",
")",
"for",
"item",
"in",
"items",
")",
")"
] | uses a list of items retrieved from building the topic tree to create an item cache with look-up keys . | train | false |
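For illustration, a minimal call against the snippet above (the data is made up; note that as written the body keys on the literal 'id' field, so the id_key and ids parameters are effectively unused):

```python
items = [{'id': 'a', 'title': 'Algebra'}, {'id': 'b', 'title': 'Biology'}]
cache = build_full_cache(items)   # assumes the record's function is in scope
assert cache['a']['title'] == 'Algebra'
assert set(cache) == {'a', 'b'}
```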
37,835 | def replace_page_in_query(query, page_number, page_key=DEFAULT_PAGE_KEY):
getvars = parse_qs(query)
if (page_number is None):
getvars.pop(page_key, None)
else:
getvars[page_key] = page_number
return urlencode(getvars, True)
| [
"def",
"replace_page_in_query",
"(",
"query",
",",
"page_number",
",",
"page_key",
"=",
"DEFAULT_PAGE_KEY",
")",
":",
"getvars",
"=",
"parse_qs",
"(",
"query",
")",
"if",
"(",
"page_number",
"is",
"None",
")",
":",
"getvars",
".",
"pop",
"(",
"page_key",
",",
"None",
")",
"else",
":",
"getvars",
"[",
"page_key",
"]",
"=",
"page_number",
"return",
"urlencode",
"(",
"getvars",
",",
"True",
")"
] | replaces the page_key value in the query string with page_number . | train | false |
37,836 | def get_month_names(width='wide', context='format', locale=LC_TIME):
return Locale.parse(locale).months[context][width]
| [
"def",
"get_month_names",
"(",
"width",
"=",
"'wide'",
",",
"context",
"=",
"'format'",
",",
"locale",
"=",
"LC_TIME",
")",
":",
"return",
"Locale",
".",
"parse",
"(",
"locale",
")",
".",
"months",
"[",
"context",
"]",
"[",
"width",
"]"
] | return the month names used by the locale for the specified format . | train | false |
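A hedged example of the call, assuming the Babel library that this snippet wraps is installed (Babel returns a mapping from month number to name; the exact strings depend on the shipped locale data):

```python
names = get_month_names('abbreviated', locale='en_US')
assert names[1] == 'Jan'
assert names[12] == 'Dec'
```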
37,837 | def in6_isdocaddr(str):
return in6_isincluded(str, '2001:db8::', 32)
| [
"def",
"in6_isdocaddr",
"(",
"str",
")",
":",
"return",
"in6_isincluded",
"(",
"str",
",",
"'2001:db8::'",
",",
"32",
")"
] | returns true if provided address in printable format belongs to 2001:db8::/32 address space reserved for documentation . | train | false |
37,838 | def get_ipver_str(ip_version):
return IP_VERSION_DICT.get(ip_version, '')
| [
"def",
"get_ipver_str",
"(",
"ip_version",
")",
":",
"return",
"IP_VERSION_DICT",
".",
"get",
"(",
"ip_version",
",",
"''",
")"
] | convert an ip version number to a human-friendly string . | train | false |
37,839 | def _valid_dict(dic):
return (isinstance(dic, dict) and (len(dic) > 0))
| [
"def",
"_valid_dict",
"(",
"dic",
")",
":",
"return",
"(",
"isinstance",
"(",
"dic",
",",
"dict",
")",
"and",
"(",
"len",
"(",
"dic",
")",
">",
"0",
")",
")"
] | check whether the argument is a non-empty dictionary . | train | false |
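Behavior of the predicate above, shown as a few illustrative assertions:

```python
assert _valid_dict({'a': 1}) is True
assert _valid_dict({}) is False          # empty dicts are rejected
assert _valid_dict(['a', 'b']) is False  # non-dict inputs are rejected
```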
37,841 | def getAroundsFromPaths(paths, radius, thresholdRatio=0.9):
points = []
for path in paths:
points += getPointsFromPath(path, (1.01 * abs(radius)), thresholdRatio)
return getAroundsFromPoints(points, radius)
| [
"def",
"getAroundsFromPaths",
"(",
"paths",
",",
"radius",
",",
"thresholdRatio",
"=",
"0.9",
")",
":",
"points",
"=",
"[",
"]",
"for",
"path",
"in",
"paths",
":",
"points",
"+=",
"getPointsFromPath",
"(",
"path",
",",
"(",
"1.01",
"*",
"abs",
"(",
"radius",
")",
")",
",",
"thresholdRatio",
")",
"return",
"getAroundsFromPoints",
"(",
"points",
",",
"radius",
")"
] | get the arounds from the path . | train | false |
37,842 | def schedule_enabled():
cmd = ['softwareupdate', '--schedule']
ret = salt.utils.mac_utils.execute_return_result(cmd)
enabled = ret.split()[(-1)]
return (salt.utils.mac_utils.validate_enabled(enabled) == 'on')
| [
"def",
"schedule_enabled",
"(",
")",
":",
"cmd",
"=",
"[",
"'softwareupdate'",
",",
"'--schedule'",
"]",
"ret",
"=",
"salt",
".",
"utils",
".",
"mac_utils",
".",
"execute_return_result",
"(",
"cmd",
")",
"enabled",
"=",
"ret",
".",
"split",
"(",
")",
"[",
"(",
"-",
"1",
")",
"]",
"return",
"(",
"salt",
".",
"utils",
".",
"mac_utils",
".",
"validate_enabled",
"(",
"enabled",
")",
"==",
"'on'",
")"
] | check the status of automatic update scheduling . | train | true |
37,843 | def handle_awful_failure(fail_text):
if g.debug:
import sys
s = sys.exc_info()
raise s[1], None, s[2]
try:
import traceback
log.write_error_summary(fail_text)
for line in traceback.format_exc().splitlines():
g.log.error(line)
return (redditbroke % (make_failien_url(), websafe(fail_text)))
except:
return 'This is an error that should never occur. You win.'
| [
"def",
"handle_awful_failure",
"(",
"fail_text",
")",
":",
"if",
"g",
".",
"debug",
":",
"import",
"sys",
"s",
"=",
"sys",
".",
"exc_info",
"(",
")",
"raise",
"s",
"[",
"1",
"]",
",",
"None",
",",
"s",
"[",
"2",
"]",
"try",
":",
"import",
"traceback",
"log",
".",
"write_error_summary",
"(",
"fail_text",
")",
"for",
"line",
"in",
"traceback",
".",
"format_exc",
"(",
")",
".",
"splitlines",
"(",
")",
":",
"g",
".",
"log",
".",
"error",
"(",
"line",
")",
"return",
"(",
"redditbroke",
"%",
"(",
"make_failien_url",
"(",
")",
",",
"websafe",
"(",
"fail_text",
")",
")",
")",
"except",
":",
"return",
"'This is an error that should never occur. You win.'"
] | makes sure that no errors generated in the error handler percolate up to the user unless debug is enabled . | train | false |
37,844 | def test_only_major_dots():
line = Line(show_only_major_dots=True)
line.add('test', range(12))
line.x_labels = map(str, range(12))
line.x_labels_major = ['1', '5', '11']
q = line.render_pyquery()
assert (len(q('.dots')) == 3)
| [
"def",
"test_only_major_dots",
"(",
")",
":",
"line",
"=",
"Line",
"(",
"show_only_major_dots",
"=",
"True",
")",
"line",
".",
"add",
"(",
"'test'",
",",
"range",
"(",
"12",
")",
")",
"line",
".",
"x_labels",
"=",
"map",
"(",
"str",
",",
"range",
"(",
"12",
")",
")",
"line",
".",
"x_labels_major",
"=",
"[",
"'1'",
",",
"'5'",
",",
"'11'",
"]",
"q",
"=",
"line",
".",
"render_pyquery",
"(",
")",
"assert",
"(",
"len",
"(",
"q",
"(",
"'.dots'",
")",
")",
"==",
"3",
")"
] | test major dots with specified major labels . | train | false |
37,845 | def libvlc_audio_output_device_list_release(p_list):
f = (_Cfunctions.get('libvlc_audio_output_device_list_release', None) or _Cfunction('libvlc_audio_output_device_list_release', ((1,),), None, None, ctypes.POINTER(AudioOutputDevice)))
return f(p_list)
| [
"def",
"libvlc_audio_output_device_list_release",
"(",
"p_list",
")",
":",
"f",
"=",
"(",
"_Cfunctions",
".",
"get",
"(",
"'libvlc_audio_output_device_list_release'",
",",
"None",
")",
"or",
"_Cfunction",
"(",
"'libvlc_audio_output_device_list_release'",
",",
"(",
"(",
"1",
",",
")",
",",
")",
",",
"None",
",",
"None",
",",
"ctypes",
".",
"POINTER",
"(",
"AudioOutputDevice",
")",
")",
")",
"return",
"f",
"(",
"p_list",
")"
] | frees a list of available audio output devices . | train | true |
37,846 | def getTogetherLists(fileName):
functionLists = getFunctionLists(fileName)
togetherLists = []
for functionList in functionLists:
addTogetherList(functionList, togetherLists)
return togetherLists
| [
"def",
"getTogetherLists",
"(",
"fileName",
")",
":",
"functionLists",
"=",
"getFunctionLists",
"(",
"fileName",
")",
"togetherLists",
"=",
"[",
"]",
"for",
"functionList",
"in",
"functionLists",
":",
"addTogetherList",
"(",
"functionList",
",",
"togetherLists",
")",
"return",
"togetherLists"
] | get the lists of the unsorted and sorted functions in the file . | train | false |
37,847 | def appendproctitle(name):
if HAS_SETPROCTITLE:
setproctitle.setproctitle(((setproctitle.getproctitle() + ' ') + name))
| [
"def",
"appendproctitle",
"(",
"name",
")",
":",
"if",
"HAS_SETPROCTITLE",
":",
"setproctitle",
".",
"setproctitle",
"(",
"(",
"(",
"setproctitle",
".",
"getproctitle",
"(",
")",
"+",
"' '",
")",
"+",
"name",
")",
")"
] | append "name" to the current process title . | train | false |
37,848 | @register.tag(u'if')
def do_if(parser, token):
bits = token.split_contents()[1:]
condition = TemplateIfParser(parser, bits).parse()
nodelist = parser.parse((u'elif', u'else', u'endif'))
conditions_nodelists = [(condition, nodelist)]
token = parser.next_token()
while token.contents.startswith(u'elif'):
bits = token.split_contents()[1:]
condition = TemplateIfParser(parser, bits).parse()
nodelist = parser.parse((u'elif', u'else', u'endif'))
conditions_nodelists.append((condition, nodelist))
token = parser.next_token()
if (token.contents == u'else'):
nodelist = parser.parse((u'endif',))
conditions_nodelists.append((None, nodelist))
token = parser.next_token()
assert (token.contents == u'endif')
return IfNode(conditions_nodelists)
| [
"@",
"register",
".",
"tag",
"(",
"u'if'",
")",
"def",
"do_if",
"(",
"parser",
",",
"token",
")",
":",
"bits",
"=",
"token",
".",
"split_contents",
"(",
")",
"[",
"1",
":",
"]",
"condition",
"=",
"TemplateIfParser",
"(",
"parser",
",",
"bits",
")",
".",
"parse",
"(",
")",
"nodelist",
"=",
"parser",
".",
"parse",
"(",
"(",
"u'elif'",
",",
"u'else'",
",",
"u'endif'",
")",
")",
"conditions_nodelists",
"=",
"[",
"(",
"condition",
",",
"nodelist",
")",
"]",
"token",
"=",
"parser",
".",
"next_token",
"(",
")",
"while",
"token",
".",
"contents",
".",
"startswith",
"(",
"u'elif'",
")",
":",
"bits",
"=",
"token",
".",
"split_contents",
"(",
")",
"[",
"1",
":",
"]",
"condition",
"=",
"TemplateIfParser",
"(",
"parser",
",",
"bits",
")",
".",
"parse",
"(",
")",
"nodelist",
"=",
"parser",
".",
"parse",
"(",
"(",
"u'elif'",
",",
"u'else'",
",",
"u'endif'",
")",
")",
"conditions_nodelists",
".",
"append",
"(",
"(",
"condition",
",",
"nodelist",
")",
")",
"token",
"=",
"parser",
".",
"next_token",
"(",
")",
"if",
"(",
"token",
".",
"contents",
"==",
"u'else'",
")",
":",
"nodelist",
"=",
"parser",
".",
"parse",
"(",
"(",
"u'endif'",
",",
")",
")",
"conditions_nodelists",
".",
"append",
"(",
"(",
"None",
",",
"nodelist",
")",
")",
"token",
"=",
"parser",
".",
"next_token",
"(",
")",
"assert",
"(",
"token",
".",
"contents",
"==",
"u'endif'",
")",
"return",
"IfNode",
"(",
"conditions_nodelists",
")"
] | the {% if %} tag evaluates a variable . | train | false |
37,849 | def parse_sudoku_assignment(grid):
digits = re.sub('\\s', '', grid)
assert (len(digits) == 81)
digit = iter(digits)
asg = {}
for row in range(9):
for col in range(9):
d = int(digit.next())
if (d > 0):
asg[(row, col)] = d
return asg
| [
"def",
"parse_sudoku_assignment",
"(",
"grid",
")",
":",
"digits",
"=",
"re",
".",
"sub",
"(",
"'\\\\s'",
",",
"''",
",",
"grid",
")",
"assert",
"(",
"len",
"(",
"digits",
")",
"==",
"81",
")",
"digit",
"=",
"iter",
"(",
"digits",
")",
"asg",
"=",
"{",
"}",
"for",
"row",
"in",
"range",
"(",
"9",
")",
":",
"for",
"col",
"in",
"range",
"(",
"9",
")",
":",
"d",
"=",
"int",
"(",
"digit",
".",
"next",
"(",
")",
")",
"if",
"(",
"d",
">",
"0",
")",
":",
"asg",
"[",
"(",
"row",
",",
"col",
")",
"]",
"=",
"d",
"return",
"asg"
] | given a string of 81 digits , builds a dict mapping (row, col) to each nonzero digit . | train | false |
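An illustrative call (the grid below is an arbitrary example; note that digit.next() makes the snippet Python 2 only):

```python
grid = (
    '530070000' '600195000' '098000060'
    '800060003' '400803001' '700020006'
    '060000280' '000419005' '000080079'
)
asg = parse_sudoku_assignment(grid)  # assumes the record's function is in scope
assert asg[(0, 0)] == 5              # only nonzero cells appear in the assignment
assert (0, 2) not in asg
```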
37,852 | def is_probably_builtin(node):
prev = node.prev_sibling
if ((prev is not None) and (prev.type == token.DOT)):
return False
parent = node.parent
if (parent.type in (syms.funcdef, syms.classdef)):
return False
if ((parent.type == syms.expr_stmt) and (parent.children[0] is node)):
return False
if ((parent.type == syms.parameters) or ((parent.type == syms.typedargslist) and (((prev is not None) and (prev.type == token.COMMA)) or (parent.children[0] is node)))):
return False
return True
| [
"def",
"is_probably_builtin",
"(",
"node",
")",
":",
"prev",
"=",
"node",
".",
"prev_sibling",
"if",
"(",
"(",
"prev",
"is",
"not",
"None",
")",
"and",
"(",
"prev",
".",
"type",
"==",
"token",
".",
"DOT",
")",
")",
":",
"return",
"False",
"parent",
"=",
"node",
".",
"parent",
"if",
"(",
"parent",
".",
"type",
"in",
"(",
"syms",
".",
"funcdef",
",",
"syms",
".",
"classdef",
")",
")",
":",
"return",
"False",
"if",
"(",
"(",
"parent",
".",
"type",
"==",
"syms",
".",
"expr_stmt",
")",
"and",
"(",
"parent",
".",
"children",
"[",
"0",
"]",
"is",
"node",
")",
")",
":",
"return",
"False",
"if",
"(",
"(",
"parent",
".",
"type",
"==",
"syms",
".",
"parameters",
")",
"or",
"(",
"(",
"parent",
".",
"type",
"==",
"syms",
".",
"typedargslist",
")",
"and",
"(",
"(",
"(",
"prev",
"is",
"not",
"None",
")",
"and",
"(",
"prev",
".",
"type",
"==",
"token",
".",
"COMMA",
")",
")",
"or",
"(",
"parent",
".",
"children",
"[",
"0",
"]",
"is",
"node",
")",
")",
")",
")",
":",
"return",
"False",
"return",
"True"
] | check that something isn't an attribute or function name etc . | train | true |
37,853 | def _reshow_nbagg_figure(fig):
try:
reshow = fig.canvas.manager.reshow
except AttributeError:
raise NotImplementedError()
else:
reshow()
| [
"def",
"_reshow_nbagg_figure",
"(",
"fig",
")",
":",
"try",
":",
"reshow",
"=",
"fig",
".",
"canvas",
".",
"manager",
".",
"reshow",
"except",
"AttributeError",
":",
"raise",
"NotImplementedError",
"(",
")",
"else",
":",
"reshow",
"(",
")"
] | reshow an nbagg figure . | train | false |
37,854 | def xorS(a, b):
assert (len(a) == len(b))
x = []
for i in range(len(a)):
x.append(chr((ord(a[i]) ^ ord(b[i]))))
return ''.join(x)
| [
"def",
"xorS",
"(",
"a",
",",
"b",
")",
":",
"assert",
"(",
"len",
"(",
"a",
")",
"==",
"len",
"(",
"b",
")",
")",
"x",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"a",
")",
")",
":",
"x",
".",
"append",
"(",
"chr",
"(",
"(",
"ord",
"(",
"a",
"[",
"i",
"]",
")",
"^",
"ord",
"(",
"b",
"[",
"i",
"]",
")",
")",
")",
")",
"return",
"''",
".",
"join",
"(",
"x",
")"
] | xor two strings . | train | false |
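A quick round-trip check for the helper above (the strings are illustrative):

```python
a, b = 'secret', 'keykey'          # equal lengths, as the assert requires
c = xorS(a, b)
assert xorS(c, b) == a             # XOR with the same key is its own inverse
assert xorS('a', 'a') == '\x00'
```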
37,855 | def _ls_logs(fs, log_dir_stream, matcher, **kwargs):
def _fs_ls(path):
try:
if fs.exists(log_dir):
for path in fs.ls(log_dir):
(yield path)
except (IOError, OSError) as e:
log.warning(("couldn't ls() %s: %r" % (log_dir, e)))
for log_dirs in log_dir_stream:
if isinstance(log_dirs, str):
raise TypeError
matches = []
for log_dir in log_dirs:
for path in _fs_ls(log_dir):
m = matcher(path, **kwargs)
if (m is not None):
m['path'] = path
matches.append(m)
if matches:
return _sort_by_recency(matches)
return []
| [
"def",
"_ls_logs",
"(",
"fs",
",",
"log_dir_stream",
",",
"matcher",
",",
"**",
"kwargs",
")",
":",
"def",
"_fs_ls",
"(",
"path",
")",
":",
"try",
":",
"if",
"fs",
".",
"exists",
"(",
"log_dir",
")",
":",
"for",
"path",
"in",
"fs",
".",
"ls",
"(",
"log_dir",
")",
":",
"(",
"yield",
"path",
")",
"except",
"(",
"IOError",
",",
"OSError",
")",
"as",
"e",
":",
"log",
".",
"warning",
"(",
"(",
"\"couldn't ls() %s: %r\"",
"%",
"(",
"log_dir",
",",
"e",
")",
")",
")",
"for",
"log_dirs",
"in",
"log_dir_stream",
":",
"if",
"isinstance",
"(",
"log_dirs",
",",
"str",
")",
":",
"raise",
"TypeError",
"matches",
"=",
"[",
"]",
"for",
"log_dir",
"in",
"log_dirs",
":",
"for",
"path",
"in",
"_fs_ls",
"(",
"log_dir",
")",
":",
"m",
"=",
"matcher",
"(",
"path",
",",
"**",
"kwargs",
")",
"if",
"(",
"m",
"is",
"not",
"None",
")",
":",
"m",
"[",
"'path'",
"]",
"=",
"path",
"matches",
".",
"append",
"(",
"m",
")",
"if",
"matches",
":",
"return",
"_sort_by_recency",
"(",
"matches",
")",
"return",
"[",
"]"
] | return a list of matches against log files . | train | false |
37,856 | def test_roc_auc():
skip_if_no_sklearn()
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
| [
"def",
"test_roc_auc",
"(",
")",
":",
"skip_if_no_sklearn",
"(",
")",
"trainer",
"=",
"yaml_parse",
".",
"load",
"(",
"test_yaml",
")",
"trainer",
".",
"main_loop",
"(",
")"
] | test rocaucchannel . | train | false |
37,858 | def _sizestr(size_in_g):
return ('%sG' % size_in_g)
| [
"def",
"_sizestr",
"(",
"size_in_g",
")",
":",
"return",
"(",
"'%sG'",
"%",
"size_in_g",
")"
] | convert the specified size into a string value . | train | false |
37,859 | def test_bootstrap_range():
(min, max) = (a_norm.min(), a_norm.max())
out = algo.bootstrap(a_norm)
nose.tools.assert_less(min, out.min())
nose.tools.assert_greater_equal(max, out.max())
| [
"def",
"test_bootstrap_range",
"(",
")",
":",
"(",
"min",
",",
"max",
")",
"=",
"(",
"a_norm",
".",
"min",
"(",
")",
",",
"a_norm",
".",
"max",
"(",
")",
")",
"out",
"=",
"algo",
".",
"bootstrap",
"(",
"a_norm",
")",
"nose",
".",
"tools",
".",
"assert_less",
"(",
"min",
",",
"out",
".",
"min",
"(",
")",
")",
"nose",
".",
"tools",
".",
"assert_greater_equal",
"(",
"max",
",",
"out",
".",
"max",
"(",
")",
")"
] | test that bootstrapping a random array stays within the right range . | train | false |
37,860 | def floating_ip_fixed_ip_associate(context, floating_address, fixed_address, host):
return IMPL.floating_ip_fixed_ip_associate(context, floating_address, fixed_address, host)
| [
"def",
"floating_ip_fixed_ip_associate",
"(",
"context",
",",
"floating_address",
",",
"fixed_address",
",",
"host",
")",
":",
"return",
"IMPL",
".",
"floating_ip_fixed_ip_associate",
"(",
"context",
",",
"floating_address",
",",
"fixed_address",
",",
"host",
")"
] | associate a floating ip to a fixed_ip by address . | train | false |
37,865 | def add_wedge_spacing(df, spacing):
if isinstance(spacing, list):
for (i, space) in enumerate(spacing):
df.ix[((df['level'] == i), 'inners')] += space
else:
df.ix[((df['level'] > 0), 'inners')] += spacing
| [
"def",
"add_wedge_spacing",
"(",
"df",
",",
"spacing",
")",
":",
"if",
"isinstance",
"(",
"spacing",
",",
"list",
")",
":",
"for",
"(",
"i",
",",
"space",
")",
"in",
"enumerate",
"(",
"spacing",
")",
":",
"df",
".",
"ix",
"[",
"(",
"(",
"df",
"[",
"'level'",
"]",
"==",
"i",
")",
",",
"'inners'",
")",
"]",
"+=",
"space",
"else",
":",
"df",
".",
"ix",
"[",
"(",
"(",
"df",
"[",
"'level'",
"]",
">",
"0",
")",
",",
"'inners'",
")",
"]",
"+=",
"spacing"
] | add spacing to the inners column of the provided data based on level . | train | false |
37,870 | def GenerateYamlHandlersList(app_engine_web_xml, web_xml, static_files):
welcome_properties = _MakeWelcomeProperties(web_xml, static_files)
static_handler_generator = StaticHandlerGenerator(app_engine_web_xml, web_xml, welcome_properties)
dynamic_handler_generator = DynamicHandlerGenerator(app_engine_web_xml, web_xml)
handler_length = len(dynamic_handler_generator.GenerateOrderedHandlerList())
if static_files:
handler_length += len(static_handler_generator.GenerateOrderedHandlerList())
if (handler_length > appinfo.MAX_URL_MAPS):
dynamic_handler_generator.fall_through = True
dynamic_handler_generator.welcome_properties = {}
yaml_statements = ['handlers:']
if static_files:
yaml_statements += static_handler_generator.GetHandlerYaml()
yaml_statements += dynamic_handler_generator.GetHandlerYaml()
return yaml_statements
| [
"def",
"GenerateYamlHandlersList",
"(",
"app_engine_web_xml",
",",
"web_xml",
",",
"static_files",
")",
":",
"welcome_properties",
"=",
"_MakeWelcomeProperties",
"(",
"web_xml",
",",
"static_files",
")",
"static_handler_generator",
"=",
"StaticHandlerGenerator",
"(",
"app_engine_web_xml",
",",
"web_xml",
",",
"welcome_properties",
")",
"dynamic_handler_generator",
"=",
"DynamicHandlerGenerator",
"(",
"app_engine_web_xml",
",",
"web_xml",
")",
"handler_length",
"=",
"len",
"(",
"dynamic_handler_generator",
".",
"GenerateOrderedHandlerList",
"(",
")",
")",
"if",
"static_files",
":",
"handler_length",
"+=",
"len",
"(",
"static_handler_generator",
".",
"GenerateOrderedHandlerList",
"(",
")",
")",
"if",
"(",
"handler_length",
">",
"appinfo",
".",
"MAX_URL_MAPS",
")",
":",
"dynamic_handler_generator",
".",
"fall_through",
"=",
"True",
"dynamic_handler_generator",
".",
"welcome_properties",
"=",
"{",
"}",
"yaml_statements",
"=",
"[",
"'handlers:'",
"]",
"if",
"static_files",
":",
"yaml_statements",
"+=",
"static_handler_generator",
".",
"GetHandlerYaml",
"(",
")",
"yaml_statements",
"+=",
"dynamic_handler_generator",
".",
"GetHandlerYaml",
"(",
")",
"return",
"yaml_statements"
] | produces a list of yaml strings for dynamic and static handlers . | train | false |
37,871 | def _system_check_dimensionally_valid(a, b):
raise NotImplementedError
| [
"def",
"_system_check_dimensionally_valid",
"(",
"a",
",",
"b",
")",
":",
"raise",
"NotImplementedError"
] | check that ax=b style system input is dimensionally valid . | train | false |
37,872 | def CDLABANDONEDBABY(barDs, count, penetration=(-4e+37)):
return call_talib_with_ohlc(barDs, count, talib.CDLABANDONEDBABY, penetration)
| [
"def",
"CDLABANDONEDBABY",
"(",
"barDs",
",",
"count",
",",
"penetration",
"=",
"(",
"-",
"4e+37",
")",
")",
":",
"return",
"call_talib_with_ohlc",
"(",
"barDs",
",",
"count",
",",
"talib",
".",
"CDLABANDONEDBABY",
",",
"penetration",
")"
] | abandoned baby . | train | false |
37,873 | def debug_print_commands(flag):
global _print_commands
_print_commands = bool(flag)
| [
"def",
"debug_print_commands",
"(",
"flag",
")",
":",
"global",
"_print_commands",
"_print_commands",
"=",
"bool",
"(",
"flag",
")"
] | turn on/off printing of commands as they are executed . | train | false |
37,876 | def random_chars(num_chars):
return binascii.hexlify(os.urandom(int((num_chars / 2)))).decode('ascii')
| [
"def",
"random_chars",
"(",
"num_chars",
")",
":",
"return",
"binascii",
".",
"hexlify",
"(",
"os",
".",
"urandom",
"(",
"int",
"(",
"(",
"num_chars",
"/",
"2",
")",
")",
")",
")",
".",
"decode",
"(",
"'ascii'",
")"
] | returns random hex characters . | train | false |
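Illustrative properties of the helper above (the assertions assume an even num_chars, since int(num_chars / 2) floors):

```python
token = random_chars(8)
assert len(token) == 8
assert all(c in '0123456789abcdef' for c in token)  # hexlify emits lowercase hex
```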
37,877 | def expanded_eq(want, to_expand):
expander = ForParser(to_expand)
expander.expand_fors()
eq_(want, expander.to_unicode())
| [
"def",
"expanded_eq",
"(",
"want",
",",
"to_expand",
")",
":",
"expander",
"=",
"ForParser",
"(",
"to_expand",
")",
"expander",
".",
"expand_fors",
"(",
")",
"eq_",
"(",
"want",
",",
"expander",
".",
"to_unicode",
"(",
")",
")"
] | balance and expand the fors in to_expand . | train | false |
37,878 | def is_storage_local(storage):
try:
storage.path('test')
except NotImplementedError:
return False
return True
| [
"def",
"is_storage_local",
"(",
"storage",
")",
":",
"try",
":",
"storage",
".",
"path",
"(",
"'test'",
")",
"except",
"NotImplementedError",
":",
"return",
"False",
"return",
"True"
] | check to see if a file storage is local . | train | false |
37,879 | def _check_path_header(req, name, length, error_msg):
src_header = unquote(req.headers.get(name))
if (not src_header.startswith('/')):
src_header = ('/' + src_header)
try:
return utils.split_path(src_header, length, length, True)
except ValueError:
raise HTTPPreconditionFailed(request=req, body=error_msg)
| [
"def",
"_check_path_header",
"(",
"req",
",",
"name",
",",
"length",
",",
"error_msg",
")",
":",
"src_header",
"=",
"unquote",
"(",
"req",
".",
"headers",
".",
"get",
"(",
"name",
")",
")",
"if",
"(",
"not",
"src_header",
".",
"startswith",
"(",
"'/'",
")",
")",
":",
"src_header",
"=",
"(",
"'/'",
"+",
"src_header",
")",
"try",
":",
"return",
"utils",
".",
"split_path",
"(",
"src_header",
",",
"length",
",",
"length",
",",
"True",
")",
"except",
"ValueError",
":",
"raise",
"HTTPPreconditionFailed",
"(",
"request",
"=",
"req",
",",
"body",
"=",
"error_msg",
")"
] | validate that the value of a path-like header is well formatted . | train | false |
37,880 | def make_token(user, operation, expire=3600):
s = TimedJSONWebSignatureSerializer(current_app.config['SECRET_KEY'], expire)
data = {'id': user.id, 'op': operation}
return s.dumps(data)
| [
"def",
"make_token",
"(",
"user",
",",
"operation",
",",
"expire",
"=",
"3600",
")",
":",
"s",
"=",
"TimedJSONWebSignatureSerializer",
"(",
"current_app",
".",
"config",
"[",
"'SECRET_KEY'",
"]",
",",
"expire",
")",
"data",
"=",
"{",
"'id'",
":",
"user",
".",
"id",
",",
"'op'",
":",
"operation",
"}",
"return",
"s",
".",
"dumps",
"(",
"data",
")"
] | generates a json web signature . | train | false |
37,881 | def ansi_color_style(style='default'):
if (style in ANSI_STYLES):
cmap = ANSI_STYLES[style]
else:
msg = 'Could not find color style {0!r}, using default.'.format(style)
warnings.warn(msg, RuntimeWarning)
cmap = ANSI_STYLES['default']
return cmap
| [
"def",
"ansi_color_style",
"(",
"style",
"=",
"'default'",
")",
":",
"if",
"(",
"style",
"in",
"ANSI_STYLES",
")",
":",
"cmap",
"=",
"ANSI_STYLES",
"[",
"style",
"]",
"else",
":",
"msg",
"=",
"'Could not find color style {0!r}, using default.'",
".",
"format",
"(",
"style",
")",
"warnings",
".",
"warn",
"(",
"msg",
",",
"RuntimeWarning",
")",
"cmap",
"=",
"ANSI_STYLES",
"[",
"'default'",
"]",
"return",
"cmap"
] | returns the current color map . | train | false |
37,882 | def init_collection(obj, key):
state = instance_state(obj)
dict_ = state.dict
return init_state_collection(state, dict_, key)
| [
"def",
"init_collection",
"(",
"obj",
",",
"key",
")",
":",
"state",
"=",
"instance_state",
"(",
"obj",
")",
"dict_",
"=",
"state",
".",
"dict",
"return",
"init_state_collection",
"(",
"state",
",",
"dict_",
",",
"key",
")"
] | initialize a collection attribute and return the collection adapter . | train | false |
37,884 | def odnoklassniki_iframe_sig(data, client_secret_or_session_secret):
param_list = sorted(['{0:s}={1:s}'.format(key, value) for (key, value) in data.items()])
return md5((''.join(param_list) + client_secret_or_session_secret).encode('utf-8')).hexdigest()
| [
"def",
"odnoklassniki_iframe_sig",
"(",
"data",
",",
"client_secret_or_session_secret",
")",
":",
"param_list",
"=",
"sorted",
"(",
"[",
"'{0:s}={1:s}'",
".",
"format",
"(",
"key",
",",
"value",
")",
"for",
"(",
"key",
",",
"value",
")",
"in",
"data",
".",
"items",
"(",
")",
"]",
")",
"return",
"md5",
"(",
"(",
"''",
".",
"join",
"(",
"param_list",
")",
"+",
"client_secret_or_session_secret",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")"
] | calculates signature as described at: URL authentication+and+authorization if api method requires session context . | train | false |
37,885 | def make_strictly_feasible(x, lb, ub, rstep=1e-10):
x_new = x.copy()
active = find_active_constraints(x, lb, ub, rstep)
lower_mask = np.equal(active, (-1))
upper_mask = np.equal(active, 1)
if (rstep == 0):
x_new[lower_mask] = np.nextafter(lb[lower_mask], ub[lower_mask])
x_new[upper_mask] = np.nextafter(ub[upper_mask], lb[upper_mask])
else:
x_new[lower_mask] = (lb[lower_mask] + (rstep * np.maximum(1, np.abs(lb[lower_mask]))))
x_new[upper_mask] = (ub[upper_mask] - (rstep * np.maximum(1, np.abs(ub[upper_mask]))))
tight_bounds = ((x_new < lb) | (x_new > ub))
x_new[tight_bounds] = (0.5 * (lb[tight_bounds] + ub[tight_bounds]))
return x_new
| [
"def",
"make_strictly_feasible",
"(",
"x",
",",
"lb",
",",
"ub",
",",
"rstep",
"=",
"1e-10",
")",
":",
"x_new",
"=",
"x",
".",
"copy",
"(",
")",
"active",
"=",
"find_active_constraints",
"(",
"x",
",",
"lb",
",",
"ub",
",",
"rstep",
")",
"lower_mask",
"=",
"np",
".",
"equal",
"(",
"active",
",",
"(",
"-",
"1",
")",
")",
"upper_mask",
"=",
"np",
".",
"equal",
"(",
"active",
",",
"1",
")",
"if",
"(",
"rstep",
"==",
"0",
")",
":",
"x_new",
"[",
"lower_mask",
"]",
"=",
"np",
".",
"nextafter",
"(",
"lb",
"[",
"lower_mask",
"]",
",",
"ub",
"[",
"lower_mask",
"]",
")",
"x_new",
"[",
"upper_mask",
"]",
"=",
"np",
".",
"nextafter",
"(",
"ub",
"[",
"upper_mask",
"]",
",",
"lb",
"[",
"upper_mask",
"]",
")",
"else",
":",
"x_new",
"[",
"lower_mask",
"]",
"=",
"(",
"lb",
"[",
"lower_mask",
"]",
"+",
"(",
"rstep",
"*",
"np",
".",
"maximum",
"(",
"1",
",",
"np",
".",
"abs",
"(",
"lb",
"[",
"lower_mask",
"]",
")",
")",
")",
")",
"x_new",
"[",
"upper_mask",
"]",
"=",
"(",
"ub",
"[",
"upper_mask",
"]",
"-",
"(",
"rstep",
"*",
"np",
".",
"maximum",
"(",
"1",
",",
"np",
".",
"abs",
"(",
"ub",
"[",
"upper_mask",
"]",
")",
")",
")",
")",
"tight_bounds",
"=",
"(",
"(",
"x_new",
"<",
"lb",
")",
"|",
"(",
"x_new",
">",
"ub",
")",
")",
"x_new",
"[",
"tight_bounds",
"]",
"=",
"(",
"0.5",
"*",
"(",
"lb",
"[",
"tight_bounds",
"]",
"+",
"ub",
"[",
"tight_bounds",
"]",
")",
")",
"return",
"x_new"
] | shift a point to the interior of a feasible region . | train | false |
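A hedged numeric sketch, assuming the record's function and its find_active_constraints helper (from the same SciPy least-squares internals) are both in scope:

```python
import numpy as np

lb, ub = np.array([0.0, -1.0]), np.array([1.0, 1.0])
x = np.array([0.0, 2.0])            # first entry sits on a bound, second lies outside
x_new = make_strictly_feasible(x, lb, ub)
assert np.all(x_new > lb) and np.all(x_new < ub)  # strictly interior afterwards
```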
37,886 | def restart_apppool(name):
pscmd = list()
pscmd.append("Restart-WebAppPool '{0}'".format(name))
cmd_ret = _srvmgr(str().join(pscmd))
return (cmd_ret['retcode'] == 0)
| [
"def",
"restart_apppool",
"(",
"name",
")",
":",
"pscmd",
"=",
"list",
"(",
")",
"pscmd",
".",
"append",
"(",
"\"Restart-WebAppPool '{0}'\"",
".",
"format",
"(",
"name",
")",
")",
"cmd_ret",
"=",
"_srvmgr",
"(",
"str",
"(",
")",
".",
"join",
"(",
"pscmd",
")",
")",
"return",
"(",
"cmd_ret",
"[",
"'retcode'",
"]",
"==",
"0",
")"
] | restart an iis application pool . | train | false |
37,887 | @blueprint.route('/projects')
def list_all_projects():
return _list_projects()
| [
"@",
"blueprint",
".",
"route",
"(",
"'/projects'",
")",
"def",
"list_all_projects",
"(",
")",
":",
"return",
"_list_projects",
"(",
")"
] | return a list of all known project names . | train | false |
37,888 | def get_sql_all(app):
return ((get_sql_create(app) + get_custom_sql(app)) + get_sql_indexes(app))
| [
"def",
"get_sql_all",
"(",
"app",
")",
":",
"return",
"(",
"(",
"get_sql_create",
"(",
"app",
")",
"+",
"get_custom_sql",
"(",
"app",
")",
")",
"+",
"get_sql_indexes",
"(",
"app",
")",
")"
] | returns a list of create table sql . | train | false |
37,889 | def duration(function, *args, **kwargs):
t = time()
function(*args, **kwargs)
return (time() - t)
| [
"def",
"duration",
"(",
"function",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"t",
"=",
"time",
"(",
")",
"function",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"return",
"(",
"time",
"(",
")",
"-",
"t",
")"
] | returns the running time of the given function . | train | false |
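A minimal timing example for the helper above:

```python
import time

elapsed = duration(time.sleep, 0.25)  # assumes the record's function is in scope
assert elapsed >= 0.25
```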
37,890 | def power_off_instance(session, instance, vm_ref=None):
if (vm_ref is None):
vm_ref = get_vm_ref(session, instance)
LOG.debug('Powering off the VM', instance=instance)
try:
poweroff_task = session._call_method(session.vim, 'PowerOffVM_Task', vm_ref)
session._wait_for_task(poweroff_task)
LOG.debug('Powered off the VM', instance=instance)
except vexc.InvalidPowerStateException:
LOG.debug('VM already powered off', instance=instance)
| [
"def",
"power_off_instance",
"(",
"session",
",",
"instance",
",",
"vm_ref",
"=",
"None",
")",
":",
"if",
"(",
"vm_ref",
"is",
"None",
")",
":",
"vm_ref",
"=",
"get_vm_ref",
"(",
"session",
",",
"instance",
")",
"LOG",
".",
"debug",
"(",
"'Powering off the VM'",
",",
"instance",
"=",
"instance",
")",
"try",
":",
"poweroff_task",
"=",
"session",
".",
"_call_method",
"(",
"session",
".",
"vim",
",",
"'PowerOffVM_Task'",
",",
"vm_ref",
")",
"session",
".",
"_wait_for_task",
"(",
"poweroff_task",
")",
"LOG",
".",
"debug",
"(",
"'Powered off the VM'",
",",
"instance",
"=",
"instance",
")",
"except",
"vexc",
".",
"InvalidPowerStateException",
":",
"LOG",
".",
"debug",
"(",
"'VM already powered off'",
",",
"instance",
"=",
"instance",
")"
] | power off the specified instance . | train | false |
37,892 | def get_authorized_node_settings(user_addon):
return AddonDataverseNodeSettings.find(Q('user_settings', 'eq', user_addon))
| [
"def",
"get_authorized_node_settings",
"(",
"user_addon",
")",
":",
"return",
"AddonDataverseNodeSettings",
".",
"find",
"(",
"Q",
"(",
"'user_settings'",
",",
"'eq'",
",",
"user_addon",
")",
")"
] | returns node settings authorized by a given user settings object . | train | false |
37,893 | def option_namespace(name):
return (name + '-')
| [
"def",
"option_namespace",
"(",
"name",
")",
":",
"return",
"(",
"name",
"+",
"'-'",
")"
] | argumentparser options namespace . | train | false |