| id_within_dataset (int64, 1 to 55.5k) | snippet (string, 19 to 14.2k chars) | nl (string, 6 to 352 chars) | split_within_dataset (string, 1 class) | is_duplicated (bool, 2 classes) |
|---|---|---|---|---|
4,309
|
def get_num_cat(sample_by_cat, samples_in_otus):
    num_cat = defaultdict(int)
    for (cat, samples) in sample_by_cat.items():
        num_samples = len((set(samples_in_otus) & set(samples)))
        num_cat[cat[0]] += ((num_samples * (num_samples - 1)) / 2)
    return num_cat
|
builds a dictionary of numbers of samples keyed by metadata value .
|
train
| false
|
4,310
|
@gof.local_optimizer([csm_properties])
def local_csm_properties_csm(node):
    if (node.op == csm_properties):
        (csm,) = node.inputs
        if (csm.owner and ((csm.owner.op == CSC) or (csm.owner.op == CSR))):
            ret_var = [theano.tensor.patternbroadcast(i, o.broadcastable) for (i, o) in izip(csm.owner.inputs, node.outputs)]
            return ret_var
    return False
|
if we find csm_properties(csm) .
|
train
| false
|
4,311
|
def getThreadPolicy(getDefault, flavour):
    if importCtypesFailed:
        return False
    extendedPolicy = _timeConstraintThreadPolicy()
    getDefault = ctypes.c_int(getDefault)
    err = cocoa.thread_policy_get(cocoa.mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY, ctypes.byref(extendedPolicy), ctypes.byref(THREAD_TIME_CONSTRAINT_POLICY_COUNT), ctypes.byref(getDefault))
    return extendedPolicy
|
retrieve the current thread policy .
|
train
| false
|
4,312
|
@docstring.dedent_interpd
def phase_spectrum(x, Fs=None, window=None, pad_to=None, sides=None):
    return _single_spectrum_helper(x=x, Fs=Fs, window=window, pad_to=pad_to, sides=sides, mode=u'phase')
|
compute the phase of the frequency spectrum of *x* .
|
train
| false
|
4,314
|
def annotated(letter):
    ucode_pics = {'F': (2, 0, 2, 0, u'\u250c\u2500\n\u251c\u2500\n\u2575'), 'G': (3, 0, 3, 1, u'\u256d\u2500\u256e\n\u2502\u2576\u2510\n\u2570\u2500\u256f')}
    ascii_pics = {'F': (3, 0, 3, 0, ' _\n|_\n|\n'), 'G': (3, 0, 3, 1, ' __\n/__\n\\_|')}
    if _use_unicode:
        return ucode_pics[letter]
    else:
        return ascii_pics[letter]
|
return a stylised drawing of the letter letter .
|
train
| false
|
4,315
|
def features_contains(value):
    return var_contains('FEATURES', value)
|
verify if features variable contains a value in make .
|
train
| false
|
4,316
|
def find_control_points(c1x, c1y, mmx, mmy, c2x, c2y):
    cmx = (0.5 * ((4 * mmx) - (c1x + c2x)))
    cmy = (0.5 * ((4 * mmy) - (c1y + c2y)))
    return [(c1x, c1y), (cmx, cmy), (c2x, c2y)]
|
find control points of the bezier line throught c1 .
|
train
| false
|
4,317
|
def getRightStripAlphabetPercent(word):
    word = word.strip()
    for characterIndex in xrange((len(word) - 1), (-1), (-1)):
        character = word[characterIndex]
        if ((not character.isalpha()) and (not (character == '%'))):
            return float(word[:(characterIndex + 1)])
    return None
|
get word with alphabet characters and the percent sign stripped from the right .
|
train
| false
|
4,318
|
def _check_minions_directories_raetkey(pki_dir):
    accepted = os.path.join(pki_dir, salt.key.RaetKey.ACC)
    pre = os.path.join(pki_dir, salt.key.RaetKey.PEND)
    rejected = os.path.join(pki_dir, salt.key.RaetKey.REJ)
    return (accepted, pre, rejected)
|
return the minion keys directory paths .
|
train
| false
|
4,319
|
def assert_urls(host_blocker, blocked=BLOCKLIST_HOSTS, whitelisted=WHITELISTED_HOSTS, urls_to_check=URLS_TO_CHECK):
    whitelisted = (list(whitelisted) + list(host_blocker.WHITELISTED))
    for str_url in urls_to_check:
        url = QUrl(str_url)
        host = url.host()
        if ((host in blocked) and (host not in whitelisted)):
            assert host_blocker.is_blocked(url)
        else:
            assert (not host_blocker.is_blocked(url))
|
test if urls to check are blocked or not by hostblocker .
|
train
| false
|
4,320
|
def register_scale(scale_class):
    _scale_mapping[scale_class.name] = scale_class
|
register a new kind of scale .
|
train
| false
|
4,322
|
def _compare_epochs_infos(info1, info2, ind):
    info1._check_consistency()
    info2._check_consistency()
    if (info1['nchan'] != info2['nchan']):
        raise ValueError(("epochs[%d]['info']['nchan'] must match" % ind))
    if (info1['bads'] != info2['bads']):
        raise ValueError(("epochs[%d]['info']['bads'] must match" % ind))
    if (info1['sfreq'] != info2['sfreq']):
        raise ValueError(("epochs[%d]['info']['sfreq'] must match" % ind))
    if (set(info1['ch_names']) != set(info2['ch_names'])):
        raise ValueError(("epochs[%d]['info']['ch_names'] must match" % ind))
    if (len(info2['projs']) != len(info1['projs'])):
        raise ValueError('SSP projectors in epochs files must be the same')
    if any(((not _proj_equal(p1, p2)) for (p1, p2) in zip(info2['projs'], info1['projs']))):
        raise ValueError('SSP projectors in epochs files must be the same')
    if (((info1['dev_head_t'] is None) != (info2['dev_head_t'] is None)) or ((info1['dev_head_t'] is not None) and (not np.allclose(info1['dev_head_t']['trans'], info2['dev_head_t']['trans'], rtol=1e-06)))):
        raise ValueError(("epochs[%d]['info']['dev_head_t'] must match. The epochs probably come from different runs, and are therefore associated with different head positions. Manually change info['dev_head_t'] to avoid this message but beware that this means the MEG sensors will not be properly spatially aligned. See mne.preprocessing.maxwell_filter to realign the runs to a common head position." % ind))
|
compare infos .
|
train
| false
|
4,323
|
def dup_ff_lcm(f, g, K):
    h = dup_quo(dup_mul(f, g, K), dup_gcd(f, g, K), K)
    return dup_monic(h, K)
|
computes polynomial lcm over a field in k[x] .
|
train
| false
|
4,325
|
def regen_keys():
    for fn_ in os.listdir(__opts__['pki_dir']):
        path = os.path.join(__opts__['pki_dir'], fn_)
        try:
            os.remove(path)
        except os.error:
            pass
    channel = salt.transport.Channel.factory(__opts__)
|
used to regenerate the minion keys .
|
train
| false
|
4,326
|
def init(mpstate):
    return SerialModule(mpstate)
|
initalize the platform with devices .
|
train
| false
|
4,329
|
@contextmanager
def silence_stderr():
    if DEBUG:
        (yield)
    else:
        with threading.Lock():
            stderr = sys.stderr
            sys.stderr = StringIO()
        (yield)
        with threading.Lock():
            sys.stderr = stderr
|
redirect stderr .
|
train
| false
|
4,330
|
@_np.deprecate(message='scipy.constants.C2F is deprecated in scipy 0.18.0. Use scipy.constants.convert_temperature instead. Note that the new function has a different signature.')
def C2F(C):
    return ((1.8 * _np.asanyarray(C)) + 32)
|
convert celsius to fahrenheit parameters c : array_like celsius temperature(s) to be converted .
|
train
| false
|
4,331
|
def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args):
    vserver = _vserver_get(v_name, **connection_args)
    if (vserver is None):
        return False
    if ((v_ip is not None) and (vserver.get_ipv46() != v_ip)):
        return False
    if ((v_port is not None) and (vserver.get_port() != v_port)):
        return False
    if ((v_type is not None) and (vserver.get_servicetype().upper() != v_type.upper())):
        return False
    return True
|
checks if a vserver exists cli example: .
|
train
| true
|
4,333
|
def task_coverage():
    return {'task_dep': ['locale', 'doctest'], 'actions': ['py.test --cov nikola --cov-report term-missing tests/'], 'verbosity': 2}
|
run unit-tests using py .
|
train
| false
|
4,334
|
def prepare_request(uri, headers=None, data=None, method=None):
    if (headers is None):
        headers = {}
    if (data and (not method)):
        method = 'POST'
    elif (not method):
        method = 'GET'
    if ((method == 'GET') and data):
        uri = add_params_to_uri(uri, data)
        data = None
    return (uri, headers, data, method)
|
make request parameters right .
|
train
| true
|
4,335
|
def _sysfs_attr(name, value=None, log_lvl=None, log_msg=None):
    if isinstance(name, six.string_types):
        name = [name]
    res = __salt__['sysfs.attr'](os.path.join(*name), value)
    if ((not res) and (log_lvl is not None)):
        log.log(LOG[log_lvl], log_msg)
    return res
|
simple wrapper with logging around sysfs .
|
train
| true
|
4,338
|
def platform_supported(rospack, pkg, os, version):
    return _platform_supported(rospack.get_manifest(pkg), os, version)
|
return whether the platform defined by os and version is marked as supported in the package .
|
train
| false
|
4,339
|
def _check_is_not_multitable(values, model):
    used_models = set()
    for field in values:
        if isinstance(field, sqlalchemy.orm.attributes.InstrumentedAttribute):
            used_models.add(field.class_)
        elif isinstance(field, six.string_types):
            used_models.add(model)
        else:
            raise exception.ProgrammingError(reason='DB Conditional update - Unknown field type, must be string or ORM field.')
    if (len(used_models) > 1):
        raise exception.ProgrammingError(reason='DB Conditional update - Error in query, multitable updates are not supported.')
|
check that we dont try to do multitable updates .
|
train
| false
|
4,340
|
def captured_stderr():
    return captured_output('stderr')
|
return a context manager used by captured_stdout/stdin/stderr that temporarily replaces the sys stream *stream_name* with a stringio .
|
train
| false
|
4,341
|
def use_conn_pool(func):
    def wrapper(self, *args, **kwargs):
        with self._get_pool_connection() as conn:
            self._apply_options(conn)
            return func(self, conn, *args, **kwargs)
    return wrapper
|
use this only for connection pool specific ldap api .
|
train
| false
|
4,343
|
def block_diag(*arrs):
    if (arrs == ()):
        arrs = ([],)
    arrs = [np.atleast_2d(a) for a in arrs]
    bad_args = [k for k in range(len(arrs)) if (arrs[k].ndim > 2)]
    if bad_args:
        raise ValueError(('arguments in the following positions have dimension greater than 2: %s' % bad_args))
    shapes = np.array([a.shape for a in arrs])
    out_dtype = np.find_common_type([arr.dtype for arr in arrs], [])
    out = np.zeros(np.sum(shapes, axis=0), dtype=out_dtype)
    (r, c) = (0, 0)
    for (i, (rr, cc)) in enumerate(shapes):
        out[r:(r + rr), c:(c + cc)] = arrs[i]
        r += rr
        c += cc
    return out
|
build a block diagonal sparse matrix from provided matrices .
|
train
| false
|
4,345
|
def discardLogs():
    global logfile
    logfile = NullFile()
|
throw away all logs .
|
train
| false
|
4,348
|
def commit_message_path():
    path = git.git_path(u'GIT_COLA_MSG')
    if core.exists(path):
        return path
    return None
|
return the path to .
|
train
| false
|
4,349
|
def Var(xs, mu=None, ddof=0):
    xs = np.asarray(xs)
    if (mu is None):
        mu = xs.mean()
    ds = (xs - mu)
    return (np.dot(ds, ds) / (len(xs) - ddof))
|
computes variance .
|
train
| false
|
4,354
|
def get_closest_dir(workdir):
    closest_dir = ''
    for wdi in path_split_all(workdir):
        if os.path.isdir(os.path.join(closest_dir, wdi)):
            closest_dir = os.path.join(closest_dir, wdi)
        else:
            break
    assert (closest_dir != workdir)
    return (closest_dir, wdi)
|
returns the topmost already-existing directory in the given path erasing work-dirs should never progress above this file .
|
train
| false
|
4,356
|
@pytest.mark.parametrize(u'mode', modes)
def test_gaussian_eval_1D(mode):
    model = Gaussian1D(1, 0, 20)
    x = np.arange((-100), 101)
    values = model(x)
    disc_values = discretize_model(model, ((-100), 101), mode=mode)
    assert_allclose(values, disc_values, atol=0.001)
|
discretize gaussian with different modes and check if result is at least similar to gaussian1d .
|
train
| false
|
4,357
|
def _get_read_region(read_name):
    return int(read_name[8])
|
extract region from read name .
|
train
| false
|
4,358
|
def zpad(x, l):
    return (('\x00' * max(0, (l - len(x)))) + x)
|
left zero pad value x at least to length l .
|
train
| false
|
4,359
|
def set_parser(new_parser_fun=None):
    global parser_fun
    if (new_parser_fun is None):
        new_parser_fun = mb_parser_xml
    if (not callable(new_parser_fun)):
        raise ValueError('new_parser_fun must be callable')
    parser_fun = new_parser_fun
|
sets the function used to parse the response from the musicbrainz web service .
|
train
| false
|
4,360
|
def select_row(view, row):
    selmodel = view.selectionModel()
    selmodel.select(view.model().index(row, 0), QItemSelectionModel.ClearAndSelect)
|
select a row in an item view .
|
train
| false
|
4,361
|
@login_required
def preview_handler(request, usage_key_string, handler, suffix=''):
    usage_key = UsageKey.from_string(usage_key_string)
    if isinstance(usage_key, (AsideUsageKeyV1, AsideUsageKeyV2)):
        descriptor = modulestore().get_item(usage_key.usage_key)
        for aside in descriptor.runtime.get_asides(descriptor):
            if (aside.scope_ids.block_type == usage_key.aside_type):
                asides = [aside]
                instance = aside
                break
    else:
        descriptor = modulestore().get_item(usage_key)
        instance = _load_preview_module(request, descriptor)
        asides = []
    req = django_to_webob_request(request)
    try:
        resp = instance.handle(handler, req, suffix)
    except NoSuchHandlerError:
        log.exception('XBlock %s attempted to access missing handler %r', instance, handler)
        raise Http404
    except NotFoundError:
        log.exception("Module indicating to user that request doesn't exist")
        raise Http404
    except ProcessingError:
        log.warning('Module raised an error while processing AJAX request', exc_info=True)
        return HttpResponseBadRequest()
    except Exception:
        log.exception('error processing ajax call')
        raise
    modulestore().update_item(descriptor, request.user.id, asides=asides)
    return webob_to_django_response(resp)
|
dispatch an ajax action to an xblock usage_key_string: the usage_key_string-id of the block to dispatch to .
|
train
| false
|
4,362
|
def _deserialize_spec_test(data, file_path):
    context = data['data']
    description = data['desc']
    expected = unicode(data['expected'])
    partials = ((data.has_key('partials') and data['partials']) or {})
    template = data['template']
    test_name = data['name']
    _convert_children(context)
    test_case = _make_spec_test(expected, template, context, partials, description, test_name, file_path)
    return test_case
|
return a unittest .
|
train
| false
|
4,363
|
def getIndexOfStartingWithSecond(letter, splitLine):
    for wordIndex in xrange(1, len(splitLine)):
        word = splitLine[wordIndex]
        firstLetter = word[0]
        if (firstLetter == letter):
            return wordIndex
    return (-1)
|
get index of the first occurence of the given letter in the split line .
|
train
| false
|
4,364
|
def subscribe_to_exploration(user_id, exploration_id):
    subscriptions_model = user_models.UserSubscriptionsModel.get(user_id, strict=False)
    if (not subscriptions_model):
        subscriptions_model = user_models.UserSubscriptionsModel(id=user_id)
    if (exploration_id not in subscriptions_model.activity_ids):
        subscriptions_model.activity_ids.append(exploration_id)
        subscriptions_model.put()
|
subscribes a user to an exploration .
|
train
| false
|
4,366
|
def _conv(obj, dtype=None):
    if (obj is None):
        return obj
    else:
        if (dtype is None):
            obj = numpy.asarray(obj)
        else:
            obj = numpy.asarray(obj, dtype)
        if (obj.shape == ()):
            return obj.dtype.type(obj)
        else:
            return obj
|
convert an object to the preferred form for input to the odr routine .
|
train
| false
|
4,367
|
def sysctlTestAndSet(name, limit):
    if ('/' not in name):
        name = ('/proc/sys/' + name.replace('.', '/'))
    with open(name, 'r') as readFile:
        oldLimit = readFile.readline()
    if isinstance(limit, int):
        if (int(oldLimit) < limit):
            with open(name, 'w') as writeFile:
                writeFile.write(('%d' % limit))
    else:
        with open(name, 'w') as writeFile:
            writeFile.write(limit)
|
helper function to set sysctl limits .
|
train
| false
|
4,368
|
def remove_borders_from_image(img, fuzz=None):
    fuzz = (tweaks[u'cover_trim_fuzz_value'] if (fuzz is None) else fuzz)
    img = image_from_data(img)
    ans = imageops.remove_borders(img, max(0, fuzz))
    return (ans if (ans.size() != img.size()) else img)
|
try to auto-detect and remove any borders from the image .
|
train
| false
|
4,369
|
def remove_course_milestones(course_key, user, relationship):
    if (not settings.FEATURES.get('MILESTONES_APP')):
        return None
    course_milestones = milestones_api.get_course_milestones(course_key=course_key, relationship=relationship)
    for milestone in course_milestones:
        milestones_api.remove_user_milestone({'id': user.id}, milestone)
|
remove all user milestones for the course specified by course_key .
|
train
| false
|
4,372
|
def load_processed_files(path):
    processed_files = {}
    with open(path) as input_file:
        for line in input_file.readlines():
            line = line.strip()
            if (not line):
                continue
            if (' ' not in line):
                raise TypeError(('Malformed line: %s' % line))
            (path, timestamp) = line.rsplit(' ', 1)
            if (not os.path.isabs(path)):
                raise TypeError(("'%s' is not an absolute path" % path))
            elif (not timestamp.isdigit()):
                raise TypeError(("'%s' is not an integer timestamp" % timestamp))
            processed_files[path] = int(timestamp)
    return processed_files
|
loads a dictionary of path => last modified timestamp mappings .
|
train
| false
|
4,373
|
def get_basename(fileName):
    if fileName.endswith(os.path.sep):
        fileName = fileName[:(-1)]
    return os.path.basename(fileName)
|
get the name of a file or folder specified in a path .
|
train
| false
|
4,374
|
def get_industry_classified(standard='sina'):
    if (standard == 'sw'):
        df = _get_type_data((ct.SINA_INDUSTRY_INDEX_URL % (ct.P_TYPE['http'], ct.DOMAINS['vsf'], ct.PAGES['ids_sw'])))
    else:
        df = _get_type_data((ct.SINA_INDUSTRY_INDEX_URL % (ct.P_TYPE['http'], ct.DOMAINS['vsf'], ct.PAGES['ids'])))
    data = []
    ct._write_head()
    for row in df.values:
        rowDf = _get_detail(row[0], retry_count=10, pause=0.01)
        rowDf['c_name'] = row[1]
        data.append(rowDf)
    data = pd.concat(data, ignore_index=True)
    return data
|
parameters standard sina: Sina industry classification sw: Shenwan (SW) industry classification returns dataframe code: stock code name: stock name c_name: industry name .
|
train
| false
|
4,375
|
def bins(data, values=None, column=None, bins=None, labels=None, **kwargs):
    if isinstance(data, str):
        column = data
        values = None
    else:
        column = None
    return Bins(values=values, column=column, bins=bins, **kwargs)
|
specify binning or bins to be used for column or values .
|
train
| false
|
4,376
|
def format_datetime(time_obj):
    timestamp = time_obj.isoformat('T')
    if (time_obj.tzinfo == tzutc()):
        timestamp = timestamp[:(-6)]
        return ('%sZ' % timestamp)
    return timestamp
|
see :meth:i18n .
|
train
| false
|
4,377
|
@memoize
def get_parent_until(path):
    dirname = osp.dirname(path)
    try:
        mod = osp.basename(path)
        mod = osp.splitext(mod)[0]
        imp.find_module(mod, [dirname])
    except ImportError:
        return
    items = [mod]
    while 1:
        items.append(osp.basename(dirname))
        try:
            dirname = osp.dirname(dirname)
            imp.find_module('__init__', [(dirname + os.sep)])
        except ImportError:
            break
    return '.'.join(reversed(items))
|
given a file path .
|
train
| true
|
4,378
|
def volume_create(**kwargs):
    return create_volume(kwargs, 'function')
|
create a block storage volume name name of the new volume size volume size snapshot block storage snapshot id voltype type of storage profile profile to build on cli example: .
|
train
| false
|
4,379
|
def window_none(x):
    return x
|
no window function; simply return x .
|
train
| false
|
4,380
|
def date_from_str(date_str):
    today = datetime.date.today()
    if (date_str in (u'now', u'today')):
        return today
    if (date_str == u'yesterday'):
        return (today - datetime.timedelta(days=1))
    match = re.match(u'(now|today)(?P<sign>[+-])(?P<time>\\d+)(?P<unit>day|week|month|year)(s)?', date_str)
    if (match is not None):
        sign = match.group(u'sign')
        time = int(match.group(u'time'))
        if (sign == u'-'):
            time = (- time)
        unit = match.group(u'unit')
        if (unit == u'month'):
            unit = u'day'
            time *= 30
        elif (unit == u'year'):
            unit = u'day'
            time *= 365
        unit += u's'
        delta = datetime.timedelta(**{unit: time})
        return (today + delta)
    return datetime.datetime.strptime(date_str, u'%Y%m%d').date()
|
return a datetime object from a string in the format yyyymmdd or [+-][0-9](s)? .
|
train
| false
|
4,384
|
def facilityNetToMs():
    a = TpPd(pd=3)
    b = MessageType(mesType=58)
    c = Facility()
    packet = ((a / b) / c)
    return packet
|
facility section 9 .
|
train
| true
|
4,387
|
def load_images(img_names):
    if (img_names[0] == ''):
        return {}
    for image_name in img_names:
        img = open(image_name)
        loaded_imgs = {}
        img_list = ''
        img_line = ' '
        name = img.readline().replace('\n', '')
        name = name[1:]
        while True:
            img_line = img.readline()
            if (img_line == ''):
                break
            img_line.replace('\n', '')
            if (img_line[0] == ':'):
                loaded_imgs[name] = json.loads(img_list)
                name = img_line[1:]
                img_list = ''
            else:
                img_list += img_line
        loaded_imgs[name] = json.loads(img_list)
    return loaded_imgs
|
loads user images from given file(s) .
|
train
| false
|
4,388
|
def upgrade_to_float64(*types):
    return (get_scalar_type('float64'),)
|
upgrade any int and float32 to float64 to do as scipy .
|
train
| false
|
4,389
|
def geocode():
    vars = request.post_vars
    street = vars.get('address', None)
    postcode = vars.get('postcode', None)
    L0 = vars.get('L0', None)
    if L0:
        L0 = int(L0)
    L1 = vars.get('L1', None)
    if L1:
        L1 = int(L1)
    L2 = vars.get('L2', None)
    if L2:
        L2 = int(L2)
    L3 = vars.get('L3', None)
    if L3:
        L3 = int(L3)
    L4 = vars.get('L4', None)
    if L4:
        L4 = int(L4)
    L5 = vars.get('L5', None)
    if L5:
        L5 = int(L5)
    if street:
        Lx_ids = []
        append = Lx_ids.append
        for id in (L0, L1, L2, L3, L4, L5):
            if id:
                append(id)
        gis.google_geocode_retry = False
        results = gis.geocode(street, postcode, Lx_ids)
    else:
        results = 'NotImplementedError'
    results = json.dumps(results, separators=SEPARATORS)
    response.headers['Content-Type'] = 'application/json'
    return results
|
geocoding is the process of converting addresses into geographic coordinates .
|
train
| false
|
4,390
|
def make_alert(warnfile, msg_type, msg_template, timestamp_format=None):
    def t_format():
        return int(time.time())
    if (timestamp_format is None):
        timestamp_format = t_format
    def alert(*params):
        formatted_msg = ((msg_type + ' DCTB ') + (msg_template % params))
        timestamped_msg = prepend_timestamp(formatted_msg, timestamp_format)
        print >>warnfile, timestamped_msg
    return alert
|
create an alert generation function that writes to warnfile .
|
train
| false
|
4,391
|
def validate_labels(db_session, added_categories, removed_categories):
    add = {c.name for c in added_categories if c.name}
    add_all = ('all' in add)
    add_trash = ('trash' in add)
    add_spam = ('spam' in add)
    if ((add_all and (add_trash or add_spam)) or (add_trash and add_spam)):
        raise InputError('Only one of "all", "trash" or "spam" can be added')
    remove = {c.name for c in removed_categories if c.name}
    remove_all = ('all' in remove)
    remove_trash = ('trash' in remove)
    remove_spam = ('spam' in remove)
    if (remove_all and remove_trash and remove_spam):
        raise InputError('"all", "trash" and "spam" cannot all be removed')
|
validate that the labels added and removed obey gmails semantics -- gmail messages must belong to exactly one of the [gmail]all mail .
|
train
| false
|
4,397
|
def removeElementFromDictionary(dictionary, key):
    if (key in dictionary):
        del dictionary[key]
|
remove element from the dictionary .
|
train
| false
|
4,398
|
def has_progress(toppath):
    state = progress_read()
    return (toppath in state)
|
return true if there exist paths that have already been imported under toppath .
|
train
| false
|
4,399
|
@ownership_required
def page_create(request, slug, template_name='groups/pages/page_form.html'):
    group = get_object_or_404(Group, slug=slug)
    form = GroupPageForm(initial={'group': group})
    if (request.method == 'POST'):
        form = GroupPageForm(request.POST)
        if form.is_valid():
            page = form.save(commit=False)
            page.group = group
            page.save()
            return redirect(request, page)
    return render(request, template_name, {'group': group, 'form': form})
|
creates a group page .
|
train
| false
|
4,400
|
def extract_backgrounds(archive_name):
    os.mkdir('bgs')
    t = tarfile.open(name=archive_name)
    def members():
        m = t.next()
        while m:
            (yield m)
            m = t.next()
    index = 0
    for m in members():
        if (not m.name.endswith('.jpg')):
            continue
        f = t.extractfile(m)
        try:
            im = im_from_file(f)
        finally:
            f.close()
        if (im is None):
            continue
        if (im.shape[0] > im.shape[1]):
            im = im[:im.shape[1], :]
        else:
            im = im[:, :im.shape[0]]
        if (im.shape[0] > 256):
            im = cv2.resize(im, (256, 256))
        fname = 'bgs/{:08}.jpg'.format(index)
        print fname
        rc = cv2.imwrite(fname, im)
        if (not rc):
            raise Exception('Failed to write file {}'.format(fname))
        index += 1
|
[
"def",
"extract_backgrounds",
"(",
"archive_name",
")",
":",
"os",
".",
"mkdir",
"(",
"'bgs'",
")",
"t",
"=",
"tarfile",
".",
"open",
"(",
"name",
"=",
"archive_name",
")",
"def",
"members",
"(",
")",
":",
"m",
"=",
"t",
".",
"next",
"(",
")",
"while",
"m",
":",
"(",
"yield",
"m",
")",
"m",
"=",
"t",
".",
"next",
"(",
")",
"index",
"=",
"0",
"for",
"m",
"in",
"members",
"(",
")",
":",
"if",
"(",
"not",
"m",
".",
"name",
".",
"endswith",
"(",
"'.jpg'",
")",
")",
":",
"continue",
"f",
"=",
"t",
".",
"extractfile",
"(",
"m",
")",
"try",
":",
"im",
"=",
"im_from_file",
"(",
"f",
")",
"finally",
":",
"f",
".",
"close",
"(",
")",
"if",
"(",
"im",
"is",
"None",
")",
":",
"continue",
"if",
"(",
"im",
".",
"shape",
"[",
"0",
"]",
">",
"im",
".",
"shape",
"[",
"1",
"]",
")",
":",
"im",
"=",
"im",
"[",
":",
"im",
".",
"shape",
"[",
"1",
"]",
",",
":",
"]",
"else",
":",
"im",
"=",
"im",
"[",
":",
",",
":",
"im",
".",
"shape",
"[",
"0",
"]",
"]",
"if",
"(",
"im",
".",
"shape",
"[",
"0",
"]",
">",
"256",
")",
":",
"im",
"=",
"cv2",
".",
"resize",
"(",
"im",
",",
"(",
"256",
",",
"256",
")",
")",
"fname",
"=",
"'bgs/{:08}.jpg'",
".",
"format",
"(",
"index",
")",
"print",
"fname",
"rc",
"=",
"cv2",
".",
"imwrite",
"(",
"fname",
",",
"im",
")",
"if",
"(",
"not",
"rc",
")",
":",
"raise",
"Exception",
"(",
"'Failed to write file {}'",
".",
"format",
"(",
"fname",
")",
")",
"index",
"+=",
"1"
] |
extract backgrounds from the provided tar archive .
|
train
| false
|
4,402
|
@register.simple_tag
def get_advertisement_html_mail():
advertisement = Advertisement.objects.get_advertisement(Advertisement.PLACEMENT_MAIL_HTML)
if (advertisement is None):
return u''
return mark_safe(advertisement.text)
|
[
"@",
"register",
".",
"simple_tag",
"def",
"get_advertisement_html_mail",
"(",
")",
":",
"advertisement",
"=",
"Advertisement",
".",
"objects",
".",
"get_advertisement",
"(",
"Advertisement",
".",
"PLACEMENT_MAIL_HTML",
")",
"if",
"(",
"advertisement",
"is",
"None",
")",
":",
"return",
"u''",
"return",
"mark_safe",
"(",
"advertisement",
".",
"text",
")"
] |
returns advertisement text .
|
train
| false
|
4,403
|
def test_ast_good_try():
can_compile(u'(try)')
can_compile(u'(try 1)')
can_compile(u'(try 1 (except) (else 1))')
can_compile(u'(try 1 (else 1) (except))')
can_compile(u'(try 1 (finally 1) (except))')
can_compile(u'(try 1 (finally 1))')
can_compile(u'(try 1 (except) (finally 1))')
can_compile(u'(try 1 (except) (finally 1) (else 1))')
can_compile(u'(try 1 (except) (else 1) (finally 1))')
|
[
"def",
"test_ast_good_try",
"(",
")",
":",
"can_compile",
"(",
"u'(try)'",
")",
"can_compile",
"(",
"u'(try 1)'",
")",
"can_compile",
"(",
"u'(try 1 (except) (else 1))'",
")",
"can_compile",
"(",
"u'(try 1 (else 1) (except))'",
")",
"can_compile",
"(",
"u'(try 1 (finally 1) (except))'",
")",
"can_compile",
"(",
"u'(try 1 (finally 1))'",
")",
"can_compile",
"(",
"u'(try 1 (except) (finally 1))'",
")",
"can_compile",
"(",
"u'(try 1 (except) (finally 1) (else 1))'",
")",
"can_compile",
"(",
"u'(try 1 (except) (else 1) (finally 1))'",
")"
] |
make sure ast can compile valid try forms .
|
train
| false
|
4,404
|
def test_vstack_bytes():
t = table.Table([['a']], names=['a'])
assert (t['a'].itemsize == 1)
t2 = table.vstack([t, t])
assert (len(t2) == 2)
assert (t2['a'].itemsize == 1)
|
[
"def",
"test_vstack_bytes",
"(",
")",
":",
"t",
"=",
"table",
".",
"Table",
"(",
"[",
"[",
"'a'",
"]",
"]",
",",
"names",
"=",
"[",
"'a'",
"]",
")",
"assert",
"(",
"t",
"[",
"'a'",
"]",
".",
"itemsize",
"==",
"1",
")",
"t2",
"=",
"table",
".",
"vstack",
"(",
"[",
"t",
",",
"t",
"]",
")",
"assert",
"(",
"len",
"(",
"t2",
")",
"==",
"2",
")",
"assert",
"(",
"t2",
"[",
"'a'",
"]",
".",
"itemsize",
"==",
"1",
")"
] |
test for issue #5617 when vstacking bytes columns in py3 .
|
train
| false
|
4,405
|
def _parse_list_rule(rule):
if (not rule):
return TrueCheck()
or_list = []
for inner_rule in rule:
if (not inner_rule):
continue
if isinstance(inner_rule, basestring):
inner_rule = [inner_rule]
and_list = [_parse_check(r) for r in inner_rule]
if (len(and_list) == 1):
or_list.append(and_list[0])
else:
or_list.append(AndCheck(and_list))
if (not or_list):
return FalseCheck()
elif (len(or_list) == 1):
return or_list[0]
return OrCheck(or_list)
|
[
"def",
"_parse_list_rule",
"(",
"rule",
")",
":",
"if",
"(",
"not",
"rule",
")",
":",
"return",
"TrueCheck",
"(",
")",
"or_list",
"=",
"[",
"]",
"for",
"inner_rule",
"in",
"rule",
":",
"if",
"(",
"not",
"inner_rule",
")",
":",
"continue",
"if",
"isinstance",
"(",
"inner_rule",
",",
"basestring",
")",
":",
"inner_rule",
"=",
"[",
"inner_rule",
"]",
"and_list",
"=",
"[",
"_parse_check",
"(",
"r",
")",
"for",
"r",
"in",
"inner_rule",
"]",
"if",
"(",
"len",
"(",
"and_list",
")",
"==",
"1",
")",
":",
"or_list",
".",
"append",
"(",
"and_list",
"[",
"0",
"]",
")",
"else",
":",
"or_list",
".",
"append",
"(",
"AndCheck",
"(",
"and_list",
")",
")",
"if",
"(",
"not",
"or_list",
")",
":",
"return",
"FalseCheck",
"(",
")",
"elif",
"(",
"len",
"(",
"or_list",
")",
"==",
"1",
")",
":",
"return",
"or_list",
"[",
"0",
"]",
"return",
"OrCheck",
"(",
"or_list",
")"
] |
provided for backwards compatibility .
|
train
| false
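The list-rule semantics above -- the outer list is OR-ed together, each inner list is AND-ed -- in a self-contained sketch. check() is a hypothetical simplification for illustration, not the real _parse_check/AndCheck/OrCheck machinery.

def check(rule, facts):
    if not rule:
        return True   # empty rule always passes, like TrueCheck
    groups = [[r] if isinstance(r, str) else r for r in rule if r]
    if not groups:
        return False  # nothing but empty inner rules, like FalseCheck
    return any(all(f in facts for f in group) for group in groups)

assert check(['admin', ['member', 'owner']], {'member', 'owner'})  # OR of ANDs
assert not check(['admin', ['member', 'owner']], {'member'})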
|
4,406
|
def E_n(n, omega):
return ((hbar * omega) * (n + Rational(1, 2)))
|
[
"def",
"E_n",
"(",
"n",
",",
"omega",
")",
":",
"return",
"(",
"(",
"hbar",
"*",
"omega",
")",
"*",
"(",
"n",
"+",
"Rational",
"(",
"1",
",",
"2",
")",
")",
")"
] |
returns the energy E_n of the one-dimensional harmonic oscillator, where n is the "principal" quantum number .
|
train
| false
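A quick symbolic check of the energy ladder; hbar is redefined as a plain sympy symbol here so the sketch runs without the snippet's import context.

from sympy import symbols, Rational

hbar, omega = symbols('hbar omega', positive=True)

def E_n(n, omega):
    return hbar * omega * (n + Rational(1, 2))

print([E_n(n, omega) for n in range(3)])
# [hbar*omega/2, 3*hbar*omega/2, 5*hbar*omega/2] -- evenly spaced levels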
|
4,407
|
def randstr(length, alphabet='abcdefghijklmnopqrstuvwxyz0123456789'):
return ''.join((random.choice(alphabet) for _ in xrange(length)))
|
[
"def",
"randstr",
"(",
"length",
",",
"alphabet",
"=",
"'abcdefghijklmnopqrstuvwxyz0123456789'",
")",
":",
"return",
"''",
".",
"join",
"(",
"(",
"random",
".",
"choice",
"(",
"alphabet",
")",
"for",
"_",
"in",
"xrange",
"(",
"length",
")",
")",
")"
] |
return a string made up of random chars from alphabet .
|
train
| false
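The snippet is Python 2 (xrange); a seeded Python 3 sketch of the same idea:

import random

def randstr(length, alphabet='abcdefghijklmnopqrstuvwxyz0123456789'):
    return ''.join(random.choice(alphabet) for _ in range(length))

random.seed(0)  # seeded only to make the demo deterministic
token = randstr(8)
assert len(token) == 8 and set(token) <= set('abcdefghijklmnopqrstuvwxyz0123456789')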
|
4,408
|
def test_add_events():
raw = read_raw_fif(raw_fname)
events = np.array([[raw.first_samp, 0, 1]])
assert_raises(RuntimeError, raw.add_events, events, 'STI 014')
raw = read_raw_fif(raw_fname, preload=True)
orig_events = find_events(raw, 'STI 014')
events = np.array([raw.first_samp, 0, 1])
assert_raises(ValueError, raw.add_events, events, 'STI 014')
events[0] = ((raw.first_samp + raw.n_times) + 1)
events = events[np.newaxis, :]
assert_raises(ValueError, raw.add_events, events, 'STI 014')
events[(0, 0)] = (raw.first_samp - 1)
assert_raises(ValueError, raw.add_events, events, 'STI 014')
events[(0, 0)] = (raw.first_samp + 1)
assert_raises(ValueError, raw.add_events, events, 'STI FOO')
raw.add_events(events, 'STI 014')
new_events = find_events(raw, 'STI 014')
assert_array_equal(new_events, np.concatenate((events, orig_events)))
|
[
"def",
"test_add_events",
"(",
")",
":",
"raw",
"=",
"read_raw_fif",
"(",
"raw_fname",
")",
"events",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"raw",
".",
"first_samp",
",",
"0",
",",
"1",
"]",
"]",
")",
"assert_raises",
"(",
"RuntimeError",
",",
"raw",
".",
"add_events",
",",
"events",
",",
"'STI 014'",
")",
"raw",
"=",
"read_raw_fif",
"(",
"raw_fname",
",",
"preload",
"=",
"True",
")",
"orig_events",
"=",
"find_events",
"(",
"raw",
",",
"'STI 014'",
")",
"events",
"=",
"np",
".",
"array",
"(",
"[",
"raw",
".",
"first_samp",
",",
"0",
",",
"1",
"]",
")",
"assert_raises",
"(",
"ValueError",
",",
"raw",
".",
"add_events",
",",
"events",
",",
"'STI 014'",
")",
"events",
"[",
"0",
"]",
"=",
"(",
"(",
"raw",
".",
"first_samp",
"+",
"raw",
".",
"n_times",
")",
"+",
"1",
")",
"events",
"=",
"events",
"[",
"np",
".",
"newaxis",
",",
":",
"]",
"assert_raises",
"(",
"ValueError",
",",
"raw",
".",
"add_events",
",",
"events",
",",
"'STI 014'",
")",
"events",
"[",
"(",
"0",
",",
"0",
")",
"]",
"=",
"(",
"raw",
".",
"first_samp",
"-",
"1",
")",
"assert_raises",
"(",
"ValueError",
",",
"raw",
".",
"add_events",
",",
"events",
",",
"'STI 014'",
")",
"events",
"[",
"(",
"0",
",",
"0",
")",
"]",
"=",
"(",
"raw",
".",
"first_samp",
"+",
"1",
")",
"assert_raises",
"(",
"ValueError",
",",
"raw",
".",
"add_events",
",",
"events",
",",
"'STI FOO'",
")",
"raw",
".",
"add_events",
"(",
"events",
",",
"'STI 014'",
")",
"new_events",
"=",
"find_events",
"(",
"raw",
",",
"'STI 014'",
")",
"assert_array_equal",
"(",
"new_events",
",",
"np",
".",
"concatenate",
"(",
"(",
"events",
",",
"orig_events",
")",
")",
")"
] |
test adding events to a raw file .
|
train
| false
|
4,409
|
def read_float(fid):
return _unpack_simple(fid, '>f4', np.float32)
|
[
"def",
"read_float",
"(",
"fid",
")",
":",
"return",
"_unpack_simple",
"(",
"fid",
",",
"'>f4'",
",",
"np",
".",
"float32",
")"
] |
read 32bit float from bti file .
|
train
| false
|
4,410
|
def hprModelSynth(hfreq, hmag, hphase, xr, N, H, fs):
yh = SM.sineModelSynth(hfreq, hmag, hphase, N, H, fs)
y = (yh[:min(yh.size, xr.size)] + xr[:min(yh.size, xr.size)])
return (y, yh)
|
[
"def",
"hprModelSynth",
"(",
"hfreq",
",",
"hmag",
",",
"hphase",
",",
"xr",
",",
"N",
",",
"H",
",",
"fs",
")",
":",
"yh",
"=",
"SM",
".",
"sineModelSynth",
"(",
"hfreq",
",",
"hmag",
",",
"hphase",
",",
"N",
",",
"H",
",",
"fs",
")",
"y",
"=",
"(",
"yh",
"[",
":",
"min",
"(",
"yh",
".",
"size",
",",
"xr",
".",
"size",
")",
"]",
"+",
"xr",
"[",
":",
"min",
"(",
"yh",
".",
"size",
",",
"xr",
".",
"size",
")",
"]",
")",
"return",
"(",
"y",
",",
"yh",
")"
] |
synthesis of a sound using the harmonic plus residual model .
|
train
| false
|
4,412
|
def absolute_path_link(path):
if os.path.islink(path):
link = os.readlink(path)
if (not os.path.isabs(link)):
link = os.path.join(os.path.dirname(path), link)
else:
link = os.path.abspath(path)
return link
|
[
"def",
"absolute_path_link",
"(",
"path",
")",
":",
"if",
"os",
".",
"path",
".",
"islink",
"(",
"path",
")",
":",
"link",
"=",
"os",
".",
"readlink",
"(",
"path",
")",
"if",
"(",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"link",
")",
")",
":",
"link",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
",",
"link",
")",
"else",
":",
"link",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
"return",
"link"
] |
returns an absolute path for the destination of a symlink .
|
train
| false
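A POSIX-only sketch showing that a relative link target resolves against the link's own directory (assumes the absolute_path_link above is in scope and the filesystem supports symlinks):

import os, tempfile

d = tempfile.mkdtemp()
target = os.path.join(d, 'real.txt')
open(target, 'w').close()
link = os.path.join(d, 'link.txt')
os.symlink('real.txt', link)               # relative target on purpose
assert absolute_path_link(link) == target  # resolved relative to dirname(link)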
|
4,413
|
@require_context
def device_array(shape, dtype=np.float, strides=None, order='C', stream=0):
(shape, strides, dtype) = _prepare_shape_strides_dtype(shape, strides, dtype, order)
return devicearray.DeviceNDArray(shape=shape, strides=strides, dtype=dtype, stream=stream)
|
[
"@",
"require_context",
"def",
"device_array",
"(",
"shape",
",",
"dtype",
"=",
"np",
".",
"float",
",",
"strides",
"=",
"None",
",",
"order",
"=",
"'C'",
",",
"stream",
"=",
"0",
")",
":",
"(",
"shape",
",",
"strides",
",",
"dtype",
")",
"=",
"_prepare_shape_strides_dtype",
"(",
"shape",
",",
"strides",
",",
"dtype",
",",
"order",
")",
"return",
"devicearray",
".",
"DeviceNDArray",
"(",
"shape",
"=",
"shape",
",",
"strides",
"=",
"strides",
",",
"dtype",
"=",
"dtype",
",",
"stream",
"=",
"stream",
")"
] |
allocate an empty device ndarray .
|
train
| false
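Typical use goes through numba's public CUDA namespace; this assumes a CUDA-capable GPU and driver are present (an explicit np.float32 is passed rather than the snippet's np.float default):

import numpy as np
from numba import cuda

d_arr = cuda.device_array((256, 256), dtype=np.float32)  # uninitialized, lives on the GPU
h_arr = d_arr.copy_to_host()                             # returns a host numpy ndarray
assert h_arr.shape == (256, 256)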
|
4,414
|
@command('history')
def view_history(duplicates=True):
history = g.userhist.get('history')
try:
hist_list = list(reversed(history.songs))
message = 'Viewing play history'
if (not duplicates):
seen = set()
seen_add = seen.add
hist_list = [x for x in hist_list if (not ((x in seen) or seen_add(x)))]
message = 'Viewing recent played songs'
paginatesongs(hist_list)
g.message = message
except AttributeError:
g.content = logo(c.r)
g.message = 'History empty'
|
[
"@",
"command",
"(",
"'history'",
")",
"def",
"view_history",
"(",
"duplicates",
"=",
"True",
")",
":",
"history",
"=",
"g",
".",
"userhist",
".",
"get",
"(",
"'history'",
")",
"try",
":",
"hist_list",
"=",
"list",
"(",
"reversed",
"(",
"history",
".",
"songs",
")",
")",
"message",
"=",
"'Viewing play history'",
"if",
"(",
"not",
"duplicates",
")",
":",
"seen",
"=",
"set",
"(",
")",
"seen_add",
"=",
"seen",
".",
"add",
"hist_list",
"=",
"[",
"x",
"for",
"x",
"in",
"hist_list",
"if",
"(",
"not",
"(",
"(",
"x",
"in",
"seen",
")",
"or",
"seen_add",
"(",
"x",
")",
")",
")",
"]",
"message",
"=",
"'Viewing recent played songs'",
"paginatesongs",
"(",
"hist_list",
")",
"g",
".",
"message",
"=",
"message",
"except",
"AttributeError",
":",
"g",
".",
"content",
"=",
"logo",
"(",
"c",
".",
"r",
")",
"g",
".",
"message",
"=",
"'History empty'"
] |
display the user's play history .
|
train
| false
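The seen/seen_add comprehension in the snippet is a standard order-preserving dedup idiom; isolated:

def dedup(items):
    seen = set()
    seen_add = seen.add  # local alias: skips the attribute lookup on every element
    return [x for x in items if not (x in seen or seen_add(x))]

assert dedup([3, 1, 3, 2, 1]) == [3, 1, 2]  # first occurrence wins, order kept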
|
4,415
|
def detect_text(path):
vision_client = vision.Client()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = vision_client.image(content=content)
texts = image.detect_text()
print 'Texts:'
for text in texts:
print text.description
|
[
"def",
"detect_text",
"(",
"path",
")",
":",
"vision_client",
"=",
"vision",
".",
"Client",
"(",
")",
"with",
"io",
".",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"image_file",
":",
"content",
"=",
"image_file",
".",
"read",
"(",
")",
"image",
"=",
"vision_client",
".",
"image",
"(",
"content",
"=",
"content",
")",
"texts",
"=",
"image",
".",
"detect_text",
"(",
")",
"print",
"'Texts:'",
"for",
"text",
"in",
"texts",
":",
"print",
"text",
".",
"description"
] |
detects text in the file .
|
train
| false
|
4,417
|
def places():
from datetime import datetime, timedelta
response.headers['Expires'] = (datetime.now() + timedelta(days=7)).strftime('%a, %d %b %Y %H:%M:%S GMT')
return response.stream(open(_map_plugin().place_data(), 'rb'), chunk_size=4096)
|
[
"def",
"places",
"(",
")",
":",
"from",
"datetime",
"import",
"datetime",
",",
"timedelta",
"response",
".",
"headers",
"[",
"'Expires'",
"]",
"=",
"(",
"datetime",
".",
"now",
"(",
")",
"+",
"timedelta",
"(",
"days",
"=",
"7",
")",
")",
".",
"strftime",
"(",
"'%a, %d %b %Y %H:%M:%S GMT'",
")",
"return",
"response",
".",
"stream",
"(",
"open",
"(",
"_map_plugin",
"(",
")",
".",
"place_data",
"(",
")",
",",
"'rb'",
")",
",",
"chunk_size",
"=",
"4096",
")"
] |
places search .
|
train
| false
|
4,419
|
def _traverse_results(value, fields, row, path):
for (f, v) in value.iteritems():
field_name = ('{path}.{name}'.format(path=path, name=f) if path else f)
if (not isinstance(v, (dict, list, tuple))):
if (field_name in fields):
row[fields.index(field_name)] = ensure_utf(v)
elif (isinstance(v, dict) and (f != 'attributes')):
_traverse_results(v, fields, row, field_name)
|
[
"def",
"_traverse_results",
"(",
"value",
",",
"fields",
",",
"row",
",",
"path",
")",
":",
"for",
"(",
"f",
",",
"v",
")",
"in",
"value",
".",
"iteritems",
"(",
")",
":",
"field_name",
"=",
"(",
"'{path}.{name}'",
".",
"format",
"(",
"path",
"=",
"path",
",",
"name",
"=",
"f",
")",
"if",
"path",
"else",
"f",
")",
"if",
"(",
"not",
"isinstance",
"(",
"v",
",",
"(",
"dict",
",",
"list",
",",
"tuple",
")",
")",
")",
":",
"if",
"(",
"field_name",
"in",
"fields",
")",
":",
"row",
"[",
"fields",
".",
"index",
"(",
"field_name",
")",
"]",
"=",
"ensure_utf",
"(",
"v",
")",
"elif",
"(",
"isinstance",
"(",
"v",
",",
"dict",
")",
"and",
"(",
"f",
"!=",
"'attributes'",
")",
")",
":",
"_traverse_results",
"(",
"v",
",",
"fields",
",",
"row",
",",
"field_name",
")"
] |
helper method for parse_results() .
|
train
| true
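An illustration of the flattening with a made-up record (the snippet is Python 2 -- iteritems; ensure_utf is assumed to pass plain strings through unchanged):

fields = ['Id', 'Owner.Name']
row = [None] * len(fields)
record = {'Id': '001',
          'attributes': {'type': 'Account'},  # skipped by design
          'Owner': {'Name': 'Ada', 'attributes': {'type': 'User'}}}
_traverse_results(record, fields, row, '')
# row is now ['001', 'Ada']: nested keys are joined with '.'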
|
4,420
|
def find_empty_redis_database():
for dbnum in range(4, 17):
testconn = StrictRedis(db=dbnum)
empty = (len(testconn.keys(u'*')) == 0)
if empty:
return testconn
assert False, u'No empty Redis database found to run tests in.'
|
[
"def",
"find_empty_redis_database",
"(",
")",
":",
"for",
"dbnum",
"in",
"range",
"(",
"4",
",",
"17",
")",
":",
"testconn",
"=",
"StrictRedis",
"(",
"db",
"=",
"dbnum",
")",
"empty",
"=",
"(",
"len",
"(",
"testconn",
".",
"keys",
"(",
"u'*'",
")",
")",
"==",
"0",
")",
"if",
"empty",
":",
"return",
"testconn",
"assert",
"False",
",",
"u'No empty Redis database found to run tests in.'"
] |
tries to connect to a random redis database (starting from 4) and uses it when no keys are in there .
|
train
| false
|
4,421
|
def XRI(xri):
if (not xri.startswith('xri://')):
xri = ('xri://' + xri)
return xri
|
[
"def",
"XRI",
"(",
"xri",
")",
":",
"if",
"(",
"not",
"xri",
".",
"startswith",
"(",
"'xri://'",
")",
")",
":",
"xri",
"=",
"(",
"'xri://'",
"+",
"xri",
")",
"return",
"xri"
] |
an xri object allowing comparison of xri .
|
train
| false
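Two quick checks of the prefix normalization (function above assumed in scope); the normalization is idempotent:

assert XRI('=example') == 'xri://=example'        # scheme added when missing
assert XRI('xri://=example') == 'xri://=example'  # already-prefixed input unchanged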
|
4,422
|
@pytest.fixture
def english_tutorial(english, tutorial):
return _require_tp(english, tutorial)
|
[
"@",
"pytest",
".",
"fixture",
"def",
"english_tutorial",
"(",
"english",
",",
"tutorial",
")",
":",
"return",
"_require_tp",
"(",
"english",
",",
"tutorial",
")"
] |
require english tutorial .
|
train
| false
|
4,425
|
def roundup(number, ndigits=0, return_type=None):
sign = (1 if (number >= 0) else (-1))
precision = (10 ** ((-1) * ndigits))
if (not return_type):
return_type = (float if (ndigits > 0) else int)
(quotient, remainder) = divmod(abs(number), precision)
if ((not (IRONPYTHON and (((quotient * precision) + remainder) > abs(number)))) and (remainder >= (precision / 2))):
quotient += 1
return (sign * return_type((quotient * precision)))
|
[
"def",
"roundup",
"(",
"number",
",",
"ndigits",
"=",
"0",
",",
"return_type",
"=",
"None",
")",
":",
"sign",
"=",
"(",
"1",
"if",
"(",
"number",
">=",
"0",
")",
"else",
"(",
"-",
"1",
")",
")",
"precision",
"=",
"(",
"10",
"**",
"(",
"(",
"-",
"1",
")",
"*",
"ndigits",
")",
")",
"if",
"(",
"not",
"return_type",
")",
":",
"return_type",
"=",
"(",
"float",
"if",
"(",
"ndigits",
">",
"0",
")",
"else",
"int",
")",
"(",
"quotient",
",",
"remainder",
")",
"=",
"divmod",
"(",
"abs",
"(",
"number",
")",
",",
"precision",
")",
"if",
"(",
"(",
"not",
"(",
"IRONPYTHON",
"and",
"(",
"(",
"(",
"quotient",
"*",
"precision",
")",
"+",
"remainder",
")",
">",
"abs",
"(",
"number",
")",
")",
")",
")",
"and",
"(",
"remainder",
">=",
"(",
"precision",
"/",
"2",
")",
")",
")",
":",
"quotient",
"+=",
"1",
"return",
"(",
"sign",
"*",
"return_type",
"(",
"(",
"quotient",
"*",
"precision",
")",
")",
")"
] |
rounds the given number to the given number of digits .
|
train
| false
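Behavior sketch: halves round away from zero, and negative ndigits round to coarser buckets. IRONPYTHON is a module-level flag in the original; it is stubbed to False here as an assumption so the calls run standalone with the roundup above in scope.

IRONPYTHON = False  # assumption: the snippet's platform flag, stubbed out

assert roundup(2.5) == 3 and roundup(-2.5) == -3  # .5 rounds away from zero
assert roundup(149, ndigits=-1) == 150            # negative ndigits: tens
assert roundup(1250, ndigits=-2) == 1300          # hundreds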
|
4,427
|
def default_resolver():
if DNSPYTHON_AVAILABLE:
return dns.resolver.get_default_resolver()
return None
|
[
"def",
"default_resolver",
"(",
")",
":",
"if",
"DNSPYTHON_AVAILABLE",
":",
"return",
"dns",
".",
"resolver",
".",
"get_default_resolver",
"(",
")",
"return",
"None"
] |
return a basic dns resolver object .
|
train
| false
|
4,428
|
def test_disconnect_one_invalid(timer):
func1 = mock.Mock()
func2 = mock.Mock()
timer.timeout.connect(func1)
with pytest.raises(TypeError):
timer.timeout.disconnect(func2)
assert (not func1.called)
assert (not func2.called)
timer.timeout.emit()
func1.assert_called_once_with()
|
[
"def",
"test_disconnect_one_invalid",
"(",
"timer",
")",
":",
"func1",
"=",
"mock",
".",
"Mock",
"(",
")",
"func2",
"=",
"mock",
".",
"Mock",
"(",
")",
"timer",
".",
"timeout",
".",
"connect",
"(",
"func1",
")",
"with",
"pytest",
".",
"raises",
"(",
"TypeError",
")",
":",
"timer",
".",
"timeout",
".",
"disconnect",
"(",
"func2",
")",
"assert",
"(",
"not",
"func1",
".",
"called",
")",
"assert",
"(",
"not",
"func2",
".",
"called",
")",
"timer",
".",
"timeout",
".",
"emit",
"(",
")",
"func1",
".",
"assert_called_once_with",
"(",
")"
] |
test disconnecting with an invalid connection .
|
train
| false
|
4,429
|
def _modify_tagids(source, add=True):
output = []
tagcount = 0
if (not isinstance(source, HtmlPage)):
source = HtmlPage(body=source)
for element in source.parsed_body:
if _must_add_tagid(element):
if add:
element.attributes[TAGID] = str(tagcount)
tagcount += 1
else:
element.attributes.pop(TAGID, None)
output.append(serialize_tag(element))
else:
output.append(source.body[element.start:element.end])
return u''.join(output)
|
[
"def",
"_modify_tagids",
"(",
"source",
",",
"add",
"=",
"True",
")",
":",
"output",
"=",
"[",
"]",
"tagcount",
"=",
"0",
"if",
"(",
"not",
"isinstance",
"(",
"source",
",",
"HtmlPage",
")",
")",
":",
"source",
"=",
"HtmlPage",
"(",
"body",
"=",
"source",
")",
"for",
"element",
"in",
"source",
".",
"parsed_body",
":",
"if",
"_must_add_tagid",
"(",
"element",
")",
":",
"if",
"add",
":",
"element",
".",
"attributes",
"[",
"TAGID",
"]",
"=",
"str",
"(",
"tagcount",
")",
"tagcount",
"+=",
"1",
"else",
":",
"element",
".",
"attributes",
".",
"pop",
"(",
"TAGID",
",",
"None",
")",
"output",
".",
"append",
"(",
"serialize_tag",
"(",
"element",
")",
")",
"else",
":",
"output",
".",
"append",
"(",
"source",
".",
"body",
"[",
"element",
".",
"start",
":",
"element",
".",
"end",
"]",
")",
"return",
"u''",
".",
"join",
"(",
"output",
")"
] |
add or remove tag ids to/from an html document .
|
train
| false
|
4,430
|
def imagej_description_dict(description):
def _bool(val):
return {'true': True, 'false': False}[val.lower()]
_str = (str if (sys.version_info[0] < 3) else (lambda x: str(x, 'cp1252')))
result = {}
for line in description.splitlines():
try:
(key, val) = line.split('=')
except Exception:
continue
key = key.strip()
val = val.strip()
for dtype in (int, float, _bool, _str):
try:
val = dtype(val)
break
except Exception:
pass
result[_str(key)] = val
if ('ImageJ' not in result):
raise ValueError('not a ImageJ image description')
return result
|
[
"def",
"imagej_description_dict",
"(",
"description",
")",
":",
"def",
"_bool",
"(",
"val",
")",
":",
"return",
"{",
"'true'",
":",
"True",
",",
"'false'",
":",
"False",
"}",
"[",
"val",
".",
"lower",
"(",
")",
"]",
"_str",
"=",
"(",
"str",
"if",
"(",
"sys",
".",
"version_info",
"[",
"0",
"]",
"<",
"3",
")",
"else",
"(",
"lambda",
"x",
":",
"str",
"(",
"x",
",",
"'cp1252'",
")",
")",
")",
"result",
"=",
"{",
"}",
"for",
"line",
"in",
"description",
".",
"splitlines",
"(",
")",
":",
"try",
":",
"(",
"key",
",",
"val",
")",
"=",
"line",
".",
"split",
"(",
"'='",
")",
"except",
"Exception",
":",
"continue",
"key",
"=",
"key",
".",
"strip",
"(",
")",
"val",
"=",
"val",
".",
"strip",
"(",
")",
"for",
"dtype",
"in",
"(",
"int",
",",
"float",
",",
"_bool",
",",
"_str",
")",
":",
"try",
":",
"val",
"=",
"dtype",
"(",
"val",
")",
"break",
"except",
"Exception",
":",
"pass",
"result",
"[",
"_str",
"(",
"key",
")",
"]",
"=",
"val",
"if",
"(",
"'ImageJ'",
"not",
"in",
"result",
")",
":",
"raise",
"ValueError",
"(",
"'not a ImageJ image description'",
")",
"return",
"result"
] |
return dictionary from imagej image description byte string .
|
train
| false
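A worked input under Python 2 semantics (the py3 branch of _str expects cp1252 byte strings, so a plain py3 str would not parse):

desc = 'ImageJ=1.11a\nimages=5\nloop=true\nunit=micron'
info = imagej_description_dict(desc)
# {'ImageJ': '1.11a', 'images': 5, 'loop': True, 'unit': 'micron'}
# ints, floats and booleans are coerced; everything else stays a string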
|
4,431
|
def get_doctype_module(doctype):
def make_modules_dict():
return dict(frappe.db.sql(u'select name, module from tabDocType'))
return frappe.cache().get_value(u'doctype_modules', make_modules_dict)[doctype]
|
[
"def",
"get_doctype_module",
"(",
"doctype",
")",
":",
"def",
"make_modules_dict",
"(",
")",
":",
"return",
"dict",
"(",
"frappe",
".",
"db",
".",
"sql",
"(",
"u'select name, module from tabDocType'",
")",
")",
"return",
"frappe",
".",
"cache",
"(",
")",
".",
"get_value",
"(",
"u'doctype_modules'",
",",
"make_modules_dict",
")",
"[",
"doctype",
"]"
] |
returns **module def** name of given doctype .
|
train
| false
|
4,432
|
def ndarray_device_allocate_data(ary):
datasize = driver.host_memory_size(ary)
gpu_data = devices.get_context().memalloc(datasize)
return gpu_data
|
[
"def",
"ndarray_device_allocate_data",
"(",
"ary",
")",
":",
"datasize",
"=",
"driver",
".",
"host_memory_size",
"(",
"ary",
")",
"gpu_data",
"=",
"devices",
".",
"get_context",
"(",
")",
".",
"memalloc",
"(",
"datasize",
")",
"return",
"gpu_data"
] |
allocate a gpu data buffer .
|
train
| false
|
4,433
|
def remove_if(predicate, lst):
return [elem for elem in lst if (not predicate(elem))]
|
[
"def",
"remove_if",
"(",
"predicate",
",",
"lst",
")",
":",
"return",
"[",
"elem",
"for",
"elem",
"in",
"lst",
"if",
"(",
"not",
"predicate",
"(",
"elem",
")",
")",
"]"
] |
returns a new list with the elements of the iterable lst, excluding those that satisfy predicate .
|
train
| false
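One-line check of the filtering (the complement of filter()), with remove_if above in scope:

assert remove_if(lambda n: n % 2 == 0, [1, 2, 3, 4, 5]) == [1, 3, 5]  # evens removed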
|
4,436
|
def name_yaml_formatter(table_dict):
table_string = 'name: {\n'
namerecord_list = table_dict['names']
for record in namerecord_list:
if (record.__dict__['langID'] == 0):
record_name = str(record.__dict__['nameID'])
else:
record_name = (str(record.__dict__['nameID']) + 'u')
record_field = (((' ' * 4) + 'nameID') + record_name)
table_string = ((((table_string + record_field) + ': ') + str(record.__dict__)) + ',\n')
table_string = (table_string + '}\n\n')
return table_string
|
[
"def",
"name_yaml_formatter",
"(",
"table_dict",
")",
":",
"table_string",
"=",
"'name: {\\n'",
"namerecord_list",
"=",
"table_dict",
"[",
"'names'",
"]",
"for",
"record",
"in",
"namerecord_list",
":",
"if",
"(",
"record",
".",
"__dict__",
"[",
"'langID'",
"]",
"==",
"0",
")",
":",
"record_name",
"=",
"str",
"(",
"record",
".",
"__dict__",
"[",
"'nameID'",
"]",
")",
"else",
":",
"record_name",
"=",
"(",
"str",
"(",
"record",
".",
"__dict__",
"[",
"'nameID'",
"]",
")",
"+",
"'u'",
")",
"record_field",
"=",
"(",
"(",
"(",
"' '",
"*",
"4",
")",
"+",
"'nameID'",
")",
"+",
"record_name",
")",
"table_string",
"=",
"(",
"(",
"(",
"(",
"table_string",
"+",
"record_field",
")",
"+",
"': '",
")",
"+",
"str",
"(",
"record",
".",
"__dict__",
")",
")",
"+",
"',\\n'",
")",
"table_string",
"=",
"(",
"table_string",
"+",
"'}\\n\\n'",
")",
"return",
"table_string"
] |
formats the yaml table string for opentype name tables .
|
train
| false
|
4,437
|
def download_pricing_file(file_url=DEFAULT_FILE_URL, file_path=CUSTOM_PRICING_FILE_PATH):
dir_name = os.path.dirname(file_path)
if (not os.path.exists(dir_name)):
msg = ("Can't write to %s, directory %s, doesn't exist" % (file_path, dir_name))
raise ValueError(msg)
if (os.path.exists(file_path) and os.path.isdir(file_path)):
msg = ("Can't write to %s file path because it's a directory" % file_path)
raise ValueError(msg)
response = get_response_object(file_url)
body = response.body
try:
data = json.loads(body)
except JSONDecodeError:
msg = "Provided URL doesn't contain valid pricing data"
raise Exception(msg)
if (not data.get('updated', None)):
msg = "Provided URL doesn't contain valid pricing data"
raise Exception(msg)
with open(file_path, 'w') as file_handle:
file_handle.write(body)
|
[
"def",
"download_pricing_file",
"(",
"file_url",
"=",
"DEFAULT_FILE_URL",
",",
"file_path",
"=",
"CUSTOM_PRICING_FILE_PATH",
")",
":",
"dir_name",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"file_path",
")",
"if",
"(",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dir_name",
")",
")",
":",
"msg",
"=",
"(",
"\"Can't write to %s, directory %s, doesn't exist\"",
"%",
"(",
"file_path",
",",
"dir_name",
")",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"if",
"(",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"file_path",
")",
")",
":",
"msg",
"=",
"(",
"\"Can't write to %s file path because it's a directory\"",
"%",
"file_path",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"response",
"=",
"get_response_object",
"(",
"file_url",
")",
"body",
"=",
"response",
".",
"body",
"try",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"body",
")",
"except",
"JSONDecodeError",
":",
"msg",
"=",
"\"Provided URL doesn't contain valid pricing data\"",
"raise",
"Exception",
"(",
"msg",
")",
"if",
"(",
"not",
"data",
".",
"get",
"(",
"'updated'",
",",
"None",
")",
")",
":",
"msg",
"=",
"\"Provided URL doesn't contain valid pricing data\"",
"raise",
"Exception",
"(",
"msg",
")",
"with",
"open",
"(",
"file_path",
",",
"'w'",
")",
"as",
"file_handle",
":",
"file_handle",
".",
"write",
"(",
"body",
")"
] |
download the pricing file from file_url and save it to file_path .
|
train
| false
|
4,438
|
def netapi(opts):
return LazyLoader(_module_dirs(opts, 'netapi'), opts, tag='netapi')
|
[
"def",
"netapi",
"(",
"opts",
")",
":",
"return",
"LazyLoader",
"(",
"_module_dirs",
"(",
"opts",
",",
"'netapi'",
")",
",",
"opts",
",",
"tag",
"=",
"'netapi'",
")"
] |
return the network api functions .
|
train
| false
|
4,439
|
def project_time_day(row):
try:
thisdate = row['project_time.date']
except AttributeError:
return current.messages['NONE']
if (not thisdate):
return current.messages['NONE']
now = current.request.utcnow
week = datetime.timedelta(days=7)
return thisdate.date().strftime('%d %B %y')
|
[
"def",
"project_time_day",
"(",
"row",
")",
":",
"try",
":",
"thisdate",
"=",
"row",
"[",
"'project_time.date'",
"]",
"except",
"AttributeError",
":",
"return",
"current",
".",
"messages",
"[",
"'NONE'",
"]",
"if",
"(",
"not",
"thisdate",
")",
":",
"return",
"current",
".",
"messages",
"[",
"'NONE'",
"]",
"now",
"=",
"current",
".",
"request",
".",
"utcnow",
"week",
"=",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"7",
")",
"return",
"thisdate",
".",
"date",
"(",
")",
".",
"strftime",
"(",
"'%d %B %y'",
")"
] |
virtual field for project_time - abbreviated string format for date .
|
train
| false
|
4,442
|
def cov_crosssection_0(results, group):
scale = S_crosssection(results.resid[:, None], group)
scale = np.squeeze(scale)
cov = _HCCM1(results, scale)
return cov
|
[
"def",
"cov_crosssection_0",
"(",
"results",
",",
"group",
")",
":",
"scale",
"=",
"S_crosssection",
"(",
"results",
".",
"resid",
"[",
":",
",",
"None",
"]",
",",
"group",
")",
"scale",
"=",
"np",
".",
"squeeze",
"(",
"scale",
")",
"cov",
"=",
"_HCCM1",
"(",
"results",
",",
"scale",
")",
"return",
"cov"
] |
this one is still wrong .
|
train
| false
|
4,443
|
def testing_on(name, reload=False):
ret = {'name': 'testing mode', 'changes': {}, 'result': True, 'comment': 'Testing mode already ON.'}
result = {}
testing = __salt__['csf.get_testing_status']()
if (int(testing) == 1):
return ret
enable = __salt__['csf.enable_testing_mode']()
if enable:
comment = 'Csf testing mode enabled'
if reload:
if __salt__['csf.reload']():
comment += ' and csf reloaded.'
ret['changes']['Testing Mode'] = 'on'
ret['comment'] = result
return ret
|
[
"def",
"testing_on",
"(",
"name",
",",
"reload",
"=",
"False",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"'testing mode'",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"'Testing mode already ON.'",
"}",
"result",
"=",
"{",
"}",
"testing",
"=",
"__salt__",
"[",
"'csf.get_testing_status'",
"]",
"(",
")",
"if",
"(",
"int",
"(",
"testing",
")",
"==",
"1",
")",
":",
"return",
"ret",
"enable",
"=",
"__salt__",
"[",
"'csf.enable_testing_mode'",
"]",
"(",
")",
"if",
"enable",
":",
"comment",
"=",
"'Csf testing mode enabled'",
"if",
"reload",
":",
"if",
"__salt__",
"[",
"'csf.reload'",
"]",
"(",
")",
":",
"comment",
"+=",
"' and csf reloaded.'",
"ret",
"[",
"'changes'",
"]",
"[",
"'Testing Mode'",
"]",
"=",
"'on'",
"ret",
"[",
"'comment'",
"]",
"=",
"result",
"return",
"ret"
] |
ensure testing mode is enabled in csf .
|
train
| true
|
4,444
|
def update_user_contributions(user_id, created_exploration_ids, edited_exploration_ids):
user_contributions = get_user_contributions(user_id, strict=False)
if (not user_contributions):
raise Exception(('User contributions model for user %s does not exist.' % user_id))
user_contributions.created_exploration_ids = created_exploration_ids
user_contributions.edited_exploration_ids = edited_exploration_ids
_save_user_contributions(user_contributions)
|
[
"def",
"update_user_contributions",
"(",
"user_id",
",",
"created_exploration_ids",
",",
"edited_exploration_ids",
")",
":",
"user_contributions",
"=",
"get_user_contributions",
"(",
"user_id",
",",
"strict",
"=",
"False",
")",
"if",
"(",
"not",
"user_contributions",
")",
":",
"raise",
"Exception",
"(",
"(",
"'User contributions model for user %s does not exist.'",
"%",
"user_id",
")",
")",
"user_contributions",
".",
"created_exploration_ids",
"=",
"created_exploration_ids",
"user_contributions",
".",
"edited_exploration_ids",
"=",
"edited_exploration_ids",
"_save_user_contributions",
"(",
"user_contributions",
")"
] |
updates an existing usercontributionsmodel with new calculated contributions .
|
train
| false
|
4,445
|
def extract_description(texts):
document = ''
for text in texts:
try:
document += text['description']
except KeyError as e:
print ('KeyError: %s\n%s' % (e, text))
return document
|
[
"def",
"extract_description",
"(",
"texts",
")",
":",
"document",
"=",
"''",
"for",
"text",
"in",
"texts",
":",
"try",
":",
"document",
"+=",
"text",
"[",
"'description'",
"]",
"except",
"KeyError",
"as",
"e",
":",
"print",
"(",
"'KeyError: %s\\n%s'",
"%",
"(",
"e",
",",
"text",
")",
")",
"return",
"document"
] |
returns text annotations as a single string .
|
train
| false
|
4,446
|
def get_os_args():
if (PY2 and WIN and (_initial_argv_hash == _hash_py_argv())):
return _get_windows_argv()
return sys.argv[1:]
|
[
"def",
"get_os_args",
"(",
")",
":",
"if",
"(",
"PY2",
"and",
"WIN",
"and",
"(",
"_initial_argv_hash",
"==",
"_hash_py_argv",
"(",
")",
")",
")",
":",
"return",
"_get_windows_argv",
"(",
")",
"return",
"sys",
".",
"argv",
"[",
"1",
":",
"]"
] |
this returns the argument part of sys .
|
train
| false
|
4,447
|
def _maybe_convert_scalar(values):
if is_scalar(values):
(dtype, values) = _infer_dtype_from_scalar(values)
try:
values = dtype(values)
except TypeError:
pass
return values
|
[
"def",
"_maybe_convert_scalar",
"(",
"values",
")",
":",
"if",
"is_scalar",
"(",
"values",
")",
":",
"(",
"dtype",
",",
"values",
")",
"=",
"_infer_dtype_from_scalar",
"(",
"values",
")",
"try",
":",
"values",
"=",
"dtype",
"(",
"values",
")",
"except",
"TypeError",
":",
"pass",
"return",
"values"
] |
convert a python scalar to the appropriate numpy dtype if possible; this avoids numpy directly converting according to platform preferences .
|
train
| false
|
4,448
|
def tanh_cutoff(x, cutoff):
y = tf.tanh(x)
if (cutoff < 1.01):
return y
d = ((cutoff - 1.0) / 2.0)
return tf.minimum(1.0, tf.maximum((-1.0), ((1.0 + d) * y)))
|
[
"def",
"tanh_cutoff",
"(",
"x",
",",
"cutoff",
")",
":",
"y",
"=",
"tf",
".",
"tanh",
"(",
"x",
")",
"if",
"(",
"cutoff",
"<",
"1.01",
")",
":",
"return",
"y",
"d",
"=",
"(",
"(",
"cutoff",
"-",
"1.0",
")",
"/",
"2.0",
")",
"return",
"tf",
".",
"minimum",
"(",
"1.0",
",",
"tf",
".",
"maximum",
"(",
"(",
"-",
"1.0",
")",
",",
"(",
"(",
"1.0",
"+",
"d",
")",
"*",
"y",
")",
")",
")"
] |
tanh with cutoff .
|
train
| false
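The same elementwise math in NumPy, for intuition (the TF snippet above is identical in effect):

import numpy as np

def tanh_cutoff_np(x, cutoff):
    y = np.tanh(x)
    if cutoff < 1.01:
        return y  # cutoff ~1: plain tanh
    d = (cutoff - 1.0) / 2.0
    return np.clip((1.0 + d) * y, -1.0, 1.0)

# with cutoff=1.2 the curve is 1.1*tanh(x) clipped to [-1, 1], so it reaches
# exactly +/-1 at finite x instead of only asymptotically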
|
4,449
|
def jobs():
job_names = _jobs().keys()
job_names.sort()
return {'jobs': job_names}
|
[
"def",
"jobs",
"(",
")",
":",
"job_names",
"=",
"_jobs",
"(",
")",
".",
"keys",
"(",
")",
"job_names",
".",
"sort",
"(",
")",
"return",
"{",
"'jobs'",
":",
"job_names",
"}"
] |
return a list of the currently installed job names .
|
train
| false
|