| id_within_dataset (int64, 1 to 55.5k) | snippet (string, 19 to 14.2k chars) | nl (string, 6 to 352 chars) | split_within_dataset (stringclasses, 1 value) | is_duplicated (bool, 2 classes) |
|---|---|---|---|---|
54,949
|
def ToCanonicalJSON(dict, indent=False):
return json.dumps(dict, sort_keys=True, indent=indent)
|
convert "dict" to a canonical json string .
|
train
| false
|
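A quick usage sketch for the snippet above, using only the stdlib `json` module. Note the parameter named `dict` shadows the built-in, and `indent=False` behaves like `indent=0` in `json.dumps` since `bool` subclasses `int`; the sketch renames both for clarity.

```python
# Minimal check of the canonical-JSON idea: sorted keys give a stable string.
import json

def to_canonical_json(d, indent=None):  # renamed to avoid shadowing dict
    return json.dumps(d, sort_keys=True, indent=indent)

assert to_canonical_json({"b": 2, "a": 1}) == '{"a": 1, "b": 2}'
```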
54,950
|
def url_decode_stream(stream, charset='utf-8', decode_keys=False, include_empty=True, errors='replace', separator='&', cls=None, limit=None, return_iterator=False):
from werkzeug.wsgi import make_chunk_iter
if return_iterator:
cls = (lambda x: x)
elif (cls is None):
cls = MultiDict
pair_iter = make_chunk_iter(stream, separator, limit)
return cls(_url_decode_impl(pair_iter, charset, decode_keys, include_empty, errors))
|
works like :func:url_decode but decodes a stream .
|
train
| true
|
54,952
|
def _get_used_lun_id_counter(mapping):
used_luns = _get_used_lun_ids_for_mappings(mapping)
used_lun_id_counter = collections.Counter(used_luns)
return used_lun_id_counter
|
returns used lun ids with count as a dictionary .
|
train
| false
|
54,953
|
def _adapt_mismatch(original, matchee):
marker = object()
if (getattr(original, 'mismatched', marker) is marker):
return mismatch(matchee, original.describe(), original.get_details())
return original
|
if original doesnt already store matchee then return a new one that has it stored .
|
train
| false
|
54,956
|
def pytest_configure(config):
if (config.getoption('gae_sdk') is not None):
set_up_gae_environment(config.getoption('gae_sdk'))
|
configures the app engine sdk imports on py .
|
train
| false
|
54,957
|
def _date_year(release):
try:
date = release['ReleaseDate']
except TypeError:
date = ''
if (date is not None):
year = date[:4]
else:
year = ''
return (date, year)
|
extract release date and year from database row .
|
train
| false
|
54,958
|
def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
for i in xrange(startpos, len(line)):
if (line[i] == startchar):
depth += 1
elif (line[i] == endchar):
depth -= 1
if (depth == 0):
return ((i + 1), 0)
return ((-1), depth)
|
find the position just after the matching endchar .
|
train
| false
|
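The snippet above is Python 2 vintage (`xrange`). A Python 3 rendering of the same bracket-depth scan, with a small check:

```python
def find_end_of_expression_in_line(line, startpos, depth, startchar, endchar):
    for i in range(startpos, len(line)):
        if line[i] == startchar:
            depth += 1
        elif line[i] == endchar:
            depth -= 1
        if depth == 0:
            return (i + 1), 0          # position just past the matching endchar
    return -1, depth                   # expression not closed on this line

# depth=1 means one '(' (at index 3) is already open; scanning starts at index 4
assert find_end_of_expression_in_line("foo(bar(baz))", 4, 1, "(", ")") == (13, 0)
```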
54,959
|
def expand_login_view(login_view):
if login_view.startswith(('https://', 'http://', '/')):
return login_view
else:
return url_for(login_view)
|
returns the url for the login view .
|
train
| false
|
54,960
|
def varOr(population, toolbox, lambda_, cxpb, mutpb):
assert ((cxpb + mutpb) <= 1.0), 'The sum of the crossover and mutation probabilities must be smaller or equal to 1.0.'
offspring = []
for _ in xrange(lambda_):
op_choice = random.random()
if (op_choice < cxpb):
(ind1, ind2) = map(toolbox.clone, random.sample(population, 2))
(ind1, ind2) = toolbox.mate(ind1, ind2)
del ind1.fitness.values
offspring.append(ind1)
elif (op_choice < (cxpb + mutpb)):
ind = toolbox.clone(random.choice(population))
(ind,) = toolbox.mutate(ind)
del ind.fitness.values
offspring.append(ind)
else:
offspring.append(random.choice(population))
return offspring
|
part of an evolutionary algorithm applying only the variation part .
|
train
| false
|
54,962
|
def build_lcms_70(compiler):
if (compiler['platform'] == 'x64'):
return ''
'Build LCMS on VC2008. This version is only 32bit/Win32'
return ('\nrem Build lcms2\nsetlocal\nrd /S /Q %%LCMS%%\\Lib\nrd /S /Q %%LCMS%%\\Projects\\VC%(vc_version)s\\Release\n%%MSBUILD%% %%LCMS%%\\Projects\\VC%(vc_version)s\\lcms2.sln /t:Clean /p:Configuration="Release" /p:Platform=Win32 /m\n%%MSBUILD%% %%LCMS%%\\Projects\\VC%(vc_version)s\\lcms2.sln /t:lcms2_static /p:Configuration="Release" /p:Platform=Win32 /m\nxcopy /Y /E /Q %%LCMS%%\\include %%INCLIB%%\ncopy /Y /B %%LCMS%%\\Projects\\VC%(vc_version)s\\Release\\*.lib %%INCLIB%%\nendlocal\n' % compiler)
|
link error here on x64 .
|
train
| false
|
54,963
|
def sentence_chrf(reference, hypothesis, min_len=1, max_len=6, beta=3.0):
return corpus_chrf([reference], [hypothesis], min_len, max_len, beta=beta)
|
calculates the sentence level chrf described in - maja popovic .
|
train
| false
|
54,964
|
def check_print_compat():
return (not ((os.name == 'nt') and version_check('5.3.0', operator.lt)))
|
check if printing should work in the given qt version .
|
train
| false
|
54,965
|
def run_script(scriptfile):
try:
f = open(scriptfile, mode='r')
except Exception:
return
mpstate.console.writeln(('Running script %s' % scriptfile))
for line in f:
line = line.strip()
if ((line == '') or line.startswith('#')):
continue
if line.startswith('@'):
line = line[1:]
else:
mpstate.console.writeln(('-> %s' % line))
process_stdin(line)
f.close()
|
run a script file .
|
train
| true
|
54,966
|
def user_pre_save(sender, instance, **kw):
if instance.id:
user = User.objects.get(id=instance.id)
if (user.username != instance.username):
questions = Question.objects.filter((Q(creator=instance) | Q(answers__creator=instance))).only('id').distinct()
for q in questions:
q.index_later()
|
when a users username is changed .
|
train
| false
|
54,967
|
def peakDetection(mX, t):
thresh = np.where((mX[1:(-1)] > t), mX[1:(-1)], 0)
next_minor = np.where((mX[1:(-1)] > mX[2:]), mX[1:(-1)], 0)
prev_minor = np.where((mX[1:(-1)] > mX[:(-2)]), mX[1:(-1)], 0)
ploc = ((thresh * next_minor) * prev_minor)
ploc = (ploc.nonzero()[0] + 1)
return ploc
|
detect spectral peak locations mx: magnitude spectrum .
|
train
| false
|
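A usage sketch for `peakDetection`, assuming `numpy` is imported as `np` (as the snippet's `np.where` calls imply):

```python
import numpy as np

def peak_detection(mX, t):
    # keep interior samples that exceed the threshold and both neighbours
    thresh = np.where(mX[1:-1] > t, mX[1:-1], 0)
    next_minor = np.where(mX[1:-1] > mX[2:], mX[1:-1], 0)
    prev_minor = np.where(mX[1:-1] > mX[:-2], mX[1:-1], 0)
    ploc = thresh * next_minor * prev_minor
    return ploc.nonzero()[0] + 1       # +1 shifts back to indices of mX

mX = np.array([0.0, 1.0, 0.0, 2.0, 0.5])
assert list(peak_detection(mX, t=0.1)) == [1, 3]   # the two local maxima above t
```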
54,971
|
def uslugify_encoded(text, sep):
if (text is None):
return u''
tag_id = RE_TAGS.sub(u'', unicodedata.normalize(u'NFKD', text)).lower()
tag_id = RE_WORD.sub(u'', tag_id).replace(u' ', sep)
return quote(tag_id.encode(u'utf-8'))
|
custom slugify .
|
train
| false
|
54,972
|
def record_usage(key_prefix, time_slice):
key = _make_ratelimit_cache_key(key_prefix, time_slice)
try:
g.ratelimitcache.add(key, 0, time=time_slice.remaining)
try:
return g.ratelimitcache.incr(key)
except pylibmc.NotFound:
now = int(time.time())
if (now < time_slice.end):
g.ratelimitcache.add(key, 1, time=((time_slice.end - now) + 1))
g.stats.simple_event('ratelimit.eviction')
return 1
except pylibmc.Error as e:
raise RatelimitError(e)
|
record usage of a ratelimit for the specified time slice .
|
train
| false
|
54,973
|
@require_POST
@login_required
def watch_forum(request, forum_slug):
forum = get_object_or_404(Forum, slug=forum_slug)
if (not forum.allows_viewing_by(request.user)):
raise Http404
if (request.POST.get('watch') == 'yes'):
NewThreadEvent.notify(request.user, forum)
statsd.incr('forums.watches.forum')
else:
NewThreadEvent.stop_notifying(request.user, forum)
return HttpResponseRedirect(reverse('forums.threads', args=[forum_slug]))
|
watch/unwatch a forum .
|
train
| false
|
54,974
|
def overrides_disabled():
return bool(_OVERRIDES_DISABLED.disabled)
|
checks to see whether overrides are disabled in the current context .
|
train
| false
|
54,975
|
def shorten_string(string, max_width):
string_len = len(string)
if (string_len <= max_width):
return string
visible = ((max_width - 16) - int(log10(string_len)))
if (not isinstance(string, unistr)):
visstring = unistr(string[:visible], errors='ignore')
else:
visstring = string[:visible]
return u''.join((visstring, u'...(and ', unistr((string_len - visible)), u' more)'))
|
make limited length string in form: "the string is very lo .
|
train
| true
|
54,976
|
def test_mouse_key_events():
me = MouseEvent('mouse_press')
for fun in (me.pos, me.button, me.buttons, me.modifiers, me.delta, me.press_event, me.last_event, me.is_dragging):
fun
me.drag_events()
me._forget_last_event()
me.trail()
ke = KeyEvent('key_release')
ke.key
ke.text
ke.modifiers
|
test mouse and key events .
|
train
| false
|
54,977
|
def test_no_truncate_using_compare():
w = wcs.WCS(naxis=3)
w.wcs.crval = [240.9303333333, 50, 212345678000.0]
w.wcs.cdelt = [0.001, 0.001, 100000000.0]
w.wcs.ctype = [u'RA---TAN', u'DEC--TAN', u'FREQ']
w.wcs.set()
w2 = wcs.WCS(w.to_header())
w.wcs.compare(w2.wcs)
|
regression test for URL this one uses wcs .
|
train
| false
|
54,978
|
def _synthesize(browser, update_tryorder=1):
cmd = browser.split()[0]
if (not _iscommand(cmd)):
return [None, None]
name = os.path.basename(cmd)
try:
command = _browsers[name.lower()]
except KeyError:
return [None, None]
controller = command[1]
if (controller and (name.lower() == controller.basename)):
import copy
controller = copy.copy(controller)
controller.name = browser
controller.basename = os.path.basename(browser)
register(browser, None, controller, update_tryorder)
return [None, controller]
return [None, None]
|
attempt to synthesize a controller base on existing controllers .
|
train
| false
|
54,979
|
def tree_from_cix(cix):
if isinstance(cix, unicode):
cix = cix.encode('UTF-8', 'xmlcharrefreplace')
tree = ET.XML(cix)
version = tree.get('version')
if (version == CIX_VERSION):
return tree
elif (version == '0.1'):
return tree_2_0_from_tree_0_1(tree)
else:
raise CodeIntelError(('unknown CIX version: %r' % version))
|
return a tree for the given cix content .
|
train
| false
|
54,980
|
@register.filter
@stringfilter
def issue_status_icon(status):
if (status == BaseComment.OPEN):
return u'rb-icon-issue-open'
elif (status == BaseComment.RESOLVED):
return u'rb-icon-issue-resolved'
elif (status == BaseComment.DROPPED):
return u'rb-icon-issue-dropped'
else:
raise ValueError((u'Unknown comment issue status "%s"' % status))
|
return an icon name for the issue status .
|
train
| false
|
54,981
|
def remove_trailing_string(content, trailing):
if (content.endswith(trailing) and (content != trailing)):
return content[:(- len(trailing))]
return content
|
strip trailing component trailing from content if it exists .
|
train
| true
|
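This one is dependency-free, so its contract is easy to pin down with a couple of assertions:

```python
def remove_trailing_string(content, trailing):
    if content.endswith(trailing) and content != trailing:
        return content[:-len(trailing)]
    return content

assert remove_trailing_string("report.txt", ".txt") == "report"
assert remove_trailing_string(".txt", ".txt") == ".txt"   # an exact match is left whole
```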
54,982
|
def instances_by_name(name_filter):
return [o for o in gc.get_objects() if (name_filter == typename(o))]
|
return the list of objects that exactly match the given name_filter .
|
train
| false
|
54,983
|
def RenderParetoCdf(xmin, alpha, low, high, n=50):
if (low < xmin):
low = xmin
xs = np.linspace(low, high, n)
ps = (1 - ((xs / xmin) ** (- alpha)))
return (xs, ps)
|
generates sequences of xs and ps for a pareto cdf .
|
train
| false
|
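A usage sketch assuming `numpy` as `np`; the Pareto CDF 1 - (x/xmin)^(-alpha) is zero at x = xmin, which the last line checks:

```python
import numpy as np

def render_pareto_cdf(xmin, alpha, low, high, n=50):
    if low < xmin:
        low = xmin                      # the CDF is undefined below xmin
    xs = np.linspace(low, high, n)
    ps = 1 - (xs / xmin) ** (-alpha)
    return xs, ps

xs, ps = render_pareto_cdf(xmin=1.0, alpha=2.0, low=0.5, high=10.0, n=5)
assert ps[0] == 0.0
```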
54,984
|
def _check_apt():
if (not HAS_APT):
raise CommandExecutionError("Error: 'python-apt' package not installed")
|
abort if python-apt is not installed .
|
train
| false
|
54,985
|
def word_ids_to_words(data, id_to_word):
return [id_to_word[i] for i in data]
|
given a context in list format and the vocabulary .
|
train
| false
|
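`id_to_word` only needs `__getitem__`, so either a list or a dict vocabulary works:

```python
def word_ids_to_words(data, id_to_word):
    return [id_to_word[i] for i in data]

vocab = ["<pad>", "hello", "world"]            # index doubles as the word id
assert word_ids_to_words([1, 2], vocab) == ["hello", "world"]
```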
54,986
|
def occur_check(var, x):
if (var == x):
return True
elif isinstance(x, Compound):
return occur_check(var, x.args)
elif is_args(x):
if any((occur_check(var, xi) for xi in x)):
return True
return False
|
var occurs in subtree owned by x? .
|
train
| false
|
54,987
|
def predecessor(G, source, target=None, cutoff=None, return_seen=None):
if (source not in G):
raise nx.NodeNotFound('Source {} not in G'.format(source))
level = 0
nextlevel = [source]
seen = {source: level}
pred = {source: []}
while nextlevel:
level = (level + 1)
thislevel = nextlevel
nextlevel = []
for v in thislevel:
for w in G[v]:
if (w not in seen):
pred[w] = [v]
seen[w] = level
nextlevel.append(w)
elif (seen[w] == level):
pred[w].append(v)
if (cutoff and (cutoff <= level)):
break
if (target is not None):
if return_seen:
if (not (target in pred)):
return ([], (-1))
return (pred[target], seen[target])
else:
if (not (target in pred)):
return []
return pred[target]
elif return_seen:
return (pred, seen)
else:
return pred
|
returns dictionary of predecessors for the path from source to all nodes in g .
|
train
| false
|
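The snippet matches the BFS `predecessor` function shipped with networkx (it raises `nx.NodeNotFound`), so it can be exercised directly from that package:

```python
import networkx as nx

G = nx.path_graph(4)                    # 0 - 1 - 2 - 3
assert nx.predecessor(G, 0) == {0: [], 1: [0], 2: [1], 3: [2]}
assert nx.predecessor(G, 0, target=3) == [2]
```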
54,988
|
def _fingerprint(public_key):
try:
if six.PY2:
raw_key = public_key.decode('base64')
else:
raw_key = base64.b64decode(public_key, validate=True)
except binascii.Error:
return None
ret = hashlib.md5(raw_key).hexdigest()
chunks = [ret[i:(i + 2)] for i in range(0, len(ret), 2)]
return ':'.join(chunks)
|
return a public key fingerprint based on its base64-encoded representation the fingerprint string is formatted according to rfc 4716 .
|
train
| false
|
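A Python 3-only rendering of `_fingerprint`, dropping the `six`/PY2 branch; it yields the colon-separated MD5 fingerprint described in the `nl` field:

```python
import base64
import binascii
import hashlib

def fingerprint(public_key):
    try:
        raw_key = base64.b64decode(public_key, validate=True)
    except binascii.Error:
        return None                     # not valid base64
    digest = hashlib.md5(raw_key).hexdigest()
    return ':'.join(digest[i:i + 2] for i in range(0, len(digest), 2))

key = base64.b64encode(b"\x00\x01\x02").decode()
print(fingerprint(key))                 # 32 hex chars grouped in pairs: 'xx:xx:...:xx'
assert fingerprint("not base64!") is None
```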
54,990
|
def hash_(attrs=None, where=None):
return _osquery_cmd(table='hash', attrs=attrs, where=where)
|
return hash information from osquery cli example: .
|
train
| false
|
54,992
|
def is_master_node(client):
my_node_id = list(client.nodes.info('_local')['nodes'])[0]
master_node_id = client.cluster.state(metric='master_node')['master_node']
return (my_node_id == master_node_id)
|
return true if the connected client node is the elected master node in the elasticsearch cluster .
|
train
| false
|
54,993
|
def get_config_vars(*args):
global _config_vars
if (_config_vars is None):
func = globals().get(('_init_' + os.name))
if func:
func()
else:
_config_vars = {}
_config_vars['prefix'] = PREFIX
_config_vars['exec_prefix'] = EXEC_PREFIX
if (sys.platform == 'darwin'):
import _osx_support
_osx_support.customize_config_vars(_config_vars)
if args:
vals = []
for name in args:
vals.append(_config_vars.get(name))
return vals
else:
return _config_vars
|
with no arguments .
|
train
| false
|
54,996
|
def between(expr, lower_bound, upper_bound):
expr = _literal_as_binds(expr)
return expr.between(lower_bound, upper_bound)
|
produce a between predicate clause .
|
train
| false
|
54,997
|
def _windows_commondata_path():
import ctypes
from ctypes import wintypes, windll
CSIDL_COMMON_APPDATA = 35
_SHGetFolderPath = windll.shell32.SHGetFolderPathW
_SHGetFolderPath.argtypes = [wintypes.HWND, ctypes.c_int, wintypes.HANDLE, wintypes.DWORD, wintypes.LPCWSTR]
path_buf = wintypes.create_unicode_buffer(wintypes.MAX_PATH)
_SHGetFolderPath(0, CSIDL_COMMON_APPDATA, 0, 0, path_buf)
return path_buf.value
|
return the common appdata path .
|
train
| false
|
54,998
|
def get_all_launch_configurations(region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
return conn.get_all_launch_configurations()
except boto.exception.BotoServerError as e:
log.error(e)
return []
|
fetch and return all launch configuration with details .
|
train
| false
|
54,999
|
def test_conflicting_path(tmpdir, mocked_aws_cf_simple):
with tmpdir.as_cwd():
tmpdir.join('config.yaml').write(mocked_aws_cf_simple)
assert (main(['create']) == 0)
assert (main(['create']) == 1)
|
ensure default cluster info path is never overwritten by launching successive clusters .
|
train
| false
|
55,000
|
def get_base_dirs():
if options['basedirlist']:
return options['basedirlist']
if os.environ.get('MPLBASEDIRLIST'):
return os.environ.get('MPLBASEDIRLIST').split(os.pathsep)
win_bases = ['win32_static']
if os.getenv('CONDA_DEFAULT_ENV'):
win_bases.append(os.path.join(os.getenv('CONDA_DEFAULT_ENV'), 'Library'))
basedir_map = {'win32': win_bases, 'darwin': ['/usr/local/', '/usr', '/usr/X11', '/opt/X11', '/opt/local'], 'sunos5': [(os.getenv('MPLIB_BASE') or '/usr/local')], 'gnu0': ['/usr'], 'aix5': ['/usr/local']}
return basedir_map.get(sys.platform, ['/usr/local', '/usr'])
|
returns a list of standard base directories on this platform .
|
train
| false
|
55,001
|
def test_to():
with pytest.raises(falcon.http_status.HTTPStatus) as redirect:
hug.redirect.to('/')
assert ('302' in redirect.value.status)
|
test that the base redirect to function works as expected .
|
train
| false
|
55,002
|
def parse_features(feature_files, language=None):
scenario_collector = FeatureScenarioLocationCollector()
features = []
for location in feature_files:
if (not isinstance(location, FileLocation)):
assert isinstance(location, string_types)
location = FileLocation(os.path.normpath(location))
if (location.filename == scenario_collector.filename):
scenario_collector.add_location(location)
continue
elif scenario_collector.feature:
current_feature = scenario_collector.build_feature()
features.append(current_feature)
scenario_collector.clear()
assert isinstance(location, FileLocation)
filename = os.path.abspath(location.filename)
feature = parser.parse_file(filename, language=language)
if feature:
scenario_collector.feature = feature
scenario_collector.add_location(location)
if scenario_collector.feature:
current_feature = scenario_collector.build_feature()
features.append(current_feature)
return features
|
parse feature files and return list of feature model objects .
|
train
| false
|
55,004
|
def iterable(obj):
try:
iter(obj)
except TypeError:
return False
return True
|
return true if *obj* is iterable .
|
train
| false
|
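Dependency-free again; note that strings count as iterable under this test:

```python
def iterable(obj):
    try:
        iter(obj)
    except TypeError:
        return False
    return True

assert iterable([1, 2]) and iterable("abc") and iterable({})
assert not iterable(42)
```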
55,006
|
def krackhardt_kite_graph(create_using=None):
description = ['adjacencylist', 'Krackhardt Kite Social Network', 10, [[2, 3, 4, 6], [1, 4, 5, 7], [1, 4, 6], [1, 2, 3, 5, 6, 7], [2, 4, 7], [1, 3, 4, 7, 8], [2, 4, 5, 6, 8], [6, 7, 9], [8, 10], [9]]]
G = make_small_undirected_graph(description, create_using)
return G
|
return the krackhardt kite social network .
|
train
| false
|
55,008
|
def is_effective_user(user_id_or_name):
euid = os.geteuid()
if (str(user_id_or_name) == str(euid)):
return True
effective_user_name = pwd.getpwuid(euid).pw_name
return (user_id_or_name == effective_user_name)
|
returns true if user_id_or_name is effective user .
|
train
| false
|
55,010
|
def weight_boundary(graph, src, dst, n):
default = {'weight': 0.0, 'count': 0}
count_src = graph[src].get(n, default)['count']
count_dst = graph[dst].get(n, default)['count']
weight_src = graph[src].get(n, default)['weight']
weight_dst = graph[dst].get(n, default)['weight']
count = (count_src + count_dst)
return {'count': count, 'weight': (((count_src * weight_src) + (count_dst * weight_dst)) / count)}
|
handle merging of nodes of a region boundary region adjacency graph .
|
train
| false
|
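The graph argument only needs mapping-style access, so a plain dict-of-dicts is enough to check the count-weighted merge:

```python
def weight_boundary(graph, src, dst, n):
    default = {'weight': 0.0, 'count': 0}
    count_src = graph[src].get(n, default)['count']
    count_dst = graph[dst].get(n, default)['count']
    weight_src = graph[src].get(n, default)['weight']
    weight_dst = graph[dst].get(n, default)['weight']
    count = count_src + count_dst
    return {'count': count,
            'weight': (count_src * weight_src + count_dst * weight_dst) / count}

graph = {1: {3: {'weight': 2.0, 'count': 1}},
         2: {3: {'weight': 4.0, 'count': 3}}}
assert weight_boundary(graph, 1, 2, 3) == {'count': 4, 'weight': 3.5}
```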
55,011
|
def spew(trace_names=None, show_values=False):
sys.settrace(Spew(trace_names, show_values))
|
install a trace hook which writes incredibly detailed logs about what code is being executed to stdout .
|
train
| false
|
55,012
|
def add_csrf(request, **kwargs):
d = dict(user=request.user, **kwargs)
d.update(csrf(request))
return RequestContext(request, d)
|
add csrf to dictionary and wrap in a requestcontext .
|
train
| false
|
55,013
|
def isPointOfTableInLoop(loop, pointTable):
for point in loop:
if (point in pointTable):
return True
return False
|
determine if a point in the point table is in the loop .
|
train
| false
|
55,014
|
def datatype(dbtype, description):
dt = connection.introspection.get_field_type(dbtype, description)
if (type(dt) is tuple):
return dt[0]
else:
return dt
|
helper to convert a data type into a string .
|
train
| false
|
55,016
|
def scenario_tests_need_service_tags(physical_line, filename, previous_logical):
if (('tempest/scenario/' in filename) and ('/test_' in filename)):
if TEST_DEFINITION.match(physical_line):
if (not SCENARIO_DECORATOR.match(previous_logical)):
return (physical_line.find('def'), 'T104: Scenario tests require a service decorator')
|
check that scenario tests have service tags t104: scenario tests require a services decorator .
|
train
| false
|
55,017
|
@receiver(post_save, sender=UserLog)
def cull_records(sender, **kwargs):
if (settings.USER_LOG_MAX_RECORDS_PER_USER and kwargs['created']):
current_models = UserLog.objects.filter(user=kwargs['instance'].user, activity_type=kwargs['instance'].activity_type)
if (current_models.count() > settings.USER_LOG_MAX_RECORDS_PER_USER):
to_discard = current_models.order_by('start_datetime')[0:(current_models.count() - settings.USER_LOG_MAX_RECORDS_PER_USER)]
UserLog.objects.filter(pk__in=to_discard).delete()
|
listen in to see when videos become available .
|
train
| false
|
55,018
|
def JarContents(jar_path):
with zipfile.ZipFile(jar_path) as jar:
for name in jar.namelist():
(yield (name, jar.read(name)))
|
generates pairs for the given jar .
|
train
| false
|
55,019
|
def import_from_cwd(module, imp=None, package=None):
if (imp is None):
imp = importlib.import_module
with cwd_in_path():
return imp(module, package=package)
|
import module .
|
train
| true
|
55,021
|
def graphviz_layout(G, prog='neato', root=None, args=''):
return pygraphviz_layout(G, prog=prog, root=root, args=args)
|
create node positions for g using graphviz .
|
train
| false
|
55,022
|
def get_level_tags():
level_tags = constants.DEFAULT_TAGS.copy()
level_tags.update(getattr(settings, 'MESSAGE_TAGS', {}))
return level_tags
|
returns the message level tags .
|
train
| false
|
55,024
|
def bygroups(*args):
def callback(lexer, match, ctx=None):
for (i, action) in enumerate(args):
if (action is None):
continue
elif (type(action) is _TokenType):
data = match.group((i + 1))
if data:
(yield (match.start((i + 1)), action, data))
else:
if ctx:
ctx.pos = match.start((i + 1))
for item in action(lexer, _PseudoMatch(match.start((i + 1)), match.group((i + 1))), ctx):
if item:
(yield item)
if ctx:
ctx.pos = match.end()
return callback
|
callback that yields multiple actions for each group in the match .
|
train
| true
|
55,026
|
def __test_html():
with open('test.rpt', 'r') as input_file:
data_text = input_file.read()
data = yaml.safe_load(data_text)
string_file = StringIO.StringIO()
_generate_html(data, string_file)
string_file.seek(0)
result = string_file.read()
with open('test.html', 'w') as output:
output.write(result)
|
html generation test only used when called from the command line: python .
|
train
| false
|
55,028
|
def greater_than_zero():
return st.floats(min_value=0.0, allow_infinity=False).filter((lambda x: (x > 0.0)))
|
a strategy that yields floats greater than zero .
|
train
| false
|
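A usage sketch assuming `st` is `hypothesis.strategies` (as the `st.floats` call implies); the filter turns the closed bound `min_value=0.0` into a strict one:

```python
import hypothesis.strategies as st
from hypothesis import given

def greater_than_zero():
    return st.floats(min_value=0.0, allow_infinity=False).filter(lambda x: x > 0.0)

@given(greater_than_zero())
def test_strictly_positive(x):
    assert x > 0.0
```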
55,029
|
def _int64_feature_list(values):
return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])
|
wrapper for inserting an int64 featurelist into a sequenceexample proto .
|
train
| true
|
55,030
|
def add_resource(zone, resource_type, **kwargs):
return _resource('add', zone, resource_type, None, **kwargs)
|
add a resource zone : string name of zone resource_type : string type of resource **kwargs : string|int| .
|
train
| false
|
55,031
|
def bits_str(s, endian='big', zero='0', one='1'):
return ''.join(bits(s, endian, zero, one))
|
bits_str -> str a wrapper around :func:bits .
|
train
| false
|
55,032
|
def is_private(ip_addr):
return ipaddress.ip_address(ip_addr).is_private
|
check if the given ip address is a private address .
|
train
| false
|
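Stdlib-only on Python 3 via `ipaddress`; RFC 1918 addresses test private, public ones do not:

```python
import ipaddress

def is_private(ip_addr):
    return ipaddress.ip_address(ip_addr).is_private

assert is_private("192.168.1.10")
assert not is_private("8.8.8.8")
```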
55,034
|
def first_value(obj):
return six.next(six.itervalues(obj))
|
return the first value parameters obj: dict-like object .
|
train
| false
|
55,035
|
def plate_scale(platescale):
if platescale.unit.is_equivalent((si.arcsec / si.m)):
platescale_val = platescale.to((si.radian / si.m)).value
elif platescale.unit.is_equivalent((si.m / si.arcsec)):
platescale_val = (1 / platescale).to((si.radian / si.m)).value
else:
raise UnitsError(u'The pixel scale must be in angle/distance or distance/angle')
return [(si.m, si.radian, (lambda d: (d * platescale_val)), (lambda rad: (rad / platescale_val)))]
|
convert between lengths and angular units with a specified platescale .
|
train
| false
|
55,036
|
def getReadRepository(repository):
text = archive.getFileText(archive.getProfilesPath(getProfileBaseName(repository)), False)
if (text == ''):
if (repository.baseNameSynonym != None):
text = archive.getFileText(archive.getProfilesPath(getProfileBaseNameSynonym(repository)), False)
if (text == ''):
print ('The default %s will be written in the .skeinforge folder in the home directory.' % repository.title.lower())
text = archive.getFileText(getProfilesDirectoryInAboveDirectory(getProfileBaseName(repository)), False)
if (text != ''):
readSettingsFromText(repository, text)
writeSettings(repository)
temporaryApplyOverrides(repository)
return repository
readSettingsFromText(repository, text)
temporaryApplyOverrides(repository)
return repository
|
read and return settings from a file .
|
train
| false
|
55,037
|
def findTypeParent(element, tag):
p = element
while True:
p = p.getparent()
if (p.tag == tag):
return p
return None
|
[
"def",
"findTypeParent",
"(",
"element",
",",
"tag",
")",
":",
"p",
"=",
"element",
"while",
"True",
":",
"p",
"=",
"p",
".",
"getparent",
"(",
")",
"if",
"(",
"p",
".",
"tag",
"==",
"tag",
")",
":",
"return",
"p",
"return",
"None"
] |
finds first parent of element of the given type .
|
train
| true
|
55,039
|
def to_names(domain_obj_list):
objs = []
for obj in domain_obj_list:
objs.append((obj.name if obj else None))
return objs
|
[
"def",
"to_names",
"(",
"domain_obj_list",
")",
":",
"objs",
"=",
"[",
"]",
"for",
"obj",
"in",
"domain_obj_list",
":",
"objs",
".",
"append",
"(",
"(",
"obj",
".",
"name",
"if",
"obj",
"else",
"None",
")",
")",
"return",
"objs"
] |
takes a list of domain objects and returns a corresponding list of their names .
|
train
| false
|
55,040
|
def load_json_dict(filename, *args):
data = {}
if os.path.exists(filename):
lock.acquire()
with open(filename, 'r') as f:
try:
data = _json.load(f)
if (not isinstance(data, dict)):
data = {}
except:
data = {}
lock.release()
if args:
return {key: data[key] for key in args if (key in data)}
return data
|
[
"def",
"load_json_dict",
"(",
"filename",
",",
"*",
"args",
")",
":",
"data",
"=",
"{",
"}",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"lock",
".",
"acquire",
"(",
")",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"f",
":",
"try",
":",
"data",
"=",
"_json",
".",
"load",
"(",
"f",
")",
"if",
"(",
"not",
"isinstance",
"(",
"data",
",",
"dict",
")",
")",
":",
"data",
"=",
"{",
"}",
"except",
":",
"data",
"=",
"{",
"}",
"lock",
".",
"release",
"(",
")",
"if",
"args",
":",
"return",
"{",
"key",
":",
"data",
"[",
"key",
"]",
"for",
"key",
"in",
"args",
"if",
"(",
"key",
"in",
"data",
")",
"}",
"return",
"data"
] |
checks if the file exists and , if so , loads its json contents as a dict , optionally filtered to the given keys .
|
train
| false
|
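A usage sketch for load_json_dict (the filename and keys are hypothetical); note the function silently returns {} when the file is missing or holds anything other than a JSON object:

settings = load_json_dict('settings.json', 'username', 'api_key')
username = settings.get('username', 'anonymous')
everything = load_json_dict('settings.json')    # no keys given: full dict
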
55,041
|
@app.route('/stream/<int:n>')
def stream_n_messages(n):
response = get_dict('url', 'args', 'headers', 'origin')
n = min(n, 100)
def generate_stream():
for i in range(n):
response['id'] = i
(yield (json.dumps(response) + '\n'))
return Response(generate_stream(), headers={'Content-Type': 'application/json'})
|
[
"@",
"app",
".",
"route",
"(",
"'/stream/<int:n>'",
")",
"def",
"stream_n_messages",
"(",
"n",
")",
":",
"response",
"=",
"get_dict",
"(",
"'url'",
",",
"'args'",
",",
"'headers'",
",",
"'origin'",
")",
"n",
"=",
"min",
"(",
"n",
",",
"100",
")",
"def",
"generate_stream",
"(",
")",
":",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"response",
"[",
"'id'",
"]",
"=",
"i",
"(",
"yield",
"(",
"json",
".",
"dumps",
"(",
"response",
")",
"+",
"'\\n'",
")",
")",
"return",
"Response",
"(",
"generate_stream",
"(",
")",
",",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'application/json'",
"}",
")"
] |
stream n json messages .
|
train
| true
|
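The /stream/<int:n> view above looks like httpbin's streaming endpoint; a sketch of exercising it with Flask's test client, assuming app is the application object from the snippet:

with app.test_client() as client:
    resp = client.get('/stream/3')
    lines = resp.get_data(as_text=True).strip().split('\n')
    assert len(lines) == 3            # one JSON object per line, ids 0, 1, 2
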
55,042
|
def partial_velocity(vel_vecs, gen_speeds, frame):
if (not iterable(vel_vecs)):
raise TypeError('Velocity vectors must be contained in an iterable.')
if (not iterable(gen_speeds)):
raise TypeError('Generalized speeds must be contained in an iterable')
vec_partials = []
for vec in vel_vecs:
partials = []
for speed in gen_speeds:
partials.append(vec.diff(speed, frame, var_in_dcm=False))
vec_partials.append(partials)
return vec_partials
|
[
"def",
"partial_velocity",
"(",
"vel_vecs",
",",
"gen_speeds",
",",
"frame",
")",
":",
"if",
"(",
"not",
"iterable",
"(",
"vel_vecs",
")",
")",
":",
"raise",
"TypeError",
"(",
"'Velocity vectors must be contained in an iterable.'",
")",
"if",
"(",
"not",
"iterable",
"(",
"gen_speeds",
")",
")",
":",
"raise",
"TypeError",
"(",
"'Generalized speeds must be contained in an iterable'",
")",
"vec_partials",
"=",
"[",
"]",
"for",
"vec",
"in",
"vel_vecs",
":",
"partials",
"=",
"[",
"]",
"for",
"speed",
"in",
"gen_speeds",
":",
"partials",
".",
"append",
"(",
"vec",
".",
"diff",
"(",
"speed",
",",
"frame",
",",
"var_in_dcm",
"=",
"False",
")",
")",
"vec_partials",
".",
"append",
"(",
"partials",
")",
"return",
"vec_partials"
] |
returns a list of partial velocities with respect to the provided generalized speeds in the given reference frame for each of the supplied velocity vectors .
|
train
| false
|
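A usage sketch, assuming the snippet is sympy.physics.vector.partial_velocity (the setup follows sympy's documented example):

from sympy.physics.vector import ReferenceFrame, dynamicsymbols, partial_velocity

u1 = dynamicsymbols('u1')
N = ReferenceFrame('N')
vel = u1 * N.x                              # velocity linear in the generalized speed
print(partial_velocity([vel], [u1], N))     # [[N.x]]
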
55,044
|
def get_last_modified(files):
files = list(files)
if files:
return max((datetime.datetime.fromtimestamp(os.path.getmtime(f)) for f in files))
return datetime.datetime(1970, 1, 1)
|
[
"def",
"get_last_modified",
"(",
"files",
")",
":",
"files",
"=",
"list",
"(",
"files",
")",
"if",
"files",
":",
"return",
"max",
"(",
"(",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"os",
".",
"path",
".",
"getmtime",
"(",
"f",
")",
")",
"for",
"f",
"in",
"files",
")",
")",
"return",
"datetime",
".",
"datetime",
"(",
"1970",
",",
"1",
",",
"1",
")"
] |
returns the modification time of the most recently modified file provided .
|
train
| false
|
55,045
|
def discoverInfo(disp, jid, node=None):
' According to JEP-0030:\n query MAY have node attribute\n identity: MUST HAVE category and name attributes and MAY HAVE type attribute.\n feature: MUST HAVE var attribute'
(identities, features) = ([], [])
for i in _discover(disp, NS_DISCO_INFO, jid, node):
if (i.getName() == 'identity'):
identities.append(i.attrs)
elif (i.getName() == 'feature'):
features.append(i.getAttr('var'))
elif (i.getName() == 'agent'):
if i.getTag('name'):
i.setAttr('name', i.getTagData('name'))
if i.getTag('description'):
i.setAttr('name', i.getTagData('description'))
identities.append(i.attrs)
if i.getTag('groupchat'):
features.append(NS_GROUPCHAT)
if i.getTag('register'):
features.append(NS_REGISTER)
if i.getTag('search'):
features.append(NS_SEARCH)
return (identities, features)
|
[
"def",
"discoverInfo",
"(",
"disp",
",",
"jid",
",",
"node",
"=",
"None",
")",
":",
"(",
"identities",
",",
"features",
")",
"=",
"(",
"[",
"]",
",",
"[",
"]",
")",
"for",
"i",
"in",
"_discover",
"(",
"disp",
",",
"NS_DISCO_INFO",
",",
"jid",
",",
"node",
")",
":",
"if",
"(",
"i",
".",
"getName",
"(",
")",
"==",
"'identity'",
")",
":",
"identities",
".",
"append",
"(",
"i",
".",
"attrs",
")",
"elif",
"(",
"i",
".",
"getName",
"(",
")",
"==",
"'feature'",
")",
":",
"features",
".",
"append",
"(",
"i",
".",
"getAttr",
"(",
"'var'",
")",
")",
"elif",
"(",
"i",
".",
"getName",
"(",
")",
"==",
"'agent'",
")",
":",
"if",
"i",
".",
"getTag",
"(",
"'name'",
")",
":",
"i",
".",
"setAttr",
"(",
"'name'",
",",
"i",
".",
"getTagData",
"(",
"'name'",
")",
")",
"if",
"i",
".",
"getTag",
"(",
"'description'",
")",
":",
"i",
".",
"setAttr",
"(",
"'name'",
",",
"i",
".",
"getTagData",
"(",
"'description'",
")",
")",
"identities",
".",
"append",
"(",
"i",
".",
"attrs",
")",
"if",
"i",
".",
"getTag",
"(",
"'groupchat'",
")",
":",
"features",
".",
"append",
"(",
"NS_GROUPCHAT",
")",
"if",
"i",
".",
"getTag",
"(",
"'register'",
")",
":",
"features",
".",
"append",
"(",
"NS_REGISTER",
")",
"if",
"i",
".",
"getTag",
"(",
"'search'",
")",
":",
"features",
".",
"append",
"(",
"NS_SEARCH",
")",
"return",
"(",
"identities",
",",
"features",
")"
] |
query remote object about info that it publishes .
|
train
| false
|
55,046
|
def is_on(hass, entity_id=None):
entity_id = (entity_id or ENTITY_ID)
return hass.states.is_state(entity_id, STATE_ABOVE_HORIZON)
|
[
"def",
"is_on",
"(",
"hass",
",",
"entity_id",
"=",
"None",
")",
":",
"entity_id",
"=",
"(",
"entity_id",
"or",
"ENTITY_ID",
")",
"return",
"hass",
".",
"states",
".",
"is_state",
"(",
"entity_id",
",",
"STATE_ABOVE_HORIZON",
")"
] |
test if the sun is currently up based on the statemachine .
|
train
| false
|
55,047
|
def available_oficial_plugins():
return _availables_plugins(resources.PLUGINS_WEB)
|
[
"def",
"available_oficial_plugins",
"(",
")",
":",
"return",
"_availables_plugins",
"(",
"resources",
".",
"PLUGINS_WEB",
")"
] |
returns a dict with the official available plugins from the ninja-ide web page .
|
train
| false
|
55,049
|
def decodeString(string):
decodedString = string
octalNumbers = re.findall('\\\\([0-7]{1-3})', decodedString, re.DOTALL)
for octal in octalNumbers:
try:
decodedString = decodedString.replace(('\\\\' + octal), chr(int(octal, 8)))
except:
return ((-1), 'Error decoding string')
return (0, decodedString)
|
[
"def",
"decodeString",
"(",
"string",
")",
":",
"decodedString",
"=",
"string",
"octalNumbers",
"=",
"re",
".",
"findall",
"(",
"'\\\\\\\\([0-7]{1-3})'",
",",
"decodedString",
",",
"re",
".",
"DOTALL",
")",
"for",
"octal",
"in",
"octalNumbers",
":",
"try",
":",
"decodedString",
"=",
"decodedString",
".",
"replace",
"(",
"(",
"'\\\\\\\\'",
"+",
"octal",
")",
",",
"chr",
"(",
"int",
"(",
"octal",
",",
"8",
")",
")",
")",
"except",
":",
"return",
"(",
"(",
"-",
"1",
")",
",",
"'Error decoding string'",
")",
"return",
"(",
"0",
",",
"decodedString",
")"
] |
decode the given pdf string .
|
train
| false
|
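A note on the decodeString snippet: {1-3} is not a valid regex quantifier (it would need to be {1,3}), and the replace() call rebuilds the escape with a doubled backslash, so octal escapes are unlikely to be substituted as written. A corrected standalone sketch of the same idea (my own rewrite, not the library's code):

import re

def decode_octal_escapes(string):
    # replace \NNN octal escapes (1-3 octal digits) with their characters
    return re.sub(r'\\([0-7]{1,3})',
                  lambda m: chr(int(m.group(1), 8)),
                  string)

assert decode_octal_escapes(r'\101\102') == 'AB'    # \101 -> 'A', \102 -> 'B'
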
55,050
|
def checkRecursive(paths, reporter):
warnings = 0
for sourcePath in iterSourceCode(paths):
warnings += checkPath(sourcePath, reporter)
return warnings
|
[
"def",
"checkRecursive",
"(",
"paths",
",",
"reporter",
")",
":",
"warnings",
"=",
"0",
"for",
"sourcePath",
"in",
"iterSourceCode",
"(",
"paths",
")",
":",
"warnings",
"+=",
"checkPath",
"(",
"sourcePath",
",",
"reporter",
")",
"return",
"warnings"
] |
recursively check all source files in c{paths} .
|
train
| true
|
55,051
|
def test_hsl_to_rgb_part_10():
assert (hsl_to_rgb(180, 20, 50) == (102, 153, 153))
assert (hsl_to_rgb(180, 60, 50) == (51, 204, 204))
assert (hsl_to_rgb(180, 100, 50) == (0, 255, 255))
|
[
"def",
"test_hsl_to_rgb_part_10",
"(",
")",
":",
"assert",
"(",
"hsl_to_rgb",
"(",
"180",
",",
"20",
",",
"50",
")",
"==",
"(",
"102",
",",
"153",
",",
"153",
")",
")",
"assert",
"(",
"hsl_to_rgb",
"(",
"180",
",",
"60",
",",
"50",
")",
"==",
"(",
"51",
",",
"204",
",",
"204",
")",
")",
"assert",
"(",
"hsl_to_rgb",
"(",
"180",
",",
"100",
",",
"50",
")",
"==",
"(",
"0",
",",
"255",
",",
"255",
")",
")"
] |
test hsl to rgb color function .
|
train
| false
|
55,052
|
def paths_from_event(self, event):
md = event.mimeData()
if (md.hasFormat(u'text/uri-list') and (not md.hasFormat(u'application/calibre+from_library'))):
urls = [unicode(u.toLocalFile()) for u in md.urls()]
return [u for u in urls if (os.path.splitext(u)[1] and os.path.exists(u))]
|
[
"def",
"paths_from_event",
"(",
"self",
",",
"event",
")",
":",
"md",
"=",
"event",
".",
"mimeData",
"(",
")",
"if",
"(",
"md",
".",
"hasFormat",
"(",
"u'text/uri-list'",
")",
"and",
"(",
"not",
"md",
".",
"hasFormat",
"(",
"u'application/calibre+from_library'",
")",
")",
")",
":",
"urls",
"=",
"[",
"unicode",
"(",
"u",
".",
"toLocalFile",
"(",
")",
")",
"for",
"u",
"in",
"md",
".",
"urls",
"(",
")",
"]",
"return",
"[",
"u",
"for",
"u",
"in",
"urls",
"if",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"u",
")",
"[",
"1",
"]",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"u",
")",
")",
"]"
] |
accept a drop event and return a list of paths that can be read from and represent files with extensions .
|
train
| false
|
55,054
|
def func_np(a, b):
return np.exp(((2.1 * a) + (3.2 * b)))
|
[
"def",
"func_np",
"(",
"a",
",",
"b",
")",
":",
"return",
"np",
".",
"exp",
"(",
"(",
"(",
"2.1",
"*",
"a",
")",
"+",
"(",
"3.2",
"*",
"b",
")",
")",
")"
] |
control function using numpy .
|
train
| false
|
55,056
|
def get_policy_string(base, policy_or_index):
if isinstance(policy_or_index, BaseStoragePolicy):
policy = policy_or_index
else:
policy = POLICIES.get_by_index(policy_or_index)
if (policy is None):
raise PolicyError('Unknown policy', index=policy_or_index)
return _get_policy_string(base, int(policy))
|
[
"def",
"get_policy_string",
"(",
"base",
",",
"policy_or_index",
")",
":",
"if",
"isinstance",
"(",
"policy_or_index",
",",
"BaseStoragePolicy",
")",
":",
"policy",
"=",
"policy_or_index",
"else",
":",
"policy",
"=",
"POLICIES",
".",
"get_by_index",
"(",
"policy_or_index",
")",
"if",
"(",
"policy",
"is",
"None",
")",
":",
"raise",
"PolicyError",
"(",
"'Unknown policy'",
",",
"index",
"=",
"policy_or_index",
")",
"return",
"_get_policy_string",
"(",
"base",
",",
"int",
"(",
"policy",
")",
")"
] |
helper function to construct a string from a base and the policy .
|
train
| false
|
55,058
|
def getmode(mode):
global _modes
if (not _modes):
from . import Image
modes = {}
for (m, (basemode, basetype, bands)) in Image._MODEINFO.items():
modes[m] = ModeDescriptor(m, bands, basemode, basetype)
modes['RGBa'] = ModeDescriptor('RGBa', ('R', 'G', 'B', 'a'), 'RGB', 'L')
modes['LA'] = ModeDescriptor('LA', ('L', 'A'), 'L', 'L')
modes['La'] = ModeDescriptor('La', ('L', 'a'), 'L', 'L')
modes['PA'] = ModeDescriptor('PA', ('P', 'A'), 'RGB', 'L')
modes['I;16'] = ModeDescriptor('I;16', 'I', 'L', 'L')
modes['I;16L'] = ModeDescriptor('I;16L', 'I', 'L', 'L')
modes['I;16B'] = ModeDescriptor('I;16B', 'I', 'L', 'L')
_modes = modes
return _modes[mode]
|
[
"def",
"getmode",
"(",
"mode",
")",
":",
"global",
"_modes",
"if",
"(",
"not",
"_modes",
")",
":",
"from",
".",
"import",
"Image",
"modes",
"=",
"{",
"}",
"for",
"(",
"m",
",",
"(",
"basemode",
",",
"basetype",
",",
"bands",
")",
")",
"in",
"Image",
".",
"_MODEINFO",
".",
"items",
"(",
")",
":",
"modes",
"[",
"m",
"]",
"=",
"ModeDescriptor",
"(",
"m",
",",
"bands",
",",
"basemode",
",",
"basetype",
")",
"modes",
"[",
"'RGBa'",
"]",
"=",
"ModeDescriptor",
"(",
"'RGBa'",
",",
"(",
"'R'",
",",
"'G'",
",",
"'B'",
",",
"'a'",
")",
",",
"'RGB'",
",",
"'L'",
")",
"modes",
"[",
"'LA'",
"]",
"=",
"ModeDescriptor",
"(",
"'LA'",
",",
"(",
"'L'",
",",
"'A'",
")",
",",
"'L'",
",",
"'L'",
")",
"modes",
"[",
"'La'",
"]",
"=",
"ModeDescriptor",
"(",
"'La'",
",",
"(",
"'L'",
",",
"'a'",
")",
",",
"'L'",
",",
"'L'",
")",
"modes",
"[",
"'PA'",
"]",
"=",
"ModeDescriptor",
"(",
"'PA'",
",",
"(",
"'P'",
",",
"'A'",
")",
",",
"'RGB'",
",",
"'L'",
")",
"modes",
"[",
"'I;16'",
"]",
"=",
"ModeDescriptor",
"(",
"'I;16'",
",",
"'I'",
",",
"'L'",
",",
"'L'",
")",
"modes",
"[",
"'I;16L'",
"]",
"=",
"ModeDescriptor",
"(",
"'I;16L'",
",",
"'I'",
",",
"'L'",
",",
"'L'",
")",
"modes",
"[",
"'I;16B'",
"]",
"=",
"ModeDescriptor",
"(",
"'I;16B'",
",",
"'I'",
",",
"'L'",
",",
"'L'",
")",
"_modes",
"=",
"modes",
"return",
"_modes",
"[",
"mode",
"]"
] |
gets a mode descriptor for the given mode .
|
train
| false
|
55,060
|
def salted_hmac(key_salt, value, secret=None):
if (secret is None):
secret = settings.SECRET_KEY
key_salt = force_bytes(key_salt)
secret = force_bytes(secret)
key = hashlib.sha1((key_salt + secret)).digest()
return hmac.new(key, msg=force_bytes(value), digestmod=hashlib.sha1)
|
[
"def",
"salted_hmac",
"(",
"key_salt",
",",
"value",
",",
"secret",
"=",
"None",
")",
":",
"if",
"(",
"secret",
"is",
"None",
")",
":",
"secret",
"=",
"settings",
".",
"SECRET_KEY",
"key_salt",
"=",
"force_bytes",
"(",
"key_salt",
")",
"secret",
"=",
"force_bytes",
"(",
"secret",
")",
"key",
"=",
"hashlib",
".",
"sha1",
"(",
"(",
"key_salt",
"+",
"secret",
")",
")",
".",
"digest",
"(",
")",
"return",
"hmac",
".",
"new",
"(",
"key",
",",
"msg",
"=",
"force_bytes",
"(",
"value",
")",
",",
"digestmod",
"=",
"hashlib",
".",
"sha1",
")"
] |
returns the hmac-sha1 of value .
|
train
| false
|
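The salted_hmac snippet appears to be Django's django.utils.crypto helper; the stdlib-only sketch below shows what it computes when a secret is passed explicitly (all the byte strings are placeholders):

import hashlib
import hmac

key_salt, value, secret = b'myapp.token', b'user-42', b'not-so-secret'
key = hashlib.sha1(key_salt + secret).digest()      # salt-derived HMAC key
mac = hmac.new(key, msg=value, digestmod=hashlib.sha1)
print(mac.hexdigest())                              # MAC scoped to this salt/secret pair
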
55,061
|
def skip_if_config(*args):
def decorator(f):
group = args[0]
name = args[1]
@functools.wraps(f)
def wrapper(self, *func_args, **func_kwargs):
if hasattr(CONF, group):
conf_group = getattr(CONF, group)
if hasattr(conf_group, name):
value = getattr(conf_group, name)
if value:
if (len(args) == 3):
msg = args[2]
else:
msg = ('Config option %s.%s is false' % (group, name))
raise testtools.TestCase.skipException(msg)
return f(self, *func_args, **func_kwargs)
return wrapper
return decorator
|
[
"def",
"skip_if_config",
"(",
"*",
"args",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"group",
"=",
"args",
"[",
"0",
"]",
"name",
"=",
"args",
"[",
"1",
"]",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"self",
",",
"*",
"func_args",
",",
"**",
"func_kwargs",
")",
":",
"if",
"hasattr",
"(",
"CONF",
",",
"group",
")",
":",
"conf_group",
"=",
"getattr",
"(",
"CONF",
",",
"group",
")",
"if",
"hasattr",
"(",
"conf_group",
",",
"name",
")",
":",
"value",
"=",
"getattr",
"(",
"conf_group",
",",
"name",
")",
"if",
"value",
":",
"if",
"(",
"len",
"(",
"args",
")",
"==",
"3",
")",
":",
"msg",
"=",
"args",
"[",
"2",
"]",
"else",
":",
"msg",
"=",
"(",
"'Config option %s.%s is false'",
"%",
"(",
"group",
",",
"name",
")",
")",
"raise",
"testtools",
".",
"TestCase",
".",
"skipException",
"(",
"msg",
")",
"return",
"f",
"(",
"self",
",",
"*",
"func_args",
",",
"**",
"func_kwargs",
")",
"return",
"wrapper",
"return",
"decorator"
] |
raise a skipexception if a config exists and is true .
|
train
| false
|
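Decorator usage sketch for skip_if_config (the option names and base class are hypothetical). Note in passing that the snippet's skip message says the option 'is false' even though the skip fires when the value is truthy, which looks like a wording slip in the original:

class SnapshotTest(base.TestCase):

    @skip_if_config('volume', 'disable_snapshots',
                    'snapshot tests disabled in this deployment')
    def test_create_snapshot(self):
        ...
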
55,062
|
def lasso_path(X, y, eps=0.001, n_alphas=100, alphas=None, precompute='auto', Xy=None, copy_X=True, coef_init=None, verbose=False, return_n_iter=False, positive=False, **params):
return enet_path(X, y, l1_ratio=1.0, eps=eps, n_alphas=n_alphas, alphas=alphas, precompute=precompute, Xy=Xy, copy_X=copy_X, coef_init=coef_init, verbose=verbose, positive=positive, return_n_iter=return_n_iter, **params)
|
[
"def",
"lasso_path",
"(",
"X",
",",
"y",
",",
"eps",
"=",
"0.001",
",",
"n_alphas",
"=",
"100",
",",
"alphas",
"=",
"None",
",",
"precompute",
"=",
"'auto'",
",",
"Xy",
"=",
"None",
",",
"copy_X",
"=",
"True",
",",
"coef_init",
"=",
"None",
",",
"verbose",
"=",
"False",
",",
"return_n_iter",
"=",
"False",
",",
"positive",
"=",
"False",
",",
"**",
"params",
")",
":",
"return",
"enet_path",
"(",
"X",
",",
"y",
",",
"l1_ratio",
"=",
"1.0",
",",
"eps",
"=",
"eps",
",",
"n_alphas",
"=",
"n_alphas",
",",
"alphas",
"=",
"alphas",
",",
"precompute",
"=",
"precompute",
",",
"Xy",
"=",
"Xy",
",",
"copy_X",
"=",
"copy_X",
",",
"coef_init",
"=",
"coef_init",
",",
"verbose",
"=",
"verbose",
",",
"positive",
"=",
"positive",
",",
"return_n_iter",
"=",
"return_n_iter",
",",
"**",
"params",
")"
] |
compute lasso path with coordinate descent . the lasso optimization function varies for mono and multi-outputs .
|
train
| false
|
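A usage sketch with scikit-learn's public lasso_path (the toy data mirrors the scikit-learn docs):

import numpy as np
from sklearn.linear_model import lasso_path

X = np.array([[1., 2.3], [2., 0.3], [1., 1.1]])
y = np.array([1., 2., 3.1])
alphas, coefs, _ = lasso_path(X, y, alphas=[5., 1., .5])
print(coefs.shape)        # (n_features, n_alphas) == (2, 3)
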
55,063
|
def print_exc(limit=None, file=None):
if (file is None):
file = sys.stderr
try:
(etype, value, tb) = sys.exc_info()
print_exception(etype, value, tb, limit, file)
finally:
etype = value = tb = None
|
[
"def",
"print_exc",
"(",
"limit",
"=",
"None",
",",
"file",
"=",
"None",
")",
":",
"if",
"(",
"file",
"is",
"None",
")",
":",
"file",
"=",
"sys",
".",
"stderr",
"try",
":",
"(",
"etype",
",",
"value",
",",
"tb",
")",
"=",
"sys",
".",
"exc_info",
"(",
")",
"print_exception",
"(",
"etype",
",",
"value",
",",
"tb",
",",
"limit",
",",
"file",
")",
"finally",
":",
"etype",
"=",
"value",
"=",
"tb",
"=",
"None"
] |
shorthand for print_exception .
|
train
| true
|
55,064
|
def find_prepositions(chunked):
for ch in chunked:
ch.append(u'O')
for (i, chunk) in enumerate(chunked):
if (chunk[2].endswith(u'PP') and (chunk[(-1)] == u'O')):
if ((i < (len(chunked) - 1)) and (chunked[(i + 1)][2].endswith((u'NP', u'PP')) or (chunked[(i + 1)][1] in (u'VBG', u'VBN')))):
chunk[(-1)] = u'B-PNP'
pp = True
for ch in chunked[(i + 1):]:
if (not (ch[2].endswith((u'NP', u'PP')) or (ch[1] in (u'VBG', u'VBN')))):
break
if (ch[2].endswith(u'PP') and pp):
ch[(-1)] = u'I-PNP'
if (not ch[2].endswith(u'PP')):
ch[(-1)] = u'I-PNP'
pp = False
return chunked
|
[
"def",
"find_prepositions",
"(",
"chunked",
")",
":",
"for",
"ch",
"in",
"chunked",
":",
"ch",
".",
"append",
"(",
"u'O'",
")",
"for",
"(",
"i",
",",
"chunk",
")",
"in",
"enumerate",
"(",
"chunked",
")",
":",
"if",
"(",
"chunk",
"[",
"2",
"]",
".",
"endswith",
"(",
"u'PP'",
")",
"and",
"(",
"chunk",
"[",
"(",
"-",
"1",
")",
"]",
"==",
"u'O'",
")",
")",
":",
"if",
"(",
"(",
"i",
"<",
"(",
"len",
"(",
"chunked",
")",
"-",
"1",
")",
")",
"and",
"(",
"chunked",
"[",
"(",
"i",
"+",
"1",
")",
"]",
"[",
"2",
"]",
".",
"endswith",
"(",
"(",
"u'NP'",
",",
"u'PP'",
")",
")",
"or",
"(",
"chunked",
"[",
"(",
"i",
"+",
"1",
")",
"]",
"[",
"1",
"]",
"in",
"(",
"u'VBG'",
",",
"u'VBN'",
")",
")",
")",
")",
":",
"chunk",
"[",
"(",
"-",
"1",
")",
"]",
"=",
"u'B-PNP'",
"pp",
"=",
"True",
"for",
"ch",
"in",
"chunked",
"[",
"(",
"i",
"+",
"1",
")",
":",
"]",
":",
"if",
"(",
"not",
"(",
"ch",
"[",
"2",
"]",
".",
"endswith",
"(",
"(",
"u'NP'",
",",
"u'PP'",
")",
")",
"or",
"(",
"ch",
"[",
"1",
"]",
"in",
"(",
"u'VBG'",
",",
"u'VBN'",
")",
")",
")",
")",
":",
"break",
"if",
"(",
"ch",
"[",
"2",
"]",
".",
"endswith",
"(",
"u'PP'",
")",
"and",
"pp",
")",
":",
"ch",
"[",
"(",
"-",
"1",
")",
"]",
"=",
"u'I-PNP'",
"if",
"(",
"not",
"ch",
"[",
"2",
"]",
".",
"endswith",
"(",
"u'PP'",
")",
")",
":",
"ch",
"[",
"(",
"-",
"1",
")",
"]",
"=",
"u'I-PNP'",
"pp",
"=",
"False",
"return",
"chunked"
] |
the input is a list of [token .
|
train
| false
|
55,065
|
def eval(expression, _dict={}, **kw):
args = ops.copy()
args.update(_dict)
args.update(kw)
for (k, v) in list(args.items()):
if hasattr(v, 'im'):
args[k] = _Operand(v)
out = builtins.eval(expression, args)
try:
return out.im
except AttributeError:
return out
|
[
"def",
"eval",
"(",
"expression",
",",
"_dict",
"=",
"{",
"}",
",",
"**",
"kw",
")",
":",
"args",
"=",
"ops",
".",
"copy",
"(",
")",
"args",
".",
"update",
"(",
"_dict",
")",
"args",
".",
"update",
"(",
"kw",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"list",
"(",
"args",
".",
"items",
"(",
")",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"'im'",
")",
":",
"args",
"[",
"k",
"]",
"=",
"_Operand",
"(",
"v",
")",
"out",
"=",
"builtins",
".",
"eval",
"(",
"expression",
",",
"args",
")",
"try",
":",
"return",
"out",
".",
"im",
"except",
"AttributeError",
":",
"return",
"out"
] |
evaluates an image expression .
|
train
| false
|
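The eval snippet matches PIL's ImageMath.eval; a pixel-wise arithmetic sketch (the image sizes and fill values are arbitrary):

from PIL import Image, ImageMath

a = Image.new('L', (4, 4), 10)
b = Image.new('L', (4, 4), 20)
out = ImageMath.eval('(a + b) / 2', a=a, b=b)    # pixel-wise mean
print(out.getpixel((0, 0)))                      # 15
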
55,066
|
def load_parser_result_store(package_dirpath, open_for_write=False):
open_flag = ((open_for_write and 'c') or 'r')
sto_filepath = path.join(package_dirpath, PARSER_RESULT_STORE)
return shelve_open(sto_filepath, flag=open_flag)
|
[
"def",
"load_parser_result_store",
"(",
"package_dirpath",
",",
"open_for_write",
"=",
"False",
")",
":",
"open_flag",
"=",
"(",
"(",
"open_for_write",
"and",
"'c'",
")",
"or",
"'r'",
")",
"sto_filepath",
"=",
"path",
".",
"join",
"(",
"package_dirpath",
",",
"PARSER_RESULT_STORE",
")",
"return",
"shelve_open",
"(",
"sto_filepath",
",",
"flag",
"=",
"open_flag",
")"
] |
load parser result store from specified scenario package .
|
train
| false
|
55,068
|
def socktype_to_enum(num):
if (enum is None):
return num
else:
try:
return socket.AddressType(num)
except (ValueError, AttributeError):
return num
|
[
"def",
"socktype_to_enum",
"(",
"num",
")",
":",
"if",
"(",
"enum",
"is",
"None",
")",
":",
"return",
"num",
"else",
":",
"try",
":",
"return",
"socket",
".",
"AddressType",
"(",
"num",
")",
"except",
"(",
"ValueError",
",",
"AttributeError",
")",
":",
"return",
"num"
] |
convert a numeric socket type value to an intenum member .
|
train
| false
|
55,069
|
def is_asn1_token(token):
return (token[:3] == PKI_ASN1_PREFIX)
|
[
"def",
"is_asn1_token",
"(",
"token",
")",
":",
"return",
"(",
"token",
"[",
":",
"3",
"]",
"==",
"PKI_ASN1_PREFIX",
")"
] |
determine if a token appears to be pki-based .
|
train
| false
|
55,070
|
def surround_quotes(string):
if (in_bash() and string):
return '"{0}"'.format(string)
return string
|
[
"def",
"surround_quotes",
"(",
"string",
")",
":",
"if",
"(",
"in_bash",
"(",
")",
"and",
"string",
")",
":",
"return",
"'\"{0}\"'",
".",
"format",
"(",
"string",
")",
"return",
"string"
] |
bash has problems dealing with certain paths , so we're surrounding all path outputs with quotes .
|
train
| false
|
55,071
|
def add_log_redaction_filter_to_logger(engine, logger):
if engine.policies:
redaction_filter = RedactionFilter(engine)
for handler in logger.handlers:
handler.addFilter(redaction_filter)
|
[
"def",
"add_log_redaction_filter_to_logger",
"(",
"engine",
",",
"logger",
")",
":",
"if",
"engine",
".",
"policies",
":",
"redaction_filter",
"=",
"RedactionFilter",
"(",
"engine",
")",
"for",
"handler",
"in",
"logger",
".",
"handlers",
":",
"handler",
".",
"addFilter",
"(",
"redaction_filter",
")"
] |
add_log_redaction_filter_to_logger injects the redaction filter into all of the logger's handlers .
|
train
| false
|
55,072
|
def distorted_inputs(dataset, batch_size=None, num_preprocess_threads=None):
if (not batch_size):
batch_size = FLAGS.batch_size
with tf.device('/cpu:0'):
(images, labels) = batch_inputs(dataset, batch_size, train=True, num_preprocess_threads=num_preprocess_threads, num_readers=FLAGS.num_readers)
return (images, labels)
|
[
"def",
"distorted_inputs",
"(",
"dataset",
",",
"batch_size",
"=",
"None",
",",
"num_preprocess_threads",
"=",
"None",
")",
":",
"if",
"(",
"not",
"batch_size",
")",
":",
"batch_size",
"=",
"FLAGS",
".",
"batch_size",
"with",
"tf",
".",
"device",
"(",
"'/cpu:0'",
")",
":",
"(",
"images",
",",
"labels",
")",
"=",
"batch_inputs",
"(",
"dataset",
",",
"batch_size",
",",
"train",
"=",
"True",
",",
"num_preprocess_threads",
"=",
"num_preprocess_threads",
",",
"num_readers",
"=",
"FLAGS",
".",
"num_readers",
")",
"return",
"(",
"images",
",",
"labels",
")"
] |
generate batches of distorted versions of imagenet images .
|
train
| true
|
55,074
|
def get_locked_port_and_binding(context, port_id):
try:
port = context.session.query(models_v2.Port).enable_eagerloads(False).filter_by(id=port_id).with_lockmode('update').one()
binding = context.session.query(models.PortBinding).enable_eagerloads(False).filter_by(port_id=port_id).with_lockmode('update').one()
return (port, binding)
except exc.NoResultFound:
return (None, None)
|
[
"def",
"get_locked_port_and_binding",
"(",
"context",
",",
"port_id",
")",
":",
"try",
":",
"port",
"=",
"context",
".",
"session",
".",
"query",
"(",
"models_v2",
".",
"Port",
")",
".",
"enable_eagerloads",
"(",
"False",
")",
".",
"filter_by",
"(",
"id",
"=",
"port_id",
")",
".",
"with_lockmode",
"(",
"'update'",
")",
".",
"one",
"(",
")",
"binding",
"=",
"context",
".",
"session",
".",
"query",
"(",
"models",
".",
"PortBinding",
")",
".",
"enable_eagerloads",
"(",
"False",
")",
".",
"filter_by",
"(",
"port_id",
"=",
"port_id",
")",
".",
"with_lockmode",
"(",
"'update'",
")",
".",
"one",
"(",
")",
"return",
"(",
"port",
",",
"binding",
")",
"except",
"exc",
".",
"NoResultFound",
":",
"return",
"(",
"None",
",",
"None",
")"
] |
get port and port binding records for update within transaction .
|
train
| false
|
55,075
|
def _fake_check_ldev_status(*args, **kwargs):
return None
|
[
"def",
"_fake_check_ldev_status",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"return",
"None"
] |
assume ldev status has changed as desired .
|
train
| false
|
55,076
|
def _date_to_datetime(value):
if (not isinstance(value, datetime.date)):
raise TypeError(('Cannot convert to datetime expected date value; received %s' % value))
return datetime.datetime(value.year, value.month, value.day)
|
[
"def",
"_date_to_datetime",
"(",
"value",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"value",
",",
"datetime",
".",
"date",
")",
")",
":",
"raise",
"TypeError",
"(",
"(",
"'Cannot convert to datetime expected date value; received %s'",
"%",
"value",
")",
")",
"return",
"datetime",
".",
"datetime",
"(",
"value",
".",
"year",
",",
"value",
".",
"month",
",",
"value",
".",
"day",
")"
] |
convert a date to a datetime for datastore storage .
|
train
| true
|
55,077
|
def find_bad_registrations():
registrations = models.Node.find(Q('is_registration', 'eq', True))
for registration in registrations:
meta = (registration.registered_meta or {})
keys = meta.keys()
if (len(keys) != 1):
print 'Inconsistency: Number of keys on project {} ({}) != 1'.format(registration.title, registration._primary_key)
continue
if (keys[0] not in known_schemas):
print 'Inconsistency: Registration schema {} on project {} ({}) not in known schemas'.format(keys[0], registration.title, registration._primary_key)
|
[
"def",
"find_bad_registrations",
"(",
")",
":",
"registrations",
"=",
"models",
".",
"Node",
".",
"find",
"(",
"Q",
"(",
"'is_registration'",
",",
"'eq'",
",",
"True",
")",
")",
"for",
"registration",
"in",
"registrations",
":",
"meta",
"=",
"(",
"registration",
".",
"registered_meta",
"or",
"{",
"}",
")",
"keys",
"=",
"meta",
".",
"keys",
"(",
")",
"if",
"(",
"len",
"(",
"keys",
")",
"!=",
"1",
")",
":",
"print",
"'Inconsistency: Number of keys on project {} ({}) != 1'",
".",
"format",
"(",
"registration",
".",
"title",
",",
"registration",
".",
"_primary_key",
")",
"continue",
"if",
"(",
"keys",
"[",
"0",
"]",
"not",
"in",
"known_schemas",
")",
":",
"print",
"'Inconsistency: Registration schema {} on project {} ({}) not in known schemas'",
".",
"format",
"(",
"keys",
"[",
"0",
"]",
",",
"registration",
".",
"title",
",",
"registration",
".",
"_primary_key",
")"
] |
find registrations with unexpected numbers of template keys or outdated templates .
|
train
| false
|
55,078
|
def assert_has_n_elements_with_path(output, path, n):
xml = to_xml(output)
n = int(n)
num_elements = len(xml.findall(path))
if (num_elements != n):
errmsg = ('Expected to find %d elements with path %s, but %d were found.' % (n, path, num_elements))
raise AssertionError(errmsg)
|
[
"def",
"assert_has_n_elements_with_path",
"(",
"output",
",",
"path",
",",
"n",
")",
":",
"xml",
"=",
"to_xml",
"(",
"output",
")",
"n",
"=",
"int",
"(",
"n",
")",
"num_elements",
"=",
"len",
"(",
"xml",
".",
"findall",
"(",
"path",
")",
")",
"if",
"(",
"num_elements",
"!=",
"n",
")",
":",
"errmsg",
"=",
"(",
"'Expected to find %d elements with path %s, but %d were found.'",
"%",
"(",
"n",
",",
"path",
",",
"num_elements",
")",
")",
"raise",
"AssertionError",
"(",
"errmsg",
")"
] |
asserts the specified output has exactly n elements matching the path specified .
|
train
| false
|