| id_within_dataset (int64, 1 to 55.5k) | snippet (string, 19 to 14.2k chars) | tokens (list, 6 to 1.63k items) | nl (string, 6 to 352 chars) | split_within_dataset (1 class) | is_duplicated (bool, 2 classes) |
|---|---|---|---|---|---|
9,460
|
def _run_finalizers(minpriority=None):
if (minpriority is None):
f = (lambda p: (p[0][0] is not None))
else:
f = (lambda p: ((p[0][0] is not None) and (p[0][0] >= minpriority)))
items = [x for x in _finalizer_registry.items() if f(x)]
items.sort(reverse=True)
for (key, finalizer) in items:
sub_debug('calling %s', finalizer)
try:
finalizer()
except Exception:
import traceback
traceback.print_exc()
if (minpriority is None):
_finalizer_registry.clear()
|
[
"def",
"_run_finalizers",
"(",
"minpriority",
"=",
"None",
")",
":",
"if",
"(",
"minpriority",
"is",
"None",
")",
":",
"f",
"=",
"(",
"lambda",
"p",
":",
"(",
"p",
"[",
"0",
"]",
"[",
"0",
"]",
"is",
"not",
"None",
")",
")",
"else",
":",
"f",
"=",
"(",
"lambda",
"p",
":",
"(",
"(",
"p",
"[",
"0",
"]",
"[",
"0",
"]",
"is",
"not",
"None",
")",
"and",
"(",
"p",
"[",
"0",
"]",
"[",
"0",
"]",
">=",
"minpriority",
")",
")",
")",
"items",
"=",
"[",
"x",
"for",
"x",
"in",
"_finalizer_registry",
".",
"items",
"(",
")",
"if",
"f",
"(",
"x",
")",
"]",
"items",
".",
"sort",
"(",
"reverse",
"=",
"True",
")",
"for",
"(",
"key",
",",
"finalizer",
")",
"in",
"items",
":",
"sub_debug",
"(",
"'calling %s'",
",",
"finalizer",
")",
"try",
":",
"finalizer",
"(",
")",
"except",
"Exception",
":",
"import",
"traceback",
"traceback",
".",
"print_exc",
"(",
")",
"if",
"(",
"minpriority",
"is",
"None",
")",
":",
"_finalizer_registry",
".",
"clear",
"(",
")"
] |
run all finalizers whose exit priority is not none and at least minpriority . finalizers with highest priority are called first; finalizers with the same priority will be called in reverse order of creation .
|
train
| false
|
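The registry items this sorts are (key, finalizer) pairs; a minimal sketch of the ordering, assuming keys are (exitpriority, creation_counter) tuples as in multiprocessing.util, where sorting with reverse=True puts the highest priority first and, within a priority, the most recently created finalizer first:

_finalizer_registry = {(10, 0): 'early, created first',
                       (10, 1): 'early, created second',
                       (0, 2): 'late'}
print([f for _, f in sorted(_finalizer_registry.items(), reverse=True)])
# ['early, created second', 'early, created first', 'late']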
9,461
|
def make_undefined_step_snippet(step, language=None):
if isinstance(step, string_types):
step_text = step
steps = parser.parse_steps(step_text, language=language)
step = steps[0]
assert step, ('ParseError: %s' % step_text)
prefix = u'u'
single_quote = "'"
if (single_quote in step.name):
step.name = step.name.replace(single_quote, "\\'")
schema = u"@%s(%s'%s')\ndef step_impl(context):\n"
schema += u" raise NotImplementedError(%s'STEP: %s %s')\n\n"
snippet = (schema % (step.step_type, prefix, step.name, prefix, step.step_type.title(), step.name))
return snippet
|
[
"def",
"make_undefined_step_snippet",
"(",
"step",
",",
"language",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"step",
",",
"string_types",
")",
":",
"step_text",
"=",
"step",
"steps",
"=",
"parser",
".",
"parse_steps",
"(",
"step_text",
",",
"language",
"=",
"language",
")",
"step",
"=",
"steps",
"[",
"0",
"]",
"assert",
"step",
",",
"(",
"'ParseError: %s'",
"%",
"step_text",
")",
"prefix",
"=",
"u'u'",
"single_quote",
"=",
"\"'\"",
"if",
"(",
"single_quote",
"in",
"step",
".",
"name",
")",
":",
"step",
".",
"name",
"=",
"step",
".",
"name",
".",
"replace",
"(",
"single_quote",
",",
"\"\\\\'\"",
")",
"schema",
"=",
"u\"@%s(%s'%s')\\ndef step_impl(context):\\n\"",
"schema",
"+=",
"u\" raise NotImplementedError(%s'STEP: %s %s')\\n\\n\"",
"snippet",
"=",
"(",
"schema",
"%",
"(",
"step",
".",
"step_type",
",",
"prefix",
",",
"step",
".",
"name",
",",
"prefix",
",",
"step",
".",
"step_type",
".",
"title",
"(",
")",
",",
"step",
".",
"name",
")",
")",
"return",
"snippet"
] |
helper function to create an undefined-step snippet for a step .
|
train
| false
|
9,462
|
def get_shared_doctypes(user=None):
if (not user):
user = frappe.session.user
return frappe.db.sql_list(u'select distinct share_doctype from tabDocShare where (user=%s or everyone=1)', user)
|
[
"def",
"get_shared_doctypes",
"(",
"user",
"=",
"None",
")",
":",
"if",
"(",
"not",
"user",
")",
":",
"user",
"=",
"frappe",
".",
"session",
".",
"user",
"return",
"frappe",
".",
"db",
".",
"sql_list",
"(",
"u'select distinct share_doctype from tabDocShare where (user=%s or everyone=1)'",
",",
"user",
")"
] |
return list of doctypes in which documents are shared for the given user .
|
train
| false
|
9,463
|
def import_keypair(kwargs=None, call=None):
with salt.utils.fopen(kwargs['file'], 'r') as public_key_filename:
public_key_content = public_key_filename.read()
digital_ocean_kwargs = {'name': kwargs['keyname'], 'public_key': public_key_content}
created_result = create_key(digital_ocean_kwargs, call=call)
return created_result
|
[
"def",
"import_keypair",
"(",
"kwargs",
"=",
"None",
",",
"call",
"=",
"None",
")",
":",
"with",
"salt",
".",
"utils",
".",
"fopen",
"(",
"kwargs",
"[",
"'file'",
"]",
",",
"'r'",
")",
"as",
"public_key_filename",
":",
"public_key_content",
"=",
"public_key_filename",
".",
"read",
"(",
")",
"digital_ocean_kwargs",
"=",
"{",
"'name'",
":",
"kwargs",
"[",
"'keyname'",
"]",
",",
"'public_key'",
":",
"public_key_content",
"}",
"created_result",
"=",
"create_key",
"(",
"digital_ocean_kwargs",
",",
"call",
"=",
"call",
")",
"return",
"created_result"
] |
import an ssh public key .
|
train
| false
|
9,466
|
@cacheit
def _remove_multiple_delta(expr):
from sympy.solvers import solve
if expr.is_Add:
return expr.func(*list(map(_remove_multiple_delta, expr.args)))
if (not expr.is_Mul):
return expr
eqs = []
newargs = []
for arg in expr.args:
if isinstance(arg, KroneckerDelta):
eqs.append((arg.args[0] - arg.args[1]))
else:
newargs.append(arg)
if (not eqs):
return expr
solns = solve(eqs, dict=True)
if (len(solns) == 0):
return S.Zero
elif (len(solns) == 1):
for key in solns[0].keys():
newargs.append(KroneckerDelta(key, solns[0][key]))
expr2 = expr.func(*newargs)
if (expr != expr2):
return _remove_multiple_delta(expr2)
return expr
|
[
"@",
"cacheit",
"def",
"_remove_multiple_delta",
"(",
"expr",
")",
":",
"from",
"sympy",
".",
"solvers",
"import",
"solve",
"if",
"expr",
".",
"is_Add",
":",
"return",
"expr",
".",
"func",
"(",
"*",
"list",
"(",
"map",
"(",
"_remove_multiple_delta",
",",
"expr",
".",
"args",
")",
")",
")",
"if",
"(",
"not",
"expr",
".",
"is_Mul",
")",
":",
"return",
"expr",
"eqs",
"=",
"[",
"]",
"newargs",
"=",
"[",
"]",
"for",
"arg",
"in",
"expr",
".",
"args",
":",
"if",
"isinstance",
"(",
"arg",
",",
"KroneckerDelta",
")",
":",
"eqs",
".",
"append",
"(",
"(",
"arg",
".",
"args",
"[",
"0",
"]",
"-",
"arg",
".",
"args",
"[",
"1",
"]",
")",
")",
"else",
":",
"newargs",
".",
"append",
"(",
"arg",
")",
"if",
"(",
"not",
"eqs",
")",
":",
"return",
"expr",
"solns",
"=",
"solve",
"(",
"eqs",
",",
"dict",
"=",
"True",
")",
"if",
"(",
"len",
"(",
"solns",
")",
"==",
"0",
")",
":",
"return",
"S",
".",
"Zero",
"elif",
"(",
"len",
"(",
"solns",
")",
"==",
"1",
")",
":",
"for",
"key",
"in",
"solns",
"[",
"0",
"]",
".",
"keys",
"(",
")",
":",
"newargs",
".",
"append",
"(",
"KroneckerDelta",
"(",
"key",
",",
"solns",
"[",
"0",
"]",
"[",
"key",
"]",
")",
")",
"expr2",
"=",
"expr",
".",
"func",
"(",
"*",
"newargs",
")",
"if",
"(",
"expr",
"!=",
"expr2",
")",
":",
"return",
"_remove_multiple_delta",
"(",
"expr2",
")",
"return",
"expr"
] |
evaluate products of kroneckerdeltas .
|
train
| false
|
9,468
|
def heavy_job(registry, xml_parent, data):
heavyjob = XML.SubElement(xml_parent, 'hudson.plugins.heavy__job.HeavyJobProperty')
XML.SubElement(heavyjob, 'weight').text = str(data.get('weight', 1))
|
[
"def",
"heavy_job",
"(",
"registry",
",",
"xml_parent",
",",
"data",
")",
":",
"heavyjob",
"=",
"XML",
".",
"SubElement",
"(",
"xml_parent",
",",
"'hudson.plugins.heavy__job.HeavyJobProperty'",
")",
"XML",
".",
"SubElement",
"(",
"heavyjob",
",",
"'weight'",
")",
".",
"text",
"=",
"str",
"(",
"data",
".",
"get",
"(",
"'weight'",
",",
"1",
")",
")"
] |
yaml: heavy-job . this plugin allows you to define "weight" on each job .
|
train
| false
|
9,469
|
def TIMEOUT(v):
context.defaults['timeout'] = int(v)
|
[
"def",
"TIMEOUT",
"(",
"v",
")",
":",
"context",
".",
"defaults",
"[",
"'timeout'",
"]",
"=",
"int",
"(",
"v",
")"
] |
sets a timeout for tube operations via context .
|
train
| false
|
9,474
|
def compile_nrt_functions(ctx):
(ir_mod, library) = create_nrt_module(ctx)
library.add_ir_module(ir_mod)
library.finalize()
return library
|
[
"def",
"compile_nrt_functions",
"(",
"ctx",
")",
":",
"(",
"ir_mod",
",",
"library",
")",
"=",
"create_nrt_module",
"(",
"ctx",
")",
"library",
".",
"add_ir_module",
"(",
"ir_mod",
")",
"library",
".",
"finalize",
"(",
")",
"return",
"library"
] |
compile all llvm nrt functions and return a library containing them .
|
train
| false
|
9,477
|
def minimize_quadratic_1d(a, b, lb, ub, c=0):
t = [lb, ub]
if (a != 0):
extremum = (((-0.5) * b) / a)
if (lb < extremum < ub):
t.append(extremum)
t = np.asarray(t)
y = (((a * (t ** 2)) + (b * t)) + c)
min_index = np.argmin(y)
return (t[min_index], y[min_index])
|
[
"def",
"minimize_quadratic_1d",
"(",
"a",
",",
"b",
",",
"lb",
",",
"ub",
",",
"c",
"=",
"0",
")",
":",
"t",
"=",
"[",
"lb",
",",
"ub",
"]",
"if",
"(",
"a",
"!=",
"0",
")",
":",
"extremum",
"=",
"(",
"(",
"(",
"-",
"0.5",
")",
"*",
"b",
")",
"/",
"a",
")",
"if",
"(",
"lb",
"<",
"extremum",
"<",
"ub",
")",
":",
"t",
".",
"append",
"(",
"extremum",
")",
"t",
"=",
"np",
".",
"asarray",
"(",
"t",
")",
"y",
"=",
"(",
"(",
"(",
"a",
"*",
"(",
"t",
"**",
"2",
")",
")",
"+",
"(",
"b",
"*",
"t",
")",
")",
"+",
"c",
")",
"min_index",
"=",
"np",
".",
"argmin",
"(",
"y",
")",
"return",
"(",
"t",
"[",
"min_index",
"]",
",",
"y",
"[",
"min_index",
"]",
")"
] |
minimize a 1-d quadratic function subject to bounds .
|
train
| false
|
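A quick check of the snippet above (a sketch assuming numpy is imported as np and minimize_quadratic_1d is in scope): the vertex t = -b/(2a) is used when it lies strictly inside the bounds, otherwise the better endpoint wins.

print(minimize_quadratic_1d(1, -4, 0, 10))  # (2.0, -4.0): vertex at t = 2
print(minimize_quadratic_1d(1, -4, 5, 10))  # (5, 5): vertex falls outside [5, 10]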
9,478
|
def get_exp_metadata_dicts_matching_query(query_string, search_cursor, user_id):
(exp_ids, new_search_cursor) = exp_services.get_exploration_ids_matching_query(query_string, cursor=search_cursor)
exploration_list = get_exploration_metadata_dicts(exp_ids, user_id)
return (exploration_list, new_search_cursor)
|
[
"def",
"get_exp_metadata_dicts_matching_query",
"(",
"query_string",
",",
"search_cursor",
",",
"user_id",
")",
":",
"(",
"exp_ids",
",",
"new_search_cursor",
")",
"=",
"exp_services",
".",
"get_exploration_ids_matching_query",
"(",
"query_string",
",",
"cursor",
"=",
"search_cursor",
")",
"exploration_list",
"=",
"get_exploration_metadata_dicts",
"(",
"exp_ids",
",",
"user_id",
")",
"return",
"(",
"exploration_list",
",",
"new_search_cursor",
")"
] |
given a query string and a search cursor , returns a list of exploration metadata dicts and a new search cursor .
|
train
| false
|
9,480
|
def decode_entities(html):
def decode(m):
html = m.group(0)
if (html[:2] == u'&#'):
try:
if (html[:3] == u'&#x'):
return chr(int(html[3:(-1)], 16))
else:
return chr(int(html[2:(-1)]))
except ValueError:
pass
else:
try:
html = chr(name2codepoint[html[1:(-1)]])
except KeyError:
pass
return html
    return re.sub(u'&#?\\w+;', decode, html.replace(u'&amp;', u'&'))
|
[
"def",
"decode_entities",
"(",
"html",
")",
":",
"def",
"decode",
"(",
"m",
")",
":",
"html",
"=",
"m",
".",
"group",
"(",
"0",
")",
"if",
"(",
"html",
"[",
":",
"2",
"]",
"==",
"u'&#'",
")",
":",
"try",
":",
"if",
"(",
"html",
"[",
":",
"3",
"]",
"==",
"u'&#x'",
")",
":",
"return",
"chr",
"(",
"int",
"(",
"html",
"[",
"3",
":",
"(",
"-",
"1",
")",
"]",
",",
"16",
")",
")",
"else",
":",
"return",
"chr",
"(",
"int",
"(",
"html",
"[",
"2",
":",
"(",
"-",
"1",
")",
"]",
")",
")",
"except",
"ValueError",
":",
"pass",
"else",
":",
"try",
":",
"html",
"=",
"chr",
"(",
"name2codepoint",
"[",
"html",
"[",
"1",
":",
"(",
"-",
"1",
")",
"]",
"]",
")",
"except",
"KeyError",
":",
"pass",
"return",
"html",
"return",
"re",
".",
"sub",
"(",
"u'&#?\\\\w+;'",
",",
"decode",
",",
"html",
".",
"replace",
"(",
"u'&'",
",",
"u'&'",
")",
")"
] |
decodes html entities in the given string .
|
train
| false
|
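Example usage of the snippet above, assuming decode_entities is in scope and its free names come from the stdlib (re, plus name2codepoint from htmlentitydefs on Python 2 or html.entities on Python 3); the initial replace un-doubles escaped entities such as &amp;#169; before decoding:

from html.entities import name2codepoint  # Python 3 location
import re
print(decode_entities(u'&copy; &#169; &#xA9; &amp;#169;'))
# © © © ©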
9,481
|
def describe_api_stage(restApiId, stageName, region=None, key=None, keyid=None, profile=None):
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
stage = conn.get_stage(restApiId=restApiId, stageName=stageName)
return {'stage': _convert_datetime_str(stage)}
except ClientError as e:
return {'error': salt.utils.boto3.get_error(e)}
|
[
"def",
"describe_api_stage",
"(",
"restApiId",
",",
"stageName",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"try",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"stage",
"=",
"conn",
".",
"get_stage",
"(",
"restApiId",
"=",
"restApiId",
",",
"stageName",
"=",
"stageName",
")",
"return",
"{",
"'stage'",
":",
"_convert_datetime_str",
"(",
"stage",
")",
"}",
"except",
"ClientError",
"as",
"e",
":",
"return",
"{",
"'error'",
":",
"salt",
".",
"utils",
".",
"boto3",
".",
"get_error",
"(",
"e",
")",
"}"
] |
get api stage for a given apiid and stage name .
|
train
| false
|
9,484
|
def tril(m, k=0):
m = np.asarray(m)
out = (tri(m.shape[0], m.shape[1], k=k, dtype=m.dtype.char) * m)
return out
|
[
"def",
"tril",
"(",
"m",
",",
"k",
"=",
"0",
")",
":",
"m",
"=",
"np",
".",
"asarray",
"(",
"m",
")",
"out",
"=",
"(",
"tri",
"(",
"m",
".",
"shape",
"[",
"0",
"]",
",",
"m",
".",
"shape",
"[",
"1",
"]",
",",
"k",
"=",
"k",
",",
"dtype",
"=",
"m",
".",
"dtype",
".",
"char",
")",
"*",
"m",
")",
"return",
"out"
] |
return the lower triangular portion of a matrix in sparse format returns the elements on or below the k-th diagonal of the matrix a .
|
train
| false
|
9,485
|
def ctc_batch_cost(y_true, y_pred, input_length, label_length):
label_length = tf.to_int32(tf.squeeze(label_length))
input_length = tf.to_int32(tf.squeeze(input_length))
sparse_labels = tf.to_int32(ctc_label_dense_to_sparse(y_true, label_length))
y_pred = tf.log((tf.transpose(y_pred, perm=[1, 0, 2]) + 1e-08))
return tf.expand_dims(ctc.ctc_loss(inputs=y_pred, labels=sparse_labels, sequence_length=input_length), 1)
|
[
"def",
"ctc_batch_cost",
"(",
"y_true",
",",
"y_pred",
",",
"input_length",
",",
"label_length",
")",
":",
"label_length",
"=",
"tf",
".",
"to_int32",
"(",
"tf",
".",
"squeeze",
"(",
"label_length",
")",
")",
"input_length",
"=",
"tf",
".",
"to_int32",
"(",
"tf",
".",
"squeeze",
"(",
"input_length",
")",
")",
"sparse_labels",
"=",
"tf",
".",
"to_int32",
"(",
"ctc_label_dense_to_sparse",
"(",
"y_true",
",",
"label_length",
")",
")",
"y_pred",
"=",
"tf",
".",
"log",
"(",
"(",
"tf",
".",
"transpose",
"(",
"y_pred",
",",
"perm",
"=",
"[",
"1",
",",
"0",
",",
"2",
"]",
")",
"+",
"1e-08",
")",
")",
"return",
"tf",
".",
"expand_dims",
"(",
"ctc",
".",
"ctc_loss",
"(",
"inputs",
"=",
"y_pred",
",",
"labels",
"=",
"sparse_labels",
",",
"sequence_length",
"=",
"input_length",
")",
",",
"1",
")"
] |
runs ctc loss algorithm on each batch element .
|
train
| false
|
9,486
|
def educateDashes(str):
str = re.sub('---', '–', str)
str = re.sub('--', '—', str)
return str
|
[
"def",
"educateDashes",
"(",
"str",
")",
":",
"str",
"=",
"re",
".",
"sub",
"(",
"'---'",
",",
"'–'",
",",
"str",
")",
"str",
"=",
"re",
".",
"sub",
"(",
"'--'",
",",
"'—'",
",",
"str",
")",
"return",
"str"
] |
parameter: string ; returns the string with runs of three and two hyphens replaced by dash characters .
|
train
| false
|
9,488
|
def pick_authenticator(config, default, plugins, question='How would you like to authenticate with the ACME CA?'):
return pick_plugin(config, default, plugins, question, (interfaces.IAuthenticator,))
|
[
"def",
"pick_authenticator",
"(",
"config",
",",
"default",
",",
"plugins",
",",
"question",
"=",
"'How would you like to authenticate with the ACME CA?'",
")",
":",
"return",
"pick_plugin",
"(",
"config",
",",
"default",
",",
"plugins",
",",
"question",
",",
"(",
"interfaces",
".",
"IAuthenticator",
",",
")",
")"
] |
pick authentication plugin .
|
train
| false
|
9,489
|
def runClassifier(classifier, sensorRegion, tpRegion, recordNumber):
actualInput = float(sensorRegion.getOutputData('sourceOut')[0])
scalarEncoder = sensorRegion.getSelf().encoder.encoders[0][1]
bucketIndex = scalarEncoder.getBucketIndices(actualInput)[0]
tpOutput = tpRegion.getOutputData('bottomUpOut').nonzero()[0]
classDict = {'actValue': actualInput, 'bucketIdx': bucketIndex}
results = classifier.getSelf().customCompute(recordNum=recordNumber, patternNZ=tpOutput, classification=classDict)
mostLikelyResult = sorted(zip(results[1], results['actualValues']))[(-1)]
predictionConfidence = mostLikelyResult[0]
predictedValue = mostLikelyResult[1]
return (actualInput, predictedValue, predictionConfidence)
|
[
"def",
"runClassifier",
"(",
"classifier",
",",
"sensorRegion",
",",
"tpRegion",
",",
"recordNumber",
")",
":",
"actualInput",
"=",
"float",
"(",
"sensorRegion",
".",
"getOutputData",
"(",
"'sourceOut'",
")",
"[",
"0",
"]",
")",
"scalarEncoder",
"=",
"sensorRegion",
".",
"getSelf",
"(",
")",
".",
"encoder",
".",
"encoders",
"[",
"0",
"]",
"[",
"1",
"]",
"bucketIndex",
"=",
"scalarEncoder",
".",
"getBucketIndices",
"(",
"actualInput",
")",
"[",
"0",
"]",
"tpOutput",
"=",
"tpRegion",
".",
"getOutputData",
"(",
"'bottomUpOut'",
")",
".",
"nonzero",
"(",
")",
"[",
"0",
"]",
"classDict",
"=",
"{",
"'actValue'",
":",
"actualInput",
",",
"'bucketIdx'",
":",
"bucketIndex",
"}",
"results",
"=",
"classifier",
".",
"getSelf",
"(",
")",
".",
"customCompute",
"(",
"recordNum",
"=",
"recordNumber",
",",
"patternNZ",
"=",
"tpOutput",
",",
"classification",
"=",
"classDict",
")",
"mostLikelyResult",
"=",
"sorted",
"(",
"zip",
"(",
"results",
"[",
"1",
"]",
",",
"results",
"[",
"'actualValues'",
"]",
")",
")",
"[",
"(",
"-",
"1",
")",
"]",
"predictionConfidence",
"=",
"mostLikelyResult",
"[",
"0",
"]",
"predictedValue",
"=",
"mostLikelyResult",
"[",
"1",
"]",
"return",
"(",
"actualInput",
",",
"predictedValue",
",",
"predictionConfidence",
")"
] |
calls classifier manually .
|
train
| false
|
9,490
|
def pwReencrypt(epw, oldPassword, newPassword):
(plaintext, ok) = pwDecrypt(epw, oldPassword)
if ok:
return pwEncrypt(plaintext, newPassword)
else:
return (u'', False)
|
[
"def",
"pwReencrypt",
"(",
"epw",
",",
"oldPassword",
",",
"newPassword",
")",
":",
"(",
"plaintext",
",",
"ok",
")",
"=",
"pwDecrypt",
"(",
"epw",
",",
"oldPassword",
")",
"if",
"ok",
":",
"return",
"pwEncrypt",
"(",
"plaintext",
",",
"newPassword",
")",
"else",
":",
"return",
"(",
"u''",
",",
"False",
")"
] |
module function to re-encrypt a password .
|
train
| false
|
9,491
|
def grayscale(image):
return image.convert('L')
|
[
"def",
"grayscale",
"(",
"image",
")",
":",
"return",
"image",
".",
"convert",
"(",
"'L'",
")"
] |
convert the image to grayscale .
|
train
| false
|
9,492
|
def build_plural_mappings(special_mappings, resource_map):
plural_mappings = {}
for plural in resource_map:
singular = special_mappings.get(plural)
if (not singular):
if plural.endswith('ies'):
singular = ('%sy' % plural[:(-3)])
else:
singular = plural[:(-1)]
plural_mappings[plural] = singular
return plural_mappings
|
[
"def",
"build_plural_mappings",
"(",
"special_mappings",
",",
"resource_map",
")",
":",
"plural_mappings",
"=",
"{",
"}",
"for",
"plural",
"in",
"resource_map",
":",
"singular",
"=",
"special_mappings",
".",
"get",
"(",
"plural",
")",
"if",
"(",
"not",
"singular",
")",
":",
"if",
"plural",
".",
"endswith",
"(",
"'ies'",
")",
":",
"singular",
"=",
"(",
"'%sy'",
"%",
"plural",
"[",
":",
"(",
"-",
"3",
")",
"]",
")",
"else",
":",
"singular",
"=",
"plural",
"[",
":",
"(",
"-",
"1",
")",
"]",
"plural_mappings",
"[",
"plural",
"]",
"=",
"singular",
"return",
"plural_mappings"
] |
create plural to singular mapping for all resources .
|
train
| false
|
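For illustration, with hypothetical inputs: a special mapping wins where given, 'ies' endings become 'y', and anything else just drops the trailing 's'.

mappings = build_plural_mappings({'metadata': 'metadata'},
                                 ['ports', 'policies', 'metadata'])
print(mappings)  # {'ports': 'port', 'policies': 'policy', 'metadata': 'metadata'}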
9,494
|
@pytest.mark.django_db
def test_delete_mark_obsolete_resurrect_sync(project0_nongnu, subdir0):
store = subdir0.child_stores.first()
store.sync()
tp = store.translation_project
dir_pootle_path = store.parent.pootle_path
store_pootle_path = store.pootle_path
for _store in tp.stores.all():
_store.sync()
os.remove(store.file.path)
shutil.rmtree(os.path.dirname(store.file.path))
tp.scan_files()
updated_directory = Directory.objects.get(pootle_path=dir_pootle_path)
assert updated_directory.obsolete
updated_store = Store.objects.get(pootle_path=store_pootle_path)
assert updated_store.obsolete
store_units = Unit.objects.filter(store=updated_store)
for unit in store_units:
assert unit.isobsolete()
updated_directory.obsolete = False
updated_directory.save()
updated_store.obsolete = False
updated_store.save()
for unit in store_units:
unit.resurrect()
unit.save()
updated_store.sync(only_newer=False)
assert os.path.exists(updated_store.file.path)
|
[
"@",
"pytest",
".",
"mark",
".",
"django_db",
"def",
"test_delete_mark_obsolete_resurrect_sync",
"(",
"project0_nongnu",
",",
"subdir0",
")",
":",
"store",
"=",
"subdir0",
".",
"child_stores",
".",
"first",
"(",
")",
"store",
".",
"sync",
"(",
")",
"tp",
"=",
"store",
".",
"translation_project",
"dir_pootle_path",
"=",
"store",
".",
"parent",
".",
"pootle_path",
"store_pootle_path",
"=",
"store",
".",
"pootle_path",
"for",
"_store",
"in",
"tp",
".",
"stores",
".",
"all",
"(",
")",
":",
"_store",
".",
"sync",
"(",
")",
"os",
".",
"remove",
"(",
"store",
".",
"file",
".",
"path",
")",
"shutil",
".",
"rmtree",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"store",
".",
"file",
".",
"path",
")",
")",
"tp",
".",
"scan_files",
"(",
")",
"updated_directory",
"=",
"Directory",
".",
"objects",
".",
"get",
"(",
"pootle_path",
"=",
"dir_pootle_path",
")",
"assert",
"updated_directory",
".",
"obsolete",
"updated_store",
"=",
"Store",
".",
"objects",
".",
"get",
"(",
"pootle_path",
"=",
"store_pootle_path",
")",
"assert",
"updated_store",
".",
"obsolete",
"store_units",
"=",
"Unit",
".",
"objects",
".",
"filter",
"(",
"store",
"=",
"updated_store",
")",
"for",
"unit",
"in",
"store_units",
":",
"assert",
"unit",
".",
"isobsolete",
"(",
")",
"updated_directory",
".",
"obsolete",
"=",
"False",
"updated_directory",
".",
"save",
"(",
")",
"updated_store",
".",
"obsolete",
"=",
"False",
"updated_store",
".",
"save",
"(",
")",
"for",
"unit",
"in",
"store_units",
":",
"unit",
".",
"resurrect",
"(",
")",
"unit",
".",
"save",
"(",
")",
"updated_store",
".",
"sync",
"(",
"only_newer",
"=",
"False",
")",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"updated_store",
".",
"file",
".",
"path",
")"
] |
tests that the in-db directory are marked as obsolete after the on-disk file ceased to exist and that the on-disk file and directory are recovered after syncing .
|
train
| false
|
9,495
|
def infer_from(self, context=None, asname=True):
name = context.lookupname
if (name is None):
raise InferenceError()
if asname:
name = self.real_name(name)
module = self.do_import_module()
try:
context = copy_context(context)
context.lookupname = name
return _infer_stmts(module.getattr(name, ignore_locals=(module is self.root())), context)
except NotFoundError:
raise InferenceError(name)
|
[
"def",
"infer_from",
"(",
"self",
",",
"context",
"=",
"None",
",",
"asname",
"=",
"True",
")",
":",
"name",
"=",
"context",
".",
"lookupname",
"if",
"(",
"name",
"is",
"None",
")",
":",
"raise",
"InferenceError",
"(",
")",
"if",
"asname",
":",
"name",
"=",
"self",
".",
"real_name",
"(",
"name",
")",
"module",
"=",
"self",
".",
"do_import_module",
"(",
")",
"try",
":",
"context",
"=",
"copy_context",
"(",
"context",
")",
"context",
".",
"lookupname",
"=",
"name",
"return",
"_infer_stmts",
"(",
"module",
".",
"getattr",
"(",
"name",
",",
"ignore_locals",
"=",
"(",
"module",
"is",
"self",
".",
"root",
"(",
")",
")",
")",
",",
"context",
")",
"except",
"NotFoundError",
":",
"raise",
"InferenceError",
"(",
"name",
")"
] |
infer a from node: return the imported module/object .
|
train
| false
|
9,496
|
def read_from_which_host(client, pref, tag_sets=None):
db = client.pymongo_test
if isinstance(tag_sets, dict):
tag_sets = [tag_sets]
if tag_sets:
tags = (tag_sets or pref.tag_sets)
pref = pref.__class__(tags)
db.read_preference = pref
cursor = db.test.find()
try:
try:
next(cursor)
except StopIteration:
pass
return cursor.address
except AutoReconnect:
return None
|
[
"def",
"read_from_which_host",
"(",
"client",
",",
"pref",
",",
"tag_sets",
"=",
"None",
")",
":",
"db",
"=",
"client",
".",
"pymongo_test",
"if",
"isinstance",
"(",
"tag_sets",
",",
"dict",
")",
":",
"tag_sets",
"=",
"[",
"tag_sets",
"]",
"if",
"tag_sets",
":",
"tags",
"=",
"(",
"tag_sets",
"or",
"pref",
".",
"tag_sets",
")",
"pref",
"=",
"pref",
".",
"__class__",
"(",
"tags",
")",
"db",
".",
"read_preference",
"=",
"pref",
"cursor",
"=",
"db",
".",
"test",
".",
"find",
"(",
")",
"try",
":",
"try",
":",
"next",
"(",
"cursor",
")",
"except",
"StopIteration",
":",
"pass",
"return",
"cursor",
".",
"address",
"except",
"AutoReconnect",
":",
"return",
"None"
] |
read from a client with the given read preference .
|
train
| false
|
9,497
|
def eval_power(parse_result):
parse_result = reversed([k for k in parse_result if isinstance(k, numbers.Number)])
power = reduce((lambda a, b: (b ** a)), parse_result)
return power
|
[
"def",
"eval_power",
"(",
"parse_result",
")",
":",
"parse_result",
"=",
"reversed",
"(",
"[",
"k",
"for",
"k",
"in",
"parse_result",
"if",
"isinstance",
"(",
"k",
",",
"numbers",
".",
"Number",
")",
"]",
")",
"power",
"=",
"reduce",
"(",
"(",
"lambda",
"a",
",",
"b",
":",
"(",
"b",
"**",
"a",
")",
")",
",",
"parse_result",
")",
"return",
"power"
] |
take a list of numbers and exponentiate them .
|
train
| false
|
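A worked example of the right-associative fold (non-number tokens in the parse result are dropped by the comprehension): for [4, 3, 2] the reversed reduce computes 4 ** (3 ** 2) = 262144 rather than (4 ** 3) ** 2 = 4096.

from functools import reduce  # a builtin on Python 2
print(reduce(lambda a, b: b ** a, reversed([4, 3, 2])))  # 262144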
9,498
|
def BackendsStatusUpdate(runtime, error_fh=sys.stderr):
language = runtime
if (language == 'python27'):
language = 'python'
elif (language == 'java7'):
language = 'java'
if ((language == 'python') or (language == 'java')):
StatusUpdate((BACKENDS_MESSAGE + (_CONVERTING_URL % language)), error_fh)
|
[
"def",
"BackendsStatusUpdate",
"(",
"runtime",
",",
"error_fh",
"=",
"sys",
".",
"stderr",
")",
":",
"language",
"=",
"runtime",
"if",
"(",
"language",
"==",
"'python27'",
")",
":",
"language",
"=",
"'python'",
"elif",
"(",
"language",
"==",
"'java7'",
")",
":",
"language",
"=",
"'java'",
"if",
"(",
"(",
"language",
"==",
"'python'",
")",
"or",
"(",
"language",
"==",
"'java'",
")",
")",
":",
"StatusUpdate",
"(",
"(",
"BACKENDS_MESSAGE",
"+",
"(",
"_CONVERTING_URL",
"%",
"language",
")",
")",
",",
"error_fh",
")"
] |
print the backends status message based on current runtime .
|
train
| false
|
9,500
|
def test_cache_metadata(config_stub, tmpdir):
config_stub.data = {'storage': {'cache-size': 1024}, 'general': {'private-browsing': False}}
url = 'http://qutebrowser.org'
metadata = QNetworkCacheMetaData()
metadata.setUrl(QUrl(url))
assert metadata.isValid()
disk_cache = cache.DiskCache(str(tmpdir))
device = disk_cache.prepare(metadata)
device.write('foobar')
disk_cache.insert(device)
assert (disk_cache.metaData(QUrl(url)) == metadata)
|
[
"def",
"test_cache_metadata",
"(",
"config_stub",
",",
"tmpdir",
")",
":",
"config_stub",
".",
"data",
"=",
"{",
"'storage'",
":",
"{",
"'cache-size'",
":",
"1024",
"}",
",",
"'general'",
":",
"{",
"'private-browsing'",
":",
"False",
"}",
"}",
"url",
"=",
"'http://qutebrowser.org'",
"metadata",
"=",
"QNetworkCacheMetaData",
"(",
")",
"metadata",
".",
"setUrl",
"(",
"QUrl",
"(",
"url",
")",
")",
"assert",
"metadata",
".",
"isValid",
"(",
")",
"disk_cache",
"=",
"cache",
".",
"DiskCache",
"(",
"str",
"(",
"tmpdir",
")",
")",
"device",
"=",
"disk_cache",
".",
"prepare",
"(",
"metadata",
")",
"device",
".",
"write",
"(",
"'foobar'",
")",
"disk_cache",
".",
"insert",
"(",
"device",
")",
"assert",
"(",
"disk_cache",
".",
"metaData",
"(",
"QUrl",
"(",
"url",
")",
")",
"==",
"metadata",
")"
] |
ensure that diskcache metadata is preserved when an entry is inserted and retrieved .
|
train
| false
|
9,501
|
def _email_url_parser(url_name, extra_param=None):
site_name = configuration_helpers.get_value('SITE_NAME', settings.SITE_NAME)
dashboard_url_path = ((reverse(url_name) + extra_param) if extra_param else reverse(url_name))
dashboard_link_parts = ('https', site_name, dashboard_url_path, '', '', '')
return urlparse.urlunparse(dashboard_link_parts)
|
[
"def",
"_email_url_parser",
"(",
"url_name",
",",
"extra_param",
"=",
"None",
")",
":",
"site_name",
"=",
"configuration_helpers",
".",
"get_value",
"(",
"'SITE_NAME'",
",",
"settings",
".",
"SITE_NAME",
")",
"dashboard_url_path",
"=",
"(",
"(",
"reverse",
"(",
"url_name",
")",
"+",
"extra_param",
")",
"if",
"extra_param",
"else",
"reverse",
"(",
"url_name",
")",
")",
"dashboard_link_parts",
"=",
"(",
"'https'",
",",
"site_name",
",",
"dashboard_url_path",
",",
"''",
",",
"''",
",",
"''",
")",
"return",
"urlparse",
".",
"urlunparse",
"(",
"dashboard_link_parts",
")"
] |
parse url according to site_name which will be used in the mail .
|
train
| false
|
9,502
|
def _scalar_vectorized(scalar, M):
return (scalar[:, np.newaxis, np.newaxis] * M)
|
[
"def",
"_scalar_vectorized",
"(",
"scalar",
",",
"M",
")",
":",
"return",
"(",
"scalar",
"[",
":",
",",
"np",
".",
"newaxis",
",",
"np",
".",
"newaxis",
"]",
"*",
"M",
")"
] |
scalar product between scalars and matrices .
|
train
| false
|
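A small demonstration of the broadcasting (assuming numpy as np): each matrix in the stack is scaled by its own scalar.

import numpy as np
scalar = np.array([1.0, 2.0])               # shape (2,)
M = np.ones((2, 3, 3))                      # a stack of two 3x3 matrices
out = scalar[:, np.newaxis, np.newaxis] * M
print(out[0, 0, 0], out[1, 0, 0])           # 1.0 2.0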
9,503
|
def md5_hash(text):
return md5(to_bytes(text)).hexdigest()
|
[
"def",
"md5_hash",
"(",
"text",
")",
":",
"return",
"md5",
"(",
"to_bytes",
"(",
"text",
")",
")",
".",
"hexdigest",
"(",
")"
] |
generate an md5 hash with the given text .
|
train
| false
|
9,504
|
def to_dnf(expr, simplify=False):
expr = sympify(expr)
if (not isinstance(expr, BooleanFunction)):
return expr
if simplify:
return simplify_logic(expr, 'dnf', True)
if is_dnf(expr):
return expr
expr = eliminate_implications(expr)
return distribute_or_over_and(expr)
|
[
"def",
"to_dnf",
"(",
"expr",
",",
"simplify",
"=",
"False",
")",
":",
"expr",
"=",
"sympify",
"(",
"expr",
")",
"if",
"(",
"not",
"isinstance",
"(",
"expr",
",",
"BooleanFunction",
")",
")",
":",
"return",
"expr",
"if",
"simplify",
":",
"return",
"simplify_logic",
"(",
"expr",
",",
"'dnf'",
",",
"True",
")",
"if",
"is_dnf",
"(",
"expr",
")",
":",
"return",
"expr",
"expr",
"=",
"eliminate_implications",
"(",
"expr",
")",
"return",
"distribute_or_over_and",
"(",
"expr",
")"
] |
convert a propositional logical sentence s to disjunctive normal form .
|
train
| false
|
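Example usage, matching the examples in sympy's own documentation for to_dnf:

from sympy.logic.boolalg import to_dnf
from sympy.abc import A, B, C
print(to_dnf(B & (A | C)))                                    # (A & B) | (B & C)
print(to_dnf((A & B) | (A & ~B) | (B & C) | (~B & C), True))  # A | C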
9,505
|
def parse_dos_time(stamp):
(sec, stamp) = ((stamp & 31), (stamp >> 5))
(mn, stamp) = ((stamp & 63), (stamp >> 6))
(hr, stamp) = ((stamp & 31), (stamp >> 5))
(day, stamp) = ((stamp & 31), (stamp >> 5))
(mon, stamp) = ((stamp & 15), (stamp >> 4))
yr = ((stamp & 127) + 1980)
return (yr, mon, day, hr, mn, (sec * 2))
|
[
"def",
"parse_dos_time",
"(",
"stamp",
")",
":",
"(",
"sec",
",",
"stamp",
")",
"=",
"(",
"(",
"stamp",
"&",
"31",
")",
",",
"(",
"stamp",
">>",
"5",
")",
")",
"(",
"mn",
",",
"stamp",
")",
"=",
"(",
"(",
"stamp",
"&",
"63",
")",
",",
"(",
"stamp",
">>",
"6",
")",
")",
"(",
"hr",
",",
"stamp",
")",
"=",
"(",
"(",
"stamp",
"&",
"31",
")",
",",
"(",
"stamp",
">>",
"5",
")",
")",
"(",
"day",
",",
"stamp",
")",
"=",
"(",
"(",
"stamp",
"&",
"31",
")",
",",
"(",
"stamp",
">>",
"5",
")",
")",
"(",
"mon",
",",
"stamp",
")",
"=",
"(",
"(",
"stamp",
"&",
"15",
")",
",",
"(",
"stamp",
">>",
"4",
")",
")",
"yr",
"=",
"(",
"(",
"stamp",
"&",
"127",
")",
"+",
"1980",
")",
"return",
"(",
"yr",
",",
"mon",
",",
"day",
",",
"hr",
",",
"mn",
",",
"(",
"sec",
"*",
"2",
")",
")"
] |
parse standard 32-bit dos timestamp .
|
train
| true
|
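A round-trip sketch of the bit layout the snippet implies (16-bit DOS time word in the low half, date word in the high half); pack_dos_time is a hypothetical inverse written here for illustration:

def pack_dos_time(yr, mon, day, hr, mn, sec):
    date = ((yr - 1980) << 9) | (mon << 5) | day   # year/month/day in 7/4/5 bits
    time = (hr << 11) | (mn << 5) | (sec // 2)     # hour/minute/second in 5/6/5 bits
    return (date << 16) | time

print(parse_dos_time(pack_dos_time(2016, 3, 15, 13, 45, 30)))
# (2016, 3, 15, 13, 45, 30)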
9,506
|
def show_tag(repo, tag, decode, outstream=sys.stdout):
print_tag(tag, decode, outstream)
show_object(repo, repo[tag.object[1]], outstream)
|
[
"def",
"show_tag",
"(",
"repo",
",",
"tag",
",",
"decode",
",",
"outstream",
"=",
"sys",
".",
"stdout",
")",
":",
"print_tag",
"(",
"tag",
",",
"decode",
",",
"outstream",
")",
"show_object",
"(",
"repo",
",",
"repo",
"[",
"tag",
".",
"object",
"[",
"1",
"]",
"]",
",",
"outstream",
")"
] |
print a tag to a stream .
|
train
| false
|
9,507
|
@register.simple_tag
def language_name(code):
return escape(force_text(Language.objects.get(code=code)))
|
[
"@",
"register",
".",
"simple_tag",
"def",
"language_name",
"(",
"code",
")",
":",
"return",
"escape",
"(",
"force_text",
"(",
"Language",
".",
"objects",
".",
"get",
"(",
"code",
"=",
"code",
")",
")",
")"
] |
gets language name based on its code .
|
train
| false
|
9,510
|
def generate_recommendation_data():
global recommendation_data
if recommendation_data:
return recommendation_data
tree = get_topic_nodes_with_children(parent='root')
topic_index = 0
subtopic_index = 0
for topic in tree:
subtopic_index = 0
for subtopic_id in topic['children']:
neighbors_dist_1 = get_neighbors_at_dist_1(topic_index, subtopic_index, topic)
recommendation_data[subtopic_id] = {'related_subtopics': ([(subtopic_id + ' 0')] + neighbors_dist_1)}
subtopic_index += 1
topic_index += 1
for subtopic in recommendation_data:
related = recommendation_data[subtopic]['related_subtopics']
other_neighbors = get_subsequent_neighbors(related, recommendation_data, subtopic)
recommendation_data[subtopic]['related_subtopics'] += other_neighbors
for subtopic in recommendation_data:
at_dist_4 = []
at_dist_lt_4 = []
for recc in recommendation_data[subtopic]['related_subtopics']:
if (recc.split(' ')[1] == '4'):
at_dist_4.append(recc.split(' ')[0])
else:
at_dist_lt_4.append(recc.split(' ')[0])
sorted_related = (at_dist_lt_4 + at_dist_4)
recommendation_data[subtopic]['related_subtopics'] = sorted_related
return recommendation_data
|
[
"def",
"generate_recommendation_data",
"(",
")",
":",
"global",
"recommendation_data",
"if",
"recommendation_data",
":",
"return",
"recommendation_data",
"tree",
"=",
"get_topic_nodes_with_children",
"(",
"parent",
"=",
"'root'",
")",
"topic_index",
"=",
"0",
"subtopic_index",
"=",
"0",
"for",
"topic",
"in",
"tree",
":",
"subtopic_index",
"=",
"0",
"for",
"subtopic_id",
"in",
"topic",
"[",
"'children'",
"]",
":",
"neighbors_dist_1",
"=",
"get_neighbors_at_dist_1",
"(",
"topic_index",
",",
"subtopic_index",
",",
"topic",
")",
"recommendation_data",
"[",
"subtopic_id",
"]",
"=",
"{",
"'related_subtopics'",
":",
"(",
"[",
"(",
"subtopic_id",
"+",
"' 0'",
")",
"]",
"+",
"neighbors_dist_1",
")",
"}",
"subtopic_index",
"+=",
"1",
"topic_index",
"+=",
"1",
"for",
"subtopic",
"in",
"recommendation_data",
":",
"related",
"=",
"recommendation_data",
"[",
"subtopic",
"]",
"[",
"'related_subtopics'",
"]",
"other_neighbors",
"=",
"get_subsequent_neighbors",
"(",
"related",
",",
"recommendation_data",
",",
"subtopic",
")",
"recommendation_data",
"[",
"subtopic",
"]",
"[",
"'related_subtopics'",
"]",
"+=",
"other_neighbors",
"for",
"subtopic",
"in",
"recommendation_data",
":",
"at_dist_4",
"=",
"[",
"]",
"at_dist_lt_4",
"=",
"[",
"]",
"for",
"recc",
"in",
"recommendation_data",
"[",
"subtopic",
"]",
"[",
"'related_subtopics'",
"]",
":",
"if",
"(",
"recc",
".",
"split",
"(",
"' '",
")",
"[",
"1",
"]",
"==",
"'4'",
")",
":",
"at_dist_4",
".",
"append",
"(",
"recc",
".",
"split",
"(",
"' '",
")",
"[",
"0",
"]",
")",
"else",
":",
"at_dist_lt_4",
".",
"append",
"(",
"recc",
".",
"split",
"(",
"' '",
")",
"[",
"0",
"]",
")",
"sorted_related",
"=",
"(",
"at_dist_lt_4",
"+",
"at_dist_4",
")",
"recommendation_data",
"[",
"subtopic",
"]",
"[",
"'related_subtopics'",
"]",
"=",
"sorted_related",
"return",
"recommendation_data"
] |
traverses topic tree to generate a dictionary with related subtopics per subtopic .
|
train
| false
|
9,511
|
def create_welcome_forum():
if (User.query.count() < 1):
return False
user = User.query.filter_by(id=1).first()
category = Category(title=u'My Category', position=1)
category.save()
forum = Forum(title=u'Welcome', description=u'Your first forum', category_id=category.id)
forum.save()
topic = Topic(title=u'Welcome!')
post = Post(content=u'Have fun with your new FlaskBB Forum!')
topic.save(user=user, forum=forum, post=post)
return True
|
[
"def",
"create_welcome_forum",
"(",
")",
":",
"if",
"(",
"User",
".",
"query",
".",
"count",
"(",
")",
"<",
"1",
")",
":",
"return",
"False",
"user",
"=",
"User",
".",
"query",
".",
"filter_by",
"(",
"id",
"=",
"1",
")",
".",
"first",
"(",
")",
"category",
"=",
"Category",
"(",
"title",
"=",
"u'My Category'",
",",
"position",
"=",
"1",
")",
"category",
".",
"save",
"(",
")",
"forum",
"=",
"Forum",
"(",
"title",
"=",
"u'Welcome'",
",",
"description",
"=",
"u'Your first forum'",
",",
"category_id",
"=",
"category",
".",
"id",
")",
"forum",
".",
"save",
"(",
")",
"topic",
"=",
"Topic",
"(",
"title",
"=",
"u'Welcome!'",
")",
"post",
"=",
"Post",
"(",
"content",
"=",
"u'Have fun with your new FlaskBB Forum!'",
")",
"topic",
".",
"save",
"(",
"user",
"=",
"user",
",",
"forum",
"=",
"forum",
",",
"post",
"=",
"post",
")",
"return",
"True"
] |
this will create the welcome forum with a welcome topic .
|
train
| false
|
9,512
|
def AbsoluteNode(node):
if node.attributes:
for (name, value) in node.attributes.items():
if (name in ['InheritedPropertySheets', 'RelativePath', 'AdditionalIncludeDirectories', 'IntermediateDirectory', 'OutputDirectory', 'AdditionalLibraryDirectories']):
path_list = value.split(';')
new_list = FixFilenames(path_list, os.path.dirname(ARGUMENTS[1]))
node.setAttribute(name, ';'.join(new_list))
if (not value):
node.removeAttribute(name)
|
[
"def",
"AbsoluteNode",
"(",
"node",
")",
":",
"if",
"node",
".",
"attributes",
":",
"for",
"(",
"name",
",",
"value",
")",
"in",
"node",
".",
"attributes",
".",
"items",
"(",
")",
":",
"if",
"(",
"name",
"in",
"[",
"'InheritedPropertySheets'",
",",
"'RelativePath'",
",",
"'AdditionalIncludeDirectories'",
",",
"'IntermediateDirectory'",
",",
"'OutputDirectory'",
",",
"'AdditionalLibraryDirectories'",
"]",
")",
":",
"path_list",
"=",
"value",
".",
"split",
"(",
"';'",
")",
"new_list",
"=",
"FixFilenames",
"(",
"path_list",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"ARGUMENTS",
"[",
"1",
"]",
")",
")",
"node",
".",
"setAttribute",
"(",
"name",
",",
"';'",
".",
"join",
"(",
"new_list",
")",
")",
"if",
"(",
"not",
"value",
")",
":",
"node",
".",
"removeAttribute",
"(",
"name",
")"
] |
makes all the properties we know about in this node absolute .
|
train
| false
|
9,513
|
@library.global_function
def private_message(user):
url = urlparams(reverse('messages.new'), to=user.username)
msg = _('Private message')
return Markup(u'<p class="pm"><a href="{url}">{msg}</a></p>'.format(url=url, msg=msg))
|
[
"@",
"library",
".",
"global_function",
"def",
"private_message",
"(",
"user",
")",
":",
"url",
"=",
"urlparams",
"(",
"reverse",
"(",
"'messages.new'",
")",
",",
"to",
"=",
"user",
".",
"username",
")",
"msg",
"=",
"_",
"(",
"'Private message'",
")",
"return",
"Markup",
"(",
"u'<p class=\"pm\"><a href=\"{url}\">{msg}</a></p>'",
".",
"format",
"(",
"url",
"=",
"url",
",",
"msg",
"=",
"msg",
")",
")"
] |
return a link to private message the user .
|
train
| false
|
9,515
|
def test_local_columns_flag(script, data):
script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
result = script.pip('list', '--local', '--format=columns')
assert ('Package' in result.stdout)
assert ('Version' in result.stdout)
assert ('simple (1.0)' not in result.stdout)
assert ('simple 1.0' in result.stdout), str(result)
|
[
"def",
"test_local_columns_flag",
"(",
"script",
",",
"data",
")",
":",
"script",
".",
"pip",
"(",
"'install'",
",",
"'-f'",
",",
"data",
".",
"find_links",
",",
"'--no-index'",
",",
"'simple==1.0'",
")",
"result",
"=",
"script",
".",
"pip",
"(",
"'list'",
",",
"'--local'",
",",
"'--format=columns'",
")",
"assert",
"(",
"'Package'",
"in",
"result",
".",
"stdout",
")",
"assert",
"(",
"'Version'",
"in",
"result",
".",
"stdout",
")",
"assert",
"(",
"'simple (1.0)'",
"not",
"in",
"result",
".",
"stdout",
")",
"assert",
"(",
"'simple 1.0'",
"in",
"result",
".",
"stdout",
")",
",",
"str",
"(",
"result",
")"
] |
test the behavior of --local --format=columns flags in the list command .
|
train
| false
|
9,517
|
def _op_maker(op_class, op_symbol):
def f(self, node, *args, **kwargs):
'Return a partial function with an Op subclass with an operator\n already passed.\n\n Returns\n -------\n f : callable\n '
return partial(op_class, op_symbol, *args, **kwargs)
return f
|
[
"def",
"_op_maker",
"(",
"op_class",
",",
"op_symbol",
")",
":",
"def",
"f",
"(",
"self",
",",
"node",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"return",
"partial",
"(",
"op_class",
",",
"op_symbol",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
"return",
"f"
] |
return a function to create an op class with its symbol already passed .
|
train
| true
|
9,518
|
def enforce_le_validity(domain):
domain = enforce_domain_sanity(domain)
if (not re.match('^[A-Za-z0-9.-]*$', domain)):
raise errors.ConfigurationError('{0} contains an invalid character. Valid characters are A-Z, a-z, 0-9, ., and -.'.format(domain))
labels = domain.split('.')
if (len(labels) < 2):
raise errors.ConfigurationError('{0} needs at least two labels'.format(domain))
for label in labels:
if label.startswith('-'):
raise errors.ConfigurationError('label "{0}" in domain "{1}" cannot start with "-"'.format(label, domain))
if label.endswith('-'):
raise errors.ConfigurationError('label "{0}" in domain "{1}" cannot end with "-"'.format(label, domain))
return domain
|
[
"def",
"enforce_le_validity",
"(",
"domain",
")",
":",
"domain",
"=",
"enforce_domain_sanity",
"(",
"domain",
")",
"if",
"(",
"not",
"re",
".",
"match",
"(",
"'^[A-Za-z0-9.-]*$'",
",",
"domain",
")",
")",
":",
"raise",
"errors",
".",
"ConfigurationError",
"(",
"'{0} contains an invalid character. Valid characters are A-Z, a-z, 0-9, ., and -.'",
".",
"format",
"(",
"domain",
")",
")",
"labels",
"=",
"domain",
".",
"split",
"(",
"'.'",
")",
"if",
"(",
"len",
"(",
"labels",
")",
"<",
"2",
")",
":",
"raise",
"errors",
".",
"ConfigurationError",
"(",
"'{0} needs at least two labels'",
".",
"format",
"(",
"domain",
")",
")",
"for",
"label",
"in",
"labels",
":",
"if",
"label",
".",
"startswith",
"(",
"'-'",
")",
":",
"raise",
"errors",
".",
"ConfigurationError",
"(",
"'label \"{0}\" in domain \"{1}\" cannot start with \"-\"'",
".",
"format",
"(",
"label",
",",
"domain",
")",
")",
"if",
"label",
".",
"endswith",
"(",
"'-'",
")",
":",
"raise",
"errors",
".",
"ConfigurationError",
"(",
"'label \"{0}\" in domain \"{1}\" cannot end with \"-\"'",
".",
"format",
"(",
"label",
",",
"domain",
")",
")",
"return",
"domain"
] |
checks that lets encrypt will consider domain to be valid .
|
train
| false
|
9,519
|
def social_auth_backends(request):
def context_value():
return backends_data(request.user)
return {'social_auth': LazyDict(context_value)}
|
[
"def",
"social_auth_backends",
"(",
"request",
")",
":",
"def",
"context_value",
"(",
")",
":",
"return",
"backends_data",
"(",
"request",
".",
"user",
")",
"return",
"{",
"'social_auth'",
":",
"LazyDict",
"(",
"context_value",
")",
"}"
] |
load social auth current user data to context .
|
train
| false
|
9,522
|
def set_default_retry_params(retry_params):
_thread_local_settings.default_retry_params = copy.copy(retry_params)
|
[
"def",
"set_default_retry_params",
"(",
"retry_params",
")",
":",
"_thread_local_settings",
".",
"default_retry_params",
"=",
"copy",
".",
"copy",
"(",
"retry_params",
")"
] |
set a default retryparams for current thread current request .
|
train
| false
|
9,524
|
def p_struct_declaration(t):
pass
|
[
"def",
"p_struct_declaration",
"(",
"t",
")",
":",
"pass"
] |
struct_declaration : specifier_qualifier_list struct_declarator_list semi .
|
train
| false
|
9,525
|
def refresh_db(full=False):
if full:
return (__salt__['cmd.retcode']('/bin/pkg refresh --full') == 0)
else:
return (__salt__['cmd.retcode']('/bin/pkg refresh') == 0)
|
[
"def",
"refresh_db",
"(",
"full",
"=",
"False",
")",
":",
"if",
"full",
":",
"return",
"(",
"__salt__",
"[",
"'cmd.retcode'",
"]",
"(",
"'/bin/pkg refresh --full'",
")",
"==",
"0",
")",
"else",
":",
"return",
"(",
"__salt__",
"[",
"'cmd.retcode'",
"]",
"(",
"'/bin/pkg refresh'",
")",
"==",
"0",
")"
] |
refresh packagesite contents .
|
train
| false
|
9,526
|
def get_upload_form(user, translation, *args):
project = translation.subproject.project
if can_author_translation(user, project):
form = ExtraUploadForm
elif can_overwrite_translation(user, project):
form = UploadForm
else:
form = SimpleUploadForm
result = form(*args)
if (not can_translate(user, translation)):
result.remove_translation_choice(u'translate')
result.remove_translation_choice(u'fuzzy')
if (not can_suggest(user, translation)):
result.remove_translation_choice(u'suggest')
return result
|
[
"def",
"get_upload_form",
"(",
"user",
",",
"translation",
",",
"*",
"args",
")",
":",
"project",
"=",
"translation",
".",
"subproject",
".",
"project",
"if",
"can_author_translation",
"(",
"user",
",",
"project",
")",
":",
"form",
"=",
"ExtraUploadForm",
"elif",
"can_overwrite_translation",
"(",
"user",
",",
"project",
")",
":",
"form",
"=",
"UploadForm",
"else",
":",
"form",
"=",
"SimpleUploadForm",
"result",
"=",
"form",
"(",
"*",
"args",
")",
"if",
"(",
"not",
"can_translate",
"(",
"user",
",",
"translation",
")",
")",
":",
"result",
".",
"remove_translation_choice",
"(",
"u'translate'",
")",
"result",
".",
"remove_translation_choice",
"(",
"u'fuzzy'",
")",
"if",
"(",
"not",
"can_suggest",
"(",
"user",
",",
"translation",
")",
")",
":",
"result",
".",
"remove_translation_choice",
"(",
"u'suggest'",
")",
"return",
"result"
] |
returns correct upload form based on user permissions .
|
train
| false
|
9,528
|
def required_modules_error(name, docstring):
modules = required_module_list(docstring)
if (not modules):
return ''
filename = os.path.basename(name).split('.')[0]
msg = "'{0}' requires these python modules: {1}"
return msg.format(filename, ', '.join(modules))
|
[
"def",
"required_modules_error",
"(",
"name",
",",
"docstring",
")",
":",
"modules",
"=",
"required_module_list",
"(",
"docstring",
")",
"if",
"(",
"not",
"modules",
")",
":",
"return",
"''",
"filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"name",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"msg",
"=",
"\"'{0}' requires these python modules: {1}\"",
"return",
"msg",
".",
"format",
"(",
"filename",
",",
"', '",
".",
"join",
"(",
"modules",
")",
")"
] |
pretty print error messages in critical salt modules which are missing deps not always in stdlib such as win32api on windows .
|
train
| false
|
9,529
|
def get_all_perms(role):
perms = frappe.get_all(u'DocPerm', fields=u'*', filters=dict(role=role))
custom_perms = frappe.get_all(u'Custom DocPerm', fields=u'*', filters=dict(role=role))
doctypes_with_custom_perms = list(set((p.parent for p in custom_perms)))
for p in perms:
if (p.parent not in doctypes_with_custom_perms):
custom_perms.append(p)
    return custom_perms
|
[
"def",
"get_all_perms",
"(",
"role",
")",
":",
"perms",
"=",
"frappe",
".",
"get_all",
"(",
"u'DocPerm'",
",",
"fields",
"=",
"u'*'",
",",
"filters",
"=",
"dict",
"(",
"role",
"=",
"role",
")",
")",
"custom_perms",
"=",
"frappe",
".",
"get_all",
"(",
"u'Custom DocPerm'",
",",
"fields",
"=",
"u'*'",
",",
"filters",
"=",
"dict",
"(",
"role",
"=",
"role",
")",
")",
"doctypes_with_custom_perms",
"=",
"list",
"(",
"set",
"(",
"(",
"p",
".",
"parent",
"for",
"p",
"in",
"custom_perms",
")",
")",
")",
"for",
"p",
"in",
"perms",
":",
"if",
"(",
"p",
".",
"parent",
"not",
"in",
"doctypes_with_custom_perms",
")",
":",
"custom_perms",
".",
"append",
"(",
"p",
")",
"return",
"p"
] |
returns valid permissions for a given role .
|
train
| false
|
9,530
|
def eat_code_caves(flItms, caveone, cavetwo):
try:
if (flItms['CavesPicked'][cavetwo][0] == flItms['CavesPicked'][caveone][0]):
return (int(flItms['CavesPicked'][cavetwo][1], 16) - int(flItms['CavesPicked'][caveone][1], 16))
else:
caveone_found = False
cavetwo_found = False
for section in flItms['Sections']:
if ((flItms['CavesPicked'][caveone][0] == section[0]) and (caveone_found is False)):
rva_one = ((int(flItms['CavesPicked'][caveone][1], 16) - int(flItms['CavesPicked'][caveone][4], 16)) + flItms['CavesPicked'][caveone][8])
caveone_found = True
if ((flItms['CavesPicked'][cavetwo][0] == section[0]) and (cavetwo_found is False)):
rva_two = ((int(flItms['CavesPicked'][cavetwo][1], 16) - int(flItms['CavesPicked'][cavetwo][4], 16)) + flItms['CavesPicked'][cavetwo][8])
cavetwo_found = True
if ((caveone_found is True) and (cavetwo_found is True)):
if (flItms['CavesPicked'][caveone][1] < flItms['CavesPicked'][cavetwo][1]):
return (- (rva_one - rva_two))
else:
return (rva_two - rva_one)
except Exception:
return 0
|
[
"def",
"eat_code_caves",
"(",
"flItms",
",",
"caveone",
",",
"cavetwo",
")",
":",
"try",
":",
"if",
"(",
"flItms",
"[",
"'CavesPicked'",
"]",
"[",
"cavetwo",
"]",
"[",
"0",
"]",
"==",
"flItms",
"[",
"'CavesPicked'",
"]",
"[",
"caveone",
"]",
"[",
"0",
"]",
")",
":",
"return",
"(",
"int",
"(",
"flItms",
"[",
"'CavesPicked'",
"]",
"[",
"cavetwo",
"]",
"[",
"1",
"]",
",",
"16",
")",
"-",
"int",
"(",
"flItms",
"[",
"'CavesPicked'",
"]",
"[",
"caveone",
"]",
"[",
"1",
"]",
",",
"16",
")",
")",
"else",
":",
"caveone_found",
"=",
"False",
"cavetwo_found",
"=",
"False",
"for",
"section",
"in",
"flItms",
"[",
"'Sections'",
"]",
":",
"if",
"(",
"(",
"flItms",
"[",
"'CavesPicked'",
"]",
"[",
"caveone",
"]",
"[",
"0",
"]",
"==",
"section",
"[",
"0",
"]",
")",
"and",
"(",
"caveone_found",
"is",
"False",
")",
")",
":",
"rva_one",
"=",
"(",
"(",
"int",
"(",
"flItms",
"[",
"'CavesPicked'",
"]",
"[",
"caveone",
"]",
"[",
"1",
"]",
",",
"16",
")",
"-",
"int",
"(",
"flItms",
"[",
"'CavesPicked'",
"]",
"[",
"caveone",
"]",
"[",
"4",
"]",
",",
"16",
")",
")",
"+",
"flItms",
"[",
"'CavesPicked'",
"]",
"[",
"caveone",
"]",
"[",
"8",
"]",
")",
"caveone_found",
"=",
"True",
"if",
"(",
"(",
"flItms",
"[",
"'CavesPicked'",
"]",
"[",
"cavetwo",
"]",
"[",
"0",
"]",
"==",
"section",
"[",
"0",
"]",
")",
"and",
"(",
"cavetwo_found",
"is",
"False",
")",
")",
":",
"rva_two",
"=",
"(",
"(",
"int",
"(",
"flItms",
"[",
"'CavesPicked'",
"]",
"[",
"cavetwo",
"]",
"[",
"1",
"]",
",",
"16",
")",
"-",
"int",
"(",
"flItms",
"[",
"'CavesPicked'",
"]",
"[",
"cavetwo",
"]",
"[",
"4",
"]",
",",
"16",
")",
")",
"+",
"flItms",
"[",
"'CavesPicked'",
"]",
"[",
"cavetwo",
"]",
"[",
"8",
"]",
")",
"cavetwo_found",
"=",
"True",
"if",
"(",
"(",
"caveone_found",
"is",
"True",
")",
"and",
"(",
"cavetwo_found",
"is",
"True",
")",
")",
":",
"if",
"(",
"flItms",
"[",
"'CavesPicked'",
"]",
"[",
"caveone",
"]",
"[",
"1",
"]",
"<",
"flItms",
"[",
"'CavesPicked'",
"]",
"[",
"cavetwo",
"]",
"[",
"1",
"]",
")",
":",
"return",
"(",
"-",
"(",
"rva_one",
"-",
"rva_two",
")",
")",
"else",
":",
"return",
"(",
"rva_two",
"-",
"rva_one",
")",
"except",
"Exception",
":",
"return",
"0"
] |
return the difference between caves rva positions .
|
train
| false
|
9,531
|
def _compute_diff_ret():
return {'add': [], 'update': [], 'remove': []}
|
[
"def",
"_compute_diff_ret",
"(",
")",
":",
"return",
"{",
"'add'",
":",
"[",
"]",
",",
"'update'",
":",
"[",
"]",
",",
"'remove'",
":",
"[",
"]",
"}"
] |
default dictionary returned by the _compute_diff helper .
|
train
| false
|
9,532
|
def _evalcode_python(executor, code, input_type):
global_dict = gdb.parse_and_eval('PyEval_GetGlobals()')
local_dict = gdb.parse_and_eval('PyEval_GetLocals()')
if ((pointervalue(global_dict) == 0) or (pointervalue(local_dict) == 0)):
raise gdb.GdbError('Unable to find the locals or globals of the most recent Python function (relative to the selected frame).')
return executor.evalcode(code, input_type, global_dict, local_dict)
|
[
"def",
"_evalcode_python",
"(",
"executor",
",",
"code",
",",
"input_type",
")",
":",
"global_dict",
"=",
"gdb",
".",
"parse_and_eval",
"(",
"'PyEval_GetGlobals()'",
")",
"local_dict",
"=",
"gdb",
".",
"parse_and_eval",
"(",
"'PyEval_GetLocals()'",
")",
"if",
"(",
"(",
"pointervalue",
"(",
"global_dict",
")",
"==",
"0",
")",
"or",
"(",
"pointervalue",
"(",
"local_dict",
")",
"==",
"0",
")",
")",
":",
"raise",
"gdb",
".",
"GdbError",
"(",
"'Unable to find the locals or globals of the most recent Python function (relative to the selected frame).'",
")",
"return",
"executor",
".",
"evalcode",
"(",
"code",
",",
"input_type",
",",
"global_dict",
",",
"local_dict",
")"
] |
execute python code in the most recent stack frame .
|
train
| false
|
9,535
|
def test_colormap_endian():
cmap = cm.get_cmap(u'jet')
a = [(-0.5), 0, 0.5, 1, 1.5, np.nan]
for dt in [u'f2', u'f4', u'f8']:
anative = np.ma.masked_invalid(np.array(a, dtype=dt))
aforeign = anative.byteswap().newbyteorder()
assert_array_equal(cmap(anative), cmap(aforeign))
|
[
"def",
"test_colormap_endian",
"(",
")",
":",
"cmap",
"=",
"cm",
".",
"get_cmap",
"(",
"u'jet'",
")",
"a",
"=",
"[",
"(",
"-",
"0.5",
")",
",",
"0",
",",
"0.5",
",",
"1",
",",
"1.5",
",",
"np",
".",
"nan",
"]",
"for",
"dt",
"in",
"[",
"u'f2'",
",",
"u'f4'",
",",
"u'f8'",
"]",
":",
"anative",
"=",
"np",
".",
"ma",
".",
"masked_invalid",
"(",
"np",
".",
"array",
"(",
"a",
",",
"dtype",
"=",
"dt",
")",
")",
"aforeign",
"=",
"anative",
".",
"byteswap",
"(",
")",
".",
"newbyteorder",
"(",
")",
"assert_array_equal",
"(",
"cmap",
"(",
"anative",
")",
",",
"cmap",
"(",
"aforeign",
")",
")"
] |
github issue #1005: a bug in putmask caused erroneous mapping of 1.0 when input comes from a non-native-byteorder array .
|
train
| false
|
9,536
|
def test_spherical_conversions():
(az, pol) = np.meshgrid(np.linspace(0, (2 * np.pi), 30), np.linspace(0, np.pi, 20))
for degree in range(1, int_order):
for order in range(0, (degree + 1)):
sph = _get_sph_harm()(order, degree, az, pol)
assert_allclose(_sh_negate(sph, order), _get_sph_harm()((- order), degree, az, pol))
sph_real_pos = _sh_complex_to_real(sph, order)
sph_real_neg = _sh_complex_to_real(sph, (- order))
sph_2 = _sh_real_to_complex([sph_real_pos, sph_real_neg], order)
assert_allclose(sph, sph_2, atol=1e-07)
|
[
"def",
"test_spherical_conversions",
"(",
")",
":",
"(",
"az",
",",
"pol",
")",
"=",
"np",
".",
"meshgrid",
"(",
"np",
".",
"linspace",
"(",
"0",
",",
"(",
"2",
"*",
"np",
".",
"pi",
")",
",",
"30",
")",
",",
"np",
".",
"linspace",
"(",
"0",
",",
"np",
".",
"pi",
",",
"20",
")",
")",
"for",
"degree",
"in",
"range",
"(",
"1",
",",
"int_order",
")",
":",
"for",
"order",
"in",
"range",
"(",
"0",
",",
"(",
"degree",
"+",
"1",
")",
")",
":",
"sph",
"=",
"_get_sph_harm",
"(",
")",
"(",
"order",
",",
"degree",
",",
"az",
",",
"pol",
")",
"assert_allclose",
"(",
"_sh_negate",
"(",
"sph",
",",
"order",
")",
",",
"_get_sph_harm",
"(",
")",
"(",
"(",
"-",
"order",
")",
",",
"degree",
",",
"az",
",",
"pol",
")",
")",
"sph_real_pos",
"=",
"_sh_complex_to_real",
"(",
"sph",
",",
"order",
")",
"sph_real_neg",
"=",
"_sh_complex_to_real",
"(",
"sph",
",",
"(",
"-",
"order",
")",
")",
"sph_2",
"=",
"_sh_real_to_complex",
"(",
"[",
"sph_real_pos",
",",
"sph_real_neg",
"]",
",",
"order",
")",
"assert_allclose",
"(",
"sph",
",",
"sph_2",
",",
"atol",
"=",
"1e-07",
")"
] |
test spherical harmonic conversions .
|
train
| false
|
9,537
|
def OSXInstalledFonts(directory=None, fontext='ttf'):
if (directory is None):
directory = OSXFontDirectory()
fontext = get_fontext_synonyms(fontext)
files = []
for path in directory:
if (fontext is None):
files.extend(glob.glob(os.path.join(path, '*')))
else:
for ext in fontext:
files.extend(glob.glob(os.path.join(path, ('*.' + ext))))
files.extend(glob.glob(os.path.join(path, ('*.' + ext.upper()))))
return files
|
[
"def",
"OSXInstalledFonts",
"(",
"directory",
"=",
"None",
",",
"fontext",
"=",
"'ttf'",
")",
":",
"if",
"(",
"directory",
"is",
"None",
")",
":",
"directory",
"=",
"OSXFontDirectory",
"(",
")",
"fontext",
"=",
"get_fontext_synonyms",
"(",
"fontext",
")",
"files",
"=",
"[",
"]",
"for",
"path",
"in",
"directory",
":",
"if",
"(",
"fontext",
"is",
"None",
")",
":",
"files",
".",
"extend",
"(",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'*'",
")",
")",
")",
"else",
":",
"for",
"ext",
"in",
"fontext",
":",
"files",
".",
"extend",
"(",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"(",
"'*.'",
"+",
"ext",
")",
")",
")",
")",
"files",
".",
"extend",
"(",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"(",
"'*.'",
"+",
"ext",
".",
"upper",
"(",
")",
")",
")",
")",
")",
"return",
"files"
] |
get list of font files on os x - ignores font suffix by default .
|
train
| false
|
9,538
|
@with_session
def upgrade_required(session=None):
old_schemas = session.query(PluginSchema).all()
if (len(old_schemas) < len(plugin_schemas)):
return True
for old_schema in old_schemas:
if ((old_schema.plugin in plugin_schemas) and (old_schema.version < plugin_schemas[old_schema.plugin][u'version'])):
return True
return False
|
[
"@",
"with_session",
"def",
"upgrade_required",
"(",
"session",
"=",
"None",
")",
":",
"old_schemas",
"=",
"session",
".",
"query",
"(",
"PluginSchema",
")",
".",
"all",
"(",
")",
"if",
"(",
"len",
"(",
"old_schemas",
")",
"<",
"len",
"(",
"plugin_schemas",
")",
")",
":",
"return",
"True",
"for",
"old_schema",
"in",
"old_schemas",
":",
"if",
"(",
"(",
"old_schema",
".",
"plugin",
"in",
"plugin_schemas",
")",
"and",
"(",
"old_schema",
".",
"version",
"<",
"plugin_schemas",
"[",
"old_schema",
".",
"plugin",
"]",
"[",
"u'version'",
"]",
")",
")",
":",
"return",
"True",
"return",
"False"
] |
returns true if an upgrade of the database is required .
|
train
| false
|
9,541
|
def _start_date():
year_ago = (date.today() - timedelta(days=365))
return date(year_ago.year, year_ago.month, 1)
|
[
"def",
"_start_date",
"(",
")",
":",
"year_ago",
"=",
"(",
"date",
".",
"today",
"(",
")",
"-",
"timedelta",
"(",
"days",
"=",
"365",
")",
")",
"return",
"date",
"(",
"year_ago",
".",
"year",
",",
"year_ago",
".",
"month",
",",
"1",
")"
] |
the date from which we start querying monthly data .
|
train
| false
|
9,542
|
def _untranslate_volume_summary_view(context, vol):
d = {}
d['id'] = vol.id
d['status'] = vol.status
d['size'] = vol.size
d['availability_zone'] = vol.availability_zone
d['created_at'] = vol.created_at
d['attach_time'] = ''
d['mountpoint'] = ''
if vol.attachments:
att = vol.attachments[0]
d['attach_status'] = 'attached'
d['instance_uuid'] = att['server_id']
d['mountpoint'] = att['device']
else:
d['attach_status'] = 'detached'
d['display_name'] = vol.display_name
d['display_description'] = vol.display_description
d['volume_type_id'] = vol.volume_type
d['snapshot_id'] = vol.snapshot_id
d['volume_metadata'] = []
for (key, value) in vol.metadata.items():
item = {}
item['key'] = key
item['value'] = value
d['volume_metadata'].append(item)
if hasattr(vol, 'volume_image_metadata'):
d['volume_image_metadata'] = copy.deepcopy(vol.volume_image_metadata)
return d
|
[
"def",
"_untranslate_volume_summary_view",
"(",
"context",
",",
"vol",
")",
":",
"d",
"=",
"{",
"}",
"d",
"[",
"'id'",
"]",
"=",
"vol",
".",
"id",
"d",
"[",
"'status'",
"]",
"=",
"vol",
".",
"status",
"d",
"[",
"'size'",
"]",
"=",
"vol",
".",
"size",
"d",
"[",
"'availability_zone'",
"]",
"=",
"vol",
".",
"availability_zone",
"d",
"[",
"'created_at'",
"]",
"=",
"vol",
".",
"created_at",
"d",
"[",
"'attach_time'",
"]",
"=",
"''",
"d",
"[",
"'mountpoint'",
"]",
"=",
"''",
"if",
"vol",
".",
"attachments",
":",
"att",
"=",
"vol",
".",
"attachments",
"[",
"0",
"]",
"d",
"[",
"'attach_status'",
"]",
"=",
"'attached'",
"d",
"[",
"'instance_uuid'",
"]",
"=",
"att",
"[",
"'server_id'",
"]",
"d",
"[",
"'mountpoint'",
"]",
"=",
"att",
"[",
"'device'",
"]",
"else",
":",
"d",
"[",
"'attach_status'",
"]",
"=",
"'detached'",
"d",
"[",
"'display_name'",
"]",
"=",
"vol",
".",
"display_name",
"d",
"[",
"'display_description'",
"]",
"=",
"vol",
".",
"display_description",
"d",
"[",
"'volume_type_id'",
"]",
"=",
"vol",
".",
"volume_type",
"d",
"[",
"'snapshot_id'",
"]",
"=",
"vol",
".",
"snapshot_id",
"d",
"[",
"'volume_metadata'",
"]",
"=",
"[",
"]",
"for",
"(",
"key",
",",
"value",
")",
"in",
"vol",
".",
"metadata",
".",
"items",
"(",
")",
":",
"item",
"=",
"{",
"}",
"item",
"[",
"'key'",
"]",
"=",
"key",
"item",
"[",
"'value'",
"]",
"=",
"value",
"d",
"[",
"'volume_metadata'",
"]",
".",
"append",
"(",
"item",
")",
"if",
"hasattr",
"(",
"vol",
",",
"'volume_image_metadata'",
")",
":",
"d",
"[",
"'volume_image_metadata'",
"]",
"=",
"copy",
".",
"deepcopy",
"(",
"vol",
".",
"volume_image_metadata",
")",
"return",
"d"
] |
maps keys for volumes summary view .
|
train
| false
|
9,543
|
def get_key_data():
if os.path.exists(ssh_file(RSA_KEY_PUB)):
with open(ssh_file(RSA_KEY_PUB)) as handle:
key_data = handle.read()
(key_type, key_fingerprint, key_id) = key_data.strip().split(None, 2)
return {u'key': key_data, u'type': key_type, u'fingerprint': key_fingerprint, u'id': key_id}
return None
|
[
"def",
"get_key_data",
"(",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"ssh_file",
"(",
"RSA_KEY_PUB",
")",
")",
":",
"with",
"open",
"(",
"ssh_file",
"(",
"RSA_KEY_PUB",
")",
")",
"as",
"handle",
":",
"key_data",
"=",
"handle",
".",
"read",
"(",
")",
"(",
"key_type",
",",
"key_fingerprint",
",",
"key_id",
")",
"=",
"key_data",
".",
"strip",
"(",
")",
".",
"split",
"(",
"None",
",",
"2",
")",
"return",
"{",
"u'key'",
":",
"key_data",
",",
"u'type'",
":",
"key_type",
",",
"u'fingerprint'",
":",
"key_fingerprint",
",",
"u'id'",
":",
"key_id",
"}",
"return",
"None"
] |
parses host key and returns it .
|
train
| false
|
9,548
|
def _comp_split_codons(hsp, seq_type, scodon_moves):
scodons = []
for idx in range(len(scodon_moves[seq_type])):
pair = scodon_moves[seq_type][idx]
if (not any(pair)):
continue
else:
assert (not all(pair))
(a, b) = pair
anchor_pair = hsp[('%s_ranges' % seq_type)][(idx // 2)]
strand = (1 if (hsp[('%s_strand' % seq_type)] >= 0) else (-1))
if a:
func = (max if (strand == 1) else min)
anchor = func(anchor_pair)
(start_c, end_c) = ((anchor + ((a * strand) * (-1))), anchor)
elif b:
func = (min if (strand == 1) else max)
anchor = func(anchor_pair)
(start_c, end_c) = ((anchor + (b * strand)), anchor)
scodons.append((min(start_c, end_c), max(start_c, end_c)))
return scodons
|
[
"def",
"_comp_split_codons",
"(",
"hsp",
",",
"seq_type",
",",
"scodon_moves",
")",
":",
"scodons",
"=",
"[",
"]",
"for",
"idx",
"in",
"range",
"(",
"len",
"(",
"scodon_moves",
"[",
"seq_type",
"]",
")",
")",
":",
"pair",
"=",
"scodon_moves",
"[",
"seq_type",
"]",
"[",
"idx",
"]",
"if",
"(",
"not",
"any",
"(",
"pair",
")",
")",
":",
"continue",
"else",
":",
"assert",
"(",
"not",
"all",
"(",
"pair",
")",
")",
"(",
"a",
",",
"b",
")",
"=",
"pair",
"anchor_pair",
"=",
"hsp",
"[",
"(",
"'%s_ranges'",
"%",
"seq_type",
")",
"]",
"[",
"(",
"idx",
"//",
"2",
")",
"]",
"strand",
"=",
"(",
"1",
"if",
"(",
"hsp",
"[",
"(",
"'%s_strand'",
"%",
"seq_type",
")",
"]",
">=",
"0",
")",
"else",
"(",
"-",
"1",
")",
")",
"if",
"a",
":",
"func",
"=",
"(",
"max",
"if",
"(",
"strand",
"==",
"1",
")",
"else",
"min",
")",
"anchor",
"=",
"func",
"(",
"anchor_pair",
")",
"(",
"start_c",
",",
"end_c",
")",
"=",
"(",
"(",
"anchor",
"+",
"(",
"(",
"a",
"*",
"strand",
")",
"*",
"(",
"-",
"1",
")",
")",
")",
",",
"anchor",
")",
"elif",
"b",
":",
"func",
"=",
"(",
"min",
"if",
"(",
"strand",
"==",
"1",
")",
"else",
"max",
")",
"anchor",
"=",
"func",
"(",
"anchor_pair",
")",
"(",
"start_c",
",",
"end_c",
")",
"=",
"(",
"(",
"anchor",
"+",
"(",
"b",
"*",
"strand",
")",
")",
",",
"anchor",
")",
"scodons",
".",
"append",
"(",
"(",
"min",
"(",
"start_c",
",",
"end_c",
")",
",",
"max",
"(",
"start_c",
",",
"end_c",
")",
")",
")",
"return",
"scodons"
] |
computes the positions of split codons and puts the values in the given hsp dictionary .
|
train
| false
|
9,549
|
def test_auto_scale():
raw = read_raw_fif(raw_fname)
epochs = Epochs(raw, read_events(ev_fname))
rand_data = np.random.randn(10, 100)
for inst in [raw, epochs]:
scale_grad = 10000000000.0
scalings_def = dict([('eeg', 'auto'), ('grad', scale_grad), ('stim', 'auto')])
assert_raises(ValueError, inst.plot, scalings='foo')
assert_raises(ValueError, _compute_scalings, 'foo', inst)
scalings_new = _compute_scalings(scalings_def, inst)
assert_true((scale_grad == scalings_new['grad']))
assert_true((scalings_new['eeg'] != 'auto'))
assert_raises(ValueError, _compute_scalings, scalings_def, rand_data)
epochs = epochs[0].load_data()
epochs.pick_types(eeg=True, meg=False)
assert_raises(ValueError, _compute_scalings, dict(grad='auto'), epochs)
|
[
"def",
"test_auto_scale",
"(",
")",
":",
"raw",
"=",
"read_raw_fif",
"(",
"raw_fname",
")",
"epochs",
"=",
"Epochs",
"(",
"raw",
",",
"read_events",
"(",
"ev_fname",
")",
")",
"rand_data",
"=",
"np",
".",
"random",
".",
"randn",
"(",
"10",
",",
"100",
")",
"for",
"inst",
"in",
"[",
"raw",
",",
"epochs",
"]",
":",
"scale_grad",
"=",
"10000000000.0",
"scalings_def",
"=",
"dict",
"(",
"[",
"(",
"'eeg'",
",",
"'auto'",
")",
",",
"(",
"'grad'",
",",
"scale_grad",
")",
",",
"(",
"'stim'",
",",
"'auto'",
")",
"]",
")",
"assert_raises",
"(",
"ValueError",
",",
"inst",
".",
"plot",
",",
"scalings",
"=",
"'foo'",
")",
"assert_raises",
"(",
"ValueError",
",",
"_compute_scalings",
",",
"'foo'",
",",
"inst",
")",
"scalings_new",
"=",
"_compute_scalings",
"(",
"scalings_def",
",",
"inst",
")",
"assert_true",
"(",
"(",
"scale_grad",
"==",
"scalings_new",
"[",
"'grad'",
"]",
")",
")",
"assert_true",
"(",
"(",
"scalings_new",
"[",
"'eeg'",
"]",
"!=",
"'auto'",
")",
")",
"assert_raises",
"(",
"ValueError",
",",
"_compute_scalings",
",",
"scalings_def",
",",
"rand_data",
")",
"epochs",
"=",
"epochs",
"[",
"0",
"]",
".",
"load_data",
"(",
")",
"epochs",
".",
"pick_types",
"(",
"eeg",
"=",
"True",
",",
"meg",
"=",
"False",
")",
"assert_raises",
"(",
"ValueError",
",",
"_compute_scalings",
",",
"dict",
"(",
"grad",
"=",
"'auto'",
")",
",",
"epochs",
")"
] |
test auto-scaling of channels for quick plotting .
|
train
| false
|
9,550
|
def can_enable_dynamic(link, value):
return isinstance(value, link.sink_type())
|
[
"def",
"can_enable_dynamic",
"(",
"link",
",",
"value",
")",
":",
"return",
"isinstance",
"(",
"value",
",",
"link",
".",
"sink_type",
"(",
")",
")"
] |
can a dynamic link be enabled for value .
|
train
| false
|
9,551
|
def zipline_magic(line, cell=None):
load_extensions(default=True, extensions=[], strict=True, environ=os.environ)
try:
return run.main(((['--algotext', cell, '--output', os.devnull] + (['--algotext', '', '--local-namespace'] if (cell is None) else [])) + line.split()), ('%s%%zipline' % ((cell or '') and '%')), standalone_mode=False)
except SystemExit as e:
if e.code:
raise ValueError(('main returned non-zero status code: %d' % e.code))
|
[
"def",
"zipline_magic",
"(",
"line",
",",
"cell",
"=",
"None",
")",
":",
"load_extensions",
"(",
"default",
"=",
"True",
",",
"extensions",
"=",
"[",
"]",
",",
"strict",
"=",
"True",
",",
"environ",
"=",
"os",
".",
"environ",
")",
"try",
":",
"return",
"run",
".",
"main",
"(",
"(",
"(",
"[",
"'--algotext'",
",",
"cell",
",",
"'--output'",
",",
"os",
".",
"devnull",
"]",
"+",
"(",
"[",
"'--algotext'",
",",
"''",
",",
"'--local-namespace'",
"]",
"if",
"(",
"cell",
"is",
"None",
")",
"else",
"[",
"]",
")",
")",
"+",
"line",
".",
"split",
"(",
")",
")",
",",
"(",
"'%s%%zipline'",
"%",
"(",
"(",
"cell",
"or",
"''",
")",
"and",
"'%'",
")",
")",
",",
"standalone_mode",
"=",
"False",
")",
"except",
"SystemExit",
"as",
"e",
":",
"if",
"e",
".",
"code",
":",
"raise",
"ValueError",
"(",
"(",
"'main returned non-zero status code: %d'",
"%",
"e",
".",
"code",
")",
")"
] |
the zipline ipython cell magic .
|
train
| true
|
9,553
|
def unhide(unit):
unit['hidden'] = False
for child in unit.get('children', ()):
unhide(child)
|
[
"def",
"unhide",
"(",
"unit",
")",
":",
"unit",
"[",
"'hidden'",
"]",
"=",
"False",
"for",
"child",
"in",
"unit",
".",
"get",
"(",
"'children'",
",",
"(",
")",
")",
":",
"unhide",
"(",
"child",
")"
] |
recursively unhide a unit and all of its children in the ccx schedule .
|
train
| false
|
9,554
|
@with_device
def disable_verity():
with log.waitfor(('Disabling dm-verity on %s' % context.device)) as w:
root()
with AdbClient() as c:
reply = c.disable_verity()
if ('Verity already disabled' in reply):
return
elif ('Now reboot your device' in reply):
reboot(wait=True)
elif ('0006closed' in reply):
return
else:
log.error(('Could not disable verity:\n%s' % reply))
|
[
"@",
"with_device",
"def",
"disable_verity",
"(",
")",
":",
"with",
"log",
".",
"waitfor",
"(",
"(",
"'Disabling dm-verity on %s'",
"%",
"context",
".",
"device",
")",
")",
"as",
"w",
":",
"root",
"(",
")",
"with",
"AdbClient",
"(",
")",
"as",
"c",
":",
"reply",
"=",
"c",
".",
"disable_verity",
"(",
")",
"if",
"(",
"'Verity already disabled'",
"in",
"reply",
")",
":",
"return",
"elif",
"(",
"'Now reboot your device'",
"in",
"reply",
")",
":",
"reboot",
"(",
"wait",
"=",
"True",
")",
"elif",
"(",
"'0006closed'",
"in",
"reply",
")",
":",
"return",
"else",
":",
"log",
".",
"error",
"(",
"(",
"'Could not disable verity:\\n%s'",
"%",
"reply",
")",
")"
] |
disables dm-verity on the device .
|
train
| false
|
9,555
|
def make_relative_path(source, dest, dest_is_directory=True):
source = os.path.dirname(source)
if (not dest_is_directory):
dest_filename = os.path.basename(dest)
dest = os.path.dirname(dest)
dest = os.path.normpath(os.path.abspath(dest))
source = os.path.normpath(os.path.abspath(source))
dest_parts = dest.strip(os.path.sep).split(os.path.sep)
source_parts = source.strip(os.path.sep).split(os.path.sep)
while (dest_parts and source_parts and (dest_parts[0] == source_parts[0])):
dest_parts.pop(0)
source_parts.pop(0)
full_parts = ((['..'] * len(source_parts)) + dest_parts)
if (not dest_is_directory):
full_parts.append(dest_filename)
if (not full_parts):
return './'
return os.path.sep.join(full_parts)
|
[
"def",
"make_relative_path",
"(",
"source",
",",
"dest",
",",
"dest_is_directory",
"=",
"True",
")",
":",
"source",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"source",
")",
"if",
"(",
"not",
"dest_is_directory",
")",
":",
"dest_filename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"dest",
")",
"dest",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"dest",
")",
"dest",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"dest",
")",
")",
"source",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"source",
")",
")",
"dest_parts",
"=",
"dest",
".",
"strip",
"(",
"os",
".",
"path",
".",
"sep",
")",
".",
"split",
"(",
"os",
".",
"path",
".",
"sep",
")",
"source_parts",
"=",
"source",
".",
"strip",
"(",
"os",
".",
"path",
".",
"sep",
")",
".",
"split",
"(",
"os",
".",
"path",
".",
"sep",
")",
"while",
"(",
"dest_parts",
"and",
"source_parts",
"and",
"(",
"dest_parts",
"[",
"0",
"]",
"==",
"source_parts",
"[",
"0",
"]",
")",
")",
":",
"dest_parts",
".",
"pop",
"(",
"0",
")",
"source_parts",
".",
"pop",
"(",
"0",
")",
"full_parts",
"=",
"(",
"(",
"[",
"'..'",
"]",
"*",
"len",
"(",
"source_parts",
")",
")",
"+",
"dest_parts",
")",
"if",
"(",
"not",
"dest_is_directory",
")",
":",
"full_parts",
".",
"append",
"(",
"dest_filename",
")",
"if",
"(",
"not",
"full_parts",
")",
":",
"return",
"'./'",
"return",
"os",
".",
"path",
".",
"sep",
".",
"join",
"(",
"full_parts",
")"
] |
make a filename relative .
|
train
| true
|
9,556
|
def _make_segment_dict(record):
return {'id': record.id, NETWORK_TYPE: record.network_type, PHYSICAL_NETWORK: record.physical_network, SEGMENTATION_ID: record.segmentation_id}
|
[
"def",
"_make_segment_dict",
"(",
"record",
")",
":",
"return",
"{",
"'id'",
":",
"record",
".",
"id",
",",
"NETWORK_TYPE",
":",
"record",
".",
"network_type",
",",
"PHYSICAL_NETWORK",
":",
"record",
".",
"physical_network",
",",
"SEGMENTATION_ID",
":",
"record",
".",
"segmentation_id",
"}"
] |
make a segment dictionary out of a db record .
|
train
| false
|
9,557
|
def skip_if_custom_user(test_func):
return skipIf((settings.AUTH_USER_MODEL != 'auth.User'), 'Custom user model in use')(test_func)
|
[
"def",
"skip_if_custom_user",
"(",
"test_func",
")",
":",
"return",
"skipIf",
"(",
"(",
"settings",
".",
"AUTH_USER_MODEL",
"!=",
"'auth.User'",
")",
",",
"'Custom user model in use'",
")",
"(",
"test_func",
")"
] |
skip a test if a custom user model is in use .
|
train
| false
|
9,558
|
def mark_boundaries(image, label_img, color=(1, 1, 0), outline_color=None, mode='outer', background_label=0):
marked = img_as_float(image, force_copy=True)
if (marked.ndim == 2):
marked = gray2rgb(marked)
if (mode == 'subpixel'):
marked = ndi.zoom(marked, ([(2 - (1 / s)) for s in marked.shape[:(-1)]] + [1]), mode='reflect')
boundaries = find_boundaries(label_img, mode=mode, background=background_label)
if (outline_color is not None):
outlines = dilation(boundaries, square(3))
marked[outlines] = outline_color
marked[boundaries] = color
return marked
|
[
"def",
"mark_boundaries",
"(",
"image",
",",
"label_img",
",",
"color",
"=",
"(",
"1",
",",
"1",
",",
"0",
")",
",",
"outline_color",
"=",
"None",
",",
"mode",
"=",
"'outer'",
",",
"background_label",
"=",
"0",
")",
":",
"marked",
"=",
"img_as_float",
"(",
"image",
",",
"force_copy",
"=",
"True",
")",
"if",
"(",
"marked",
".",
"ndim",
"==",
"2",
")",
":",
"marked",
"=",
"gray2rgb",
"(",
"marked",
")",
"if",
"(",
"mode",
"==",
"'subpixel'",
")",
":",
"marked",
"=",
"ndi",
".",
"zoom",
"(",
"marked",
",",
"(",
"[",
"(",
"2",
"-",
"(",
"1",
"/",
"s",
")",
")",
"for",
"s",
"in",
"marked",
".",
"shape",
"[",
":",
"(",
"-",
"1",
")",
"]",
"]",
"+",
"[",
"1",
"]",
")",
",",
"mode",
"=",
"'reflect'",
")",
"boundaries",
"=",
"find_boundaries",
"(",
"label_img",
",",
"mode",
"=",
"mode",
",",
"background",
"=",
"background_label",
")",
"if",
"(",
"outline_color",
"is",
"not",
"None",
")",
":",
"outlines",
"=",
"dilation",
"(",
"boundaries",
",",
"square",
"(",
"3",
")",
")",
"marked",
"[",
"outlines",
"]",
"=",
"outline_color",
"marked",
"[",
"boundaries",
"]",
"=",
"color",
"return",
"marked"
] |
return image with boundaries between labeled regions highlighted .
|
train
| false
|
9,559
|
def create_members_in_bulk(bulk_data, callback=None, precall=None, **additional_fields):
members = get_members_from_bulk(bulk_data, **additional_fields)
db.save_in_bulk(members, callback, precall)
return members
|
[
"def",
"create_members_in_bulk",
"(",
"bulk_data",
",",
"callback",
"=",
"None",
",",
"precall",
"=",
"None",
",",
"**",
"additional_fields",
")",
":",
"members",
"=",
"get_members_from_bulk",
"(",
"bulk_data",
",",
"**",
"additional_fields",
")",
"db",
".",
"save_in_bulk",
"(",
"members",
",",
"callback",
",",
"precall",
")",
"return",
"members"
] |
create members from bulk_data .
|
train
| false
|
9,560
|
def _count_supplied_tokens(args):
return sum((1 for arg in args if (not isinstance(arg, astroid.Keyword))))
|
[
"def",
"_count_supplied_tokens",
"(",
"args",
")",
":",
"return",
"sum",
"(",
"(",
"1",
"for",
"arg",
"in",
"args",
"if",
"(",
"not",
"isinstance",
"(",
"arg",
",",
"astroid",
".",
"Keyword",
")",
")",
")",
")"
] |
counts the number of tokens in an args list .
|
train
| false
|
9,561
|
@register.inclusion_tag('zinnia/tags/dummy.html', takes_context=True)
def get_tag_cloud(context, steps=6, min_count=None, template='zinnia/tags/tag_cloud.html'):
tags = Tag.objects.usage_for_queryset(Entry.published.all(), counts=True, min_count=min_count)
return {'template': template, 'tags': calculate_cloud(tags, steps), 'context_tag': context.get('tag')}
|
[
"@",
"register",
".",
"inclusion_tag",
"(",
"'zinnia/tags/dummy.html'",
",",
"takes_context",
"=",
"True",
")",
"def",
"get_tag_cloud",
"(",
"context",
",",
"steps",
"=",
"6",
",",
"min_count",
"=",
"None",
",",
"template",
"=",
"'zinnia/tags/tag_cloud.html'",
")",
":",
"tags",
"=",
"Tag",
".",
"objects",
".",
"usage_for_queryset",
"(",
"Entry",
".",
"published",
".",
"all",
"(",
")",
",",
"counts",
"=",
"True",
",",
"min_count",
"=",
"min_count",
")",
"return",
"{",
"'template'",
":",
"template",
",",
"'tags'",
":",
"calculate_cloud",
"(",
"tags",
",",
"steps",
")",
",",
"'context_tag'",
":",
"context",
".",
"get",
"(",
"'tag'",
")",
"}"
] |
return a cloud of published tags .
|
train
| true
|
9,562
|
def chugid_and_umask(runas, umask):
if (runas is not None):
chugid(runas)
if (umask is not None):
os.umask(umask)
|
[
"def",
"chugid_and_umask",
"(",
"runas",
",",
"umask",
")",
":",
"if",
"(",
"runas",
"is",
"not",
"None",
")",
":",
"chugid",
"(",
"runas",
")",
"if",
"(",
"umask",
"is",
"not",
"None",
")",
":",
"os",
".",
"umask",
"(",
"umask",
")"
] |
helper method for subprocess .
|
train
| false
|
9,564
|
def MakeError(name, message):
return JsToPyException(ERRORS[name](Js(message)))
|
[
"def",
"MakeError",
"(",
"name",
",",
"message",
")",
":",
"return",
"JsToPyException",
"(",
"ERRORS",
"[",
"name",
"]",
"(",
"Js",
"(",
"message",
")",
")",
")"
] |
returns pyjsexception with pyjserror inside .
|
train
| false
|
9,565
|
def parse_external_prop(lines):
externals = []
for line in lines.splitlines():
line = line.lstrip()
if (not line):
continue
if (sys.version_info < (3, 0)):
line = line.encode('utf-8')
line = shlex.split(line)
if (sys.version_info < (3, 0)):
line = [x.decode('utf-8') for x in line]
if urlparse.urlsplit(line[(-1)])[0]:
external = line[0]
else:
external = line[(-1)]
external = decode_as_string(external, encoding='utf-8')
externals.append(os.path.normpath(external))
return externals
|
[
"def",
"parse_external_prop",
"(",
"lines",
")",
":",
"externals",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
".",
"splitlines",
"(",
")",
":",
"line",
"=",
"line",
".",
"lstrip",
"(",
")",
"if",
"(",
"not",
"line",
")",
":",
"continue",
"if",
"(",
"sys",
".",
"version_info",
"<",
"(",
"3",
",",
"0",
")",
")",
":",
"line",
"=",
"line",
".",
"encode",
"(",
"'utf-8'",
")",
"line",
"=",
"shlex",
".",
"split",
"(",
"line",
")",
"if",
"(",
"sys",
".",
"version_info",
"<",
"(",
"3",
",",
"0",
")",
")",
":",
"line",
"=",
"[",
"x",
".",
"decode",
"(",
"'utf-8'",
")",
"for",
"x",
"in",
"line",
"]",
"if",
"urlparse",
".",
"urlsplit",
"(",
"line",
"[",
"(",
"-",
"1",
")",
"]",
")",
"[",
"0",
"]",
":",
"external",
"=",
"line",
"[",
"0",
"]",
"else",
":",
"external",
"=",
"line",
"[",
"(",
"-",
"1",
")",
"]",
"external",
"=",
"decode_as_string",
"(",
"external",
",",
"encoding",
"=",
"'utf-8'",
")",
"externals",
".",
"append",
"(",
"os",
".",
"path",
".",
"normpath",
"(",
"external",
")",
")",
"return",
"externals"
] |
parse the value of a retrieved svn:externals entry .
|
train
| false
|
9,566
|
def escape_abbr(text):
return re.sub('\\.(?=\\s|$)', '.\\@', text)
|
[
"def",
"escape_abbr",
"(",
"text",
")",
":",
"return",
"re",
".",
"sub",
"(",
"'\\\\.(?=\\\\s|$)'",
",",
"'.\\\\@'",
",",
"text",
")"
] |
adjust spacing after abbreviations .
|
train
| false
|
9,569
|
def plot_day_summary(ax, quotes, ticksize=3, colorup='k', colordown='r'):
lines = []
for q in quotes:
(t, open, close, high, low) = q[:5]
if (close >= open):
color = colorup
else:
color = colordown
vline = Line2D(xdata=(t, t), ydata=(low, high), color=color, antialiased=False)
oline = Line2D(xdata=(t, t), ydata=(open, open), color=color, antialiased=False, marker=TICKLEFT, markersize=ticksize)
cline = Line2D(xdata=(t, t), ydata=(close, close), color=color, antialiased=False, markersize=ticksize, marker=TICKRIGHT)
lines.extend((vline, oline, cline))
ax.add_line(vline)
ax.add_line(oline)
ax.add_line(cline)
ax.autoscale_view()
return lines
|
[
"def",
"plot_day_summary",
"(",
"ax",
",",
"quotes",
",",
"ticksize",
"=",
"3",
",",
"colorup",
"=",
"'k'",
",",
"colordown",
"=",
"'r'",
")",
":",
"lines",
"=",
"[",
"]",
"for",
"q",
"in",
"quotes",
":",
"(",
"t",
",",
"open",
",",
"close",
",",
"high",
",",
"low",
")",
"=",
"q",
"[",
":",
"5",
"]",
"if",
"(",
"close",
">=",
"open",
")",
":",
"color",
"=",
"colorup",
"else",
":",
"color",
"=",
"colordown",
"vline",
"=",
"Line2D",
"(",
"xdata",
"=",
"(",
"t",
",",
"t",
")",
",",
"ydata",
"=",
"(",
"low",
",",
"high",
")",
",",
"color",
"=",
"color",
",",
"antialiased",
"=",
"False",
")",
"oline",
"=",
"Line2D",
"(",
"xdata",
"=",
"(",
"t",
",",
"t",
")",
",",
"ydata",
"=",
"(",
"open",
",",
"open",
")",
",",
"color",
"=",
"color",
",",
"antialiased",
"=",
"False",
",",
"marker",
"=",
"TICKLEFT",
",",
"markersize",
"=",
"ticksize",
")",
"cline",
"=",
"Line2D",
"(",
"xdata",
"=",
"(",
"t",
",",
"t",
")",
",",
"ydata",
"=",
"(",
"close",
",",
"close",
")",
",",
"color",
"=",
"color",
",",
"antialiased",
"=",
"False",
",",
"markersize",
"=",
"ticksize",
",",
"marker",
"=",
"TICKRIGHT",
")",
"lines",
".",
"extend",
"(",
"(",
"vline",
",",
"oline",
",",
"cline",
")",
")",
"ax",
".",
"add_line",
"(",
"vline",
")",
"ax",
".",
"add_line",
"(",
"oline",
")",
"ax",
".",
"add_line",
"(",
"cline",
")",
"ax",
".",
"autoscale_view",
"(",
")",
"return",
"lines"
] |
quotes is a list of tuples represent the time .
|
train
| false
|
9,570
|
def _factors_product(factors):
return Mul(*[(f.as_expr() ** k) for (f, k) in factors])
|
[
"def",
"_factors_product",
"(",
"factors",
")",
":",
"return",
"Mul",
"(",
"*",
"[",
"(",
"f",
".",
"as_expr",
"(",
")",
"**",
"k",
")",
"for",
"(",
"f",
",",
"k",
")",
"in",
"factors",
"]",
")"
] |
multiply a list of pairs .
|
train
| false
|
9,571
|
def infer_import(self, context=None, asname=True):
name = context.lookupname
if (name is None):
raise InferenceError()
if asname:
(yield self.do_import_module(self.real_name(name)))
else:
(yield self.do_import_module(name))
|
[
"def",
"infer_import",
"(",
"self",
",",
"context",
"=",
"None",
",",
"asname",
"=",
"True",
")",
":",
"name",
"=",
"context",
".",
"lookupname",
"if",
"(",
"name",
"is",
"None",
")",
":",
"raise",
"InferenceError",
"(",
")",
"if",
"asname",
":",
"(",
"yield",
"self",
".",
"do_import_module",
"(",
"self",
".",
"real_name",
"(",
"name",
")",
")",
")",
"else",
":",
"(",
"yield",
"self",
".",
"do_import_module",
"(",
"name",
")",
")"
] |
infer an import node: return the imported module/object .
|
train
| false
|
9,572
|
def add_diversity(table):
table['Obscure'] = [('Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html', 'Google Bot'), ('Wget/1.16.1 (linux-gnu)', 'wget 1.16.1'), ('curl/7.40.0', 'curl 7.40.0')]
return table
|
[
"def",
"add_diversity",
"(",
"table",
")",
":",
"table",
"[",
"'Obscure'",
"]",
"=",
"[",
"(",
"'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html'",
",",
"'Google Bot'",
")",
",",
"(",
"'Wget/1.16.1 (linux-gnu)'",
",",
"'wget 1.16.1'",
")",
",",
"(",
"'curl/7.40.0'",
",",
"'curl 7.40.0'",
")",
"]",
"return",
"table"
] |
insert a few additional entries for diversity into the dict .
|
train
| false
|
9,573
|
def _plots_first(fname):
if (not (fname.startswith('plot') and fname.endswith('.py'))):
return ('zz' + fname)
return fname
|
[
"def",
"_plots_first",
"(",
"fname",
")",
":",
"if",
"(",
"not",
"(",
"fname",
".",
"startswith",
"(",
"'plot'",
")",
"and",
"fname",
".",
"endswith",
"(",
"'.py'",
")",
")",
")",
":",
"return",
"(",
"'zz'",
"+",
"fname",
")",
"return",
"fname"
] |
decorate filename so that examples with plots are displayed first .
|
train
| false
|
9,574
|
def handle_uploaded_file(filecnt, typ):
md5 = hashlib.md5()
for chunk in filecnt.chunks():
md5.update(chunk)
md5sum = md5.hexdigest()
anal_dir = os.path.join(settings.UPLD_DIR, (md5sum + '/'))
if (not os.path.exists(anal_dir)):
os.makedirs(anal_dir)
with open(((anal_dir + md5sum) + typ), 'wb+') as destination:
for chunk in filecnt.chunks():
destination.write(chunk)
return md5sum
|
[
"def",
"handle_uploaded_file",
"(",
"filecnt",
",",
"typ",
")",
":",
"md5",
"=",
"hashlib",
".",
"md5",
"(",
")",
"for",
"chunk",
"in",
"filecnt",
".",
"chunks",
"(",
")",
":",
"md5",
".",
"update",
"(",
"chunk",
")",
"md5sum",
"=",
"md5",
".",
"hexdigest",
"(",
")",
"anal_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"settings",
".",
"UPLD_DIR",
",",
"(",
"md5sum",
"+",
"'/'",
")",
")",
"if",
"(",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"anal_dir",
")",
")",
":",
"os",
".",
"makedirs",
"(",
"anal_dir",
")",
"with",
"open",
"(",
"(",
"(",
"anal_dir",
"+",
"md5sum",
")",
"+",
"typ",
")",
",",
"'wb+'",
")",
"as",
"destination",
":",
"for",
"chunk",
"in",
"filecnt",
".",
"chunks",
"(",
")",
":",
"destination",
".",
"write",
"(",
"chunk",
")",
"return",
"md5sum"
] |
write uploaded file .
|
train
| false
|
9,575
|
def getGlobalRepositoryDialogValues():
global globalRepositoryDialogListTable
return euclidean.getListTableElements(globalRepositoryDialogListTable)
|
[
"def",
"getGlobalRepositoryDialogValues",
"(",
")",
":",
"global",
"globalRepositoryDialogListTable",
"return",
"euclidean",
".",
"getListTableElements",
"(",
"globalRepositoryDialogListTable",
")"
] |
get the global repository dialog values .
|
train
| false
|
9,576
|
def _parseTrustRootPath(pathName):
if (pathName is None):
return None
return _loadCAsFromDir(FilePath(pathName))
|
[
"def",
"_parseTrustRootPath",
"(",
"pathName",
")",
":",
"if",
"(",
"pathName",
"is",
"None",
")",
":",
"return",
"None",
"return",
"_loadCAsFromDir",
"(",
"FilePath",
"(",
"pathName",
")",
")"
] |
parse a string referring to a directory full of certificate authorities into a trust root .
|
train
| false
|
9,577
|
@contextlib.contextmanager
def reset_setup_stop_context():
setup_stop_after = distutils.core._setup_stop_after
distutils.core._setup_stop_after = None
(yield)
distutils.core._setup_stop_after = setup_stop_after
|
[
"@",
"contextlib",
".",
"contextmanager",
"def",
"reset_setup_stop_context",
"(",
")",
":",
"setup_stop_after",
"=",
"distutils",
".",
"core",
".",
"_setup_stop_after",
"distutils",
".",
"core",
".",
"_setup_stop_after",
"=",
"None",
"(",
"yield",
")",
"distutils",
".",
"core",
".",
"_setup_stop_after",
"=",
"setup_stop_after"
] |
when the setuptools tests are run using setup .
|
train
| false
|
9,578
|
def grains_refresh():
DETAILS['grains_cache'] = {}
return grains()
|
[
"def",
"grains_refresh",
"(",
")",
":",
"DETAILS",
"[",
"'grains_cache'",
"]",
"=",
"{",
"}",
"return",
"grains",
"(",
")"
] |
refresh the grains .
|
train
| false
|
9,579
|
def _dummy_(name, token, **kwargs):
global _dummies
if (not ((name, token) in _dummies)):
_dummies[(name, token)] = Dummy(name, **kwargs)
return _dummies[(name, token)]
|
[
"def",
"_dummy_",
"(",
"name",
",",
"token",
",",
"**",
"kwargs",
")",
":",
"global",
"_dummies",
"if",
"(",
"not",
"(",
"(",
"name",
",",
"token",
")",
"in",
"_dummies",
")",
")",
":",
"_dummies",
"[",
"(",
"name",
",",
"token",
")",
"]",
"=",
"Dummy",
"(",
"name",
",",
"**",
"kwargs",
")",
"return",
"_dummies",
"[",
"(",
"name",
",",
"token",
")",
"]"
] |
return a dummy associated to name and token .
|
train
| false
|
9,580
|
@pytest.fixture(scope='module', params=['gpu'])
def backend_gpu(request):
be = get_backend(request)
def cleanup():
be = request.getfixturevalue('backend_gpu')
del be
request.addfinalizer(cleanup)
return be
|
[
"@",
"pytest",
".",
"fixture",
"(",
"scope",
"=",
"'module'",
",",
"params",
"=",
"[",
"'gpu'",
"]",
")",
"def",
"backend_gpu",
"(",
"request",
")",
":",
"be",
"=",
"get_backend",
"(",
"request",
")",
"def",
"cleanup",
"(",
")",
":",
"be",
"=",
"request",
".",
"getfixturevalue",
"(",
"'backend_gpu'",
")",
"del",
"be",
"request",
".",
"addfinalizer",
"(",
"cleanup",
")",
"return",
"be"
] |
fixture to setup the backend before running a test .
|
train
| false
|
9,581
|
def test_install_package_with_latin1_setup(script, data):
to_install = data.packages.join('SetupPyLatin1')
script.pip('install', to_install)
|
[
"def",
"test_install_package_with_latin1_setup",
"(",
"script",
",",
"data",
")",
":",
"to_install",
"=",
"data",
".",
"packages",
".",
"join",
"(",
"'SetupPyLatin1'",
")",
"script",
".",
"pip",
"(",
"'install'",
",",
"to_install",
")"
] |
install a package with a setup .
|
train
| false
|
9,583
|
def libvlc_track_description_list_release(p_track_description):
f = (_Cfunctions.get('libvlc_track_description_list_release', None) or _Cfunction('libvlc_track_description_list_release', ((1,),), None, None, ctypes.POINTER(TrackDescription)))
return f(p_track_description)
|
[
"def",
"libvlc_track_description_list_release",
"(",
"p_track_description",
")",
":",
"f",
"=",
"(",
"_Cfunctions",
".",
"get",
"(",
"'libvlc_track_description_list_release'",
",",
"None",
")",
"or",
"_Cfunction",
"(",
"'libvlc_track_description_list_release'",
",",
"(",
"(",
"1",
",",
")",
",",
")",
",",
"None",
",",
"None",
",",
"ctypes",
".",
"POINTER",
"(",
"TrackDescription",
")",
")",
")",
"return",
"f",
"(",
"p_track_description",
")"
] |
release l{trackdescription} .
|
train
| true
|
9,584
|
def set_email_preferences_for_exploration(user_id, exploration_id, mute_feedback_notifications=None, mute_suggestion_notifications=None):
exploration_user_model = user_models.ExplorationUserDataModel.get(user_id, exploration_id)
if (exploration_user_model is None):
exploration_user_model = user_models.ExplorationUserDataModel.create(user_id, exploration_id)
if (mute_feedback_notifications is not None):
exploration_user_model.mute_feedback_notifications = mute_feedback_notifications
if (mute_suggestion_notifications is not None):
exploration_user_model.mute_suggestion_notifications = mute_suggestion_notifications
exploration_user_model.put()
|
[
"def",
"set_email_preferences_for_exploration",
"(",
"user_id",
",",
"exploration_id",
",",
"mute_feedback_notifications",
"=",
"None",
",",
"mute_suggestion_notifications",
"=",
"None",
")",
":",
"exploration_user_model",
"=",
"user_models",
".",
"ExplorationUserDataModel",
".",
"get",
"(",
"user_id",
",",
"exploration_id",
")",
"if",
"(",
"exploration_user_model",
"is",
"None",
")",
":",
"exploration_user_model",
"=",
"user_models",
".",
"ExplorationUserDataModel",
".",
"create",
"(",
"user_id",
",",
"exploration_id",
")",
"if",
"(",
"mute_feedback_notifications",
"is",
"not",
"None",
")",
":",
"exploration_user_model",
".",
"mute_feedback_notifications",
"=",
"mute_feedback_notifications",
"if",
"(",
"mute_suggestion_notifications",
"is",
"not",
"None",
")",
":",
"exploration_user_model",
".",
"mute_suggestion_notifications",
"=",
"mute_suggestion_notifications",
"exploration_user_model",
".",
"put",
"(",
")"
] |
sets mute preferences for exploration with given exploration_id of user with given user_id .
|
train
| false
|
9,585
|
def get_protocol_string(tcp, udp):
if ((tcp == 'TCP') and (udp == 'UDP')):
return (((tcp + '/') + udp) + ' - ')
elif ((tcp == 'TCP') or (udp == 'UDP')):
return ((tcp + udp) + ' - ')
else:
return ''
|
[
"def",
"get_protocol_string",
"(",
"tcp",
",",
"udp",
")",
":",
"if",
"(",
"(",
"tcp",
"==",
"'TCP'",
")",
"and",
"(",
"udp",
"==",
"'UDP'",
")",
")",
":",
"return",
"(",
"(",
"(",
"tcp",
"+",
"'/'",
")",
"+",
"udp",
")",
"+",
"' - '",
")",
"elif",
"(",
"(",
"tcp",
"==",
"'TCP'",
")",
"or",
"(",
"udp",
"==",
"'UDP'",
")",
")",
":",
"return",
"(",
"(",
"tcp",
"+",
"udp",
")",
"+",
"' - '",
")",
"else",
":",
"return",
"''"
] |
tcp/udp string for description .
|
train
| false
|
9,586
|
def GetWordIds(text, vocab, pad_len=None, pad_id=None):
ids = []
for w in text.split():
i = vocab.WordToId(w)
if (i >= 0):
ids.append(i)
else:
ids.append(vocab.WordToId(UNKNOWN_TOKEN))
if (pad_len is not None):
return Pad(ids, pad_id, pad_len)
return ids
|
[
"def",
"GetWordIds",
"(",
"text",
",",
"vocab",
",",
"pad_len",
"=",
"None",
",",
"pad_id",
"=",
"None",
")",
":",
"ids",
"=",
"[",
"]",
"for",
"w",
"in",
"text",
".",
"split",
"(",
")",
":",
"i",
"=",
"vocab",
".",
"WordToId",
"(",
"w",
")",
"if",
"(",
"i",
">=",
"0",
")",
":",
"ids",
".",
"append",
"(",
"i",
")",
"else",
":",
"ids",
".",
"append",
"(",
"vocab",
".",
"WordToId",
"(",
"UNKNOWN_TOKEN",
")",
")",
"if",
"(",
"pad_len",
"is",
"not",
"None",
")",
":",
"return",
"Pad",
"(",
"ids",
",",
"pad_id",
",",
"pad_len",
")",
"return",
"ids"
] |
get ids corresponding to words in text .
|
train
| false
|
9,588
|
@app.route('/user-agent')
def view_user_agent():
headers = get_headers()
return jsonify({'user-agent': headers['user-agent']})
|
[
"@",
"app",
".",
"route",
"(",
"'/user-agent'",
")",
"def",
"view_user_agent",
"(",
")",
":",
"headers",
"=",
"get_headers",
"(",
")",
"return",
"jsonify",
"(",
"{",
"'user-agent'",
":",
"headers",
"[",
"'user-agent'",
"]",
"}",
")"
] |
returns user-agent .
|
train
| false
|
9,589
|
@testing.requires_testing_data
def test_hash_raw():
raw = read_raw_fif(fif_fname)
assert_raises(RuntimeError, raw.__hash__)
raw = read_raw_fif(fif_fname).crop(0, 0.5)
raw_size = raw._size
raw.load_data()
raw_load_size = raw._size
assert_true((raw_size < raw_load_size))
raw_2 = read_raw_fif(fif_fname).crop(0, 0.5)
raw_2.load_data()
assert_equal(hash(raw), hash(raw_2))
assert_equal(pickle.dumps(raw), pickle.dumps(raw_2))
raw_2._data[(0, 0)] -= 1
assert_not_equal(hash(raw), hash(raw_2))
|
[
"@",
"testing",
".",
"requires_testing_data",
"def",
"test_hash_raw",
"(",
")",
":",
"raw",
"=",
"read_raw_fif",
"(",
"fif_fname",
")",
"assert_raises",
"(",
"RuntimeError",
",",
"raw",
".",
"__hash__",
")",
"raw",
"=",
"read_raw_fif",
"(",
"fif_fname",
")",
".",
"crop",
"(",
"0",
",",
"0.5",
")",
"raw_size",
"=",
"raw",
".",
"_size",
"raw",
".",
"load_data",
"(",
")",
"raw_load_size",
"=",
"raw",
".",
"_size",
"assert_true",
"(",
"(",
"raw_size",
"<",
"raw_load_size",
")",
")",
"raw_2",
"=",
"read_raw_fif",
"(",
"fif_fname",
")",
".",
"crop",
"(",
"0",
",",
"0.5",
")",
"raw_2",
".",
"load_data",
"(",
")",
"assert_equal",
"(",
"hash",
"(",
"raw",
")",
",",
"hash",
"(",
"raw_2",
")",
")",
"assert_equal",
"(",
"pickle",
".",
"dumps",
"(",
"raw",
")",
",",
"pickle",
".",
"dumps",
"(",
"raw_2",
")",
")",
"raw_2",
".",
"_data",
"[",
"(",
"0",
",",
"0",
")",
"]",
"-=",
"1",
"assert_not_equal",
"(",
"hash",
"(",
"raw",
")",
",",
"hash",
"(",
"raw_2",
")",
")"
] |
test hashing raw objects .
|
train
| false
|
9,590
|
def parse_attributes(attributes, element):
result = {}
for attr in element.attrib:
if ('{' in attr):
a = fixtag(attr, NS_MAP)[0]
else:
a = attr
if (a in attributes):
result[a] = element.attrib[attr]
else:
_log.info('in <%s>, uncaught attribute %s', fixtag(element.tag, NS_MAP)[0], attr)
return result
|
[
"def",
"parse_attributes",
"(",
"attributes",
",",
"element",
")",
":",
"result",
"=",
"{",
"}",
"for",
"attr",
"in",
"element",
".",
"attrib",
":",
"if",
"(",
"'{'",
"in",
"attr",
")",
":",
"a",
"=",
"fixtag",
"(",
"attr",
",",
"NS_MAP",
")",
"[",
"0",
"]",
"else",
":",
"a",
"=",
"attr",
"if",
"(",
"a",
"in",
"attributes",
")",
":",
"result",
"[",
"a",
"]",
"=",
"element",
".",
"attrib",
"[",
"attr",
"]",
"else",
":",
"_log",
".",
"info",
"(",
"'in <%s>, uncaught attribute %s'",
",",
"fixtag",
"(",
"element",
".",
"tag",
",",
"NS_MAP",
")",
"[",
"0",
"]",
",",
"attr",
")",
"return",
"result"
] |
extract attributes from an element .
|
train
| false
|
9,591
|
def noop_cost_fn(host_state, weight_properties):
return 1
|
[
"def",
"noop_cost_fn",
"(",
"host_state",
",",
"weight_properties",
")",
":",
"return",
"1"
] |
return a pre-weight cost of 1 for each host .
|
train
| false
|
9,592
|
def hex_str(an_int):
return '{0:#x}'.format(an_int)
|
[
"def",
"hex_str",
"(",
"an_int",
")",
":",
"return",
"'{0:#x}'",
".",
"format",
"(",
"an_int",
")"
] |
convert an int to an hexadecimal string .
|
train
| false
|
9,594
|
def flip_axis_multi(x, axis, is_random=False):
if is_random:
factor = np.random.uniform((-1), 1)
if (factor > 0):
results = []
for data in x:
data = np.asarray(data).swapaxes(axis, 0)
data = data[::(-1), ...]
data = data.swapaxes(0, axis)
results.append(data)
return np.asarray(results)
else:
return np.asarray(x)
else:
results = []
for data in x:
data = np.asarray(data).swapaxes(axis, 0)
data = data[::(-1), ...]
data = data.swapaxes(0, axis)
results.append(data)
return np.asarray(results)
|
[
"def",
"flip_axis_multi",
"(",
"x",
",",
"axis",
",",
"is_random",
"=",
"False",
")",
":",
"if",
"is_random",
":",
"factor",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"(",
"-",
"1",
")",
",",
"1",
")",
"if",
"(",
"factor",
">",
"0",
")",
":",
"results",
"=",
"[",
"]",
"for",
"data",
"in",
"x",
":",
"data",
"=",
"np",
".",
"asarray",
"(",
"data",
")",
".",
"swapaxes",
"(",
"axis",
",",
"0",
")",
"data",
"=",
"data",
"[",
":",
":",
"(",
"-",
"1",
")",
",",
"...",
"]",
"data",
"=",
"data",
".",
"swapaxes",
"(",
"0",
",",
"axis",
")",
"results",
".",
"append",
"(",
"data",
")",
"return",
"np",
".",
"asarray",
"(",
"results",
")",
"else",
":",
"return",
"np",
".",
"asarray",
"(",
"x",
")",
"else",
":",
"results",
"=",
"[",
"]",
"for",
"data",
"in",
"x",
":",
"data",
"=",
"np",
".",
"asarray",
"(",
"data",
")",
".",
"swapaxes",
"(",
"axis",
",",
"0",
")",
"data",
"=",
"data",
"[",
":",
":",
"(",
"-",
"1",
")",
",",
"...",
"]",
"data",
"=",
"data",
".",
"swapaxes",
"(",
"0",
",",
"axis",
")",
"results",
".",
"append",
"(",
"data",
")",
"return",
"np",
".",
"asarray",
"(",
"results",
")"
] |
flip the axises of multiple images together .
|
train
| true
|
9,595
|
def s3_format_datetime(dt=None, dtfmt=None):
if (not dt):
dt = datetime.datetime.utcnow()
if (dtfmt is None):
dtfmt = ISOFORMAT
return dt.strftime(dtfmt)
|
[
"def",
"s3_format_datetime",
"(",
"dt",
"=",
"None",
",",
"dtfmt",
"=",
"None",
")",
":",
"if",
"(",
"not",
"dt",
")",
":",
"dt",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"if",
"(",
"dtfmt",
"is",
"None",
")",
":",
"dtfmt",
"=",
"ISOFORMAT",
"return",
"dt",
".",
"strftime",
"(",
"dtfmt",
")"
] |
format a datetime object according to the given format .
|
train
| false
|
9,596
|
def get_default_time_display(dtime):
if (dtime is None):
return u''
if (dtime.tzinfo is not None):
try:
timezone = (u' ' + dtime.tzinfo.tzname(dtime))
except NotImplementedError:
timezone = dtime.strftime('%z')
else:
timezone = u' UTC'
localized = strftime_localized(dtime, 'DATE_TIME')
return (localized + timezone).strip()
|
[
"def",
"get_default_time_display",
"(",
"dtime",
")",
":",
"if",
"(",
"dtime",
"is",
"None",
")",
":",
"return",
"u''",
"if",
"(",
"dtime",
".",
"tzinfo",
"is",
"not",
"None",
")",
":",
"try",
":",
"timezone",
"=",
"(",
"u' '",
"+",
"dtime",
".",
"tzinfo",
".",
"tzname",
"(",
"dtime",
")",
")",
"except",
"NotImplementedError",
":",
"timezone",
"=",
"dtime",
".",
"strftime",
"(",
"'%z'",
")",
"else",
":",
"timezone",
"=",
"u' UTC'",
"localized",
"=",
"strftime_localized",
"(",
"dtime",
",",
"'DATE_TIME'",
")",
"return",
"(",
"localized",
"+",
"timezone",
")",
".",
"strip",
"(",
")"
] |
converts a datetime to a string representation .
|
train
| false
|
9,602
|
def add_line_wrappers(data_passed_from_pelican):
if data_passed_from_pelican._content:
full_content_of_page_or_post = data_passed_from_pelican._content
else:
return
all_instances_of_pre_elements = re.findall('<pre>.*?</pre>', full_content_of_page_or_post, re.DOTALL)
if (len(all_instances_of_pre_elements) > 0):
updated_full_content_of_page_or_post = full_content_of_page_or_post
for pre_element_to_parse in all_instances_of_pre_elements:
replacement_text_with_beginning_of_each_line_wrapped_in_span = re.sub('(<pre.*?>|\\n(?!</pre>))', '\\1<span class="code-line">', pre_element_to_parse)
replacement_text_with_full_line_wrapped_in_span = re.sub('((?<!</pre>)$|(?<!</pre>)\\n)', '</span>\\1', replacement_text_with_beginning_of_each_line_wrapped_in_span)
updated_full_content_of_page_or_post = updated_full_content_of_page_or_post.replace(pre_element_to_parse, replacement_text_with_full_line_wrapped_in_span)
data_passed_from_pelican._content = updated_full_content_of_page_or_post
|
[
"def",
"add_line_wrappers",
"(",
"data_passed_from_pelican",
")",
":",
"if",
"data_passed_from_pelican",
".",
"_content",
":",
"full_content_of_page_or_post",
"=",
"data_passed_from_pelican",
".",
"_content",
"else",
":",
"return",
"all_instances_of_pre_elements",
"=",
"re",
".",
"findall",
"(",
"'<pre>.*?</pre>'",
",",
"full_content_of_page_or_post",
",",
"re",
".",
"DOTALL",
")",
"if",
"(",
"len",
"(",
"all_instances_of_pre_elements",
")",
">",
"0",
")",
":",
"updated_full_content_of_page_or_post",
"=",
"full_content_of_page_or_post",
"for",
"pre_element_to_parse",
"in",
"all_instances_of_pre_elements",
":",
"replacement_text_with_beginning_of_each_line_wrapped_in_span",
"=",
"re",
".",
"sub",
"(",
"'(<pre.*?>|\\\\n(?!</pre>))'",
",",
"'\\\\1<span class=\"code-line\">'",
",",
"pre_element_to_parse",
")",
"replacement_text_with_full_line_wrapped_in_span",
"=",
"re",
".",
"sub",
"(",
"'((?<!</pre>)$|(?<!</pre>)\\\\n)'",
",",
"'</span>\\\\1'",
",",
"replacement_text_with_beginning_of_each_line_wrapped_in_span",
")",
"updated_full_content_of_page_or_post",
"=",
"updated_full_content_of_page_or_post",
".",
"replace",
"(",
"pre_element_to_parse",
",",
"replacement_text_with_full_line_wrapped_in_span",
")",
"data_passed_from_pelican",
".",
"_content",
"=",
"updated_full_content_of_page_or_post"
] |
a function to read through each page and post as it comes through from pelican .
|
train
| true
|