id_within_dataset
int64 1
55.5k
| snippet
stringlengths 19
14.2k
| tokens
listlengths 6
1.63k
| nl
stringlengths 6
352
| split_within_dataset
stringclasses 1
value | is_duplicated
bool 2
classes |
|---|---|---|---|---|---|
6,748
|
def template_question():
s3.prep = (lambda r: ((r.method == 'options') and (r.representation == 's3json')))
return s3_rest_controller()
|
[
"def",
"template_question",
"(",
")",
":",
"s3",
".",
"prep",
"=",
"(",
"lambda",
"r",
":",
"(",
"(",
"r",
".",
"method",
"==",
"'options'",
")",
"and",
"(",
"r",
".",
"representation",
"==",
"'s3json'",
")",
")",
")",
"return",
"s3_rest_controller",
"(",
")"
] |
restful crud controller for options .
|
train
| false
|
6,749
|
def test_find_number_false_exponent():
s = '2e'
r = find_number(s)
assert (s[r[0]:r[1]] == '2'), s[r[0]:r[1]]
|
[
"def",
"test_find_number_false_exponent",
"(",
")",
":",
"s",
"=",
"'2e'",
"r",
"=",
"find_number",
"(",
"s",
")",
"assert",
"(",
"s",
"[",
"r",
"[",
"0",
"]",
":",
"r",
"[",
"1",
"]",
"]",
"==",
"'2'",
")",
",",
"s",
"[",
"r",
"[",
"0",
"]",
":",
"r",
"[",
"1",
"]",
"]"
] |
tests that we dont include an e after a number .
|
train
| false
|
6,751
|
def release_address(client, allocation_id, check_mode=False):
err_msg = ''
if check_mode:
return (True, '')
ip_released = False
params = {'AllocationId': allocation_id}
try:
client.release_address(**params)
ip_released = True
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return (ip_released, err_msg)
|
[
"def",
"release_address",
"(",
"client",
",",
"allocation_id",
",",
"check_mode",
"=",
"False",
")",
":",
"err_msg",
"=",
"''",
"if",
"check_mode",
":",
"return",
"(",
"True",
",",
"''",
")",
"ip_released",
"=",
"False",
"params",
"=",
"{",
"'AllocationId'",
":",
"allocation_id",
"}",
"try",
":",
"client",
".",
"release_address",
"(",
"**",
"params",
")",
"ip_released",
"=",
"True",
"except",
"botocore",
".",
"exceptions",
".",
"ClientError",
"as",
"e",
":",
"err_msg",
"=",
"str",
"(",
"e",
")",
"return",
"(",
"ip_released",
",",
"err_msg",
")"
] |
release a previously allocated elastic ip address .
|
train
| false
|
6,753
|
def is_entity(key):
key_parts = key.split(KEY_DELIMITER)
if (len(key_parts) != 3):
return False
last_part = key_parts[(-1)]
if (not last_part.endswith(KIND_SEPARATOR)):
return False
last_part = last_part[:(- len(KIND_SEPARATOR))]
if (KIND_SEPARATOR in last_part):
return False
return (':' in last_part)
|
[
"def",
"is_entity",
"(",
"key",
")",
":",
"key_parts",
"=",
"key",
".",
"split",
"(",
"KEY_DELIMITER",
")",
"if",
"(",
"len",
"(",
"key_parts",
")",
"!=",
"3",
")",
":",
"return",
"False",
"last_part",
"=",
"key_parts",
"[",
"(",
"-",
"1",
")",
"]",
"if",
"(",
"not",
"last_part",
".",
"endswith",
"(",
"KIND_SEPARATOR",
")",
")",
":",
"return",
"False",
"last_part",
"=",
"last_part",
"[",
":",
"(",
"-",
"len",
"(",
"KIND_SEPARATOR",
")",
")",
"]",
"if",
"(",
"KIND_SEPARATOR",
"in",
"last_part",
")",
":",
"return",
"False",
"return",
"(",
"':'",
"in",
"last_part",
")"
] |
determines whether a given string is an entity key .
|
train
| false
|
6,754
|
def saveMeshes(filename, objects):
ext = os.path.splitext(filename)[1].lower()
if (ext == '.stl'):
stl.saveScene(filename, objects)
return
if (ext == '.amf'):
amf.saveScene(filename, objects)
return
print ('Error: Unknown model extension: %s' % ext)
|
[
"def",
"saveMeshes",
"(",
"filename",
",",
"objects",
")",
":",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"1",
"]",
".",
"lower",
"(",
")",
"if",
"(",
"ext",
"==",
"'.stl'",
")",
":",
"stl",
".",
"saveScene",
"(",
"filename",
",",
"objects",
")",
"return",
"if",
"(",
"ext",
"==",
"'.amf'",
")",
":",
"amf",
".",
"saveScene",
"(",
"filename",
",",
"objects",
")",
"return",
"print",
"(",
"'Error: Unknown model extension: %s'",
"%",
"ext",
")"
] |
save a list of objects into the file given by the filename .
|
train
| false
|
6,755
|
def update_session_auth_hash(request, user):
request.session.cycle_key()
if (hasattr(user, 'get_session_auth_hash') and (request.user == user)):
request.session[HASH_SESSION_KEY] = user.get_session_auth_hash()
|
[
"def",
"update_session_auth_hash",
"(",
"request",
",",
"user",
")",
":",
"request",
".",
"session",
".",
"cycle_key",
"(",
")",
"if",
"(",
"hasattr",
"(",
"user",
",",
"'get_session_auth_hash'",
")",
"and",
"(",
"request",
".",
"user",
"==",
"user",
")",
")",
":",
"request",
".",
"session",
"[",
"HASH_SESSION_KEY",
"]",
"=",
"user",
".",
"get_session_auth_hash",
"(",
")"
] |
updating a users password logs out all sessions for the user .
|
train
| false
|
6,756
|
@depends(HAS_PYVMOMI)
def get_vmotion_enabled(host, username, password, protocol=None, port=None, host_names=None):
service_instance = salt.utils.vmware.get_service_instance(host=host, username=username, password=password, protocol=protocol, port=port)
host_names = _check_hosts(service_instance, host, host_names)
ret = {}
for host_name in host_names:
host_ref = _get_host_ref(service_instance, host, host_name=host_name)
vmotion_vnic = host_ref.configManager.vmotionSystem.netConfig.selectedVnic
if vmotion_vnic:
ret.update({host_name: {'VMotion Enabled': True}})
else:
ret.update({host_name: {'VMotion Enabled': False}})
return ret
|
[
"@",
"depends",
"(",
"HAS_PYVMOMI",
")",
"def",
"get_vmotion_enabled",
"(",
"host",
",",
"username",
",",
"password",
",",
"protocol",
"=",
"None",
",",
"port",
"=",
"None",
",",
"host_names",
"=",
"None",
")",
":",
"service_instance",
"=",
"salt",
".",
"utils",
".",
"vmware",
".",
"get_service_instance",
"(",
"host",
"=",
"host",
",",
"username",
"=",
"username",
",",
"password",
"=",
"password",
",",
"protocol",
"=",
"protocol",
",",
"port",
"=",
"port",
")",
"host_names",
"=",
"_check_hosts",
"(",
"service_instance",
",",
"host",
",",
"host_names",
")",
"ret",
"=",
"{",
"}",
"for",
"host_name",
"in",
"host_names",
":",
"host_ref",
"=",
"_get_host_ref",
"(",
"service_instance",
",",
"host",
",",
"host_name",
"=",
"host_name",
")",
"vmotion_vnic",
"=",
"host_ref",
".",
"configManager",
".",
"vmotionSystem",
".",
"netConfig",
".",
"selectedVnic",
"if",
"vmotion_vnic",
":",
"ret",
".",
"update",
"(",
"{",
"host_name",
":",
"{",
"'VMotion Enabled'",
":",
"True",
"}",
"}",
")",
"else",
":",
"ret",
".",
"update",
"(",
"{",
"host_name",
":",
"{",
"'VMotion Enabled'",
":",
"False",
"}",
"}",
")",
"return",
"ret"
] |
get the vmotion enabled status for a given host or a list of host_names .
|
train
| true
|
6,757
|
def init(mpstate):
return SerialModule(mpstate)
|
[
"def",
"init",
"(",
"mpstate",
")",
":",
"return",
"SerialModule",
"(",
"mpstate",
")"
] |
initialize the style transfer backend .
|
train
| false
|
6,758
|
def _footer_css_urls(request, package_name):
package = settings.PIPELINE_CSS.get(package_name, {})
paths = ([package['output_filename']] if (not settings.DEBUG) else package['source_filenames'])
return [_footer_static_url(request, path) for path in paths]
|
[
"def",
"_footer_css_urls",
"(",
"request",
",",
"package_name",
")",
":",
"package",
"=",
"settings",
".",
"PIPELINE_CSS",
".",
"get",
"(",
"package_name",
",",
"{",
"}",
")",
"paths",
"=",
"(",
"[",
"package",
"[",
"'output_filename'",
"]",
"]",
"if",
"(",
"not",
"settings",
".",
"DEBUG",
")",
"else",
"package",
"[",
"'source_filenames'",
"]",
")",
"return",
"[",
"_footer_static_url",
"(",
"request",
",",
"path",
")",
"for",
"path",
"in",
"paths",
"]"
] |
construct absolute urls to css assets in a package .
|
train
| false
|
6,759
|
def _zpkbilinear(z, p, k, fs):
z = atleast_1d(z)
p = atleast_1d(p)
degree = _relative_degree(z, p)
fs2 = (2 * fs)
z_z = ((fs2 + z) / (fs2 - z))
p_z = ((fs2 + p) / (fs2 - p))
z_z = append(z_z, (- ones(degree)))
k_z = (k * real((prod((fs2 - z)) / prod((fs2 - p)))))
return (z_z, p_z, k_z)
|
[
"def",
"_zpkbilinear",
"(",
"z",
",",
"p",
",",
"k",
",",
"fs",
")",
":",
"z",
"=",
"atleast_1d",
"(",
"z",
")",
"p",
"=",
"atleast_1d",
"(",
"p",
")",
"degree",
"=",
"_relative_degree",
"(",
"z",
",",
"p",
")",
"fs2",
"=",
"(",
"2",
"*",
"fs",
")",
"z_z",
"=",
"(",
"(",
"fs2",
"+",
"z",
")",
"/",
"(",
"fs2",
"-",
"z",
")",
")",
"p_z",
"=",
"(",
"(",
"fs2",
"+",
"p",
")",
"/",
"(",
"fs2",
"-",
"p",
")",
")",
"z_z",
"=",
"append",
"(",
"z_z",
",",
"(",
"-",
"ones",
"(",
"degree",
")",
")",
")",
"k_z",
"=",
"(",
"k",
"*",
"real",
"(",
"(",
"prod",
"(",
"(",
"fs2",
"-",
"z",
")",
")",
"/",
"prod",
"(",
"(",
"fs2",
"-",
"p",
")",
")",
")",
")",
")",
"return",
"(",
"z_z",
",",
"p_z",
",",
"k_z",
")"
] |
bilinear transformation to convert a filter from analog to digital .
|
train
| false
|
6,760
|
def _make_class_unpicklable(cls):
def _break_on_call_reduce(self, protocol=None):
raise TypeError(('%r cannot be pickled' % self))
cls.__reduce_ex__ = _break_on_call_reduce
cls.__module__ = '<unknown>'
|
[
"def",
"_make_class_unpicklable",
"(",
"cls",
")",
":",
"def",
"_break_on_call_reduce",
"(",
"self",
",",
"protocol",
"=",
"None",
")",
":",
"raise",
"TypeError",
"(",
"(",
"'%r cannot be pickled'",
"%",
"self",
")",
")",
"cls",
".",
"__reduce_ex__",
"=",
"_break_on_call_reduce",
"cls",
".",
"__module__",
"=",
"'<unknown>'"
] |
make the given class un-picklable .
|
train
| true
|
6,762
|
@not_implemented_for('directed')
def node_connected_component(G, n):
return set(_plain_bfs(G, n))
|
[
"@",
"not_implemented_for",
"(",
"'directed'",
")",
"def",
"node_connected_component",
"(",
"G",
",",
"n",
")",
":",
"return",
"set",
"(",
"_plain_bfs",
"(",
"G",
",",
"n",
")",
")"
] |
return the nodes in the component of graph containing node n .
|
train
| false
|
6,764
|
def speed_elemwise_collapse():
shape = (30, 40, 50, 600)
a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape), dtype='float32'))
a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
a2 = tcn.shared_constructor(a, 'a')
a3 = a2[:, ::2, :, :]
b = tcn.CudaNdarrayType((False, False, False, False))()
c = (a3 + (b * tensor.exp((1 + (b ** a3)))))
f = pfunc([b], [c], mode=mode_with_gpu)
v = theano._asarray(numpy.random.rand(*shape), dtype='float32')
v = v[:, ::2, :, :]
v = cuda_ndarray.CudaNdarray(v)
time.time()
for i in range(100):
f(v)
time.time()
|
[
"def",
"speed_elemwise_collapse",
"(",
")",
":",
"shape",
"=",
"(",
"30",
",",
"40",
",",
"50",
",",
"600",
")",
"a",
"=",
"cuda_ndarray",
".",
"CudaNdarray",
"(",
"theano",
".",
"_asarray",
"(",
"numpy",
".",
"random",
".",
"rand",
"(",
"*",
"shape",
")",
",",
"dtype",
"=",
"'float32'",
")",
")",
"a",
"=",
"theano",
".",
"_asarray",
"(",
"numpy",
".",
"random",
".",
"rand",
"(",
"*",
"shape",
")",
",",
"dtype",
"=",
"'float32'",
")",
"a2",
"=",
"tcn",
".",
"shared_constructor",
"(",
"a",
",",
"'a'",
")",
"a3",
"=",
"a2",
"[",
":",
",",
":",
":",
"2",
",",
":",
",",
":",
"]",
"b",
"=",
"tcn",
".",
"CudaNdarrayType",
"(",
"(",
"False",
",",
"False",
",",
"False",
",",
"False",
")",
")",
"(",
")",
"c",
"=",
"(",
"a3",
"+",
"(",
"b",
"*",
"tensor",
".",
"exp",
"(",
"(",
"1",
"+",
"(",
"b",
"**",
"a3",
")",
")",
")",
")",
")",
"f",
"=",
"pfunc",
"(",
"[",
"b",
"]",
",",
"[",
"c",
"]",
",",
"mode",
"=",
"mode_with_gpu",
")",
"v",
"=",
"theano",
".",
"_asarray",
"(",
"numpy",
".",
"random",
".",
"rand",
"(",
"*",
"shape",
")",
",",
"dtype",
"=",
"'float32'",
")",
"v",
"=",
"v",
"[",
":",
",",
":",
":",
"2",
",",
":",
",",
":",
"]",
"v",
"=",
"cuda_ndarray",
".",
"CudaNdarray",
"(",
"v",
")",
"time",
".",
"time",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"100",
")",
":",
"f",
"(",
"v",
")",
"time",
".",
"time",
"(",
")"
] |
used to time if the collapse of ccontiguous dims are useful .
|
train
| false
|
6,766
|
def test_idempotent():
dirty = u'<span>invalid & </span> < extra http://link.com<em>'
clean = bleach.clean(dirty)
eq_(clean, bleach.clean(clean))
linked = bleach.linkify(dirty)
eq_(linked, bleach.linkify(linked))
|
[
"def",
"test_idempotent",
"(",
")",
":",
"dirty",
"=",
"u'<span>invalid & </span> < extra http://link.com<em>'",
"clean",
"=",
"bleach",
".",
"clean",
"(",
"dirty",
")",
"eq_",
"(",
"clean",
",",
"bleach",
".",
"clean",
"(",
"clean",
")",
")",
"linked",
"=",
"bleach",
".",
"linkify",
"(",
"dirty",
")",
"eq_",
"(",
"linked",
",",
"bleach",
".",
"linkify",
"(",
"linked",
")",
")"
] |
make sure that applying the filter twice doesnt change anything .
|
train
| false
|
6,767
|
def p_command_end(p):
p[0] = ('END',)
|
[
"def",
"p_command_end",
"(",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"(",
"'END'",
",",
")"
] |
command : end .
|
train
| false
|
6,770
|
def _api_reset_quota(name, output, kwargs):
BPSMeter.do.reset_quota(force=True)
|
[
"def",
"_api_reset_quota",
"(",
"name",
",",
"output",
",",
"kwargs",
")",
":",
"BPSMeter",
".",
"do",
".",
"reset_quota",
"(",
"force",
"=",
"True",
")"
] |
reset quota left .
|
train
| false
|
6,771
|
def dump_memory(signum, frame):
timestamp = datetime.now().isoformat()
format_str = '{}/meliae.{}.{}.{{}}.dump'.format(tempfile.gettempdir(), timestamp, os.getpid())
scanner.dump_all_objects(format_str.format('pre-gc'))
for gen in xrange(3):
gc.collect(gen)
scanner.dump_all_objects(format_str.format('gc-gen-{}'.format(gen)))
|
[
"def",
"dump_memory",
"(",
"signum",
",",
"frame",
")",
":",
"timestamp",
"=",
"datetime",
".",
"now",
"(",
")",
".",
"isoformat",
"(",
")",
"format_str",
"=",
"'{}/meliae.{}.{}.{{}}.dump'",
".",
"format",
"(",
"tempfile",
".",
"gettempdir",
"(",
")",
",",
"timestamp",
",",
"os",
".",
"getpid",
"(",
")",
")",
"scanner",
".",
"dump_all_objects",
"(",
"format_str",
".",
"format",
"(",
"'pre-gc'",
")",
")",
"for",
"gen",
"in",
"xrange",
"(",
"3",
")",
":",
"gc",
".",
"collect",
"(",
"gen",
")",
"scanner",
".",
"dump_all_objects",
"(",
"format_str",
".",
"format",
"(",
"'gc-gen-{}'",
".",
"format",
"(",
"gen",
")",
")",
")"
] |
dump memory stats for the current process to a temp directory .
|
train
| false
|
6,772
|
def _get_db(profile):
server = _get_conn(profile)
db = _get_db(profile)
return db
|
[
"def",
"_get_db",
"(",
"profile",
")",
":",
"server",
"=",
"_get_conn",
"(",
"profile",
")",
"db",
"=",
"_get_db",
"(",
"profile",
")",
"return",
"db"
] |
wraps _get_conn() to return a db .
|
train
| false
|
6,773
|
@dec.onlyif(has_pywin32, 'This test requires win32api to run')
def test_find_cmd_pythonw():
path = find_cmd('pythonw')
assert path.lower().endswith('pythonw.exe'), path
|
[
"@",
"dec",
".",
"onlyif",
"(",
"has_pywin32",
",",
"'This test requires win32api to run'",
")",
"def",
"test_find_cmd_pythonw",
"(",
")",
":",
"path",
"=",
"find_cmd",
"(",
"'pythonw'",
")",
"assert",
"path",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"'pythonw.exe'",
")",
",",
"path"
] |
try to find pythonw on windows .
|
train
| false
|
6,774
|
def howitworks(request):
if request.user.is_authenticated():
return redirect('/home/')
else:
return render_to_response('howitworks.html', {})
|
[
"def",
"howitworks",
"(",
"request",
")",
":",
"if",
"request",
".",
"user",
".",
"is_authenticated",
"(",
")",
":",
"return",
"redirect",
"(",
"'/home/'",
")",
"else",
":",
"return",
"render_to_response",
"(",
"'howitworks.html'",
",",
"{",
"}",
")"
] |
proxy view .
|
train
| false
|
6,775
|
def str_encode(value, encoder='base64'):
try:
out = value.encode(encoder)
except LookupError:
raise SaltInvocationError('You must specify a valid encoder')
except AttributeError:
raise SaltInvocationError('Value must be an encode-able string')
return out
|
[
"def",
"str_encode",
"(",
"value",
",",
"encoder",
"=",
"'base64'",
")",
":",
"try",
":",
"out",
"=",
"value",
".",
"encode",
"(",
"encoder",
")",
"except",
"LookupError",
":",
"raise",
"SaltInvocationError",
"(",
"'You must specify a valid encoder'",
")",
"except",
"AttributeError",
":",
"raise",
"SaltInvocationError",
"(",
"'Value must be an encode-able string'",
")",
"return",
"out"
] |
encode character string in the series/index using indicated encoding .
|
train
| false
|
6,777
|
def _service_bus_error_handler(http_error):
return _general_error_handler(http_error)
|
[
"def",
"_service_bus_error_handler",
"(",
"http_error",
")",
":",
"return",
"_general_error_handler",
"(",
"http_error",
")"
] |
simple error handler for service bus service .
|
train
| false
|
6,778
|
def record_played_exploration_in_collection_context(user_id, collection_id, exploration_id):
progress_model = user_models.CollectionProgressModel.get_or_create(user_id, collection_id)
if (exploration_id not in progress_model.completed_explorations):
progress_model.completed_explorations.append(exploration_id)
progress_model.put()
|
[
"def",
"record_played_exploration_in_collection_context",
"(",
"user_id",
",",
"collection_id",
",",
"exploration_id",
")",
":",
"progress_model",
"=",
"user_models",
".",
"CollectionProgressModel",
".",
"get_or_create",
"(",
"user_id",
",",
"collection_id",
")",
"if",
"(",
"exploration_id",
"not",
"in",
"progress_model",
".",
"completed_explorations",
")",
":",
"progress_model",
".",
"completed_explorations",
".",
"append",
"(",
"exploration_id",
")",
"progress_model",
".",
"put",
"(",
")"
] |
records a exploration by a given user in a given collection context as having been played .
|
train
| false
|
6,780
|
def eigval_decomp(sym_array):
(eigva, W) = decomp.eig(sym_array, left=True, right=False)
k = np.argmax(eigva)
return (W, eigva, k)
|
[
"def",
"eigval_decomp",
"(",
"sym_array",
")",
":",
"(",
"eigva",
",",
"W",
")",
"=",
"decomp",
".",
"eig",
"(",
"sym_array",
",",
"left",
"=",
"True",
",",
"right",
"=",
"False",
")",
"k",
"=",
"np",
".",
"argmax",
"(",
"eigva",
")",
"return",
"(",
"W",
",",
"eigva",
",",
"k",
")"
] |
returns w: array of eigenvectors eigva: list of eigenvalues k: largest eigenvector .
|
train
| false
|
6,782
|
def trial_division(n):
prime_factors = []
if (n < 2):
return prime_factors
for p in eratosthenes((int((n ** 0.5)) + 1)):
if ((p * p) > n):
break
while ((n % p) == 0):
prime_factors.append(p)
n //= p
if (n > 1):
prime_factors.append(n)
return prime_factors
|
[
"def",
"trial_division",
"(",
"n",
")",
":",
"prime_factors",
"=",
"[",
"]",
"if",
"(",
"n",
"<",
"2",
")",
":",
"return",
"prime_factors",
"for",
"p",
"in",
"eratosthenes",
"(",
"(",
"int",
"(",
"(",
"n",
"**",
"0.5",
")",
")",
"+",
"1",
")",
")",
":",
"if",
"(",
"(",
"p",
"*",
"p",
")",
">",
"n",
")",
":",
"break",
"while",
"(",
"(",
"n",
"%",
"p",
")",
"==",
"0",
")",
":",
"prime_factors",
".",
"append",
"(",
"p",
")",
"n",
"//=",
"p",
"if",
"(",
"n",
">",
"1",
")",
":",
"prime_factors",
".",
"append",
"(",
"n",
")",
"return",
"prime_factors"
] |
uses trial division to find prime factors of n .
|
train
| false
|
6,783
|
@handle_response_format
@treeio_login_required
def dashboard_widget_edit(request, widget_id, response_format='html'):
user = request.user.profile
widget_object = get_object_or_404(Widget, pk=widget_id)
if (widget_object.user == user):
perspective = user.get_perspective()
module = perspective.get_modules().filter(name=widget_object.module_name)[0]
widget = _get_widget(request, module, widget_object.widget_name)
widget_object.widget = widget
if ('view' in widget):
try:
content = unicode(widget['view'](request, response_format=response_format).content, 'utf_8')
widget_content = _get_widget_content(content, response_format=response_format)
except Exception:
widget_content = ''
widget['content'] = widget_content
if request.POST:
form = WidgetForm(user, request.POST, instance=widget_object)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('core_dashboard_index'))
else:
form = WidgetForm(user, instance=widget_object)
return render_to_response('core/dashboard/widget_edit', {'widget': widget_object, 'form': form}, context_instance=RequestContext(request), response_format=response_format)
return HttpResponseRedirect(reverse('home'))
|
[
"@",
"handle_response_format",
"@",
"treeio_login_required",
"def",
"dashboard_widget_edit",
"(",
"request",
",",
"widget_id",
",",
"response_format",
"=",
"'html'",
")",
":",
"user",
"=",
"request",
".",
"user",
".",
"profile",
"widget_object",
"=",
"get_object_or_404",
"(",
"Widget",
",",
"pk",
"=",
"widget_id",
")",
"if",
"(",
"widget_object",
".",
"user",
"==",
"user",
")",
":",
"perspective",
"=",
"user",
".",
"get_perspective",
"(",
")",
"module",
"=",
"perspective",
".",
"get_modules",
"(",
")",
".",
"filter",
"(",
"name",
"=",
"widget_object",
".",
"module_name",
")",
"[",
"0",
"]",
"widget",
"=",
"_get_widget",
"(",
"request",
",",
"module",
",",
"widget_object",
".",
"widget_name",
")",
"widget_object",
".",
"widget",
"=",
"widget",
"if",
"(",
"'view'",
"in",
"widget",
")",
":",
"try",
":",
"content",
"=",
"unicode",
"(",
"widget",
"[",
"'view'",
"]",
"(",
"request",
",",
"response_format",
"=",
"response_format",
")",
".",
"content",
",",
"'utf_8'",
")",
"widget_content",
"=",
"_get_widget_content",
"(",
"content",
",",
"response_format",
"=",
"response_format",
")",
"except",
"Exception",
":",
"widget_content",
"=",
"''",
"widget",
"[",
"'content'",
"]",
"=",
"widget_content",
"if",
"request",
".",
"POST",
":",
"form",
"=",
"WidgetForm",
"(",
"user",
",",
"request",
".",
"POST",
",",
"instance",
"=",
"widget_object",
")",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"form",
".",
"save",
"(",
")",
"return",
"HttpResponseRedirect",
"(",
"reverse",
"(",
"'core_dashboard_index'",
")",
")",
"else",
":",
"form",
"=",
"WidgetForm",
"(",
"user",
",",
"instance",
"=",
"widget_object",
")",
"return",
"render_to_response",
"(",
"'core/dashboard/widget_edit'",
",",
"{",
"'widget'",
":",
"widget_object",
",",
"'form'",
":",
"form",
"}",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
",",
"response_format",
"=",
"response_format",
")",
"return",
"HttpResponseRedirect",
"(",
"reverse",
"(",
"'home'",
")",
")"
] |
edit an existing widget on the dashboard .
|
train
| false
|
6,785
|
def _nth_root1(p, n, x, prec):
if rs_is_puiseux(p, x):
return rs_puiseux2(_nth_root1, p, n, x, prec)
R = p.ring
zm = R.zero_monom
if (zm not in p):
raise NotImplementedError('No constant term in series')
n = as_int(n)
assert (p[zm] == 1)
p1 = R(1)
if (p == 1):
return p
if (n == 0):
return R(1)
if (n == 1):
return p
if (n < 0):
n = (- n)
sign = 1
else:
sign = 0
for precx in _giant_steps(prec):
tmp = rs_pow(p1, (n + 1), x, precx)
tmp = rs_mul(tmp, p, x, precx)
p1 += ((p1 / n) - (tmp / n))
if sign:
return p1
else:
return _series_inversion1(p1, x, prec)
|
[
"def",
"_nth_root1",
"(",
"p",
",",
"n",
",",
"x",
",",
"prec",
")",
":",
"if",
"rs_is_puiseux",
"(",
"p",
",",
"x",
")",
":",
"return",
"rs_puiseux2",
"(",
"_nth_root1",
",",
"p",
",",
"n",
",",
"x",
",",
"prec",
")",
"R",
"=",
"p",
".",
"ring",
"zm",
"=",
"R",
".",
"zero_monom",
"if",
"(",
"zm",
"not",
"in",
"p",
")",
":",
"raise",
"NotImplementedError",
"(",
"'No constant term in series'",
")",
"n",
"=",
"as_int",
"(",
"n",
")",
"assert",
"(",
"p",
"[",
"zm",
"]",
"==",
"1",
")",
"p1",
"=",
"R",
"(",
"1",
")",
"if",
"(",
"p",
"==",
"1",
")",
":",
"return",
"p",
"if",
"(",
"n",
"==",
"0",
")",
":",
"return",
"R",
"(",
"1",
")",
"if",
"(",
"n",
"==",
"1",
")",
":",
"return",
"p",
"if",
"(",
"n",
"<",
"0",
")",
":",
"n",
"=",
"(",
"-",
"n",
")",
"sign",
"=",
"1",
"else",
":",
"sign",
"=",
"0",
"for",
"precx",
"in",
"_giant_steps",
"(",
"prec",
")",
":",
"tmp",
"=",
"rs_pow",
"(",
"p1",
",",
"(",
"n",
"+",
"1",
")",
",",
"x",
",",
"precx",
")",
"tmp",
"=",
"rs_mul",
"(",
"tmp",
",",
"p",
",",
"x",
",",
"precx",
")",
"p1",
"+=",
"(",
"(",
"p1",
"/",
"n",
")",
"-",
"(",
"tmp",
"/",
"n",
")",
")",
"if",
"sign",
":",
"return",
"p1",
"else",
":",
"return",
"_series_inversion1",
"(",
"p1",
",",
"x",
",",
"prec",
")"
] |
univariate series expansion of the nth root of p .
|
train
| false
|
6,787
|
def woeid_search(query):
query = (u'q=select * from geo.places where text="%s"' % query)
body = web.get((u'http://query.yahooapis.com/v1/public/yql?' + query), dont_decode=True)
parsed = xmltodict.parse(body).get(u'query')
results = parsed.get(u'results')
if ((results is None) or (results.get(u'place') is None)):
return None
if (type(results.get(u'place')) is list):
return results.get(u'place')[0]
return results.get(u'place')
|
[
"def",
"woeid_search",
"(",
"query",
")",
":",
"query",
"=",
"(",
"u'q=select * from geo.places where text=\"%s\"'",
"%",
"query",
")",
"body",
"=",
"web",
".",
"get",
"(",
"(",
"u'http://query.yahooapis.com/v1/public/yql?'",
"+",
"query",
")",
",",
"dont_decode",
"=",
"True",
")",
"parsed",
"=",
"xmltodict",
".",
"parse",
"(",
"body",
")",
".",
"get",
"(",
"u'query'",
")",
"results",
"=",
"parsed",
".",
"get",
"(",
"u'results'",
")",
"if",
"(",
"(",
"results",
"is",
"None",
")",
"or",
"(",
"results",
".",
"get",
"(",
"u'place'",
")",
"is",
"None",
")",
")",
":",
"return",
"None",
"if",
"(",
"type",
"(",
"results",
".",
"get",
"(",
"u'place'",
")",
")",
"is",
"list",
")",
":",
"return",
"results",
".",
"get",
"(",
"u'place'",
")",
"[",
"0",
"]",
"return",
"results",
".",
"get",
"(",
"u'place'",
")"
] |
find the first where on earth id for the given query .
|
train
| false
|
6,788
|
@task
@timed
def i18n_clean():
sh('git clean -fdX conf/locale')
|
[
"@",
"task",
"@",
"timed",
"def",
"i18n_clean",
"(",
")",
":",
"sh",
"(",
"'git clean -fdX conf/locale'",
")"
] |
clean the i18n directory of artifacts .
|
train
| false
|
6,790
|
def get_course_by_id(course_key, depth=0):
with modulestore().bulk_operations(course_key):
course = modulestore().get_course(course_key, depth=depth)
if course:
return course
else:
raise Http404('Course not found: {}.'.format(unicode(course_key)))
|
[
"def",
"get_course_by_id",
"(",
"course_key",
",",
"depth",
"=",
"0",
")",
":",
"with",
"modulestore",
"(",
")",
".",
"bulk_operations",
"(",
"course_key",
")",
":",
"course",
"=",
"modulestore",
"(",
")",
".",
"get_course",
"(",
"course_key",
",",
"depth",
"=",
"depth",
")",
"if",
"course",
":",
"return",
"course",
"else",
":",
"raise",
"Http404",
"(",
"'Course not found: {}.'",
".",
"format",
"(",
"unicode",
"(",
"course_key",
")",
")",
")"
] |
given a course id .
|
train
| false
|
6,792
|
def find_substring(string, pattern1, pattern2=None):
if (not pattern1):
logging.debug('pattern1: get empty string.')
return None
pattern = pattern1
if pattern2:
pattern += ('|%s' % pattern2)
ret = re.findall(pattern, string)
if (not ret):
logging.debug('Could not find matched string with pattern: %s', pattern)
return None
return ret[0]
|
[
"def",
"find_substring",
"(",
"string",
",",
"pattern1",
",",
"pattern2",
"=",
"None",
")",
":",
"if",
"(",
"not",
"pattern1",
")",
":",
"logging",
".",
"debug",
"(",
"'pattern1: get empty string.'",
")",
"return",
"None",
"pattern",
"=",
"pattern1",
"if",
"pattern2",
":",
"pattern",
"+=",
"(",
"'|%s'",
"%",
"pattern2",
")",
"ret",
"=",
"re",
".",
"findall",
"(",
"pattern",
",",
"string",
")",
"if",
"(",
"not",
"ret",
")",
":",
"logging",
".",
"debug",
"(",
"'Could not find matched string with pattern: %s'",
",",
"pattern",
")",
"return",
"None",
"return",
"ret",
"[",
"0",
"]"
] |
return the match of pattern1 in string .
|
train
| false
|
6,794
|
def _get_column_letter(col_idx):
if (not (1 <= col_idx <= 18278)):
raise ValueError('Invalid column index {0}'.format(col_idx))
letters = []
while (col_idx > 0):
(col_idx, remainder) = divmod(col_idx, 26)
if (remainder == 0):
remainder = 26
col_idx -= 1
letters.append(chr((remainder + 64)))
return ''.join(reversed(letters))
|
[
"def",
"_get_column_letter",
"(",
"col_idx",
")",
":",
"if",
"(",
"not",
"(",
"1",
"<=",
"col_idx",
"<=",
"18278",
")",
")",
":",
"raise",
"ValueError",
"(",
"'Invalid column index {0}'",
".",
"format",
"(",
"col_idx",
")",
")",
"letters",
"=",
"[",
"]",
"while",
"(",
"col_idx",
">",
"0",
")",
":",
"(",
"col_idx",
",",
"remainder",
")",
"=",
"divmod",
"(",
"col_idx",
",",
"26",
")",
"if",
"(",
"remainder",
"==",
"0",
")",
":",
"remainder",
"=",
"26",
"col_idx",
"-=",
"1",
"letters",
".",
"append",
"(",
"chr",
"(",
"(",
"remainder",
"+",
"64",
")",
")",
")",
"return",
"''",
".",
"join",
"(",
"reversed",
"(",
"letters",
")",
")"
] |
convert a column number into a column letter right shift the column col_idx by 26 to find column letters in reverse order .
|
train
| false
|
6,796
|
def time_cache(time_add_setting):
def _temp(key_func):
dct = {}
_time_caches[time_add_setting] = dct
def wrapper(*args, **kwargs):
generator = key_func(*args, **kwargs)
key = next(generator)
try:
(expiry, value) = dct[key]
if (expiry > time.time()):
return value
except KeyError:
pass
value = next(generator)
time_add = getattr(settings, time_add_setting)
if (key is not None):
dct[key] = ((time.time() + time_add), value)
return value
return wrapper
return _temp
|
[
"def",
"time_cache",
"(",
"time_add_setting",
")",
":",
"def",
"_temp",
"(",
"key_func",
")",
":",
"dct",
"=",
"{",
"}",
"_time_caches",
"[",
"time_add_setting",
"]",
"=",
"dct",
"def",
"wrapper",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"generator",
"=",
"key_func",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"key",
"=",
"next",
"(",
"generator",
")",
"try",
":",
"(",
"expiry",
",",
"value",
")",
"=",
"dct",
"[",
"key",
"]",
"if",
"(",
"expiry",
">",
"time",
".",
"time",
"(",
")",
")",
":",
"return",
"value",
"except",
"KeyError",
":",
"pass",
"value",
"=",
"next",
"(",
"generator",
")",
"time_add",
"=",
"getattr",
"(",
"settings",
",",
"time_add_setting",
")",
"if",
"(",
"key",
"is",
"not",
"None",
")",
":",
"dct",
"[",
"key",
"]",
"=",
"(",
"(",
"time",
".",
"time",
"(",
")",
"+",
"time_add",
")",
",",
"value",
")",
"return",
"value",
"return",
"wrapper",
"return",
"_temp"
] |
this decorator works as follows: call it with a setting and after that use the function with a callable that returns the key .
|
train
| false
|
6,797
|
def test_pushpop(size):
result = pushpop(size)
print('== Result ==')
print(result)
assert (result == list(reversed(range(size))))
|
[
"def",
"test_pushpop",
"(",
"size",
")",
":",
"result",
"=",
"pushpop",
"(",
"size",
")",
"print",
"(",
"'== Result =='",
")",
"print",
"(",
"result",
")",
"assert",
"(",
"result",
"==",
"list",
"(",
"reversed",
"(",
"range",
"(",
"size",
")",
")",
")",
")"
] |
test basic push pop operation on a stack object .
|
train
| false
|
6,798
|
def obfs_ps_script(script):
strippedCode = re.sub(re.compile('<#.*?#>', re.DOTALL), '', script)
strippedCode = '\n'.join([line for line in strippedCode.split('\n') if ((line.strip() != '') and (not line.strip().startswith('#')) and (not line.strip().lower().startswith('write-verbose ')) and (not line.strip().lower().startswith('write-debug ')))])
return strippedCode
|
[
"def",
"obfs_ps_script",
"(",
"script",
")",
":",
"strippedCode",
"=",
"re",
".",
"sub",
"(",
"re",
".",
"compile",
"(",
"'<#.*?#>'",
",",
"re",
".",
"DOTALL",
")",
",",
"''",
",",
"script",
")",
"strippedCode",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"line",
"for",
"line",
"in",
"strippedCode",
".",
"split",
"(",
"'\\n'",
")",
"if",
"(",
"(",
"line",
".",
"strip",
"(",
")",
"!=",
"''",
")",
"and",
"(",
"not",
"line",
".",
"strip",
"(",
")",
".",
"startswith",
"(",
"'#'",
")",
")",
"and",
"(",
"not",
"line",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'write-verbose '",
")",
")",
"and",
"(",
"not",
"line",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'write-debug '",
")",
")",
")",
"]",
")",
"return",
"strippedCode"
] |
strip block comments .
|
train
| false
|
6,800
|
def stringify_children(node):
parts = [node.text]
for c in node.getchildren():
parts.append(etree.tostring(c, with_tail=True, encoding='unicode'))
return u''.join(filter(None, parts))
|
[
"def",
"stringify_children",
"(",
"node",
")",
":",
"parts",
"=",
"[",
"node",
".",
"text",
"]",
"for",
"c",
"in",
"node",
".",
"getchildren",
"(",
")",
":",
"parts",
".",
"append",
"(",
"etree",
".",
"tostring",
"(",
"c",
",",
"with_tail",
"=",
"True",
",",
"encoding",
"=",
"'unicode'",
")",
")",
"return",
"u''",
".",
"join",
"(",
"filter",
"(",
"None",
",",
"parts",
")",
")"
] |
return all contents of an xml tree .
|
train
| false
|
6,801
|
def untar_backup_files(source):
logging.info("Untarring backup file '{0}'...".format(source))
try:
tar = tarfile.open(source, 'r:gz')
tar.extractall(path='/')
tar.close()
except tarfile.TarError as tar_error:
logging.exception(tar_error)
raise backup_exceptions.BRException("Exception while untarring backup file '{0}'.".format(source))
logging.info("Done untarring '{0}'.".format(source))
|
[
"def",
"untar_backup_files",
"(",
"source",
")",
":",
"logging",
".",
"info",
"(",
"\"Untarring backup file '{0}'...\"",
".",
"format",
"(",
"source",
")",
")",
"try",
":",
"tar",
"=",
"tarfile",
".",
"open",
"(",
"source",
",",
"'r:gz'",
")",
"tar",
".",
"extractall",
"(",
"path",
"=",
"'/'",
")",
"tar",
".",
"close",
"(",
")",
"except",
"tarfile",
".",
"TarError",
"as",
"tar_error",
":",
"logging",
".",
"exception",
"(",
"tar_error",
")",
"raise",
"backup_exceptions",
".",
"BRException",
"(",
"\"Exception while untarring backup file '{0}'.\"",
".",
"format",
"(",
"source",
")",
")",
"logging",
".",
"info",
"(",
"\"Done untarring '{0}'.\"",
".",
"format",
"(",
"source",
")",
")"
] |
restores a previous backup into the cassandra directory structure from a tar ball .
|
train
| false
|
6,802
|
def test_parametric_styles():
chart = None
for style in STYLES:
line = Line(style=style('#f4e83a'))
line.add('_', [1, 2, 3])
line.x_labels = 'abc'
new_chart = line.render()
assert (chart != new_chart)
chart = new_chart
|
[
"def",
"test_parametric_styles",
"(",
")",
":",
"chart",
"=",
"None",
"for",
"style",
"in",
"STYLES",
":",
"line",
"=",
"Line",
"(",
"style",
"=",
"style",
"(",
"'#f4e83a'",
")",
")",
"line",
".",
"add",
"(",
"'_'",
",",
"[",
"1",
",",
"2",
",",
"3",
"]",
")",
"line",
".",
"x_labels",
"=",
"'abc'",
"new_chart",
"=",
"line",
".",
"render",
"(",
")",
"assert",
"(",
"chart",
"!=",
"new_chart",
")",
"chart",
"=",
"new_chart"
] |
test that no parametric produce the same result .
|
train
| false
|
6,803
|
def _split_list(s, predicate):
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return (yes, no)
|
[
"def",
"_split_list",
"(",
"s",
",",
"predicate",
")",
":",
"yes",
"=",
"[",
"]",
"no",
"=",
"[",
"]",
"for",
"x",
"in",
"s",
":",
"if",
"predicate",
"(",
"x",
")",
":",
"yes",
".",
"append",
"(",
"x",
")",
"else",
":",
"no",
".",
"append",
"(",
"x",
")",
"return",
"(",
"yes",
",",
"no",
")"
] |
split sequence s via predicate .
|
train
| false
|
6,804
|
def recalculate_cart(request):
from cartridge.shop import checkout
from cartridge.shop.forms import DiscountForm
from cartridge.shop.models import Cart
if (request.session.get(u'cart') != request.cart.pk):
request.session[u'cart'] = request.cart.pk
request.cart = Cart.objects.from_request(request)
discount_code = request.session.get(u'discount_code', u'')
if discount_code:
names = (u'free_shipping', u'discount_code', u'discount_total')
clear_session(request, *names)
discount_form = DiscountForm(request, {u'discount_code': discount_code})
if discount_form.is_valid():
discount_form.set_discount()
handler = (lambda s: (import_dotted_path(s) if s else (lambda *args: None)))
billship_handler = handler(settings.SHOP_HANDLER_BILLING_SHIPPING)
tax_handler = handler(settings.SHOP_HANDLER_TAX)
try:
if (request.session[u'order'][u'step'] >= checkout.CHECKOUT_STEP_FIRST):
billship_handler(request, None)
tax_handler(request, None)
except (checkout.CheckoutError, ValueError, KeyError):
pass
|
[
"def",
"recalculate_cart",
"(",
"request",
")",
":",
"from",
"cartridge",
".",
"shop",
"import",
"checkout",
"from",
"cartridge",
".",
"shop",
".",
"forms",
"import",
"DiscountForm",
"from",
"cartridge",
".",
"shop",
".",
"models",
"import",
"Cart",
"if",
"(",
"request",
".",
"session",
".",
"get",
"(",
"u'cart'",
")",
"!=",
"request",
".",
"cart",
".",
"pk",
")",
":",
"request",
".",
"session",
"[",
"u'cart'",
"]",
"=",
"request",
".",
"cart",
".",
"pk",
"request",
".",
"cart",
"=",
"Cart",
".",
"objects",
".",
"from_request",
"(",
"request",
")",
"discount_code",
"=",
"request",
".",
"session",
".",
"get",
"(",
"u'discount_code'",
",",
"u''",
")",
"if",
"discount_code",
":",
"names",
"=",
"(",
"u'free_shipping'",
",",
"u'discount_code'",
",",
"u'discount_total'",
")",
"clear_session",
"(",
"request",
",",
"*",
"names",
")",
"discount_form",
"=",
"DiscountForm",
"(",
"request",
",",
"{",
"u'discount_code'",
":",
"discount_code",
"}",
")",
"if",
"discount_form",
".",
"is_valid",
"(",
")",
":",
"discount_form",
".",
"set_discount",
"(",
")",
"handler",
"=",
"(",
"lambda",
"s",
":",
"(",
"import_dotted_path",
"(",
"s",
")",
"if",
"s",
"else",
"(",
"lambda",
"*",
"args",
":",
"None",
")",
")",
")",
"billship_handler",
"=",
"handler",
"(",
"settings",
".",
"SHOP_HANDLER_BILLING_SHIPPING",
")",
"tax_handler",
"=",
"handler",
"(",
"settings",
".",
"SHOP_HANDLER_TAX",
")",
"try",
":",
"if",
"(",
"request",
".",
"session",
"[",
"u'order'",
"]",
"[",
"u'step'",
"]",
">=",
"checkout",
".",
"CHECKOUT_STEP_FIRST",
")",
":",
"billship_handler",
"(",
"request",
",",
"None",
")",
"tax_handler",
"(",
"request",
",",
"None",
")",
"except",
"(",
"checkout",
".",
"CheckoutError",
",",
"ValueError",
",",
"KeyError",
")",
":",
"pass"
] |
updates an existing discount code .
|
train
| false
|
6,805
|
def libvlc_video_set_callbacks(mp, lock, unlock, display, opaque):
f = (_Cfunctions.get('libvlc_video_set_callbacks', None) or _Cfunction('libvlc_video_set_callbacks', ((1,), (1,), (1,), (1,), (1,)), None, None, MediaPlayer, VideoLockCb, VideoUnlockCb, VideoDisplayCb, ctypes.c_void_p))
return f(mp, lock, unlock, display, opaque)
|
[
"def",
"libvlc_video_set_callbacks",
"(",
"mp",
",",
"lock",
",",
"unlock",
",",
"display",
",",
"opaque",
")",
":",
"f",
"=",
"(",
"_Cfunctions",
".",
"get",
"(",
"'libvlc_video_set_callbacks'",
",",
"None",
")",
"or",
"_Cfunction",
"(",
"'libvlc_video_set_callbacks'",
",",
"(",
"(",
"1",
",",
")",
",",
"(",
"1",
",",
")",
",",
"(",
"1",
",",
")",
",",
"(",
"1",
",",
")",
",",
"(",
"1",
",",
")",
")",
",",
"None",
",",
"None",
",",
"MediaPlayer",
",",
"VideoLockCb",
",",
"VideoUnlockCb",
",",
"VideoDisplayCb",
",",
"ctypes",
".",
"c_void_p",
")",
")",
"return",
"f",
"(",
"mp",
",",
"lock",
",",
"unlock",
",",
"display",
",",
"opaque",
")"
] |
set callbacks and private data to render decoded video to a custom area in memory .
|
train
| true
|
6,807
|
def runMultiLink():
topo = simpleMultiLinkTopo(n=2)
net = Mininet(topo=topo)
net.start()
CLI(net)
net.stop()
|
[
"def",
"runMultiLink",
"(",
")",
":",
"topo",
"=",
"simpleMultiLinkTopo",
"(",
"n",
"=",
"2",
")",
"net",
"=",
"Mininet",
"(",
"topo",
"=",
"topo",
")",
"net",
".",
"start",
"(",
")",
"CLI",
"(",
"net",
")",
"net",
".",
"stop",
"(",
")"
] |
create and run multiple link network .
|
train
| false
|
6,808
|
def test_has_a_stack_list():
assert hasattr(io.FileSystem, 'stack'), 'FileSystem should have a stack'
assert isinstance(io.FileSystem.stack, list), 'FileSystem.stack should be a list'
|
[
"def",
"test_has_a_stack_list",
"(",
")",
":",
"assert",
"hasattr",
"(",
"io",
".",
"FileSystem",
",",
"'stack'",
")",
",",
"'FileSystem should have a stack'",
"assert",
"isinstance",
"(",
"io",
".",
"FileSystem",
".",
"stack",
",",
"list",
")",
",",
"'FileSystem.stack should be a list'"
] |
filesystem stack list .
|
train
| false
|
6,809
|
def kill(coro):
return KillEvent(coro)
|
[
"def",
"kill",
"(",
"coro",
")",
":",
"return",
"KillEvent",
"(",
"coro",
")"
] |
kill the salt minion .
|
train
| false
|
6,811
|
def tobs(data):
return BytesIO(tob(data))
|
[
"def",
"tobs",
"(",
"data",
")",
":",
"return",
"BytesIO",
"(",
"tob",
"(",
"data",
")",
")"
] |
transforms bytes or unicode into a byte stream .
|
train
| false
|
6,812
|
def levenshtein_similarity(string1, string2):
return (1 - (levenshtein(string1, string2) / float(max(len(string1), len(string2), 1.0))))
|
[
"def",
"levenshtein_similarity",
"(",
"string1",
",",
"string2",
")",
":",
"return",
"(",
"1",
"-",
"(",
"levenshtein",
"(",
"string1",
",",
"string2",
")",
"/",
"float",
"(",
"max",
"(",
"len",
"(",
"string1",
")",
",",
"len",
"(",
"string2",
")",
",",
"1.0",
")",
")",
")",
")"
] |
returns the similarity of string1 and string2 as a number between 0 .
|
train
| false
|
6,813
|
def _calculate_cut(lemmawords, stems):
(umt, wmt) = (0.0, 0.0)
for stem in stems:
cut = (set(lemmawords) & set(stems[stem]))
if cut:
cutcount = len(cut)
stemcount = len(stems[stem])
umt += (cutcount * (len(lemmawords) - cutcount))
wmt += (cutcount * (stemcount - cutcount))
return (umt, wmt)
|
[
"def",
"_calculate_cut",
"(",
"lemmawords",
",",
"stems",
")",
":",
"(",
"umt",
",",
"wmt",
")",
"=",
"(",
"0.0",
",",
"0.0",
")",
"for",
"stem",
"in",
"stems",
":",
"cut",
"=",
"(",
"set",
"(",
"lemmawords",
")",
"&",
"set",
"(",
"stems",
"[",
"stem",
"]",
")",
")",
"if",
"cut",
":",
"cutcount",
"=",
"len",
"(",
"cut",
")",
"stemcount",
"=",
"len",
"(",
"stems",
"[",
"stem",
"]",
")",
"umt",
"+=",
"(",
"cutcount",
"*",
"(",
"len",
"(",
"lemmawords",
")",
"-",
"cutcount",
")",
")",
"wmt",
"+=",
"(",
"cutcount",
"*",
"(",
"stemcount",
"-",
"cutcount",
")",
")",
"return",
"(",
"umt",
",",
"wmt",
")"
] |
count understemmed and overstemmed pairs for pair with common words .
|
train
| false
|
6,815
|
def get_manual_url():
if ((VERSION[2] == 0) and (VERSION[4] != u'final')):
manual_ver = u'dev'
else:
manual_ver = (u'%s.%s' % (VERSION[0], VERSION[1]))
return (u'https://www.reviewboard.org/docs/manual/%s/' % manual_ver)
|
[
"def",
"get_manual_url",
"(",
")",
":",
"if",
"(",
"(",
"VERSION",
"[",
"2",
"]",
"==",
"0",
")",
"and",
"(",
"VERSION",
"[",
"4",
"]",
"!=",
"u'final'",
")",
")",
":",
"manual_ver",
"=",
"u'dev'",
"else",
":",
"manual_ver",
"=",
"(",
"u'%s.%s'",
"%",
"(",
"VERSION",
"[",
"0",
"]",
",",
"VERSION",
"[",
"1",
"]",
")",
")",
"return",
"(",
"u'https://www.reviewboard.org/docs/manual/%s/'",
"%",
"manual_ver",
")"
] |
return the url to the review board manual for this version .
|
train
| false
|
6,817
|
def split_at_angle(contour, curvature, angle):
segments = []
kink_index = [i for i in range(len(curvature)) if (curvature[i] < angle)]
for (s, e) in zip(([0] + kink_index), (kink_index + [None])):
if (e is not None):
segments.append(contour[s:(e + 1)])
else:
segments.append(contour[s:e])
return segments
|
[
"def",
"split_at_angle",
"(",
"contour",
",",
"curvature",
",",
"angle",
")",
":",
"segments",
"=",
"[",
"]",
"kink_index",
"=",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"curvature",
")",
")",
"if",
"(",
"curvature",
"[",
"i",
"]",
"<",
"angle",
")",
"]",
"for",
"(",
"s",
",",
"e",
")",
"in",
"zip",
"(",
"(",
"[",
"0",
"]",
"+",
"kink_index",
")",
",",
"(",
"kink_index",
"+",
"[",
"None",
"]",
")",
")",
":",
"if",
"(",
"e",
"is",
"not",
"None",
")",
":",
"segments",
".",
"append",
"(",
"contour",
"[",
"s",
":",
"(",
"e",
"+",
"1",
")",
"]",
")",
"else",
":",
"segments",
".",
"append",
"(",
"contour",
"[",
"s",
":",
"e",
"]",
")",
"return",
"segments"
] |
contour is array shape=(number of points .
|
train
| false
|
6,819
|
def string_to_dimension_level(astring):
if (not astring):
raise ArgumentError('Drilldown string should not be empty')
ident = '[\\w\\d_]'
pattern = ('(?P<dim>%s+)(@(?P<hier>%s+))?(:(?P<level>%s+))?' % (ident, ident, ident))
match = re.match(pattern, astring)
if match:
d = match.groupdict()
return (d['dim'], d['hier'], d['level'])
else:
raise ArgumentError(("String '%s' does not match drilldown level pattern 'dimension@hierarchy:level'" % astring))
|
[
"def",
"string_to_dimension_level",
"(",
"astring",
")",
":",
"if",
"(",
"not",
"astring",
")",
":",
"raise",
"ArgumentError",
"(",
"'Drilldown string should not be empty'",
")",
"ident",
"=",
"'[\\\\w\\\\d_]'",
"pattern",
"=",
"(",
"'(?P<dim>%s+)(@(?P<hier>%s+))?(:(?P<level>%s+))?'",
"%",
"(",
"ident",
",",
"ident",
",",
"ident",
")",
")",
"match",
"=",
"re",
".",
"match",
"(",
"pattern",
",",
"astring",
")",
"if",
"match",
":",
"d",
"=",
"match",
".",
"groupdict",
"(",
")",
"return",
"(",
"d",
"[",
"'dim'",
"]",
",",
"d",
"[",
"'hier'",
"]",
",",
"d",
"[",
"'level'",
"]",
")",
"else",
":",
"raise",
"ArgumentError",
"(",
"(",
"\"String '%s' does not match drilldown level pattern 'dimension@hierarchy:level'\"",
"%",
"astring",
")",
")"
] |
converts astring into a dimension level tuple .
|
train
| false
|
6,820
|
@signals.contributor_removed.connect
def remove_contributor_from_subscriptions(node, user):
if (user._id not in node.admin_contributor_ids):
node_subscriptions = get_all_node_subscriptions(user, node)
for subscription in node_subscriptions:
subscription.remove_user_from_subscription(user)
|
[
"@",
"signals",
".",
"contributor_removed",
".",
"connect",
"def",
"remove_contributor_from_subscriptions",
"(",
"node",
",",
"user",
")",
":",
"if",
"(",
"user",
".",
"_id",
"not",
"in",
"node",
".",
"admin_contributor_ids",
")",
":",
"node_subscriptions",
"=",
"get_all_node_subscriptions",
"(",
"user",
",",
"node",
")",
"for",
"subscription",
"in",
"node_subscriptions",
":",
"subscription",
".",
"remove_user_from_subscription",
"(",
"user",
")"
] |
remove contributor from node subscriptions unless the user is an admin on any of nodes parent projects .
|
train
| false
|
6,821
|
def treepositions_no_leaves(tree):
treepositions = tree.treepositions()
prefixes = set()
for pos in treepositions:
for length in range(len(pos)):
prefixes.add(pos[:length])
return [pos for pos in treepositions if (pos in prefixes)]
|
[
"def",
"treepositions_no_leaves",
"(",
"tree",
")",
":",
"treepositions",
"=",
"tree",
".",
"treepositions",
"(",
")",
"prefixes",
"=",
"set",
"(",
")",
"for",
"pos",
"in",
"treepositions",
":",
"for",
"length",
"in",
"range",
"(",
"len",
"(",
"pos",
")",
")",
":",
"prefixes",
".",
"add",
"(",
"pos",
"[",
":",
"length",
"]",
")",
"return",
"[",
"pos",
"for",
"pos",
"in",
"treepositions",
"if",
"(",
"pos",
"in",
"prefixes",
")",
"]"
] |
returns all the tree positions in the given tree which are not leaf nodes .
|
train
| false
|
6,822
|
def print_sentiment(filename):
credentials = GoogleCredentials.get_application_default()
service = discovery.build('language', 'v1', credentials=credentials)
with io.open(filename, 'r') as review_file:
review_file_contents = review_file.read()
service_request = service.documents().analyzeSentiment(body={'document': {'type': 'PLAIN_TEXT', 'content': review_file_contents}})
response = service_request.execute()
score = response['documentSentiment']['score']
magnitude = response['documentSentiment']['magnitude']
for (n, sentence) in enumerate(response['sentences']):
sentence_sentiment = sentence['sentiment']['score']
print 'Sentence {} has a sentiment score of {}'.format(n, sentence_sentiment)
print 'Overall Sentiment: score of {} with magnitude of {}'.format(score, magnitude)
|
[
"def",
"print_sentiment",
"(",
"filename",
")",
":",
"credentials",
"=",
"GoogleCredentials",
".",
"get_application_default",
"(",
")",
"service",
"=",
"discovery",
".",
"build",
"(",
"'language'",
",",
"'v1'",
",",
"credentials",
"=",
"credentials",
")",
"with",
"io",
".",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"review_file",
":",
"review_file_contents",
"=",
"review_file",
".",
"read",
"(",
")",
"service_request",
"=",
"service",
".",
"documents",
"(",
")",
".",
"analyzeSentiment",
"(",
"body",
"=",
"{",
"'document'",
":",
"{",
"'type'",
":",
"'PLAIN_TEXT'",
",",
"'content'",
":",
"review_file_contents",
"}",
"}",
")",
"response",
"=",
"service_request",
".",
"execute",
"(",
")",
"score",
"=",
"response",
"[",
"'documentSentiment'",
"]",
"[",
"'score'",
"]",
"magnitude",
"=",
"response",
"[",
"'documentSentiment'",
"]",
"[",
"'magnitude'",
"]",
"for",
"(",
"n",
",",
"sentence",
")",
"in",
"enumerate",
"(",
"response",
"[",
"'sentences'",
"]",
")",
":",
"sentence_sentiment",
"=",
"sentence",
"[",
"'sentiment'",
"]",
"[",
"'score'",
"]",
"print",
"'Sentence {} has a sentiment score of {}'",
".",
"format",
"(",
"n",
",",
"sentence_sentiment",
")",
"print",
"'Overall Sentiment: score of {} with magnitude of {}'",
".",
"format",
"(",
"score",
",",
"magnitude",
")"
] |
prints sentiment analysis on a given file contents .
|
train
| false
|
6,823
|
def set_descriptions(config):
log = logging.getLogger('gitosis.gitweb.set_descriptions')
for (section, name, topdir, subpath) in enum_cfg_repos(config):
try:
description = config.get(section, 'description')
except (NoSectionError, NoOptionError):
continue
if (not description):
continue
if (not os.path.exists(os.path.join(topdir, subpath))):
log.warning(('Cannot find %(name)r in %(topdir)r' % dict(name=name, topdir=topdir)))
continue
path = os.path.join(topdir, subpath, 'description')
tmp = ('%s.%d.tmp' % (path, os.getpid()))
f = file(tmp, 'w')
try:
print >>f, description
finally:
f.close()
os.rename(tmp, path)
|
[
"def",
"set_descriptions",
"(",
"config",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"'gitosis.gitweb.set_descriptions'",
")",
"for",
"(",
"section",
",",
"name",
",",
"topdir",
",",
"subpath",
")",
"in",
"enum_cfg_repos",
"(",
"config",
")",
":",
"try",
":",
"description",
"=",
"config",
".",
"get",
"(",
"section",
",",
"'description'",
")",
"except",
"(",
"NoSectionError",
",",
"NoOptionError",
")",
":",
"continue",
"if",
"(",
"not",
"description",
")",
":",
"continue",
"if",
"(",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"topdir",
",",
"subpath",
")",
")",
")",
":",
"log",
".",
"warning",
"(",
"(",
"'Cannot find %(name)r in %(topdir)r'",
"%",
"dict",
"(",
"name",
"=",
"name",
",",
"topdir",
"=",
"topdir",
")",
")",
")",
"continue",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"topdir",
",",
"subpath",
",",
"'description'",
")",
"tmp",
"=",
"(",
"'%s.%d.tmp'",
"%",
"(",
"path",
",",
"os",
".",
"getpid",
"(",
")",
")",
")",
"f",
"=",
"file",
"(",
"tmp",
",",
"'w'",
")",
"try",
":",
"print",
">>",
"f",
",",
"description",
"finally",
":",
"f",
".",
"close",
"(",
")",
"os",
".",
"rename",
"(",
"tmp",
",",
"path",
")"
] |
set descriptions for gitweb use .
|
train
| false
|
6,824
|
def _cholesky(a):
import scipy.linalg
if (a.ndim != 2):
raise ValueError('Dimension must be 2 to perform cholesky decomposition')
(xdim, ydim) = a.shape
if (xdim != ydim):
raise ValueError('Input must be a square matrix to perform cholesky decomposition')
if (not (len(set((a.chunks[0] + a.chunks[1]))) == 1)):
msg = 'All chunks must be a square matrix to perform cholesky decomposition. Use .rechunk method to change the size of chunks.'
raise ValueError(msg)
vdim = len(a.chunks[0])
hdim = len(a.chunks[1])
token = tokenize(a)
name = ('cholesky-' + token)
name_lt_dot = ('cholesky-lt-dot-' + token)
name_upper = ('cholesky-upper-' + token)
dsk = {}
for i in range(vdim):
for j in range(hdim):
if (i < j):
dsk[(name, i, j)] = (np.zeros, (a.chunks[0][i], a.chunks[1][j]))
dsk[(name_upper, j, i)] = (name, i, j)
elif (i == j):
target = (a.name, i, j)
if (i > 0):
prevs = []
for p in range(i):
prev = (name_lt_dot, i, p, i, p)
dsk[prev] = (np.dot, (name, i, p), (name_upper, p, i))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
dsk[(name, i, i)] = (_cholesky_lower, target)
dsk[(name_upper, i, i)] = (np.transpose, (name, i, i))
else:
target = (a.name, j, i)
if (j > 0):
prevs = []
for p in range(j):
prev = (name_lt_dot, j, p, i, p)
dsk[prev] = (np.dot, (name, j, p), (name_upper, p, i))
prevs.append(prev)
target = (operator.sub, target, (sum, prevs))
dsk[(name_upper, j, i)] = (_solve_triangular_lower, (name, j, j), target)
dsk[(name, i, j)] = (np.transpose, (name_upper, j, i))
dsk.update(a.dask)
cho = scipy.linalg.cholesky(np.array([[1, 2], [2, 5]], dtype=a.dtype))
lower = Array(dsk, name, shape=a.shape, chunks=a.chunks, dtype=cho.dtype)
upper = Array(dsk, name_upper, shape=a.shape, chunks=a.chunks, dtype=cho.dtype)
return (lower, upper)
|
[
"def",
"_cholesky",
"(",
"a",
")",
":",
"import",
"scipy",
".",
"linalg",
"if",
"(",
"a",
".",
"ndim",
"!=",
"2",
")",
":",
"raise",
"ValueError",
"(",
"'Dimension must be 2 to perform cholesky decomposition'",
")",
"(",
"xdim",
",",
"ydim",
")",
"=",
"a",
".",
"shape",
"if",
"(",
"xdim",
"!=",
"ydim",
")",
":",
"raise",
"ValueError",
"(",
"'Input must be a square matrix to perform cholesky decomposition'",
")",
"if",
"(",
"not",
"(",
"len",
"(",
"set",
"(",
"(",
"a",
".",
"chunks",
"[",
"0",
"]",
"+",
"a",
".",
"chunks",
"[",
"1",
"]",
")",
")",
")",
"==",
"1",
")",
")",
":",
"msg",
"=",
"'All chunks must be a square matrix to perform cholesky decomposition. Use .rechunk method to change the size of chunks.'",
"raise",
"ValueError",
"(",
"msg",
")",
"vdim",
"=",
"len",
"(",
"a",
".",
"chunks",
"[",
"0",
"]",
")",
"hdim",
"=",
"len",
"(",
"a",
".",
"chunks",
"[",
"1",
"]",
")",
"token",
"=",
"tokenize",
"(",
"a",
")",
"name",
"=",
"(",
"'cholesky-'",
"+",
"token",
")",
"name_lt_dot",
"=",
"(",
"'cholesky-lt-dot-'",
"+",
"token",
")",
"name_upper",
"=",
"(",
"'cholesky-upper-'",
"+",
"token",
")",
"dsk",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"vdim",
")",
":",
"for",
"j",
"in",
"range",
"(",
"hdim",
")",
":",
"if",
"(",
"i",
"<",
"j",
")",
":",
"dsk",
"[",
"(",
"name",
",",
"i",
",",
"j",
")",
"]",
"=",
"(",
"np",
".",
"zeros",
",",
"(",
"a",
".",
"chunks",
"[",
"0",
"]",
"[",
"i",
"]",
",",
"a",
".",
"chunks",
"[",
"1",
"]",
"[",
"j",
"]",
")",
")",
"dsk",
"[",
"(",
"name_upper",
",",
"j",
",",
"i",
")",
"]",
"=",
"(",
"name",
",",
"i",
",",
"j",
")",
"elif",
"(",
"i",
"==",
"j",
")",
":",
"target",
"=",
"(",
"a",
".",
"name",
",",
"i",
",",
"j",
")",
"if",
"(",
"i",
">",
"0",
")",
":",
"prevs",
"=",
"[",
"]",
"for",
"p",
"in",
"range",
"(",
"i",
")",
":",
"prev",
"=",
"(",
"name_lt_dot",
",",
"i",
",",
"p",
",",
"i",
",",
"p",
")",
"dsk",
"[",
"prev",
"]",
"=",
"(",
"np",
".",
"dot",
",",
"(",
"name",
",",
"i",
",",
"p",
")",
",",
"(",
"name_upper",
",",
"p",
",",
"i",
")",
")",
"prevs",
".",
"append",
"(",
"prev",
")",
"target",
"=",
"(",
"operator",
".",
"sub",
",",
"target",
",",
"(",
"sum",
",",
"prevs",
")",
")",
"dsk",
"[",
"(",
"name",
",",
"i",
",",
"i",
")",
"]",
"=",
"(",
"_cholesky_lower",
",",
"target",
")",
"dsk",
"[",
"(",
"name_upper",
",",
"i",
",",
"i",
")",
"]",
"=",
"(",
"np",
".",
"transpose",
",",
"(",
"name",
",",
"i",
",",
"i",
")",
")",
"else",
":",
"target",
"=",
"(",
"a",
".",
"name",
",",
"j",
",",
"i",
")",
"if",
"(",
"j",
">",
"0",
")",
":",
"prevs",
"=",
"[",
"]",
"for",
"p",
"in",
"range",
"(",
"j",
")",
":",
"prev",
"=",
"(",
"name_lt_dot",
",",
"j",
",",
"p",
",",
"i",
",",
"p",
")",
"dsk",
"[",
"prev",
"]",
"=",
"(",
"np",
".",
"dot",
",",
"(",
"name",
",",
"j",
",",
"p",
")",
",",
"(",
"name_upper",
",",
"p",
",",
"i",
")",
")",
"prevs",
".",
"append",
"(",
"prev",
")",
"target",
"=",
"(",
"operator",
".",
"sub",
",",
"target",
",",
"(",
"sum",
",",
"prevs",
")",
")",
"dsk",
"[",
"(",
"name_upper",
",",
"j",
",",
"i",
")",
"]",
"=",
"(",
"_solve_triangular_lower",
",",
"(",
"name",
",",
"j",
",",
"j",
")",
",",
"target",
")",
"dsk",
"[",
"(",
"name",
",",
"i",
",",
"j",
")",
"]",
"=",
"(",
"np",
".",
"transpose",
",",
"(",
"name_upper",
",",
"j",
",",
"i",
")",
")",
"dsk",
".",
"update",
"(",
"a",
".",
"dask",
")",
"cho",
"=",
"scipy",
".",
"linalg",
".",
"cholesky",
"(",
"np",
".",
"array",
"(",
"[",
"[",
"1",
",",
"2",
"]",
",",
"[",
"2",
",",
"5",
"]",
"]",
",",
"dtype",
"=",
"a",
".",
"dtype",
")",
")",
"lower",
"=",
"Array",
"(",
"dsk",
",",
"name",
",",
"shape",
"=",
"a",
".",
"shape",
",",
"chunks",
"=",
"a",
".",
"chunks",
",",
"dtype",
"=",
"cho",
".",
"dtype",
")",
"upper",
"=",
"Array",
"(",
"dsk",
",",
"name_upper",
",",
"shape",
"=",
"a",
".",
"shape",
",",
"chunks",
"=",
"a",
".",
"chunks",
",",
"dtype",
"=",
"cho",
".",
"dtype",
")",
"return",
"(",
"lower",
",",
"upper",
")"
] |
common code for cholesky() and cho_factor() .
|
train
| false
|
6,825
|
def get_things_todo(as_list=False):
data = frappe.get_list(u'ToDo', fields=([u'name', u'description'] if as_list else u'count(*)'), filters=[[u'ToDo', u'status', u'=', u'Open']], or_filters=[[u'ToDo', u'owner', u'=', frappe.session.user], [u'ToDo', u'assigned_by', u'=', frappe.session.user]], as_list=True)
if as_list:
return data
else:
return data[0][0]
|
[
"def",
"get_things_todo",
"(",
"as_list",
"=",
"False",
")",
":",
"data",
"=",
"frappe",
".",
"get_list",
"(",
"u'ToDo'",
",",
"fields",
"=",
"(",
"[",
"u'name'",
",",
"u'description'",
"]",
"if",
"as_list",
"else",
"u'count(*)'",
")",
",",
"filters",
"=",
"[",
"[",
"u'ToDo'",
",",
"u'status'",
",",
"u'='",
",",
"u'Open'",
"]",
"]",
",",
"or_filters",
"=",
"[",
"[",
"u'ToDo'",
",",
"u'owner'",
",",
"u'='",
",",
"frappe",
".",
"session",
".",
"user",
"]",
",",
"[",
"u'ToDo'",
",",
"u'assigned_by'",
",",
"u'='",
",",
"frappe",
".",
"session",
".",
"user",
"]",
"]",
",",
"as_list",
"=",
"True",
")",
"if",
"as_list",
":",
"return",
"data",
"else",
":",
"return",
"data",
"[",
"0",
"]",
"[",
"0",
"]"
] |
returns a count of incomplete todos .
|
train
| false
|
6,826
|
def placement_authorize(context, action, target=None):
placement_init()
if (target is None):
target = {'project_id': context.tenant, 'user_id': context.user}
credentials = context.to_policy_values()
result = _ENFORCER_PLACEMENT.enforce(action, target, credentials, do_raise=False, exc=None, action=action)
if (result is False):
LOG.debug('Policy check for %(action)s failed with credentials %(credentials)s', {'action': action, 'credentials': credentials})
return result
|
[
"def",
"placement_authorize",
"(",
"context",
",",
"action",
",",
"target",
"=",
"None",
")",
":",
"placement_init",
"(",
")",
"if",
"(",
"target",
"is",
"None",
")",
":",
"target",
"=",
"{",
"'project_id'",
":",
"context",
".",
"tenant",
",",
"'user_id'",
":",
"context",
".",
"user",
"}",
"credentials",
"=",
"context",
".",
"to_policy_values",
"(",
")",
"result",
"=",
"_ENFORCER_PLACEMENT",
".",
"enforce",
"(",
"action",
",",
"target",
",",
"credentials",
",",
"do_raise",
"=",
"False",
",",
"exc",
"=",
"None",
",",
"action",
"=",
"action",
")",
"if",
"(",
"result",
"is",
"False",
")",
":",
"LOG",
".",
"debug",
"(",
"'Policy check for %(action)s failed with credentials %(credentials)s'",
",",
"{",
"'action'",
":",
"action",
",",
"'credentials'",
":",
"credentials",
"}",
")",
"return",
"result"
] |
verifies that the action is valid on the target in this context .
|
train
| false
|
6,828
|
def get_tags(client, stream_name, check_mode=False):
err_msg = ''
success = False
params = {'StreamName': stream_name}
results = dict()
try:
if (not check_mode):
results = client.list_tags_for_stream(**params)['Tags']
else:
results = [{'Key': 'DryRunMode', 'Value': 'true'}]
success = True
except botocore.exceptions.ClientError as e:
err_msg = str(e)
return (success, err_msg, results)
|
[
"def",
"get_tags",
"(",
"client",
",",
"stream_name",
",",
"check_mode",
"=",
"False",
")",
":",
"err_msg",
"=",
"''",
"success",
"=",
"False",
"params",
"=",
"{",
"'StreamName'",
":",
"stream_name",
"}",
"results",
"=",
"dict",
"(",
")",
"try",
":",
"if",
"(",
"not",
"check_mode",
")",
":",
"results",
"=",
"client",
".",
"list_tags_for_stream",
"(",
"**",
"params",
")",
"[",
"'Tags'",
"]",
"else",
":",
"results",
"=",
"[",
"{",
"'Key'",
":",
"'DryRunMode'",
",",
"'Value'",
":",
"'true'",
"}",
"]",
"success",
"=",
"True",
"except",
"botocore",
".",
"exceptions",
".",
"ClientError",
"as",
"e",
":",
"err_msg",
"=",
"str",
"(",
"e",
")",
"return",
"(",
"success",
",",
"err_msg",
",",
"results",
")"
] |
return the tags associated with an efs instance .
|
train
| false
|
6,829
|
def p_and_expression_1(t):
pass
|
[
"def",
"p_and_expression_1",
"(",
"t",
")",
":",
"pass"
] |
and_expression : equality_expression .
|
train
| false
|
6,831
|
def volumelevel(level):
finder = _getfinder()
args = {}
attrs = {}
if (level < 0):
level = 0
elif (level > 7):
level = 7
args['----'] = level
(_reply, args, attrs) = finder.send('aevt', 'stvl', args, attrs)
if ('errn' in args):
raise Error, aetools.decodeerror(args)
if ('----' in args):
return args['----']
|
[
"def",
"volumelevel",
"(",
"level",
")",
":",
"finder",
"=",
"_getfinder",
"(",
")",
"args",
"=",
"{",
"}",
"attrs",
"=",
"{",
"}",
"if",
"(",
"level",
"<",
"0",
")",
":",
"level",
"=",
"0",
"elif",
"(",
"level",
">",
"7",
")",
":",
"level",
"=",
"7",
"args",
"[",
"'----'",
"]",
"=",
"level",
"(",
"_reply",
",",
"args",
",",
"attrs",
")",
"=",
"finder",
".",
"send",
"(",
"'aevt'",
",",
"'stvl'",
",",
"args",
",",
"attrs",
")",
"if",
"(",
"'errn'",
"in",
"args",
")",
":",
"raise",
"Error",
",",
"aetools",
".",
"decodeerror",
"(",
"args",
")",
"if",
"(",
"'----'",
"in",
"args",
")",
":",
"return",
"args",
"[",
"'----'",
"]"
] |
set the audio output level .
|
train
| false
|
6,832
|
def pci_device_get_all_by_instance_uuid(context, instance_uuid):
return IMPL.pci_device_get_all_by_instance_uuid(context, instance_uuid)
|
[
"def",
"pci_device_get_all_by_instance_uuid",
"(",
"context",
",",
"instance_uuid",
")",
":",
"return",
"IMPL",
".",
"pci_device_get_all_by_instance_uuid",
"(",
"context",
",",
"instance_uuid",
")"
] |
get pci devices allocated to instance .
|
train
| false
|
6,834
|
def test_metrics_agents_ping(dcos_api_session):
for agent in dcos_api_session.slaves:
response = dcos_api_session.metrics.get('ping', node=agent)
assert (response.status_code == 200), 'Status code: {}, Content {}'.format(response.status_code, response.content)
assert response.json()['ok'], 'Status code: {}, Content {}'.format(response.status_code, response.content)
'agent.'
for agent in dcos_api_session.public_slaves:
response = dcos_api_session.metrics.get('ping', node=agent)
assert (response.status_code == 200), 'Status code: {}, Content {}'.format(response.status_code, response.content)
assert response.json()['ok'], 'Status code: {}, Content {}'.format(response.status_code, response.content)
|
[
"def",
"test_metrics_agents_ping",
"(",
"dcos_api_session",
")",
":",
"for",
"agent",
"in",
"dcos_api_session",
".",
"slaves",
":",
"response",
"=",
"dcos_api_session",
".",
"metrics",
".",
"get",
"(",
"'ping'",
",",
"node",
"=",
"agent",
")",
"assert",
"(",
"response",
".",
"status_code",
"==",
"200",
")",
",",
"'Status code: {}, Content {}'",
".",
"format",
"(",
"response",
".",
"status_code",
",",
"response",
".",
"content",
")",
"assert",
"response",
".",
"json",
"(",
")",
"[",
"'ok'",
"]",
",",
"'Status code: {}, Content {}'",
".",
"format",
"(",
"response",
".",
"status_code",
",",
"response",
".",
"content",
")",
"for",
"agent",
"in",
"dcos_api_session",
".",
"public_slaves",
":",
"response",
"=",
"dcos_api_session",
".",
"metrics",
".",
"get",
"(",
"'ping'",
",",
"node",
"=",
"agent",
")",
"assert",
"(",
"response",
".",
"status_code",
"==",
"200",
")",
",",
"'Status code: {}, Content {}'",
".",
"format",
"(",
"response",
".",
"status_code",
",",
"response",
".",
"content",
")",
"assert",
"response",
".",
"json",
"(",
")",
"[",
"'ok'",
"]",
",",
"'Status code: {}, Content {}'",
".",
"format",
"(",
"response",
".",
"status_code",
",",
"response",
".",
"content",
")"
] |
test that the metrics service is up on masters .
|
train
| false
|
6,836
|
def task_test():
return {'task_dep': ['locale', 'doctest'], 'actions': ['py.test tests/']}
|
[
"def",
"task_test",
"(",
")",
":",
"return",
"{",
"'task_dep'",
":",
"[",
"'locale'",
",",
"'doctest'",
"]",
",",
"'actions'",
":",
"[",
"'py.test tests/'",
"]",
"}"
] |
run unit-tests using py .
|
train
| false
|
6,837
|
def test_dimensionless_to_si():
testunit = ((1.0 * u.kpc) / (1.0 * u.Mpc))
assert (testunit.unit.physical_type == u'dimensionless')
assert_allclose(testunit.si, 0.001)
|
[
"def",
"test_dimensionless_to_si",
"(",
")",
":",
"testunit",
"=",
"(",
"(",
"1.0",
"*",
"u",
".",
"kpc",
")",
"/",
"(",
"1.0",
"*",
"u",
".",
"Mpc",
")",
")",
"assert",
"(",
"testunit",
".",
"unit",
".",
"physical_type",
"==",
"u'dimensionless'",
")",
"assert_allclose",
"(",
"testunit",
".",
"si",
",",
"0.001",
")"
] |
issue #1150: test for conversion of dimensionless quantities to the si system .
|
train
| false
|
6,838
|
def range_boundaries(range_string):
m = ABSOLUTE_RE.match(range_string)
(min_col, min_row, sep, max_col, max_row) = m.groups()
min_col = column_index_from_string(min_col)
min_row = int(min_row)
if ((max_col is None) or (max_row is None)):
max_col = min_col
max_row = min_row
else:
max_col = column_index_from_string(max_col)
max_row = int(max_row)
return (min_col, min_row, max_col, max_row)
|
[
"def",
"range_boundaries",
"(",
"range_string",
")",
":",
"m",
"=",
"ABSOLUTE_RE",
".",
"match",
"(",
"range_string",
")",
"(",
"min_col",
",",
"min_row",
",",
"sep",
",",
"max_col",
",",
"max_row",
")",
"=",
"m",
".",
"groups",
"(",
")",
"min_col",
"=",
"column_index_from_string",
"(",
"min_col",
")",
"min_row",
"=",
"int",
"(",
"min_row",
")",
"if",
"(",
"(",
"max_col",
"is",
"None",
")",
"or",
"(",
"max_row",
"is",
"None",
")",
")",
":",
"max_col",
"=",
"min_col",
"max_row",
"=",
"min_row",
"else",
":",
"max_col",
"=",
"column_index_from_string",
"(",
"max_col",
")",
"max_row",
"=",
"int",
"(",
"max_row",
")",
"return",
"(",
"min_col",
",",
"min_row",
",",
"max_col",
",",
"max_row",
")"
] |
convert a range string into a tuple of boundaries: cell coordinates will be converted into a range with the cell at both end .
|
train
| false
|
6,839
|
def get_pixmap(icon_name):
global plugin_icon_resources, plugin_name
if (not icon_name.startswith(u'images/')):
pixmap = QPixmap()
pixmap.load(I(icon_name))
return pixmap
if plugin_name:
local_images_dir = get_local_images_dir(plugin_name)
local_image_path = os.path.join(local_images_dir, icon_name.replace(u'images/', u''))
if os.path.exists(local_image_path):
pixmap = QPixmap()
pixmap.load(local_image_path)
return pixmap
if (icon_name in plugin_icon_resources):
pixmap = QPixmap()
pixmap.loadFromData(plugin_icon_resources[icon_name])
return pixmap
return None
|
[
"def",
"get_pixmap",
"(",
"icon_name",
")",
":",
"global",
"plugin_icon_resources",
",",
"plugin_name",
"if",
"(",
"not",
"icon_name",
".",
"startswith",
"(",
"u'images/'",
")",
")",
":",
"pixmap",
"=",
"QPixmap",
"(",
")",
"pixmap",
".",
"load",
"(",
"I",
"(",
"icon_name",
")",
")",
"return",
"pixmap",
"if",
"plugin_name",
":",
"local_images_dir",
"=",
"get_local_images_dir",
"(",
"plugin_name",
")",
"local_image_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"local_images_dir",
",",
"icon_name",
".",
"replace",
"(",
"u'images/'",
",",
"u''",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"local_image_path",
")",
":",
"pixmap",
"=",
"QPixmap",
"(",
")",
"pixmap",
".",
"load",
"(",
"local_image_path",
")",
"return",
"pixmap",
"if",
"(",
"icon_name",
"in",
"plugin_icon_resources",
")",
":",
"pixmap",
"=",
"QPixmap",
"(",
")",
"pixmap",
".",
"loadFromData",
"(",
"plugin_icon_resources",
"[",
"icon_name",
"]",
")",
"return",
"pixmap",
"return",
"None"
] |
retrieve a qpixmap for the named image any icons belonging to the plugin must be prefixed with images/ .
|
train
| false
|
6,840
|
def test_pricing_module_is_active():
shop = Shop(currency='USD', prices_include_tax=False)
customer = AnonymousContact()
product = Product(sku='6.0745')
pricing_mod = get_pricing_module()
pricing_ctx = pricing_mod.get_context_from_data(shop, customer)
pi = product.get_price_info(pricing_ctx, quantity=2)
price = shop.create_price
assert (pi.price == price('12.149'))
assert (pi.base_price == price('48.596'))
assert (pi.quantity == 2)
assert (pi.discounted_unit_price == price('6.0745'))
assert (pi.base_unit_price == price('24.298'))
assert (pi.discount_rate == Decimal('0.75'))
|
[
"def",
"test_pricing_module_is_active",
"(",
")",
":",
"shop",
"=",
"Shop",
"(",
"currency",
"=",
"'USD'",
",",
"prices_include_tax",
"=",
"False",
")",
"customer",
"=",
"AnonymousContact",
"(",
")",
"product",
"=",
"Product",
"(",
"sku",
"=",
"'6.0745'",
")",
"pricing_mod",
"=",
"get_pricing_module",
"(",
")",
"pricing_ctx",
"=",
"pricing_mod",
".",
"get_context_from_data",
"(",
"shop",
",",
"customer",
")",
"pi",
"=",
"product",
".",
"get_price_info",
"(",
"pricing_ctx",
",",
"quantity",
"=",
"2",
")",
"price",
"=",
"shop",
".",
"create_price",
"assert",
"(",
"pi",
".",
"price",
"==",
"price",
"(",
"'12.149'",
")",
")",
"assert",
"(",
"pi",
".",
"base_price",
"==",
"price",
"(",
"'48.596'",
")",
")",
"assert",
"(",
"pi",
".",
"quantity",
"==",
"2",
")",
"assert",
"(",
"pi",
".",
"discounted_unit_price",
"==",
"price",
"(",
"'6.0745'",
")",
")",
"assert",
"(",
"pi",
".",
"base_unit_price",
"==",
"price",
"(",
"'24.298'",
")",
")",
"assert",
"(",
"pi",
".",
"discount_rate",
"==",
"Decimal",
"(",
"'0.75'",
")",
")"
] |
make sure that our custom pricing module is active .
|
train
| false
|
6,841
|
def get_curr_bc_added_field(curr_bc, ids_bcs_added_field, fasta_label, all_bcs, barcode_type='golay_12', disable_bc_correction=False, added_demultiplex_field=None):
if added_demultiplex_field:
added_field = get_added_demultiplex_field(ids_bcs_added_field, fasta_label, added_demultiplex_field)
else:
added_field = None
if disable_bc_correction:
num_errors = 0
corrected_bc = get_exact_bc_matches(curr_bc, all_bcs)
else:
(corrected_bc, num_errors) = attempt_bc_correction(curr_bc, all_bcs, barcode_type)
return (corrected_bc, num_errors, added_field)
|
[
"def",
"get_curr_bc_added_field",
"(",
"curr_bc",
",",
"ids_bcs_added_field",
",",
"fasta_label",
",",
"all_bcs",
",",
"barcode_type",
"=",
"'golay_12'",
",",
"disable_bc_correction",
"=",
"False",
",",
"added_demultiplex_field",
"=",
"None",
")",
":",
"if",
"added_demultiplex_field",
":",
"added_field",
"=",
"get_added_demultiplex_field",
"(",
"ids_bcs_added_field",
",",
"fasta_label",
",",
"added_demultiplex_field",
")",
"else",
":",
"added_field",
"=",
"None",
"if",
"disable_bc_correction",
":",
"num_errors",
"=",
"0",
"corrected_bc",
"=",
"get_exact_bc_matches",
"(",
"curr_bc",
",",
"all_bcs",
")",
"else",
":",
"(",
"corrected_bc",
",",
"num_errors",
")",
"=",
"attempt_bc_correction",
"(",
"curr_bc",
",",
"all_bcs",
",",
"barcode_type",
")",
"return",
"(",
"corrected_bc",
",",
"num_errors",
",",
"added_field",
")"
] |
attempts to correct barcode .
|
train
| false
|
6,842
|
def GetVmodlName(typ):
try:
return vmodlNames[typ]
except KeyError:
return typ.__name__
|
[
"def",
"GetVmodlName",
"(",
"typ",
")",
":",
"try",
":",
"return",
"vmodlNames",
"[",
"typ",
"]",
"except",
"KeyError",
":",
"return",
"typ",
".",
"__name__"
] |
get vmodl type name from type .
|
train
| false
|
6,843
|
def getYIntersectionInsideYSegment(segmentFirstY, segmentSecondY, beginComplex, endComplex, x):
yIntersection = euclidean.getYIntersectionIfExists(beginComplex, endComplex, x)
if (yIntersection == None):
return None
if (yIntersection < min(segmentFirstY, segmentSecondY)):
return None
if (yIntersection <= max(segmentFirstY, segmentSecondY)):
return yIntersection
return None
|
[
"def",
"getYIntersectionInsideYSegment",
"(",
"segmentFirstY",
",",
"segmentSecondY",
",",
"beginComplex",
",",
"endComplex",
",",
"x",
")",
":",
"yIntersection",
"=",
"euclidean",
".",
"getYIntersectionIfExists",
"(",
"beginComplex",
",",
"endComplex",
",",
"x",
")",
"if",
"(",
"yIntersection",
"==",
"None",
")",
":",
"return",
"None",
"if",
"(",
"yIntersection",
"<",
"min",
"(",
"segmentFirstY",
",",
"segmentSecondY",
")",
")",
":",
"return",
"None",
"if",
"(",
"yIntersection",
"<=",
"max",
"(",
"segmentFirstY",
",",
"segmentSecondY",
")",
")",
":",
"return",
"yIntersection",
"return",
"None"
] |
get the y intersection inside the y segment if it does .
|
train
| false
|
6,844
|
def check_stack_depends(name):
def check(n, filename):
nodes = get_nodes_by_name(n, name)
depends = [e.attributes for e in nodes]
packages = [d['stack'].value for d in depends]
return [StackDepend(p) for p in packages]
return check
|
[
"def",
"check_stack_depends",
"(",
"name",
")",
":",
"def",
"check",
"(",
"n",
",",
"filename",
")",
":",
"nodes",
"=",
"get_nodes_by_name",
"(",
"n",
",",
"name",
")",
"depends",
"=",
"[",
"e",
".",
"attributes",
"for",
"e",
"in",
"nodes",
"]",
"packages",
"=",
"[",
"d",
"[",
"'stack'",
"]",
".",
"value",
"for",
"d",
"in",
"depends",
"]",
"return",
"[",
"StackDepend",
"(",
"p",
")",
"for",
"p",
"in",
"packages",
"]",
"return",
"check"
] |
validator for stack depends .
|
train
| false
|
6,845
|
def delete_obj(base_mapper, states, uowtransaction):
cached_connections = _cached_connection_dict(base_mapper)
states_to_delete = _organize_states_for_delete(base_mapper, states, uowtransaction)
table_to_mapper = base_mapper._sorted_tables
for table in reversed(list(table_to_mapper.keys())):
delete = _collect_delete_commands(base_mapper, uowtransaction, table, states_to_delete)
mapper = table_to_mapper[table]
_emit_delete_statements(base_mapper, uowtransaction, cached_connections, mapper, table, delete)
for (state, state_dict, mapper, has_identity, connection) in states_to_delete:
mapper.dispatch.after_delete(mapper, connection, state)
|
[
"def",
"delete_obj",
"(",
"base_mapper",
",",
"states",
",",
"uowtransaction",
")",
":",
"cached_connections",
"=",
"_cached_connection_dict",
"(",
"base_mapper",
")",
"states_to_delete",
"=",
"_organize_states_for_delete",
"(",
"base_mapper",
",",
"states",
",",
"uowtransaction",
")",
"table_to_mapper",
"=",
"base_mapper",
".",
"_sorted_tables",
"for",
"table",
"in",
"reversed",
"(",
"list",
"(",
"table_to_mapper",
".",
"keys",
"(",
")",
")",
")",
":",
"delete",
"=",
"_collect_delete_commands",
"(",
"base_mapper",
",",
"uowtransaction",
",",
"table",
",",
"states_to_delete",
")",
"mapper",
"=",
"table_to_mapper",
"[",
"table",
"]",
"_emit_delete_statements",
"(",
"base_mapper",
",",
"uowtransaction",
",",
"cached_connections",
",",
"mapper",
",",
"table",
",",
"delete",
")",
"for",
"(",
"state",
",",
"state_dict",
",",
"mapper",
",",
"has_identity",
",",
"connection",
")",
"in",
"states_to_delete",
":",
"mapper",
".",
"dispatch",
".",
"after_delete",
"(",
"mapper",
",",
"connection",
",",
"state",
")"
] |
issue delete statements for a list of objects .
|
train
| false
|
6,846
|
def from_timestamp(timestamp):
try:
return datetime.utcfromtimestamp(int(timestamp)).replace(tzinfo=utc)
except (ValueError, TypeError):
return None
|
[
"def",
"from_timestamp",
"(",
"timestamp",
")",
":",
"try",
":",
"return",
"datetime",
".",
"utcfromtimestamp",
"(",
"int",
"(",
"timestamp",
")",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"utc",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"return",
"None"
] |
convert a timestamp into a timezone-aware datetime .
|
train
| false
|
6,847
|
def _check_asset(location, asset_name):
content_location = StaticContent.compute_location(location.course_key, asset_name)
try:
contentstore().find(content_location)
except NotFoundError:
return False
else:
return True
|
[
"def",
"_check_asset",
"(",
"location",
",",
"asset_name",
")",
":",
"content_location",
"=",
"StaticContent",
".",
"compute_location",
"(",
"location",
".",
"course_key",
",",
"asset_name",
")",
"try",
":",
"contentstore",
"(",
")",
".",
"find",
"(",
"content_location",
")",
"except",
"NotFoundError",
":",
"return",
"False",
"else",
":",
"return",
"True"
] |
check that asset with asset_name exists in assets .
|
train
| false
|
6,848
|
def grad_scale(x, multiplier):
return GradScale(multiplier)(x)
|
[
"def",
"grad_scale",
"(",
"x",
",",
"multiplier",
")",
":",
"return",
"GradScale",
"(",
"multiplier",
")",
"(",
"x",
")"
] |
this op scale or inverse the gradient in the backpropagation .
|
train
| false
|
6,849
|
@addon_view
@non_atomic_requests
def overview_series(request, addon, group, start, end, format):
date_range = check_series_params_or_404(group, start, end, format)
check_stats_permission(request, addon)
dls = get_series(DownloadCount, addon=addon.id, date__range=date_range)
updates = get_series(UpdateCount, addon=addon.id, date__range=date_range)
series = zip_overview(dls, updates)
return render_json(request, addon, series)
|
[
"@",
"addon_view",
"@",
"non_atomic_requests",
"def",
"overview_series",
"(",
"request",
",",
"addon",
",",
"group",
",",
"start",
",",
"end",
",",
"format",
")",
":",
"date_range",
"=",
"check_series_params_or_404",
"(",
"group",
",",
"start",
",",
"end",
",",
"format",
")",
"check_stats_permission",
"(",
"request",
",",
"addon",
")",
"dls",
"=",
"get_series",
"(",
"DownloadCount",
",",
"addon",
"=",
"addon",
".",
"id",
",",
"date__range",
"=",
"date_range",
")",
"updates",
"=",
"get_series",
"(",
"UpdateCount",
",",
"addon",
"=",
"addon",
".",
"id",
",",
"date__range",
"=",
"date_range",
")",
"series",
"=",
"zip_overview",
"(",
"dls",
",",
"updates",
")",
"return",
"render_json",
"(",
"request",
",",
"addon",
",",
"series",
")"
] |
combines downloads_series and updates_series into one payload .
|
train
| false
|
6,850
|
def CopyStreamPart(source, destination, content_size):
bytes_copied = 0
bytes_left = content_size
while (bytes_left > 0):
bytes = source.read(min(bytes_left, COPY_BLOCK_SIZE))
bytes_read = len(bytes)
if (bytes_read == 0):
break
destination.write(bytes)
bytes_copied += bytes_read
bytes_left -= bytes_read
return bytes_copied
|
[
"def",
"CopyStreamPart",
"(",
"source",
",",
"destination",
",",
"content_size",
")",
":",
"bytes_copied",
"=",
"0",
"bytes_left",
"=",
"content_size",
"while",
"(",
"bytes_left",
">",
"0",
")",
":",
"bytes",
"=",
"source",
".",
"read",
"(",
"min",
"(",
"bytes_left",
",",
"COPY_BLOCK_SIZE",
")",
")",
"bytes_read",
"=",
"len",
"(",
"bytes",
")",
"if",
"(",
"bytes_read",
"==",
"0",
")",
":",
"break",
"destination",
".",
"write",
"(",
"bytes",
")",
"bytes_copied",
"+=",
"bytes_read",
"bytes_left",
"-=",
"bytes_read",
"return",
"bytes_copied"
] |
copy a portion of a stream from one file-like object to another .
|
train
| false
|
6,852
|
def history(zpool=None, internal=False, verbose=False):
ret = {}
zpool_cmd = _check_zpool()
cmd = '{zpool_cmd} history {verbose}{internal}{zpool}'.format(zpool_cmd=zpool_cmd, verbose=('-l ' if verbose else ''), internal=('-i ' if internal else ''), zpool=(zpool if zpool else ''))
res = __salt__['cmd.run_all'](cmd, python_shell=False)
if (res['retcode'] != 0):
if zpool:
ret[zpool] = (res['stderr'] if (('stderr' in res) and (res['stderr'] != '')) else res['stdout'])
else:
ret['error'] = (res['stderr'] if (('stderr' in res) and (res['stderr'] != '')) else res['stdout'])
else:
pool = 'unknown'
for line in res['stdout'].splitlines():
if line.startswith('History for'):
pool = line[13:(-2)]
ret[pool] = []
else:
if (line == ''):
continue
ret[pool].append(line)
return ret
|
[
"def",
"history",
"(",
"zpool",
"=",
"None",
",",
"internal",
"=",
"False",
",",
"verbose",
"=",
"False",
")",
":",
"ret",
"=",
"{",
"}",
"zpool_cmd",
"=",
"_check_zpool",
"(",
")",
"cmd",
"=",
"'{zpool_cmd} history {verbose}{internal}{zpool}'",
".",
"format",
"(",
"zpool_cmd",
"=",
"zpool_cmd",
",",
"verbose",
"=",
"(",
"'-l '",
"if",
"verbose",
"else",
"''",
")",
",",
"internal",
"=",
"(",
"'-i '",
"if",
"internal",
"else",
"''",
")",
",",
"zpool",
"=",
"(",
"zpool",
"if",
"zpool",
"else",
"''",
")",
")",
"res",
"=",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"cmd",
",",
"python_shell",
"=",
"False",
")",
"if",
"(",
"res",
"[",
"'retcode'",
"]",
"!=",
"0",
")",
":",
"if",
"zpool",
":",
"ret",
"[",
"zpool",
"]",
"=",
"(",
"res",
"[",
"'stderr'",
"]",
"if",
"(",
"(",
"'stderr'",
"in",
"res",
")",
"and",
"(",
"res",
"[",
"'stderr'",
"]",
"!=",
"''",
")",
")",
"else",
"res",
"[",
"'stdout'",
"]",
")",
"else",
":",
"ret",
"[",
"'error'",
"]",
"=",
"(",
"res",
"[",
"'stderr'",
"]",
"if",
"(",
"(",
"'stderr'",
"in",
"res",
")",
"and",
"(",
"res",
"[",
"'stderr'",
"]",
"!=",
"''",
")",
")",
"else",
"res",
"[",
"'stdout'",
"]",
")",
"else",
":",
"pool",
"=",
"'unknown'",
"for",
"line",
"in",
"res",
"[",
"'stdout'",
"]",
".",
"splitlines",
"(",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"'History for'",
")",
":",
"pool",
"=",
"line",
"[",
"13",
":",
"(",
"-",
"2",
")",
"]",
"ret",
"[",
"pool",
"]",
"=",
"[",
"]",
"else",
":",
"if",
"(",
"line",
"==",
"''",
")",
":",
"continue",
"ret",
"[",
"pool",
"]",
".",
"append",
"(",
"line",
")",
"return",
"ret"
] |
return the history for an image .
|
train
| false
|
6,853
|
def get_option(file_name, section, option, separator='='):
inifile = _Ini.get_ini_file(file_name, separator=separator)
return inifile.get(section, {}).get(option, None)
|
[
"def",
"get_option",
"(",
"file_name",
",",
"section",
",",
"option",
",",
"separator",
"=",
"'='",
")",
":",
"inifile",
"=",
"_Ini",
".",
"get_ini_file",
"(",
"file_name",
",",
"separator",
"=",
"separator",
")",
"return",
"inifile",
".",
"get",
"(",
"section",
",",
"{",
"}",
")",
".",
"get",
"(",
"option",
",",
"None",
")"
] |
get_option -> value get the value of an ldap global option .
|
train
| true
|
6,854
|
def passwd_check(hashed_passphrase, passphrase):
try:
(algorithm, salt, pw_digest) = hashed_passphrase.split(':', 2)
except (ValueError, TypeError):
return False
try:
h = hashlib.new(algorithm)
except ValueError:
return False
if (len(pw_digest) == 0):
return False
h.update((cast_bytes(passphrase, 'utf-8') + cast_bytes(salt, 'ascii')))
return (h.hexdigest() == pw_digest)
|
[
"def",
"passwd_check",
"(",
"hashed_passphrase",
",",
"passphrase",
")",
":",
"try",
":",
"(",
"algorithm",
",",
"salt",
",",
"pw_digest",
")",
"=",
"hashed_passphrase",
".",
"split",
"(",
"':'",
",",
"2",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"return",
"False",
"try",
":",
"h",
"=",
"hashlib",
".",
"new",
"(",
"algorithm",
")",
"except",
"ValueError",
":",
"return",
"False",
"if",
"(",
"len",
"(",
"pw_digest",
")",
"==",
"0",
")",
":",
"return",
"False",
"h",
".",
"update",
"(",
"(",
"cast_bytes",
"(",
"passphrase",
",",
"'utf-8'",
")",
"+",
"cast_bytes",
"(",
"salt",
",",
"'ascii'",
")",
")",
")",
"return",
"(",
"h",
".",
"hexdigest",
"(",
")",
"==",
"pw_digest",
")"
] |
verify that a given passphrase matches its hashed version .
|
train
| true
|
6,855
|
def id_srandi(t):
t = np.asfortranarray(t)
_id.id_srandi(t)
|
[
"def",
"id_srandi",
"(",
"t",
")",
":",
"t",
"=",
"np",
".",
"asfortranarray",
"(",
"t",
")",
"_id",
".",
"id_srandi",
"(",
"t",
")"
] |
initialize seed values for :func:id_srand .
|
train
| false
|
6,857
|
def test_frame_attr_getattr():
sc = SkyCoord(1, 2, frame=u'icrs', unit=u'deg', equinox=u'J1999', obstime=u'J2001')
assert (sc.equinox == u'J1999')
assert (sc.obstime == u'J2001')
sc = SkyCoord(1, 2, frame=u'fk4', unit=u'deg', equinox=u'J1999', obstime=u'J2001')
assert (sc.equinox == Time(u'J1999'))
assert (sc.obstime == Time(u'J2001'))
sc = SkyCoord(1, 2, frame=u'fk4', unit=u'deg', equinox=u'J1999')
assert (sc.equinox == Time(u'J1999'))
assert (sc.obstime == Time(u'J1999'))
|
[
"def",
"test_frame_attr_getattr",
"(",
")",
":",
"sc",
"=",
"SkyCoord",
"(",
"1",
",",
"2",
",",
"frame",
"=",
"u'icrs'",
",",
"unit",
"=",
"u'deg'",
",",
"equinox",
"=",
"u'J1999'",
",",
"obstime",
"=",
"u'J2001'",
")",
"assert",
"(",
"sc",
".",
"equinox",
"==",
"u'J1999'",
")",
"assert",
"(",
"sc",
".",
"obstime",
"==",
"u'J2001'",
")",
"sc",
"=",
"SkyCoord",
"(",
"1",
",",
"2",
",",
"frame",
"=",
"u'fk4'",
",",
"unit",
"=",
"u'deg'",
",",
"equinox",
"=",
"u'J1999'",
",",
"obstime",
"=",
"u'J2001'",
")",
"assert",
"(",
"sc",
".",
"equinox",
"==",
"Time",
"(",
"u'J1999'",
")",
")",
"assert",
"(",
"sc",
".",
"obstime",
"==",
"Time",
"(",
"u'J2001'",
")",
")",
"sc",
"=",
"SkyCoord",
"(",
"1",
",",
"2",
",",
"frame",
"=",
"u'fk4'",
",",
"unit",
"=",
"u'deg'",
",",
"equinox",
"=",
"u'J1999'",
")",
"assert",
"(",
"sc",
".",
"equinox",
"==",
"Time",
"(",
"u'J1999'",
")",
")",
"assert",
"(",
"sc",
".",
"obstime",
"==",
"Time",
"(",
"u'J1999'",
")",
")"
] |
when accessing frame attributes like equinox .
|
train
| false
|
6,858
|
@handle_response_format
@treeio_login_required
def contact_add_typed(request, type_id, response_format='html'):
contact_type = get_object_or_404(ContactType, pk=type_id)
if (not request.user.profile.has_permission(contact_type, mode='x')):
return user_denied(request, message=("You don't have access to create " + unicode(contact_type)))
if request.POST:
if ('cancel' not in request.POST):
form = ContactForm(request.user.profile, contact_type, request.POST, files=request.FILES)
if form.is_valid():
contact = form.save(request, contact_type)
contact.set_user_from_request(request)
return HttpResponseRedirect(reverse('identities_contact_view', args=[contact.id]))
else:
return HttpResponseRedirect(reverse('identities_index'))
else:
form = ContactForm(request.user.profile, contact_type)
types = Object.filter_by_request(request, ContactType.objects.order_by('name'))
return render_to_response('identities/contact_add_typed', {'type': contact_type, 'types': types, 'form': form}, context_instance=RequestContext(request), response_format=response_format)
|
[
"@",
"handle_response_format",
"@",
"treeio_login_required",
"def",
"contact_add_typed",
"(",
"request",
",",
"type_id",
",",
"response_format",
"=",
"'html'",
")",
":",
"contact_type",
"=",
"get_object_or_404",
"(",
"ContactType",
",",
"pk",
"=",
"type_id",
")",
"if",
"(",
"not",
"request",
".",
"user",
".",
"profile",
".",
"has_permission",
"(",
"contact_type",
",",
"mode",
"=",
"'x'",
")",
")",
":",
"return",
"user_denied",
"(",
"request",
",",
"message",
"=",
"(",
"\"You don't have access to create \"",
"+",
"unicode",
"(",
"contact_type",
")",
")",
")",
"if",
"request",
".",
"POST",
":",
"if",
"(",
"'cancel'",
"not",
"in",
"request",
".",
"POST",
")",
":",
"form",
"=",
"ContactForm",
"(",
"request",
".",
"user",
".",
"profile",
",",
"contact_type",
",",
"request",
".",
"POST",
",",
"files",
"=",
"request",
".",
"FILES",
")",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"contact",
"=",
"form",
".",
"save",
"(",
"request",
",",
"contact_type",
")",
"contact",
".",
"set_user_from_request",
"(",
"request",
")",
"return",
"HttpResponseRedirect",
"(",
"reverse",
"(",
"'identities_contact_view'",
",",
"args",
"=",
"[",
"contact",
".",
"id",
"]",
")",
")",
"else",
":",
"return",
"HttpResponseRedirect",
"(",
"reverse",
"(",
"'identities_index'",
")",
")",
"else",
":",
"form",
"=",
"ContactForm",
"(",
"request",
".",
"user",
".",
"profile",
",",
"contact_type",
")",
"types",
"=",
"Object",
".",
"filter_by_request",
"(",
"request",
",",
"ContactType",
".",
"objects",
".",
"order_by",
"(",
"'name'",
")",
")",
"return",
"render_to_response",
"(",
"'identities/contact_add_typed'",
",",
"{",
"'type'",
":",
"contact_type",
",",
"'types'",
":",
"types",
",",
"'form'",
":",
"form",
"}",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
",",
"response_format",
"=",
"response_format",
")"
] |
contact add with preselected type .
|
train
| false
|
6,859
|
def test_pick_channels_regexp():
ch_names = ['MEG 2331', 'MEG 2332', 'MEG 2333']
assert_array_equal(pick_channels_regexp(ch_names, 'MEG ...1'), [0])
assert_array_equal(pick_channels_regexp(ch_names, 'MEG ...[2-3]'), [1, 2])
assert_array_equal(pick_channels_regexp(ch_names, 'MEG *'), [0, 1, 2])
|
[
"def",
"test_pick_channels_regexp",
"(",
")",
":",
"ch_names",
"=",
"[",
"'MEG 2331'",
",",
"'MEG 2332'",
",",
"'MEG 2333'",
"]",
"assert_array_equal",
"(",
"pick_channels_regexp",
"(",
"ch_names",
",",
"'MEG ...1'",
")",
",",
"[",
"0",
"]",
")",
"assert_array_equal",
"(",
"pick_channels_regexp",
"(",
"ch_names",
",",
"'MEG ...[2-3]'",
")",
",",
"[",
"1",
",",
"2",
"]",
")",
"assert_array_equal",
"(",
"pick_channels_regexp",
"(",
"ch_names",
",",
"'MEG *'",
")",
",",
"[",
"0",
",",
"1",
",",
"2",
"]",
")"
] |
test pick with regular expression .
|
train
| false
|
6,861
|
def cr_uid_id_context(method):
method._api = 'cr_uid_id_context'
return method
|
[
"def",
"cr_uid_id_context",
"(",
"method",
")",
":",
"method",
".",
"_api",
"=",
"'cr_uid_id_context'",
"return",
"method"
] |
decorate a traditional-style method that takes cr .
|
train
| false
|
6,863
|
def remove_vlan_binding(netid):
LOG.debug(_('remove_vlan_binding() called'))
session = db.get_session()
try:
binding = session.query(network_models_v2.Vlan_Binding).filter_by(network_id=netid).one()
session.delete(binding)
session.flush()
return binding
except exc.NoResultFound:
pass
|
[
"def",
"remove_vlan_binding",
"(",
"netid",
")",
":",
"LOG",
".",
"debug",
"(",
"_",
"(",
"'remove_vlan_binding() called'",
")",
")",
"session",
"=",
"db",
".",
"get_session",
"(",
")",
"try",
":",
"binding",
"=",
"session",
".",
"query",
"(",
"network_models_v2",
".",
"Vlan_Binding",
")",
".",
"filter_by",
"(",
"network_id",
"=",
"netid",
")",
".",
"one",
"(",
")",
"session",
".",
"delete",
"(",
"binding",
")",
"session",
".",
"flush",
"(",
")",
"return",
"binding",
"except",
"exc",
".",
"NoResultFound",
":",
"pass"
] |
removes a vlan to network association .
|
train
| false
|
6,864
|
def _evaluate_for_annotation(evaluator, annotation, index=None):
if (annotation is not None):
definitions = evaluator.eval_element(_fix_forward_reference(evaluator, annotation))
if (index is not None):
definitions = list(itertools.chain.from_iterable((definition.py__getitem__(index) for definition in definitions if ((definition.type == 'tuple') and (len(list(definition.py__iter__())) >= index)))))
return list(itertools.chain.from_iterable((evaluator.execute(d) for d in definitions)))
else:
return []
|
[
"def",
"_evaluate_for_annotation",
"(",
"evaluator",
",",
"annotation",
",",
"index",
"=",
"None",
")",
":",
"if",
"(",
"annotation",
"is",
"not",
"None",
")",
":",
"definitions",
"=",
"evaluator",
".",
"eval_element",
"(",
"_fix_forward_reference",
"(",
"evaluator",
",",
"annotation",
")",
")",
"if",
"(",
"index",
"is",
"not",
"None",
")",
":",
"definitions",
"=",
"list",
"(",
"itertools",
".",
"chain",
".",
"from_iterable",
"(",
"(",
"definition",
".",
"py__getitem__",
"(",
"index",
")",
"for",
"definition",
"in",
"definitions",
"if",
"(",
"(",
"definition",
".",
"type",
"==",
"'tuple'",
")",
"and",
"(",
"len",
"(",
"list",
"(",
"definition",
".",
"py__iter__",
"(",
")",
")",
")",
">=",
"index",
")",
")",
")",
")",
")",
"return",
"list",
"(",
"itertools",
".",
"chain",
".",
"from_iterable",
"(",
"(",
"evaluator",
".",
"execute",
"(",
"d",
")",
"for",
"d",
"in",
"definitions",
")",
")",
")",
"else",
":",
"return",
"[",
"]"
] |
evaluates a string-node .
|
train
| false
|
6,865
|
def read_file_content(fileName):
try:
with open(fileName, u'rU') as f:
content = f.read()
except IOError as reason:
raise NinjaIOException(reason)
except:
raise
return content
|
[
"def",
"read_file_content",
"(",
"fileName",
")",
":",
"try",
":",
"with",
"open",
"(",
"fileName",
",",
"u'rU'",
")",
"as",
"f",
":",
"content",
"=",
"f",
".",
"read",
"(",
")",
"except",
"IOError",
"as",
"reason",
":",
"raise",
"NinjaIOException",
"(",
"reason",
")",
"except",
":",
"raise",
"return",
"content"
] |
read a file content .
|
train
| false
|
6,866
|
def convert_BlobProperty(model, prop, kwargs):
return f.FileField(**kwargs)
|
[
"def",
"convert_BlobProperty",
"(",
"model",
",",
"prop",
",",
"kwargs",
")",
":",
"return",
"f",
".",
"FileField",
"(",
"**",
"kwargs",
")"
] |
returns a form field for a db .
|
train
| false
|
6,868
|
def is_editable(obj, request):
if hasattr(obj, u'is_editable'):
return obj.is_editable(request)
else:
codename = get_permission_codename(u'change', obj._meta)
perm = (u'%s.%s' % (obj._meta.app_label, codename))
return (request.user.is_authenticated() and has_site_permission(request.user) and request.user.has_perm(perm))
|
[
"def",
"is_editable",
"(",
"obj",
",",
"request",
")",
":",
"if",
"hasattr",
"(",
"obj",
",",
"u'is_editable'",
")",
":",
"return",
"obj",
".",
"is_editable",
"(",
"request",
")",
"else",
":",
"codename",
"=",
"get_permission_codename",
"(",
"u'change'",
",",
"obj",
".",
"_meta",
")",
"perm",
"=",
"(",
"u'%s.%s'",
"%",
"(",
"obj",
".",
"_meta",
".",
"app_label",
",",
"codename",
")",
")",
"return",
"(",
"request",
".",
"user",
".",
"is_authenticated",
"(",
")",
"and",
"has_site_permission",
"(",
"request",
".",
"user",
")",
"and",
"request",
".",
"user",
".",
"has_perm",
"(",
"perm",
")",
")"
] |
returns true if the object is editable for the request .
|
train
| true
|
6,869
|
def _mantissa(dval):
bb = _double_as_bytes(dval)
mantissa = (bb[1] & (15 << 48))
mantissa += (bb[2] << 40)
mantissa += (bb[3] << 32)
mantissa += bb[4]
return mantissa
|
[
"def",
"_mantissa",
"(",
"dval",
")",
":",
"bb",
"=",
"_double_as_bytes",
"(",
"dval",
")",
"mantissa",
"=",
"(",
"bb",
"[",
"1",
"]",
"&",
"(",
"15",
"<<",
"48",
")",
")",
"mantissa",
"+=",
"(",
"bb",
"[",
"2",
"]",
"<<",
"40",
")",
"mantissa",
"+=",
"(",
"bb",
"[",
"3",
"]",
"<<",
"32",
")",
"mantissa",
"+=",
"bb",
"[",
"4",
"]",
"return",
"mantissa"
] |
extract the _mantissa bits from a double-precision floating point value .
|
train
| true
|
6,870
|
def action_allowed(request, app, action):
return action_allowed_user(request.user, app, action)
|
[
"def",
"action_allowed",
"(",
"request",
",",
"app",
",",
"action",
")",
":",
"return",
"action_allowed_user",
"(",
"request",
".",
"user",
",",
"app",
",",
"action",
")"
] |
determines if the request user has permission to do a certain action admin:% is true if the user has any of: as rules .
|
train
| false
|
6,871
|
@decorators.memoize
def _check_zpool():
return salt.utils.which('zpool')
|
[
"@",
"decorators",
".",
"memoize",
"def",
"_check_zpool",
"(",
")",
":",
"return",
"salt",
".",
"utils",
".",
"which",
"(",
"'zpool'",
")"
] |
looks to see if zpool is present on the system .
|
train
| false
|
6,872
|
def get_worker_name(queue):
name = None
if queue:
name = u'{hostname}.{pid}.{queue}'.format(hostname=socket.gethostname(), pid=os.getpid(), queue=queue)
return name
|
[
"def",
"get_worker_name",
"(",
"queue",
")",
":",
"name",
"=",
"None",
"if",
"queue",
":",
"name",
"=",
"u'{hostname}.{pid}.{queue}'",
".",
"format",
"(",
"hostname",
"=",
"socket",
".",
"gethostname",
"(",
")",
",",
"pid",
"=",
"os",
".",
"getpid",
"(",
")",
",",
"queue",
"=",
"queue",
")",
"return",
"name"
] |
when limiting worker to a specific queue .
|
train
| false
|
6,873
|
def _pretty_xml(element, level=0):
i = ('\n' + (level * ' '))
if len(element):
if ((not element.text) or (not element.text.strip())):
element.text = (i + ' ')
if ((not element.tail) or (not element.tail.strip())):
element.tail = i
for sub_element in element:
_pretty_xml(sub_element, (level + 1))
if ((not sub_element.tail) or (not sub_element.tail.strip())):
sub_element.tail = i
elif (level and ((not element.tail) or (not element.tail.strip()))):
element.tail = i
if (not level):
return ('<?xml version="1.0"?>\n%s' % ET.tostring(element, 'unicode'))
|
[
"def",
"_pretty_xml",
"(",
"element",
",",
"level",
"=",
"0",
")",
":",
"i",
"=",
"(",
"'\\n'",
"+",
"(",
"level",
"*",
"' '",
")",
")",
"if",
"len",
"(",
"element",
")",
":",
"if",
"(",
"(",
"not",
"element",
".",
"text",
")",
"or",
"(",
"not",
"element",
".",
"text",
".",
"strip",
"(",
")",
")",
")",
":",
"element",
".",
"text",
"=",
"(",
"i",
"+",
"' '",
")",
"if",
"(",
"(",
"not",
"element",
".",
"tail",
")",
"or",
"(",
"not",
"element",
".",
"tail",
".",
"strip",
"(",
")",
")",
")",
":",
"element",
".",
"tail",
"=",
"i",
"for",
"sub_element",
"in",
"element",
":",
"_pretty_xml",
"(",
"sub_element",
",",
"(",
"level",
"+",
"1",
")",
")",
"if",
"(",
"(",
"not",
"sub_element",
".",
"tail",
")",
"or",
"(",
"not",
"sub_element",
".",
"tail",
".",
"strip",
"(",
")",
")",
")",
":",
"sub_element",
".",
"tail",
"=",
"i",
"elif",
"(",
"level",
"and",
"(",
"(",
"not",
"element",
".",
"tail",
")",
"or",
"(",
"not",
"element",
".",
"tail",
".",
"strip",
"(",
")",
")",
")",
")",
":",
"element",
".",
"tail",
"=",
"i",
"if",
"(",
"not",
"level",
")",
":",
"return",
"(",
"'<?xml version=\"1.0\"?>\\n%s'",
"%",
"ET",
".",
"tostring",
"(",
"element",
",",
"'unicode'",
")",
")"
] |
indent an elementtree element and its children .
|
train
| false
|
6,874
|
def noheaders():
    """Return a shared, lazily created, empty mimetools.Message instance.

    The instance is cached in the module-level ``_noheaders`` global so
    repeated calls hand back the same object; its underlying file object
    is closed immediately since no header data is ever read from it.
    (Python 2 only: relies on ``mimetools`` and ``(c)StringIO``.)
    """
    global _noheaders
    if _noheaders is not None:
        return _noheaders
    import mimetools
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
    empty = mimetools.Message(StringIO(), 0)
    empty.fp.close()
    _noheaders = empty
    return _noheaders
|
[
"def",
"noheaders",
"(",
")",
":",
"global",
"_noheaders",
"if",
"(",
"_noheaders",
"is",
"None",
")",
":",
"import",
"mimetools",
"try",
":",
"from",
"cStringIO",
"import",
"StringIO",
"except",
"ImportError",
":",
"from",
"StringIO",
"import",
"StringIO",
"_noheaders",
"=",
"mimetools",
".",
"Message",
"(",
"StringIO",
"(",
")",
",",
"0",
")",
"_noheaders",
".",
"fp",
".",
"close",
"(",
")",
"return",
"_noheaders"
] |
return an empty mimetools message instance .
|
train
| false
|
6,876
|
def pipeline_factory_v21(loader, global_conf, **local_conf):
    """Paste pipeline factory that selects the pipeline by auth strategy.

    Looks up the whitespace-separated filter list in ``local_conf`` under
    the key named by ``CONF.api.auth_strategy`` and loads it via
    ``_load_pipeline``.  ``global_conf`` is accepted for the Paste factory
    signature but unused.
    """
    strategy = CONF.api.auth_strategy
    filters = local_conf[strategy].split()
    return _load_pipeline(loader, filters)
|
[
"def",
"pipeline_factory_v21",
"(",
"loader",
",",
"global_conf",
",",
"**",
"local_conf",
")",
":",
"return",
"_load_pipeline",
"(",
"loader",
",",
"local_conf",
"[",
"CONF",
".",
"api",
".",
"auth_strategy",
"]",
".",
"split",
"(",
")",
")"
] |
a paste pipeline replica that keys off of auth_strategy .
|
train
| false
|
6,878
|
@require_POST
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
def list_entrance_exam_instructor_tasks(request, course_id):
    """List entrance-exam-related instructor tasks for a course.

    POST-only, staff-only (enforced by decorators).  An optional POST
    field 'unique_student_identifier' narrows the history to one student.
    Returns a JsonResponse with a 'tasks' list of task-feature dicts, or
    an HTTP 400 when the course's entrance_exam_id is not a valid usage
    key.
    """
    course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_by_id(course_id)
    # Optional filter: restrict the task history to a single student.
    student = request.POST.get('unique_student_identifier', None)
    if (student is not None):
        student = get_student_from_identifier(student)
    try:
        entrance_exam_key = course_id.make_usage_key_from_deprecated_string(course.entrance_exam_id)
    except InvalidKeyError:
        # course.entrance_exam_id failed to parse into a usage key.
        return HttpResponseBadRequest(_('Course has no valid entrance exam section.'))
    if student:
        tasks = lms.djangoapps.instructor_task.api.get_entrance_exam_instructor_task_history(course_id, entrance_exam_key, student)
    else:
        tasks = lms.djangoapps.instructor_task.api.get_entrance_exam_instructor_task_history(course_id, entrance_exam_key)
    # Flatten each task object into a plain dict for JSON serialization.
    response_payload = {'tasks': map(extract_task_features, tasks)}
    return JsonResponse(response_payload)
|
[
"@",
"require_POST",
"@",
"ensure_csrf_cookie",
"@",
"cache_control",
"(",
"no_cache",
"=",
"True",
",",
"no_store",
"=",
"True",
",",
"must_revalidate",
"=",
"True",
")",
"@",
"require_level",
"(",
"'staff'",
")",
"def",
"list_entrance_exam_instructor_tasks",
"(",
"request",
",",
"course_id",
")",
":",
"course_id",
"=",
"SlashSeparatedCourseKey",
".",
"from_deprecated_string",
"(",
"course_id",
")",
"course",
"=",
"get_course_by_id",
"(",
"course_id",
")",
"student",
"=",
"request",
".",
"POST",
".",
"get",
"(",
"'unique_student_identifier'",
",",
"None",
")",
"if",
"(",
"student",
"is",
"not",
"None",
")",
":",
"student",
"=",
"get_student_from_identifier",
"(",
"student",
")",
"try",
":",
"entrance_exam_key",
"=",
"course_id",
".",
"make_usage_key_from_deprecated_string",
"(",
"course",
".",
"entrance_exam_id",
")",
"except",
"InvalidKeyError",
":",
"return",
"HttpResponseBadRequest",
"(",
"_",
"(",
"'Course has no valid entrance exam section.'",
")",
")",
"if",
"student",
":",
"tasks",
"=",
"lms",
".",
"djangoapps",
".",
"instructor_task",
".",
"api",
".",
"get_entrance_exam_instructor_task_history",
"(",
"course_id",
",",
"entrance_exam_key",
",",
"student",
")",
"else",
":",
"tasks",
"=",
"lms",
".",
"djangoapps",
".",
"instructor_task",
".",
"api",
".",
"get_entrance_exam_instructor_task_history",
"(",
"course_id",
",",
"entrance_exam_key",
")",
"response_payload",
"=",
"{",
"'tasks'",
":",
"map",
"(",
"extract_task_features",
",",
"tasks",
")",
"}",
"return",
"JsonResponse",
"(",
"response_payload",
")"
] |
list entrance exam related instructor tasks .
|
train
| false
|
6,879
|
def test_none(value):
return (value is None)
|
[
"def",
"test_none",
"(",
"value",
")",
":",
"return",
"(",
"value",
"is",
"None",
")"
] |
call each rl_* method with a none focuswidget .
|
train
| false
|
6,880
|
def checksum_question(question, timestamp):
    """Return a SHA-1 hex checksum binding a question and timestamp to the secret key.

    The digest covers SECRET_KEY + question + timestamp (UTF-8 encoded),
    so it cannot be forged without knowing the site secret.
    """
    payload = u''.join((settings.SECRET_KEY, question, timestamp))
    digest = hashlib.sha1(payload.encode(u'utf-8'))
    return digest.hexdigest()
|
[
"def",
"checksum_question",
"(",
"question",
",",
"timestamp",
")",
":",
"challenge",
"=",
"u''",
".",
"join",
"(",
"(",
"settings",
".",
"SECRET_KEY",
",",
"question",
",",
"timestamp",
")",
")",
"sha",
"=",
"hashlib",
".",
"sha1",
"(",
"challenge",
".",
"encode",
"(",
"u'utf-8'",
")",
")",
"return",
"sha",
".",
"hexdigest",
"(",
")"
] |
returns checksum for a question .
|
train
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.