id_within_dataset int64 1 55.5k | snippet stringlengths 19 14.2k | tokens listlengths 6 1.63k | nl stringlengths 6 352 | split_within_dataset stringclasses 1 value | is_duplicated bool 2 classes |
|---|---|---|---|---|---|
37,894 | def get_tool_dependency(app, id):
return app.install_model.context.query(app.install_model.ToolDependency).get(app.security.decode_id(id))
| [
"def",
"get_tool_dependency",
"(",
"app",
",",
"id",
")",
":",
"return",
"app",
".",
"install_model",
".",
"context",
".",
"query",
"(",
"app",
".",
"install_model",
".",
"ToolDependency",
")",
".",
"get",
"(",
"app",
".",
"security",
".",
"decode_id",
"(",
"id",
")",
")"
] | get a tool_dependency from the database via id . | train | false |
37,896 | def include_version(version):
kwargs = {}
if (version != settings.API_CURRENT_VERSION):
kwargs['namespace'] = ('api-v%d' % version)
return include(('mkt.api.v%d.urls' % version), **kwargs)
| [
"def",
"include_version",
"(",
"version",
")",
":",
"kwargs",
"=",
"{",
"}",
"if",
"(",
"version",
"!=",
"settings",
".",
"API_CURRENT_VERSION",
")",
":",
"kwargs",
"[",
"'namespace'",
"]",
"=",
"(",
"'api-v%d'",
"%",
"version",
")",
"return",
"include",
"(",
"(",
"'mkt.api.v%d.urls'",
"%",
"version",
")",
",",
"**",
"kwargs",
")"
] | returns an include statement containing url patterns for the passed api version . | train | false |
37,898 | def db_pass():
if (not env.db_pass):
env.db_pass = getpass(u'Enter the database password: ')
return env.db_pass
| [
"def",
"db_pass",
"(",
")",
":",
"if",
"(",
"not",
"env",
".",
"db_pass",
")",
":",
"env",
".",
"db_pass",
"=",
"getpass",
"(",
"u'Enter the database password: '",
")",
"return",
"env",
".",
"db_pass"
] | prompts for the database password if unknown . | train | false |
37,899 | def install_web2py():
sudo(('wget https://raw.githubusercontent.com/web2py/web2py/master/scripts/%s' % INSTALL_SCRIPT))
sudo(('chmod +x %s' % INSTALL_SCRIPT))
sudo(('./' + INSTALL_SCRIPT))
| [
"def",
"install_web2py",
"(",
")",
":",
"sudo",
"(",
"(",
"'wget https://raw.githubusercontent.com/web2py/web2py/master/scripts/%s'",
"%",
"INSTALL_SCRIPT",
")",
")",
"sudo",
"(",
"(",
"'chmod +x %s'",
"%",
"INSTALL_SCRIPT",
")",
")",
"sudo",
"(",
"(",
"'./'",
"+",
"INSTALL_SCRIPT",
")",
")"
] | fab -h username@host install_web2py . | train | false |
37,900 | @pytest.mark.network
def test_upgrade_to_same_version_from_url(script):
result = script.pip('install', 'INITools==0.3', expect_error=True)
assert ((script.site_packages / 'initools') in result.files_created), sorted(result.files_created.keys())
result2 = script.pip('install', 'https://pypi.python.org/packages/source/I/INITools/INITools-0.3.tar.gz', expect_error=True)
assert (not result2.files_updated), 'INITools 0.3 reinstalled same version'
result3 = script.pip('uninstall', 'initools', '-y', expect_error=True)
assert_all_changes(result, result3, [(script.venv / 'build'), 'cache'])
| [
"@",
"pytest",
".",
"mark",
".",
"network",
"def",
"test_upgrade_to_same_version_from_url",
"(",
"script",
")",
":",
"result",
"=",
"script",
".",
"pip",
"(",
"'install'",
",",
"'INITools==0.3'",
",",
"expect_error",
"=",
"True",
")",
"assert",
"(",
"(",
"script",
".",
"site_packages",
"/",
"'initools'",
")",
"in",
"result",
".",
"files_created",
")",
",",
"sorted",
"(",
"result",
".",
"files_created",
".",
"keys",
"(",
")",
")",
"result2",
"=",
"script",
".",
"pip",
"(",
"'install'",
",",
"'https://pypi.python.org/packages/source/I/INITools/INITools-0.3.tar.gz'",
",",
"expect_error",
"=",
"True",
")",
"assert",
"(",
"not",
"result2",
".",
"files_updated",
")",
",",
"'INITools 0.3 reinstalled same version'",
"result3",
"=",
"script",
".",
"pip",
"(",
"'uninstall'",
",",
"'initools'",
",",
"'-y'",
",",
"expect_error",
"=",
"True",
")",
"assert_all_changes",
"(",
"result",
",",
"result3",
",",
"[",
"(",
"script",
".",
"venv",
"/",
"'build'",
")",
",",
"'cache'",
"]",
")"
] | when installing from a url the same version that is already installed . | train | false |
37,901 | @public
def free_group(symbols):
_free_group = FreeGroup(symbols)
return ((_free_group,) + tuple(_free_group.generators))
| [
"@",
"public",
"def",
"free_group",
"(",
"symbols",
")",
":",
"_free_group",
"=",
"FreeGroup",
"(",
"symbols",
")",
"return",
"(",
"(",
"_free_group",
",",
")",
"+",
"tuple",
"(",
"_free_group",
".",
"generators",
")",
")"
] | construct a free group returning (freegroup . | train | false |
37,902 | def associate_profile_to_role(profile_name, role_name, region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if (not role_exists(role_name, region, key, keyid, profile)):
log.error('IAM role {0} does not exist.'.format(role_name))
return False
if (not instance_profile_exists(profile_name, region, key, keyid, profile)):
log.error('Instance profile {0} does not exist.'.format(profile_name))
return False
associated = profile_associated(role_name, profile_name, region, key, keyid, profile)
if associated:
return True
else:
try:
conn.add_role_to_instance_profile(profile_name, role_name)
msg = 'Added {0} instance profile to {1} role.'
log.info(msg.format(profile_name, role_name))
return True
except boto.exception.BotoServerError as e:
log.debug(e)
msg = 'Failed to add {0} instance profile to {1} role.'
log.error(msg.format(profile_name, role_name))
return False
| [
"def",
"associate_profile_to_role",
"(",
"profile_name",
",",
"role_name",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
")",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"if",
"(",
"not",
"role_exists",
"(",
"role_name",
",",
"region",
",",
"key",
",",
"keyid",
",",
"profile",
")",
")",
":",
"log",
".",
"error",
"(",
"'IAM role {0} does not exist.'",
".",
"format",
"(",
"role_name",
")",
")",
"return",
"False",
"if",
"(",
"not",
"instance_profile_exists",
"(",
"profile_name",
",",
"region",
",",
"key",
",",
"keyid",
",",
"profile",
")",
")",
":",
"log",
".",
"error",
"(",
"'Instance profile {0} does not exist.'",
".",
"format",
"(",
"profile_name",
")",
")",
"return",
"False",
"associated",
"=",
"profile_associated",
"(",
"role_name",
",",
"profile_name",
",",
"region",
",",
"key",
",",
"keyid",
",",
"profile",
")",
"if",
"associated",
":",
"return",
"True",
"else",
":",
"try",
":",
"conn",
".",
"add_role_to_instance_profile",
"(",
"profile_name",
",",
"role_name",
")",
"msg",
"=",
"'Added {0} instance profile to {1} role.'",
"log",
".",
"info",
"(",
"msg",
".",
"format",
"(",
"profile_name",
",",
"role_name",
")",
")",
"return",
"True",
"except",
"boto",
".",
"exception",
".",
"BotoServerError",
"as",
"e",
":",
"log",
".",
"debug",
"(",
"e",
")",
"msg",
"=",
"'Failed to add {0} instance profile to {1} role.'",
"log",
".",
"error",
"(",
"msg",
".",
"format",
"(",
"profile_name",
",",
"role_name",
")",
")",
"return",
"False"
] | associate an instance profile with an iam role . | train | true |
37,903 | def _default_docx_path():
_thisdir = os.path.split(__file__)[0]
return os.path.join(_thisdir, 'templates', 'default.docx')
| [
"def",
"_default_docx_path",
"(",
")",
":",
"_thisdir",
"=",
"os",
".",
"path",
".",
"split",
"(",
"__file__",
")",
"[",
"0",
"]",
"return",
"os",
".",
"path",
".",
"join",
"(",
"_thisdir",
",",
"'templates'",
",",
"'default.docx'",
")"
] | return the path to the built-in default . | train | true |
37,904 | def test_clickable_image():
import matplotlib.pyplot as plt
im = np.random.RandomState(0).randn(100, 100)
clk = ClickableImage(im)
clicks = [(12, 8), (46, 48), (10, 24)]
for click in clicks:
_fake_click(clk.fig, clk.ax, click, xform='data')
assert_allclose(np.array(clicks), np.array(clk.coords))
assert_true((len(clicks) == len(clk.coords)))
lt = clk.to_layout()
assert_true((lt.pos.shape[0] == len(clicks)))
assert_allclose((lt.pos[(1, 0)] / lt.pos[(2, 0)]), (clicks[1][0] / float(clicks[2][0])))
clk.plot_clicks()
plt.close('all')
| [
"def",
"test_clickable_image",
"(",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"im",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"0",
")",
".",
"randn",
"(",
"100",
",",
"100",
")",
"clk",
"=",
"ClickableImage",
"(",
"im",
")",
"clicks",
"=",
"[",
"(",
"12",
",",
"8",
")",
",",
"(",
"46",
",",
"48",
")",
",",
"(",
"10",
",",
"24",
")",
"]",
"for",
"click",
"in",
"clicks",
":",
"_fake_click",
"(",
"clk",
".",
"fig",
",",
"clk",
".",
"ax",
",",
"click",
",",
"xform",
"=",
"'data'",
")",
"assert_allclose",
"(",
"np",
".",
"array",
"(",
"clicks",
")",
",",
"np",
".",
"array",
"(",
"clk",
".",
"coords",
")",
")",
"assert_true",
"(",
"(",
"len",
"(",
"clicks",
")",
"==",
"len",
"(",
"clk",
".",
"coords",
")",
")",
")",
"lt",
"=",
"clk",
".",
"to_layout",
"(",
")",
"assert_true",
"(",
"(",
"lt",
".",
"pos",
".",
"shape",
"[",
"0",
"]",
"==",
"len",
"(",
"clicks",
")",
")",
")",
"assert_allclose",
"(",
"(",
"lt",
".",
"pos",
"[",
"(",
"1",
",",
"0",
")",
"]",
"/",
"lt",
".",
"pos",
"[",
"(",
"2",
",",
"0",
")",
"]",
")",
",",
"(",
"clicks",
"[",
"1",
"]",
"[",
"0",
"]",
"/",
"float",
"(",
"clicks",
"[",
"2",
"]",
"[",
"0",
"]",
")",
")",
")",
"clk",
".",
"plot_clicks",
"(",
")",
"plt",
".",
"close",
"(",
"'all'",
")"
] | test the clickableimage class . | train | false |
37,905 | def _bro2neo(rec):
if ('id_orig_h' in rec):
rec['src'] = rec.pop('id_orig_h')
if ('id_resp_h' in rec):
rec['dst'] = rec.pop('id_resp_h')
if ('ts' in rec):
rec['start_time'] = rec['end_time'] = rec.pop('ts')
return rec
| [
"def",
"_bro2neo",
"(",
"rec",
")",
":",
"if",
"(",
"'id_orig_h'",
"in",
"rec",
")",
":",
"rec",
"[",
"'src'",
"]",
"=",
"rec",
".",
"pop",
"(",
"'id_orig_h'",
")",
"if",
"(",
"'id_resp_h'",
"in",
"rec",
")",
":",
"rec",
"[",
"'dst'",
"]",
"=",
"rec",
".",
"pop",
"(",
"'id_resp_h'",
")",
"if",
"(",
"'ts'",
"in",
"rec",
")",
":",
"rec",
"[",
"'start_time'",
"]",
"=",
"rec",
"[",
"'end_time'",
"]",
"=",
"rec",
".",
"pop",
"(",
"'ts'",
")",
"return",
"rec"
] | prepares a document for db . | train | false |
37,907 | @world.absorb
def log_in(username='robot', password='test', email='robot@edx.org', name='Robot'):
url = '/auto_auth'
params = {'username': username, 'password': password, 'email': email, 'full_name': name}
url += ('?' + urllib.urlencode(params))
world.visit(url)
user = User.objects.get(username=username)
world.scenario_dict['USER'] = user
| [
"@",
"world",
".",
"absorb",
"def",
"log_in",
"(",
"username",
"=",
"'robot'",
",",
"password",
"=",
"'test'",
",",
"email",
"=",
"'robot@edx.org'",
",",
"name",
"=",
"'Robot'",
")",
":",
"url",
"=",
"'/auto_auth'",
"params",
"=",
"{",
"'username'",
":",
"username",
",",
"'password'",
":",
"password",
",",
"'email'",
":",
"email",
",",
"'full_name'",
":",
"name",
"}",
"url",
"+=",
"(",
"'?'",
"+",
"urllib",
".",
"urlencode",
"(",
"params",
")",
")",
"world",
".",
"visit",
"(",
"url",
")",
"user",
"=",
"User",
".",
"objects",
".",
"get",
"(",
"username",
"=",
"username",
")",
"world",
".",
"scenario_dict",
"[",
"'USER'",
"]",
"=",
"user"
] | use the auto_auth feature to programmatically log the user in . | train | false |
37,908 | def get_module_path(module, *joins):
module = scrub(module)
return get_pymodule_path(((local.module_app[module] + u'.') + module), *joins)
| [
"def",
"get_module_path",
"(",
"module",
",",
"*",
"joins",
")",
":",
"module",
"=",
"scrub",
"(",
"module",
")",
"return",
"get_pymodule_path",
"(",
"(",
"(",
"local",
".",
"module_app",
"[",
"module",
"]",
"+",
"u'.'",
")",
"+",
"module",
")",
",",
"*",
"joins",
")"
] | return module *modname* base path . | train | false |
37,909 | def power_iteration_kron(A, C, max_iter=1000, tol=0.001, random_state=0):
AS_size = C.shape[0]
rng = check_random_state(random_state)
B = rng.randn(AS_size, AS_size)
B /= linalg.norm(B, 'fro')
ATA = np.dot(A.T, A)
CCT = np.dot(C, C.T)
L0 = np.inf
for _ in range(max_iter):
Y = np.dot(np.dot(ATA, B), CCT)
L = linalg.norm(Y, 'fro')
if (abs((L - L0)) < tol):
break
B = (Y / L)
L0 = L
return L
| [
"def",
"power_iteration_kron",
"(",
"A",
",",
"C",
",",
"max_iter",
"=",
"1000",
",",
"tol",
"=",
"0.001",
",",
"random_state",
"=",
"0",
")",
":",
"AS_size",
"=",
"C",
".",
"shape",
"[",
"0",
"]",
"rng",
"=",
"check_random_state",
"(",
"random_state",
")",
"B",
"=",
"rng",
".",
"randn",
"(",
"AS_size",
",",
"AS_size",
")",
"B",
"/=",
"linalg",
".",
"norm",
"(",
"B",
",",
"'fro'",
")",
"ATA",
"=",
"np",
".",
"dot",
"(",
"A",
".",
"T",
",",
"A",
")",
"CCT",
"=",
"np",
".",
"dot",
"(",
"C",
",",
"C",
".",
"T",
")",
"L0",
"=",
"np",
".",
"inf",
"for",
"_",
"in",
"range",
"(",
"max_iter",
")",
":",
"Y",
"=",
"np",
".",
"dot",
"(",
"np",
".",
"dot",
"(",
"ATA",
",",
"B",
")",
",",
"CCT",
")",
"L",
"=",
"linalg",
".",
"norm",
"(",
"Y",
",",
"'fro'",
")",
"if",
"(",
"abs",
"(",
"(",
"L",
"-",
"L0",
")",
")",
"<",
"tol",
")",
":",
"break",
"B",
"=",
"(",
"Y",
"/",
"L",
")",
"L0",
"=",
"L",
"return",
"L"
] | find the largest singular value for the matrix kron . | train | false |
37,911 | def calculate_file_checksum(path):
hasher = hashlib.md5()
with io.FileIO(path, 'r') as fp:
while True:
buf = fp.read(65536)
if (not buf):
break
hasher.update(buf)
return hasher.hexdigest()
| [
"def",
"calculate_file_checksum",
"(",
"path",
")",
":",
"hasher",
"=",
"hashlib",
".",
"md5",
"(",
")",
"with",
"io",
".",
"FileIO",
"(",
"path",
",",
"'r'",
")",
"as",
"fp",
":",
"while",
"True",
":",
"buf",
"=",
"fp",
".",
"read",
"(",
"65536",
")",
"if",
"(",
"not",
"buf",
")",
":",
"break",
"hasher",
".",
"update",
"(",
"buf",
")",
"return",
"hasher",
".",
"hexdigest",
"(",
")"
] | calculate the md5 sum for a file: read chunks of a file and update the hasher . | train | false |
37,912 | def set_display_sleep(minutes):
value = _validate_sleep(minutes)
cmd = 'systemsetup -setdisplaysleep {0}'.format(value)
salt.utils.mac_utils.execute_return_success(cmd)
return salt.utils.mac_utils.confirm_updated(str(value), get_display_sleep)
| [
"def",
"set_display_sleep",
"(",
"minutes",
")",
":",
"value",
"=",
"_validate_sleep",
"(",
"minutes",
")",
"cmd",
"=",
"'systemsetup -setdisplaysleep {0}'",
".",
"format",
"(",
"value",
")",
"salt",
".",
"utils",
".",
"mac_utils",
".",
"execute_return_success",
"(",
"cmd",
")",
"return",
"salt",
".",
"utils",
".",
"mac_utils",
".",
"confirm_updated",
"(",
"str",
"(",
"value",
")",
",",
"get_display_sleep",
")"
] | set the amount of idle time until the display sleeps . | train | true |
37,915 | def test_train1():
sp = SequencePattern()
ts2s = TFLearnSeq2Seq(sp)
ofn = ('test_%s' % ts2s.canonical_weights_fn(0))
print(('using weights filename %s' % ofn))
if os.path.exists(ofn):
os.unlink(ofn)
tf.reset_default_graph()
ts2s.train(num_epochs=1, num_points=10000, weights_output_fn=ofn)
assert os.path.exists(ofn)
| [
"def",
"test_train1",
"(",
")",
":",
"sp",
"=",
"SequencePattern",
"(",
")",
"ts2s",
"=",
"TFLearnSeq2Seq",
"(",
"sp",
")",
"ofn",
"=",
"(",
"'test_%s'",
"%",
"ts2s",
".",
"canonical_weights_fn",
"(",
"0",
")",
")",
"print",
"(",
"(",
"'using weights filename %s'",
"%",
"ofn",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"ofn",
")",
":",
"os",
".",
"unlink",
"(",
"ofn",
")",
"tf",
".",
"reset_default_graph",
"(",
")",
"ts2s",
".",
"train",
"(",
"num_epochs",
"=",
"1",
",",
"num_points",
"=",
"10000",
",",
"weights_output_fn",
"=",
"ofn",
")",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"ofn",
")"
] | test simple training of an embedding_rnn seq2seq model . | train | false |
37,916 | def test_ordering():
app1id = app_factory().id
app2id = app_factory().id
app3id = app_factory().id
semi_arbitrary_order = [app2id, app3id, app1id]
addons = manual_order(Webapp.objects.all(), semi_arbitrary_order)
eq_(semi_arbitrary_order, [addon.id for addon in addons])
| [
"def",
"test_ordering",
"(",
")",
":",
"app1id",
"=",
"app_factory",
"(",
")",
".",
"id",
"app2id",
"=",
"app_factory",
"(",
")",
".",
"id",
"app3id",
"=",
"app_factory",
"(",
")",
".",
"id",
"semi_arbitrary_order",
"=",
"[",
"app2id",
",",
"app3id",
",",
"app1id",
"]",
"addons",
"=",
"manual_order",
"(",
"Webapp",
".",
"objects",
".",
"all",
"(",
")",
",",
"semi_arbitrary_order",
")",
"eq_",
"(",
"semi_arbitrary_order",
",",
"[",
"addon",
".",
"id",
"for",
"addon",
"in",
"addons",
"]",
")"
] | given a specific set of primary keys . | train | false |
37,917 | def parse_siteclass_omegas(line, site_classes):
line_floats = re.findall('\\d{1,3}\\.\\d{5}', line)
if ((not site_classes) or (len(line_floats) == 0)):
return
for n in range(len(line_floats)):
site_classes[n]['omega'] = line_floats[n]
return site_classes
| [
"def",
"parse_siteclass_omegas",
"(",
"line",
",",
"site_classes",
")",
":",
"line_floats",
"=",
"re",
".",
"findall",
"(",
"'\\\\d{1,3}\\\\.\\\\d{5}'",
",",
"line",
")",
"if",
"(",
"(",
"not",
"site_classes",
")",
"or",
"(",
"len",
"(",
"line_floats",
")",
"==",
"0",
")",
")",
":",
"return",
"for",
"n",
"in",
"range",
"(",
"len",
"(",
"line_floats",
")",
")",
":",
"site_classes",
"[",
"n",
"]",
"[",
"'omega'",
"]",
"=",
"line_floats",
"[",
"n",
"]",
"return",
"site_classes"
] | for models which have multiple site classes . | train | false |
37,918 | def _encode_asn1_utf8_str(backend, string):
s = backend._lib.ASN1_UTF8STRING_new()
res = backend._lib.ASN1_STRING_set(s, string.encode('utf8'), len(string.encode('utf8')))
backend.openssl_assert((res == 1))
return s
| [
"def",
"_encode_asn1_utf8_str",
"(",
"backend",
",",
"string",
")",
":",
"s",
"=",
"backend",
".",
"_lib",
".",
"ASN1_UTF8STRING_new",
"(",
")",
"res",
"=",
"backend",
".",
"_lib",
".",
"ASN1_STRING_set",
"(",
"s",
",",
"string",
".",
"encode",
"(",
"'utf8'",
")",
",",
"len",
"(",
"string",
".",
"encode",
"(",
"'utf8'",
")",
")",
")",
"backend",
".",
"openssl_assert",
"(",
"(",
"res",
"==",
"1",
")",
")",
"return",
"s"
] | create an asn1_utf8string from a python unicode string . | train | false |
37,919 | def convert_command_args(args):
assert isinstance(args, list)
def convert(arg):
if six.PY2:
if isinstance(arg, six.text_type):
arg = arg.encode(arg_encoding())
elif isinstance(arg, bytes):
arg = arg.decode(arg_encoding(), 'surrogateescape')
return arg
return [convert(a) for a in args]
| [
"def",
"convert_command_args",
"(",
"args",
")",
":",
"assert",
"isinstance",
"(",
"args",
",",
"list",
")",
"def",
"convert",
"(",
"arg",
")",
":",
"if",
"six",
".",
"PY2",
":",
"if",
"isinstance",
"(",
"arg",
",",
"six",
".",
"text_type",
")",
":",
"arg",
"=",
"arg",
".",
"encode",
"(",
"arg_encoding",
"(",
")",
")",
"elif",
"isinstance",
"(",
"arg",
",",
"bytes",
")",
":",
"arg",
"=",
"arg",
".",
"decode",
"(",
"arg_encoding",
"(",
")",
",",
"'surrogateescape'",
")",
"return",
"arg",
"return",
"[",
"convert",
"(",
"a",
")",
"for",
"a",
"in",
"args",
"]"
] | convert command arguments to bytestrings on python 2 and surrogate-escaped strings on python 3 . | train | false |
37,921 | def flood(stepFunction, fullSet, initSet, relevant=None):
if (fullSet is None):
flooded = set(initSet)
else:
full = set(fullSet)
flooded = full.intersection(set(initSet))
if (relevant is None):
relevant = full.copy()
if relevant:
relevant = set(relevant)
change = flooded.copy()
while (len(change) > 0):
new = set()
for m in change:
if (fullSet is None):
new.update(stepFunction(m))
else:
new.update(full.intersection(stepFunction(m)))
change = new.difference(flooded)
flooded.update(change)
if ((relevant is not None) and relevant.issubset(flooded)):
break
return list(flooded)
| [
"def",
"flood",
"(",
"stepFunction",
",",
"fullSet",
",",
"initSet",
",",
"relevant",
"=",
"None",
")",
":",
"if",
"(",
"fullSet",
"is",
"None",
")",
":",
"flooded",
"=",
"set",
"(",
"initSet",
")",
"else",
":",
"full",
"=",
"set",
"(",
"fullSet",
")",
"flooded",
"=",
"full",
".",
"intersection",
"(",
"set",
"(",
"initSet",
")",
")",
"if",
"(",
"relevant",
"is",
"None",
")",
":",
"relevant",
"=",
"full",
".",
"copy",
"(",
")",
"if",
"relevant",
":",
"relevant",
"=",
"set",
"(",
"relevant",
")",
"change",
"=",
"flooded",
".",
"copy",
"(",
")",
"while",
"(",
"len",
"(",
"change",
")",
">",
"0",
")",
":",
"new",
"=",
"set",
"(",
")",
"for",
"m",
"in",
"change",
":",
"if",
"(",
"fullSet",
"is",
"None",
")",
":",
"new",
".",
"update",
"(",
"stepFunction",
"(",
"m",
")",
")",
"else",
":",
"new",
".",
"update",
"(",
"full",
".",
"intersection",
"(",
"stepFunction",
"(",
"m",
")",
")",
")",
"change",
"=",
"new",
".",
"difference",
"(",
"flooded",
")",
"flooded",
".",
"update",
"(",
"change",
")",
"if",
"(",
"(",
"relevant",
"is",
"not",
"None",
")",
"and",
"relevant",
".",
"issubset",
"(",
"flooded",
")",
")",
":",
"break",
"return",
"list",
"(",
"flooded",
")"
] | returns a list of elements of fullset linked to some element of initset through the neighborhood-setfunction . | train | false |
37,923 | def build_slug():
wordlist = open(get_resource_path('wordlist.txt')).read().split('\n')
wordlist.remove('')
r = SystemRandom()
return '-'.join((r.choice(wordlist) for x in range(2)))
| [
"def",
"build_slug",
"(",
")",
":",
"wordlist",
"=",
"open",
"(",
"get_resource_path",
"(",
"'wordlist.txt'",
")",
")",
".",
"read",
"(",
")",
".",
"split",
"(",
"'\\n'",
")",
"wordlist",
".",
"remove",
"(",
"''",
")",
"r",
"=",
"SystemRandom",
"(",
")",
"return",
"'-'",
".",
"join",
"(",
"(",
"r",
".",
"choice",
"(",
"wordlist",
")",
"for",
"x",
"in",
"range",
"(",
"2",
")",
")",
")"
] | returns a random string made from two words from the wordlist . | train | false |
37,924 | @utils.arg('server', metavar='<server>', help=_('Name or ID of server.'))
@utils.arg('private_key', metavar='<private-key>', help=_('Private key (used locally to decrypt password) (Optional). When specified, the command displays the clear (decrypted) VM password. When not specified, the ciphered VM password is displayed.'), nargs='?', default=None)
def do_get_password(cs, args):
server = _find_server(cs, args.server)
data = server.get_password(args.private_key)
print(data)
| [
"@",
"utils",
".",
"arg",
"(",
"'server'",
",",
"metavar",
"=",
"'<server>'",
",",
"help",
"=",
"_",
"(",
"'Name or ID of server.'",
")",
")",
"@",
"utils",
".",
"arg",
"(",
"'private_key'",
",",
"metavar",
"=",
"'<private-key>'",
",",
"help",
"=",
"_",
"(",
"'Private key (used locally to decrypt password) (Optional). When specified, the command displays the clear (decrypted) VM password. When not specified, the ciphered VM password is displayed.'",
")",
",",
"nargs",
"=",
"'?'",
",",
"default",
"=",
"None",
")",
"def",
"do_get_password",
"(",
"cs",
",",
"args",
")",
":",
"server",
"=",
"_find_server",
"(",
"cs",
",",
"args",
".",
"server",
")",
"data",
"=",
"server",
".",
"get_password",
"(",
"args",
".",
"private_key",
")",
"print",
"(",
"data",
")"
] | get the admin password for a server . | train | false |
37,926 | def parent(pid):
try:
return psutil.Process(pid).parent().pid
except Exception:
return 0
| [
"def",
"parent",
"(",
"pid",
")",
":",
"try",
":",
"return",
"psutil",
".",
"Process",
"(",
"pid",
")",
".",
"parent",
"(",
")",
".",
"pid",
"except",
"Exception",
":",
"return",
"0"
] | parent -> int arguments: pid : pid of the process . | train | false |
37,929 | def filter_format(filter_template, assertion_values):
return (filter_template % tuple(map(escape_filter_chars, assertion_values)))
| [
"def",
"filter_format",
"(",
"filter_template",
",",
"assertion_values",
")",
":",
"return",
"(",
"filter_template",
"%",
"tuple",
"(",
"map",
"(",
"escape_filter_chars",
",",
"assertion_values",
")",
")",
")"
] | filter_template string containing %s as placeholder for assertion values . | train | false |
37,930 | def normalize_props(props):
if (not isinstance(props, dict)):
props = dict.fromkeys(props)
props = dict(((key, (value if isinstance(value, basestring) else (('{%s}' % key) if (value is None) else str(value)))) for (key, value) in props.iteritems()))
return props
| [
"def",
"normalize_props",
"(",
"props",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"props",
",",
"dict",
")",
")",
":",
"props",
"=",
"dict",
".",
"fromkeys",
"(",
"props",
")",
"props",
"=",
"dict",
"(",
"(",
"(",
"key",
",",
"(",
"value",
"if",
"isinstance",
"(",
"value",
",",
"basestring",
")",
"else",
"(",
"(",
"'{%s}'",
"%",
"key",
")",
"if",
"(",
"value",
"is",
"None",
")",
"else",
"str",
"(",
"value",
")",
")",
")",
")",
"for",
"(",
"key",
",",
"value",
")",
"in",
"props",
".",
"iteritems",
"(",
")",
")",
")",
"return",
"props"
] | returns a normalized property list/dict so that : - a list gives {k: "{k}"} - a dict gives {k: v if v is not none else "{%s}" % v} . | train | false |
37,931 | @contextmanager
def time_block(message):
tic = time.time()
(yield)
dt = (time.time() - tic)
log = (app_log.info if (dt > 1) else app_log.debug)
log('%s in %.2f ms', message, (1000.0 * dt))
| [
"@",
"contextmanager",
"def",
"time_block",
"(",
"message",
")",
":",
"tic",
"=",
"time",
".",
"time",
"(",
")",
"(",
"yield",
")",
"dt",
"=",
"(",
"time",
".",
"time",
"(",
")",
"-",
"tic",
")",
"log",
"=",
"(",
"app_log",
".",
"info",
"if",
"(",
"dt",
">",
"1",
")",
"else",
"app_log",
".",
"debug",
")",
"log",
"(",
"'%s in %.2f ms'",
",",
"message",
",",
"(",
"1000.0",
"*",
"dt",
")",
")"
] | context manager for timing a block logs millisecond timings of the block . | train | false |
37,932 | def replacer(dst):
return links.get(dst, dst)
| [
"def",
"replacer",
"(",
"dst",
")",
":",
"return",
"links",
".",
"get",
"(",
"dst",
",",
"dst",
")"
] | replace links . | train | false |
37,933 | def test_prewitt_v_mask():
np.random.seed(0)
result = filters.prewitt_v(np.random.uniform(size=(10, 10)), np.zeros((10, 10), bool))
assert_allclose(result, 0)
| [
"def",
"test_prewitt_v_mask",
"(",
")",
":",
"np",
".",
"random",
".",
"seed",
"(",
"0",
")",
"result",
"=",
"filters",
".",
"prewitt_v",
"(",
"np",
".",
"random",
".",
"uniform",
"(",
"size",
"=",
"(",
"10",
",",
"10",
")",
")",
",",
"np",
".",
"zeros",
"(",
"(",
"10",
",",
"10",
")",
",",
"bool",
")",
")",
"assert_allclose",
"(",
"result",
",",
"0",
")"
] | vertical prewitt on a masked array should be zero . | train | false |
37,934 | @mock_ec2
def test_eip_disassociate_arg_error():
conn = boto.connect_ec2(u'the_key', u'the_secret')
with assert_raises(EC2ResponseError) as cm:
conn.disassociate_address()
cm.exception.code.should.equal(u'MissingParameter')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
| [
"@",
"mock_ec2",
"def",
"test_eip_disassociate_arg_error",
"(",
")",
":",
"conn",
"=",
"boto",
".",
"connect_ec2",
"(",
"u'the_key'",
",",
"u'the_secret'",
")",
"with",
"assert_raises",
"(",
"EC2ResponseError",
")",
"as",
"cm",
":",
"conn",
".",
"disassociate_address",
"(",
")",
"cm",
".",
"exception",
".",
"code",
".",
"should",
".",
"equal",
"(",
"u'MissingParameter'",
")",
"cm",
".",
"exception",
".",
"status",
".",
"should",
".",
"equal",
"(",
"400",
")",
"cm",
".",
"exception",
".",
"request_id",
".",
"should_not",
".",
"be",
".",
"none"
] | invalid arguments disassociate address . | train | false |
37,937 | def modify_mesh(x, insert_1, insert_2):
return np.sort(np.hstack((x, (0.5 * (x[insert_1] + x[(insert_1 + 1)])), (((2 * x[insert_2]) + x[(insert_2 + 1)]) / 3), ((x[insert_2] + (2 * x[(insert_2 + 1)])) / 3))))
| [
"def",
"modify_mesh",
"(",
"x",
",",
"insert_1",
",",
"insert_2",
")",
":",
"return",
"np",
".",
"sort",
"(",
"np",
".",
"hstack",
"(",
"(",
"x",
",",
"(",
"0.5",
"*",
"(",
"x",
"[",
"insert_1",
"]",
"+",
"x",
"[",
"(",
"insert_1",
"+",
"1",
")",
"]",
")",
")",
",",
"(",
"(",
"(",
"2",
"*",
"x",
"[",
"insert_2",
"]",
")",
"+",
"x",
"[",
"(",
"insert_2",
"+",
"1",
")",
"]",
")",
"/",
"3",
")",
",",
"(",
"(",
"x",
"[",
"insert_2",
"]",
"+",
"(",
"2",
"*",
"x",
"[",
"(",
"insert_2",
"+",
"1",
")",
"]",
")",
")",
"/",
"3",
")",
")",
")",
")"
] | insert nodes into a mesh . | train | false |
37,938 | def delphi_solution_comments(row):
if hasattr(row, 'delphi_solution'):
row = row.delphi_solution
try:
solution_id = row.id
problem_id = row.problem_id
except AttributeError:
return None
ctable = current.s3db.delphi_comment
query = (ctable.solution_id == solution_id)
comments = current.db(query).count()
url = URL(c='delphi', f='problem', args=[problem_id, 'solution', solution_id, 'discuss'])
return A(comments, _href=url)
| [
"def",
"delphi_solution_comments",
"(",
"row",
")",
":",
"if",
"hasattr",
"(",
"row",
",",
"'delphi_solution'",
")",
":",
"row",
"=",
"row",
".",
"delphi_solution",
"try",
":",
"solution_id",
"=",
"row",
".",
"id",
"problem_id",
"=",
"row",
".",
"problem_id",
"except",
"AttributeError",
":",
"return",
"None",
"ctable",
"=",
"current",
".",
"s3db",
".",
"delphi_comment",
"query",
"=",
"(",
"ctable",
".",
"solution_id",
"==",
"solution_id",
")",
"comments",
"=",
"current",
".",
"db",
"(",
"query",
")",
".",
"count",
"(",
")",
"url",
"=",
"URL",
"(",
"c",
"=",
"'delphi'",
",",
"f",
"=",
"'problem'",
",",
"args",
"=",
"[",
"problem_id",
",",
"'solution'",
",",
"solution_id",
",",
"'discuss'",
"]",
")",
"return",
"A",
"(",
"comments",
",",
"_href",
"=",
"url",
")"
] | clickable number of comments for a solution . | train | false |
37,939 | def make_subrequest(env, method=None, path=None, body=None, headers=None, agent='Swift', swift_source=None, make_env=make_env):
query_string = None
path = (path or '')
if (path and ('?' in path)):
(path, query_string) = path.split('?', 1)
newenv = make_env(env, method, path=unquote(path), agent=agent, query_string=query_string, swift_source=swift_source)
if (not headers):
headers = {}
if body:
return Request.blank(path, environ=newenv, body=body, headers=headers)
else:
return Request.blank(path, environ=newenv, headers=headers)
| [
"def",
"make_subrequest",
"(",
"env",
",",
"method",
"=",
"None",
",",
"path",
"=",
"None",
",",
"body",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"agent",
"=",
"'Swift'",
",",
"swift_source",
"=",
"None",
",",
"make_env",
"=",
"make_env",
")",
":",
"query_string",
"=",
"None",
"path",
"=",
"(",
"path",
"or",
"''",
")",
"if",
"(",
"path",
"and",
"(",
"'?'",
"in",
"path",
")",
")",
":",
"(",
"path",
",",
"query_string",
")",
"=",
"path",
".",
"split",
"(",
"'?'",
",",
"1",
")",
"newenv",
"=",
"make_env",
"(",
"env",
",",
"method",
",",
"path",
"=",
"unquote",
"(",
"path",
")",
",",
"agent",
"=",
"agent",
",",
"query_string",
"=",
"query_string",
",",
"swift_source",
"=",
"swift_source",
")",
"if",
"(",
"not",
"headers",
")",
":",
"headers",
"=",
"{",
"}",
"if",
"body",
":",
"return",
"Request",
".",
"blank",
"(",
"path",
",",
"environ",
"=",
"newenv",
",",
"body",
"=",
"body",
",",
"headers",
"=",
"headers",
")",
"else",
":",
"return",
"Request",
".",
"blank",
"(",
"path",
",",
"environ",
"=",
"newenv",
",",
"headers",
"=",
"headers",
")"
] | makes a new swob . | train | false |
37,940 | def libvlc_video_set_logo_string(p_mi, option, psz_value):
f = (_Cfunctions.get('libvlc_video_set_logo_string', None) or _Cfunction('libvlc_video_set_logo_string', ((1,), (1,), (1,)), None, None, MediaPlayer, ctypes.c_uint, ctypes.c_char_p))
return f(p_mi, option, psz_value)
| [
"def",
"libvlc_video_set_logo_string",
"(",
"p_mi",
",",
"option",
",",
"psz_value",
")",
":",
"f",
"=",
"(",
"_Cfunctions",
".",
"get",
"(",
"'libvlc_video_set_logo_string'",
",",
"None",
")",
"or",
"_Cfunction",
"(",
"'libvlc_video_set_logo_string'",
",",
"(",
"(",
"1",
",",
")",
",",
"(",
"1",
",",
")",
",",
"(",
"1",
",",
")",
")",
",",
"None",
",",
"None",
",",
"MediaPlayer",
",",
"ctypes",
".",
"c_uint",
",",
"ctypes",
".",
"c_char_p",
")",
")",
"return",
"f",
"(",
"p_mi",
",",
"option",
",",
"psz_value",
")"
] | set logo option as string . | train | true |
37,941 | def dmp_ground_p(f, c, u):
if ((c is not None) and (not c)):
return dmp_zero_p(f, u)
while u:
if (len(f) != 1):
return False
f = f[0]
u -= 1
if (c is None):
return (len(f) <= 1)
else:
return (f == [c])
| [
"def",
"dmp_ground_p",
"(",
"f",
",",
"c",
",",
"u",
")",
":",
"if",
"(",
"(",
"c",
"is",
"not",
"None",
")",
"and",
"(",
"not",
"c",
")",
")",
":",
"return",
"dmp_zero_p",
"(",
"f",
",",
"u",
")",
"while",
"u",
":",
"if",
"(",
"len",
"(",
"f",
")",
"!=",
"1",
")",
":",
"return",
"False",
"f",
"=",
"f",
"[",
"0",
"]",
"u",
"-=",
"1",
"if",
"(",
"c",
"is",
"None",
")",
":",
"return",
"(",
"len",
"(",
"f",
")",
"<=",
"1",
")",
"else",
":",
"return",
"(",
"f",
"==",
"[",
"c",
"]",
")"
] | return true if f is constant in k[x] . | train | false |
37,942 | def encrypt_string(s, key=''):
key += ' '
s = encode_utf8(s)
a = []
for i in xrange(len(s)):
try:
a.append(chr((ord(s[i]) + (ord(key[(i % len(key))]) % 256))))
except:
raise EncryptionError()
s = ''.join(a)
s = base64.urlsafe_b64encode(s)
return s
| [
"def",
"encrypt_string",
"(",
"s",
",",
"key",
"=",
"''",
")",
":",
"key",
"+=",
"' '",
"s",
"=",
"encode_utf8",
"(",
"s",
")",
"a",
"=",
"[",
"]",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"s",
")",
")",
":",
"try",
":",
"a",
".",
"append",
"(",
"chr",
"(",
"(",
"ord",
"(",
"s",
"[",
"i",
"]",
")",
"+",
"(",
"ord",
"(",
"key",
"[",
"(",
"i",
"%",
"len",
"(",
"key",
")",
")",
"]",
")",
"%",
"256",
")",
")",
")",
")",
"except",
":",
"raise",
"EncryptionError",
"(",
")",
"s",
"=",
"''",
".",
"join",
"(",
"a",
")",
"s",
"=",
"base64",
".",
"urlsafe_b64encode",
"(",
"s",
")",
"return",
"s"
] | returns the given string as an encrypted bytestring . | train | false |
37,943 | @library.filter
def class_selected(a, b):
if (a == b):
return jinja2.Markup('class="selected"')
else:
return ''
| [
"@",
"library",
".",
"filter",
"def",
"class_selected",
"(",
"a",
",",
"b",
")",
":",
"if",
"(",
"a",
"==",
"b",
")",
":",
"return",
"jinja2",
".",
"Markup",
"(",
"'class=\"selected\"'",
")",
"else",
":",
"return",
"''"
] | return class="selected" if a == b . | train | false |
37,944 | def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
assert (len(inputs) == len(targets))
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, ((len(inputs) - batchsize) + 1), batchsize):
if shuffle:
excerpt = indices[start_idx:(start_idx + batchsize)]
else:
excerpt = slice(start_idx, (start_idx + batchsize))
(yield (inputs[excerpt], targets[excerpt]))
| [
"def",
"iterate_minibatches",
"(",
"inputs",
",",
"targets",
",",
"batchsize",
",",
"shuffle",
"=",
"False",
")",
":",
"assert",
"(",
"len",
"(",
"inputs",
")",
"==",
"len",
"(",
"targets",
")",
")",
"if",
"shuffle",
":",
"indices",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"inputs",
")",
")",
"np",
".",
"random",
".",
"shuffle",
"(",
"indices",
")",
"for",
"start_idx",
"in",
"range",
"(",
"0",
",",
"(",
"(",
"len",
"(",
"inputs",
")",
"-",
"batchsize",
")",
"+",
"1",
")",
",",
"batchsize",
")",
":",
"if",
"shuffle",
":",
"excerpt",
"=",
"indices",
"[",
"start_idx",
":",
"(",
"start_idx",
"+",
"batchsize",
")",
"]",
"else",
":",
"excerpt",
"=",
"slice",
"(",
"start_idx",
",",
"(",
"start_idx",
"+",
"batchsize",
")",
")",
"(",
"yield",
"(",
"inputs",
"[",
"excerpt",
"]",
",",
"targets",
"[",
"excerpt",
"]",
")",
")"
] | generates one epoch of batches of inputs and targets . | train | false |
37,945 | def _structured_bootstrap(args, n_boot, units, func, func_kwargs, rs):
unique_units = np.unique(units)
n_units = len(unique_units)
args = [[a[(units == unit)] for unit in unique_units] for a in args]
boot_dist = []
for i in range(int(n_boot)):
resampler = rs.randint(0, n_units, n_units)
sample = [np.take(a, resampler, axis=0) for a in args]
lengths = map(len, sample[0])
resampler = [rs.randint(0, n, n) for n in lengths]
sample = [[c.take(r, axis=0) for (c, r) in zip(a, resampler)] for a in sample]
sample = list(map(np.concatenate, sample))
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
| [
"def",
"_structured_bootstrap",
"(",
"args",
",",
"n_boot",
",",
"units",
",",
"func",
",",
"func_kwargs",
",",
"rs",
")",
":",
"unique_units",
"=",
"np",
".",
"unique",
"(",
"units",
")",
"n_units",
"=",
"len",
"(",
"unique_units",
")",
"args",
"=",
"[",
"[",
"a",
"[",
"(",
"units",
"==",
"unit",
")",
"]",
"for",
"unit",
"in",
"unique_units",
"]",
"for",
"a",
"in",
"args",
"]",
"boot_dist",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"int",
"(",
"n_boot",
")",
")",
":",
"resampler",
"=",
"rs",
".",
"randint",
"(",
"0",
",",
"n_units",
",",
"n_units",
")",
"sample",
"=",
"[",
"np",
".",
"take",
"(",
"a",
",",
"resampler",
",",
"axis",
"=",
"0",
")",
"for",
"a",
"in",
"args",
"]",
"lengths",
"=",
"map",
"(",
"len",
",",
"sample",
"[",
"0",
"]",
")",
"resampler",
"=",
"[",
"rs",
".",
"randint",
"(",
"0",
",",
"n",
",",
"n",
")",
"for",
"n",
"in",
"lengths",
"]",
"sample",
"=",
"[",
"[",
"c",
".",
"take",
"(",
"r",
",",
"axis",
"=",
"0",
")",
"for",
"(",
"c",
",",
"r",
")",
"in",
"zip",
"(",
"a",
",",
"resampler",
")",
"]",
"for",
"a",
"in",
"sample",
"]",
"sample",
"=",
"list",
"(",
"map",
"(",
"np",
".",
"concatenate",
",",
"sample",
")",
")",
"boot_dist",
".",
"append",
"(",
"func",
"(",
"*",
"sample",
",",
"**",
"func_kwargs",
")",
")",
"return",
"np",
".",
"array",
"(",
"boot_dist",
")"
] | resample units instead of datapoints . | train | false |
37,947 | def sort_common_subsequences(items):
def contains(seq, x):
for item in seq:
if (item is x):
return True
elif (item.is_sequence_constructor and contains(item.args, x)):
return True
return False
def lower_than(a, b):
return (b.is_sequence_constructor and contains(b.args, a))
for (pos, item) in enumerate(items):
key = item[1]
new_pos = pos
for i in range((pos - 1), (-1), (-1)):
if lower_than(key, items[i][0]):
new_pos = i
if (new_pos != pos):
for i in range(pos, new_pos, (-1)):
items[i] = items[(i - 1)]
items[new_pos] = item
| [
"def",
"sort_common_subsequences",
"(",
"items",
")",
":",
"def",
"contains",
"(",
"seq",
",",
"x",
")",
":",
"for",
"item",
"in",
"seq",
":",
"if",
"(",
"item",
"is",
"x",
")",
":",
"return",
"True",
"elif",
"(",
"item",
".",
"is_sequence_constructor",
"and",
"contains",
"(",
"item",
".",
"args",
",",
"x",
")",
")",
":",
"return",
"True",
"return",
"False",
"def",
"lower_than",
"(",
"a",
",",
"b",
")",
":",
"return",
"(",
"b",
".",
"is_sequence_constructor",
"and",
"contains",
"(",
"b",
".",
"args",
",",
"a",
")",
")",
"for",
"(",
"pos",
",",
"item",
")",
"in",
"enumerate",
"(",
"items",
")",
":",
"key",
"=",
"item",
"[",
"1",
"]",
"new_pos",
"=",
"pos",
"for",
"i",
"in",
"range",
"(",
"(",
"pos",
"-",
"1",
")",
",",
"(",
"-",
"1",
")",
",",
"(",
"-",
"1",
")",
")",
":",
"if",
"lower_than",
"(",
"key",
",",
"items",
"[",
"i",
"]",
"[",
"0",
"]",
")",
":",
"new_pos",
"=",
"i",
"if",
"(",
"new_pos",
"!=",
"pos",
")",
":",
"for",
"i",
"in",
"range",
"(",
"pos",
",",
"new_pos",
",",
"(",
"-",
"1",
")",
")",
":",
"items",
"[",
"i",
"]",
"=",
"items",
"[",
"(",
"i",
"-",
"1",
")",
"]",
"items",
"[",
"new_pos",
"]",
"=",
"item"
] | sort items/subsequences so that all items and subsequences that an item contains appear before the item itself . | train | false |
37,948 | def bytes_to_str(s, encoding='utf-8'):
if (six.PY3 and isinstance(s, bytes)):
return s.decode(encoding)
return s
| [
"def",
"bytes_to_str",
"(",
"s",
",",
"encoding",
"=",
"'utf-8'",
")",
":",
"if",
"(",
"six",
".",
"PY3",
"and",
"isinstance",
"(",
"s",
",",
"bytes",
")",
")",
":",
"return",
"s",
".",
"decode",
"(",
"encoding",
")",
"return",
"s"
] | returns a str if a bytes object is given . | train | true |
37,950 | @pytest.fixture
def capfd(request):
if ('capsys' in request.fixturenames):
request.raiseerror(error_capsysfderror)
if (not hasattr(os, 'dup')):
pytest.skip('capfd funcarg needs os.dup')
request.node._capfuncarg = c = CaptureFixture(FDCapture, request)
return c
| [
"@",
"pytest",
".",
"fixture",
"def",
"capfd",
"(",
"request",
")",
":",
"if",
"(",
"'capsys'",
"in",
"request",
".",
"fixturenames",
")",
":",
"request",
".",
"raiseerror",
"(",
"error_capsysfderror",
")",
"if",
"(",
"not",
"hasattr",
"(",
"os",
",",
"'dup'",
")",
")",
":",
"pytest",
".",
"skip",
"(",
"'capfd funcarg needs os.dup'",
")",
"request",
".",
"node",
".",
"_capfuncarg",
"=",
"c",
"=",
"CaptureFixture",
"(",
"FDCapture",
",",
"request",
")",
"return",
"c"
] | enable capturing of writes to file descriptors 1 and 2 and make captured output available via capfd . | train | false |
37,951 | def get_url_without_trailing_slash(value):
result = (value[:(-1)] if value.endswith('/') else value)
return result
| [
"def",
"get_url_without_trailing_slash",
"(",
"value",
")",
":",
"result",
"=",
"(",
"value",
"[",
":",
"(",
"-",
"1",
")",
"]",
"if",
"value",
".",
"endswith",
"(",
"'/'",
")",
"else",
"value",
")",
"return",
"result"
] | function which strips a trailing slash from the provided url if one is present . | train | false |
37,952 | def distribution_report():
def prep(r):
r.method = 'report'
return True
s3.prep = prep
return s3_rest_controller('supply', 'distribution')
| [
"def",
"distribution_report",
"(",
")",
":",
"def",
"prep",
"(",
"r",
")",
":",
"r",
".",
"method",
"=",
"'report'",
"return",
"True",
"s3",
".",
"prep",
"=",
"prep",
"return",
"s3_rest_controller",
"(",
"'supply'",
",",
"'distribution'",
")"
] | restful crud controller for supply distributions - limited to just seeing aggregated data for differential permissions . | train | false |
37,954 | def _unHtml(s):
return subSGMLRefs(re_spacessub(' ', re_unhtmlsub('', s)).strip())
| [
"def",
"_unHtml",
"(",
"s",
")",
":",
"return",
"subSGMLRefs",
"(",
"re_spacessub",
"(",
"' '",
",",
"re_unhtmlsub",
"(",
"''",
",",
"s",
")",
")",
".",
"strip",
"(",
")",
")"
] | return a string without tags and no multiple spaces . | train | false |
37,956 | def get_freq(freq):
if isinstance(freq, compat.string_types):
(base, mult) = get_freq_code(freq)
freq = base
return freq
| [
"def",
"get_freq",
"(",
"freq",
")",
":",
"if",
"isinstance",
"(",
"freq",
",",
"compat",
".",
"string_types",
")",
":",
"(",
"base",
",",
"mult",
")",
"=",
"get_freq_code",
"(",
"freq",
")",
"freq",
"=",
"base",
"return",
"freq"
] | return frequency code of given frequency str . | train | false |
37,957 | def process_bundle(rels):
concepts = {}
for rel in rels:
rel_name = rel[u'rel_name']
closures = rel[u'closures']
schema = rel[u'schema']
filename = rel[u'filename']
concept_list = clause2concepts(filename, rel_name, schema, closures)
for c in concept_list:
label = c.prefLabel
if (label in concepts):
for data in c.extension:
concepts[label].augment(data)
concepts[label].close()
else:
concepts[label] = c
return concepts
| [
"def",
"process_bundle",
"(",
"rels",
")",
":",
"concepts",
"=",
"{",
"}",
"for",
"rel",
"in",
"rels",
":",
"rel_name",
"=",
"rel",
"[",
"u'rel_name'",
"]",
"closures",
"=",
"rel",
"[",
"u'closures'",
"]",
"schema",
"=",
"rel",
"[",
"u'schema'",
"]",
"filename",
"=",
"rel",
"[",
"u'filename'",
"]",
"concept_list",
"=",
"clause2concepts",
"(",
"filename",
",",
"rel_name",
",",
"schema",
",",
"closures",
")",
"for",
"c",
"in",
"concept_list",
":",
"label",
"=",
"c",
".",
"prefLabel",
"if",
"(",
"label",
"in",
"concepts",
")",
":",
"for",
"data",
"in",
"c",
".",
"extension",
":",
"concepts",
"[",
"label",
"]",
".",
"augment",
"(",
"data",
")",
"concepts",
"[",
"label",
"]",
".",
"close",
"(",
")",
"else",
":",
"concepts",
"[",
"label",
"]",
"=",
"c",
"return",
"concepts"
] | given a list of relation metadata bundles . | train | false |
37,958 | def get_handlers():
if hasattr(settings, 'RAPIDSMS_HANDLERS'):
return [import_class(name) for name in settings.RAPIDSMS_HANDLERS]
warn('Please set RAPIDSMS_HANDLERS to the handlers that should be installed. The old behavior of installing all defined handlers, possibly modified by INSTALLED_HANDLERS and/or EXCLUDED_HANDLERS, is deprecated and will be removed', DeprecationWarning)
handlers = _find_handlers(_apps())
if (hasattr(settings, 'INSTALLED_HANDLERS') and (settings.INSTALLED_HANDLERS is not None)):
copy = [handler for handler in handlers]
handlers = []
while (len(copy) > 0):
for prefix in settings.INSTALLED_HANDLERS:
if copy[(-1)].__module__.startswith(prefix):
handlers.append(copy[(-1)])
break
copy.pop()
if (hasattr(settings, 'EXCLUDED_HANDLERS') and (settings.EXCLUDED_HANDLERS is not None)):
for prefix in settings.EXCLUDED_HANDLERS:
handlers = [handler for handler in handlers if (not handler.__module__.startswith(prefix))]
return handlers
| [
"def",
"get_handlers",
"(",
")",
":",
"if",
"hasattr",
"(",
"settings",
",",
"'RAPIDSMS_HANDLERS'",
")",
":",
"return",
"[",
"import_class",
"(",
"name",
")",
"for",
"name",
"in",
"settings",
".",
"RAPIDSMS_HANDLERS",
"]",
"warn",
"(",
"'Please set RAPIDSMS_HANDLERS to the handlers that should be installed. The old behavior of installing all defined handlers, possibly modified by INSTALLED_HANDLERS and/or EXCLUDED_HANDLERS, is deprecated and will be removed'",
",",
"DeprecationWarning",
")",
"handlers",
"=",
"_find_handlers",
"(",
"_apps",
"(",
")",
")",
"if",
"(",
"hasattr",
"(",
"settings",
",",
"'INSTALLED_HANDLERS'",
")",
"and",
"(",
"settings",
".",
"INSTALLED_HANDLERS",
"is",
"not",
"None",
")",
")",
":",
"copy",
"=",
"[",
"handler",
"for",
"handler",
"in",
"handlers",
"]",
"handlers",
"=",
"[",
"]",
"while",
"(",
"len",
"(",
"copy",
")",
">",
"0",
")",
":",
"for",
"prefix",
"in",
"settings",
".",
"INSTALLED_HANDLERS",
":",
"if",
"copy",
"[",
"(",
"-",
"1",
")",
"]",
".",
"__module__",
".",
"startswith",
"(",
"prefix",
")",
":",
"handlers",
".",
"append",
"(",
"copy",
"[",
"(",
"-",
"1",
")",
"]",
")",
"break",
"copy",
".",
"pop",
"(",
")",
"if",
"(",
"hasattr",
"(",
"settings",
",",
"'EXCLUDED_HANDLERS'",
")",
"and",
"(",
"settings",
".",
"EXCLUDED_HANDLERS",
"is",
"not",
"None",
")",
")",
":",
"for",
"prefix",
"in",
"settings",
".",
"EXCLUDED_HANDLERS",
":",
"handlers",
"=",
"[",
"handler",
"for",
"handler",
"in",
"handlers",
"if",
"(",
"not",
"handler",
".",
"__module__",
".",
"startswith",
"(",
"prefix",
")",
")",
"]",
"return",
"handlers"
] | return a list of the handler classes to use in the current project . | train | false |
37,961 | @profiler.trace
@memoized_with_request(novaclient)
def list_extensions(nova_api):
blacklist = set(getattr(settings, 'OPENSTACK_NOVA_EXTENSIONS_BLACKLIST', []))
return tuple((extension for extension in nova_list_extensions.ListExtManager(nova_api).show_all() if (extension.name not in blacklist)))
| [
"@",
"profiler",
".",
"trace",
"@",
"memoized_with_request",
"(",
"novaclient",
")",
"def",
"list_extensions",
"(",
"nova_api",
")",
":",
"blacklist",
"=",
"set",
"(",
"getattr",
"(",
"settings",
",",
"'OPENSTACK_NOVA_EXTENSIONS_BLACKLIST'",
",",
"[",
"]",
")",
")",
"return",
"tuple",
"(",
"(",
"extension",
"for",
"extension",
"in",
"nova_list_extensions",
".",
"ListExtManager",
"(",
"nova_api",
")",
".",
"show_all",
"(",
")",
"if",
"(",
"extension",
".",
"name",
"not",
"in",
"blacklist",
")",
")",
")"
] | list all nova extensions . | train | false |
37,963 | def batch_iter(data, batch_size, num_epochs, shuffle=True):
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = (int(((len(data) - 1) / batch_size)) + 1)
for epoch in range(num_epochs):
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
else:
shuffled_data = data
for batch_num in range(num_batches_per_epoch):
start_index = (batch_num * batch_size)
end_index = min(((batch_num + 1) * batch_size), data_size)
(yield shuffled_data[start_index:end_index])
| [
"def",
"batch_iter",
"(",
"data",
",",
"batch_size",
",",
"num_epochs",
",",
"shuffle",
"=",
"True",
")",
":",
"data",
"=",
"np",
".",
"array",
"(",
"data",
")",
"data_size",
"=",
"len",
"(",
"data",
")",
"num_batches_per_epoch",
"=",
"(",
"int",
"(",
"(",
"(",
"len",
"(",
"data",
")",
"-",
"1",
")",
"/",
"batch_size",
")",
")",
"+",
"1",
")",
"for",
"epoch",
"in",
"range",
"(",
"num_epochs",
")",
":",
"if",
"shuffle",
":",
"shuffle_indices",
"=",
"np",
".",
"random",
".",
"permutation",
"(",
"np",
".",
"arange",
"(",
"data_size",
")",
")",
"shuffled_data",
"=",
"data",
"[",
"shuffle_indices",
"]",
"else",
":",
"shuffled_data",
"=",
"data",
"for",
"batch_num",
"in",
"range",
"(",
"num_batches_per_epoch",
")",
":",
"start_index",
"=",
"(",
"batch_num",
"*",
"batch_size",
")",
"end_index",
"=",
"min",
"(",
"(",
"(",
"batch_num",
"+",
"1",
")",
"*",
"batch_size",
")",
",",
"data_size",
")",
"(",
"yield",
"shuffled_data",
"[",
"start_index",
":",
"end_index",
"]",
")"
] | generates a batch iterator for a dataset . | train | true |
37,964 | def _sqlite_bytelower(bytestring):
if (not six.PY2):
return bytestring.lower()
return buffer(bytes(bytestring).lower())
| [
"def",
"_sqlite_bytelower",
"(",
"bytestring",
")",
":",
"if",
"(",
"not",
"six",
".",
"PY2",
")",
":",
"return",
"bytestring",
".",
"lower",
"(",
")",
"return",
"buffer",
"(",
"bytes",
"(",
"bytestring",
")",
".",
"lower",
"(",
")",
")"
] | a custom bytelower sqlite function so we can compare bytestrings in a semi case insensitive fashion . | train | false |
37,965 | def test_smart_boolean():
assert hug.types.smart_boolean('true')
assert hug.types.smart_boolean('t')
assert hug.types.smart_boolean('1')
assert hug.types.smart_boolean(1)
assert (not hug.types.smart_boolean(''))
assert (not hug.types.smart_boolean('false'))
assert (not hug.types.smart_boolean('f'))
assert (not hug.types.smart_boolean('0'))
assert (not hug.types.smart_boolean(0))
assert hug.types.smart_boolean(True)
assert (not hug.types.smart_boolean(None))
assert (not hug.types.smart_boolean(False))
with pytest.raises(KeyError):
hug.types.smart_boolean('bacon')
| [
"def",
"test_smart_boolean",
"(",
")",
":",
"assert",
"hug",
".",
"types",
".",
"smart_boolean",
"(",
"'true'",
")",
"assert",
"hug",
".",
"types",
".",
"smart_boolean",
"(",
"'t'",
")",
"assert",
"hug",
".",
"types",
".",
"smart_boolean",
"(",
"'1'",
")",
"assert",
"hug",
".",
"types",
".",
"smart_boolean",
"(",
"1",
")",
"assert",
"(",
"not",
"hug",
".",
"types",
".",
"smart_boolean",
"(",
"''",
")",
")",
"assert",
"(",
"not",
"hug",
".",
"types",
".",
"smart_boolean",
"(",
"'false'",
")",
")",
"assert",
"(",
"not",
"hug",
".",
"types",
".",
"smart_boolean",
"(",
"'f'",
")",
")",
"assert",
"(",
"not",
"hug",
".",
"types",
".",
"smart_boolean",
"(",
"'0'",
")",
")",
"assert",
"(",
"not",
"hug",
".",
"types",
".",
"smart_boolean",
"(",
"0",
")",
")",
"assert",
"hug",
".",
"types",
".",
"smart_boolean",
"(",
"True",
")",
"assert",
"(",
"not",
"hug",
".",
"types",
".",
"smart_boolean",
"(",
"None",
")",
")",
"assert",
"(",
"not",
"hug",
".",
"types",
".",
"smart_boolean",
"(",
"False",
")",
")",
"with",
"pytest",
".",
"raises",
"(",
"KeyError",
")",
":",
"hug",
".",
"types",
".",
"smart_boolean",
"(",
"'bacon'",
")"
] | test to ensure that the smart boolean type works as expected . | train | false |
37,969 | def _check_plural_arg_is_num(plural_arg):
if _STRING_RE.match(plural_arg):
return False
fn_match = _FUNCTION_RE.match(plural_arg)
if (fn_match and (fn_match.group(1) in _functions)):
return False
for var in _string_vars:
if (var in plural_arg.upper()):
return False
for var in _num_vars:
if (var in plural_arg.upper()):
return True
return None
| [
"def",
"_check_plural_arg_is_num",
"(",
"plural_arg",
")",
":",
"if",
"_STRING_RE",
".",
"match",
"(",
"plural_arg",
")",
":",
"return",
"False",
"fn_match",
"=",
"_FUNCTION_RE",
".",
"match",
"(",
"plural_arg",
")",
"if",
"(",
"fn_match",
"and",
"(",
"fn_match",
".",
"group",
"(",
"1",
")",
"in",
"_functions",
")",
")",
":",
"return",
"False",
"for",
"var",
"in",
"_string_vars",
":",
"if",
"(",
"var",
"in",
"plural_arg",
".",
"upper",
"(",
")",
")",
":",
"return",
"False",
"for",
"var",
"in",
"_num_vars",
":",
"if",
"(",
"var",
"in",
"plural_arg",
".",
"upper",
"(",
")",
")",
":",
"return",
"True",
"return",
"None"
] | check to see if a string matches the known ways in which a plural argument can be a number . | train | false |
37,970 | def test_info_preserved_pickle_copy_init(mixin_cols):
def pickle_roundtrip(c):
return pickle.loads(pickle.dumps(c))
def init_from_class(c):
return c.__class__(c)
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
for colname in ('i', 'm'):
m = mixin_cols[colname]
m.info.name = colname
m.info.format = '{0}'
m.info.description = 'd'
m.info.meta = {'a': 1}
for func in (copy.copy, copy.deepcopy, pickle_roundtrip, init_from_class):
m2 = func(m)
for attr in attrs:
assert (getattr(m2.info, attr) == getattr(m.info, attr))
| [
"def",
"test_info_preserved_pickle_copy_init",
"(",
"mixin_cols",
")",
":",
"def",
"pickle_roundtrip",
"(",
"c",
")",
":",
"return",
"pickle",
".",
"loads",
"(",
"pickle",
".",
"dumps",
"(",
"c",
")",
")",
"def",
"init_from_class",
"(",
"c",
")",
":",
"return",
"c",
".",
"__class__",
"(",
"c",
")",
"attrs",
"=",
"(",
"'name'",
",",
"'unit'",
",",
"'dtype'",
",",
"'format'",
",",
"'description'",
",",
"'meta'",
")",
"for",
"colname",
"in",
"(",
"'i'",
",",
"'m'",
")",
":",
"m",
"=",
"mixin_cols",
"[",
"colname",
"]",
"m",
".",
"info",
".",
"name",
"=",
"colname",
"m",
".",
"info",
".",
"format",
"=",
"'{0}'",
"m",
".",
"info",
".",
"description",
"=",
"'d'",
"m",
".",
"info",
".",
"meta",
"=",
"{",
"'a'",
":",
"1",
"}",
"for",
"func",
"in",
"(",
"copy",
".",
"copy",
",",
"copy",
".",
"deepcopy",
",",
"pickle_roundtrip",
",",
"init_from_class",
")",
":",
"m2",
"=",
"func",
"(",
"m",
")",
"for",
"attr",
"in",
"attrs",
":",
"assert",
"(",
"getattr",
"(",
"m2",
".",
"info",
",",
"attr",
")",
"==",
"getattr",
"(",
"m",
".",
"info",
",",
"attr",
")",
")"
] | test copy . | train | false |
37,972 | def get_formatted_email(user):
if (user == u'Administrator'):
return user
from email.utils import formataddr
fullname = get_fullname(user)
return formataddr((fullname, user))
| [
"def",
"get_formatted_email",
"(",
"user",
")",
":",
"if",
"(",
"user",
"==",
"u'Administrator'",
")",
":",
"return",
"user",
"from",
"email",
".",
"utils",
"import",
"formataddr",
"fullname",
"=",
"get_fullname",
"(",
"user",
")",
"return",
"formataddr",
"(",
"(",
"fullname",
",",
"user",
")",
")"
] | get email address of user formatted as: john doe <johndoe@example . | train | false |
37,973 | def _remap_date_counts(**kwargs):
for (label, qs) in kwargs.iteritems():
res = defaultdict((lambda : {label: 0}))
for x in qs:
key = date(x['year'], x['month'], x.get('day', 1))
res[key][label] += x['count']
(yield res)
| [
"def",
"_remap_date_counts",
"(",
"**",
"kwargs",
")",
":",
"for",
"(",
"label",
",",
"qs",
")",
"in",
"kwargs",
".",
"iteritems",
"(",
")",
":",
"res",
"=",
"defaultdict",
"(",
"(",
"lambda",
":",
"{",
"label",
":",
"0",
"}",
")",
")",
"for",
"x",
"in",
"qs",
":",
"key",
"=",
"date",
"(",
"x",
"[",
"'year'",
"]",
",",
"x",
"[",
"'month'",
"]",
",",
"x",
".",
"get",
"(",
"'day'",
",",
"1",
")",
")",
"res",
"[",
"key",
"]",
"[",
"label",
"]",
"+=",
"x",
"[",
"'count'",
"]",
"(",
"yield",
"res",
")"
] | remap the query result . | train | false |
37,974 | def _get_user(username):
if (not username):
return None
try:
if (c.userobj and (c.userobj.name == username)):
return c.userobj
except AttributeError:
pass
except TypeError:
pass
return model.User.get(username)
| [
"def",
"_get_user",
"(",
"username",
")",
":",
"if",
"(",
"not",
"username",
")",
":",
"return",
"None",
"try",
":",
"if",
"(",
"c",
".",
"userobj",
"and",
"(",
"c",
".",
"userobj",
".",
"name",
"==",
"username",
")",
")",
":",
"return",
"c",
".",
"userobj",
"except",
"AttributeError",
":",
"pass",
"except",
"TypeError",
":",
"pass",
"return",
"model",
".",
"User",
".",
"get",
"(",
"username",
")"
] | retrieve user with provided user_id args: user_id: username of the user for which object is to retrieve returns: obj . | train | false |
37,975 | def all_subclasses(cls):
subclasses = cls.__subclasses__()
for subcls in cls.__subclasses__():
subclasses.extend(all_subclasses(subcls))
return subclasses
| [
"def",
"all_subclasses",
"(",
"cls",
")",
":",
"subclasses",
"=",
"cls",
".",
"__subclasses__",
"(",
")",
"for",
"subcls",
"in",
"cls",
".",
"__subclasses__",
"(",
")",
":",
"subclasses",
".",
"extend",
"(",
"all_subclasses",
"(",
"subcls",
")",
")",
"return",
"subclasses"
] | returns a list of *all* subclasses of cls . | train | false |
37,976 | def properties_root_device_name(properties):
root_device_name = None
for bdm in properties.get('mappings', []):
if (bdm['virtual'] == 'root'):
root_device_name = bdm['device']
if ('root_device_name' in properties):
root_device_name = properties['root_device_name']
return root_device_name
| [
"def",
"properties_root_device_name",
"(",
"properties",
")",
":",
"root_device_name",
"=",
"None",
"for",
"bdm",
"in",
"properties",
".",
"get",
"(",
"'mappings'",
",",
"[",
"]",
")",
":",
"if",
"(",
"bdm",
"[",
"'virtual'",
"]",
"==",
"'root'",
")",
":",
"root_device_name",
"=",
"bdm",
"[",
"'device'",
"]",
"if",
"(",
"'root_device_name'",
"in",
"properties",
")",
":",
"root_device_name",
"=",
"properties",
"[",
"'root_device_name'",
"]",
"return",
"root_device_name"
] | get root device name from image meta data . | train | false |
37,978 | def reservation_get_all_by_project(context, project_id):
return IMPL.reservation_get_all_by_project(context, project_id)
| [
"def",
"reservation_get_all_by_project",
"(",
"context",
",",
"project_id",
")",
":",
"return",
"IMPL",
".",
"reservation_get_all_by_project",
"(",
"context",
",",
"project_id",
")"
] | retrieve all reservations associated with a given project . | train | false |
37,980 | def generate_public_id():
u = uuid.uuid4().bytes
return int128_to_b36(u)
| [
"def",
"generate_public_id",
"(",
")",
":",
"u",
"=",
"uuid",
".",
"uuid4",
"(",
")",
".",
"bytes",
"return",
"int128_to_b36",
"(",
"u",
")"
] | returns a base-36 string uuid . | train | false |
37,981 | def remove_driver(notification_driver):
_get_drivers()
removed = False
if (notification_driver in drivers):
drivers.remove(notification_driver)
removed = True
else:
for driver in drivers:
if (_object_name(driver) == notification_driver):
drivers.remove(driver)
removed = True
if (not removed):
raise ValueError(('Cannot remove; %s is not in list' % notification_driver))
| [
"def",
"remove_driver",
"(",
"notification_driver",
")",
":",
"_get_drivers",
"(",
")",
"removed",
"=",
"False",
"if",
"(",
"notification_driver",
"in",
"drivers",
")",
":",
"drivers",
".",
"remove",
"(",
"notification_driver",
")",
"removed",
"=",
"True",
"else",
":",
"for",
"driver",
"in",
"drivers",
":",
"if",
"(",
"_object_name",
"(",
"driver",
")",
"==",
"notification_driver",
")",
":",
"drivers",
".",
"remove",
"(",
"driver",
")",
"removed",
"=",
"True",
"if",
"(",
"not",
"removed",
")",
":",
"raise",
"ValueError",
"(",
"(",
"'Cannot remove; %s is not in list'",
"%",
"notification_driver",
")",
")"
] | remove a notification driver at runtime . | train | false |
37,983 | def org_site_has_assets(row, tablename='org_facility'):
if (not current.deployment_settings.has_module('asset')):
return False
if hasattr(row, tablename):
row = row[tablename]
try:
id = row.id
except AttributeError:
return None
s3db = current.s3db
atable = s3db.asset_asset
stable = s3db[tablename]
query = (((atable.deleted != True) & (stable.id == id)) & (atable.site_id == stable.site_id))
asset = current.db(query).select(atable.id, limitby=(0, 1)).first()
if asset:
return True
else:
return False
| [
"def",
"org_site_has_assets",
"(",
"row",
",",
"tablename",
"=",
"'org_facility'",
")",
":",
"if",
"(",
"not",
"current",
".",
"deployment_settings",
".",
"has_module",
"(",
"'asset'",
")",
")",
":",
"return",
"False",
"if",
"hasattr",
"(",
"row",
",",
"tablename",
")",
":",
"row",
"=",
"row",
"[",
"tablename",
"]",
"try",
":",
"id",
"=",
"row",
".",
"id",
"except",
"AttributeError",
":",
"return",
"None",
"s3db",
"=",
"current",
".",
"s3db",
"atable",
"=",
"s3db",
".",
"asset_asset",
"stable",
"=",
"s3db",
"[",
"tablename",
"]",
"query",
"=",
"(",
"(",
"(",
"atable",
".",
"deleted",
"!=",
"True",
")",
"&",
"(",
"stable",
".",
"id",
"==",
"id",
")",
")",
"&",
"(",
"atable",
".",
"site_id",
"==",
"stable",
".",
"site_id",
")",
")",
"asset",
"=",
"current",
".",
"db",
"(",
"query",
")",
".",
"select",
"(",
"atable",
".",
"id",
",",
"limitby",
"=",
"(",
"0",
",",
"1",
")",
")",
".",
"first",
"(",
")",
"if",
"asset",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | whether a site has assets . | train | false |
37,984 | def getCircleIntersectionsFromCircleNodes(circleNodes):
if (len(circleNodes) < 1):
return []
circleIntersections = []
index = 0
pixelTable = {}
for circleNode in circleNodes:
euclidean.addElementToPixelListFromPoint(circleNode, pixelTable, circleNode.circle)
accumulatedCircleNodeTable = {}
for circleNodeIndex in xrange(len(circleNodes)):
circleNodeBehind = circleNodes[circleNodeIndex]
circleNodeIndexMinusOne = (circleNodeIndex - 1)
if (circleNodeIndexMinusOne >= 0):
circleNodeAdditional = circleNodes[circleNodeIndexMinusOne]
euclidean.addElementToPixelListFromPoint(circleNodeAdditional, accumulatedCircleNodeTable, (0.5 * circleNodeAdditional.circle))
withinNodes = circleNodeBehind.getWithinNodes(accumulatedCircleNodeTable)
for circleNodeAhead in withinNodes:
circleIntersectionForward = CircleIntersection(circleNodeAhead, index, circleNodeBehind)
if (not circleIntersectionForward.isWithinCircles(pixelTable)):
circleIntersections.append(circleIntersectionForward)
circleNodeBehind.circleIntersections.append(circleIntersectionForward)
index += 1
circleIntersectionBackward = CircleIntersection(circleNodeBehind, index, circleNodeAhead)
if (not circleIntersectionBackward.isWithinCircles(pixelTable)):
circleIntersections.append(circleIntersectionBackward)
circleNodeAhead.circleIntersections.append(circleIntersectionBackward)
index += 1
return circleIntersections
| [
"def",
"getCircleIntersectionsFromCircleNodes",
"(",
"circleNodes",
")",
":",
"if",
"(",
"len",
"(",
"circleNodes",
")",
"<",
"1",
")",
":",
"return",
"[",
"]",
"circleIntersections",
"=",
"[",
"]",
"index",
"=",
"0",
"pixelTable",
"=",
"{",
"}",
"for",
"circleNode",
"in",
"circleNodes",
":",
"euclidean",
".",
"addElementToPixelListFromPoint",
"(",
"circleNode",
",",
"pixelTable",
",",
"circleNode",
".",
"circle",
")",
"accumulatedCircleNodeTable",
"=",
"{",
"}",
"for",
"circleNodeIndex",
"in",
"xrange",
"(",
"len",
"(",
"circleNodes",
")",
")",
":",
"circleNodeBehind",
"=",
"circleNodes",
"[",
"circleNodeIndex",
"]",
"circleNodeIndexMinusOne",
"=",
"(",
"circleNodeIndex",
"-",
"1",
")",
"if",
"(",
"circleNodeIndexMinusOne",
">=",
"0",
")",
":",
"circleNodeAdditional",
"=",
"circleNodes",
"[",
"circleNodeIndexMinusOne",
"]",
"euclidean",
".",
"addElementToPixelListFromPoint",
"(",
"circleNodeAdditional",
",",
"accumulatedCircleNodeTable",
",",
"(",
"0.5",
"*",
"circleNodeAdditional",
".",
"circle",
")",
")",
"withinNodes",
"=",
"circleNodeBehind",
".",
"getWithinNodes",
"(",
"accumulatedCircleNodeTable",
")",
"for",
"circleNodeAhead",
"in",
"withinNodes",
":",
"circleIntersectionForward",
"=",
"CircleIntersection",
"(",
"circleNodeAhead",
",",
"index",
",",
"circleNodeBehind",
")",
"if",
"(",
"not",
"circleIntersectionForward",
".",
"isWithinCircles",
"(",
"pixelTable",
")",
")",
":",
"circleIntersections",
".",
"append",
"(",
"circleIntersectionForward",
")",
"circleNodeBehind",
".",
"circleIntersections",
".",
"append",
"(",
"circleIntersectionForward",
")",
"index",
"+=",
"1",
"circleIntersectionBackward",
"=",
"CircleIntersection",
"(",
"circleNodeBehind",
",",
"index",
",",
"circleNodeAhead",
")",
"if",
"(",
"not",
"circleIntersectionBackward",
".",
"isWithinCircles",
"(",
"pixelTable",
")",
")",
":",
"circleIntersections",
".",
"append",
"(",
"circleIntersectionBackward",
")",
"circleNodeAhead",
".",
"circleIntersections",
".",
"append",
"(",
"circleIntersectionBackward",
")",
"index",
"+=",
"1",
"return",
"circleIntersections"
] | get all the circle intersections which exist between all the circle nodes . | train | false |
37,986 | def _old_api_error(obj_name):
raise SystemExit(('%s has been removed in PyInstaller 2.0. Please update your spec-file. See http://www.pyinstaller.org/wiki/MigrateTo2.0 for details' % obj_name))
| [
"def",
"_old_api_error",
"(",
"obj_name",
")",
":",
"raise",
"SystemExit",
"(",
"(",
"'%s has been removed in PyInstaller 2.0. Please update your spec-file. See http://www.pyinstaller.org/wiki/MigrateTo2.0 for details'",
"%",
"obj_name",
")",
")"
] | cause pyinstall to exit when . | train | false |
37,988 | def cancel_job(job):
default_scheduler.cancel_job(job)
| [
"def",
"cancel_job",
"(",
"job",
")",
":",
"default_scheduler",
".",
"cancel_job",
"(",
"job",
")"
] | delete a scheduled job on the default scheduler . | train | false |
37,989 | def count_failures(runner):
return [TestResults(f, t) for (f, t) in runner._name2ft.values() if (f > 0)]
| [
"def",
"count_failures",
"(",
"runner",
")",
":",
"return",
"[",
"TestResults",
"(",
"f",
",",
"t",
")",
"for",
"(",
"f",
",",
"t",
")",
"in",
"runner",
".",
"_name2ft",
".",
"values",
"(",
")",
"if",
"(",
"f",
">",
"0",
")",
"]"
] | count number of failures in a doctest runner . | train | false |
37,990 | def safe_rm(tgt):
try:
os.remove(tgt)
except (IOError, OSError):
pass
| [
"def",
"safe_rm",
"(",
"tgt",
")",
":",
"try",
":",
"os",
".",
"remove",
"(",
"tgt",
")",
"except",
"(",
"IOError",
",",
"OSError",
")",
":",
"pass"
] | safely remove a file . | train | false |
37,991 | @register_opt()
@local_optimizer([nlinalg.ExtractDiag])
def local_gpu_extract_diagonal(node):
if (isinstance(node.op, nlinalg.ExtractDiag) and isinstance(node.inputs[0].type, theano.tensor.TensorType)):
inp = node.inputs[0]
if (inp.owner and isinstance(inp.owner.op, HostFromGpu)):
return [host_from_gpu(nlinalg.extract_diag(as_cuda_ndarray_variable(inp)))]
if isinstance(node.op, GpuFromHost):
host_input = node.inputs[0]
if (host_input.owner and isinstance(host_input.owner.op, nlinalg.ExtractDiag) and isinstance(host_input.owner.inputs[0].type, theano.tensor.TensorType)):
diag_node = host_input.owner
return [nlinalg.extract_diag(as_cuda_ndarray_variable(diag_node.inputs[0]))]
return False
| [
"@",
"register_opt",
"(",
")",
"@",
"local_optimizer",
"(",
"[",
"nlinalg",
".",
"ExtractDiag",
"]",
")",
"def",
"local_gpu_extract_diagonal",
"(",
"node",
")",
":",
"if",
"(",
"isinstance",
"(",
"node",
".",
"op",
",",
"nlinalg",
".",
"ExtractDiag",
")",
"and",
"isinstance",
"(",
"node",
".",
"inputs",
"[",
"0",
"]",
".",
"type",
",",
"theano",
".",
"tensor",
".",
"TensorType",
")",
")",
":",
"inp",
"=",
"node",
".",
"inputs",
"[",
"0",
"]",
"if",
"(",
"inp",
".",
"owner",
"and",
"isinstance",
"(",
"inp",
".",
"owner",
".",
"op",
",",
"HostFromGpu",
")",
")",
":",
"return",
"[",
"host_from_gpu",
"(",
"nlinalg",
".",
"extract_diag",
"(",
"as_cuda_ndarray_variable",
"(",
"inp",
")",
")",
")",
"]",
"if",
"isinstance",
"(",
"node",
".",
"op",
",",
"GpuFromHost",
")",
":",
"host_input",
"=",
"node",
".",
"inputs",
"[",
"0",
"]",
"if",
"(",
"host_input",
".",
"owner",
"and",
"isinstance",
"(",
"host_input",
".",
"owner",
".",
"op",
",",
"nlinalg",
".",
"ExtractDiag",
")",
"and",
"isinstance",
"(",
"host_input",
".",
"owner",
".",
"inputs",
"[",
"0",
"]",
".",
"type",
",",
"theano",
".",
"tensor",
".",
"TensorType",
")",
")",
":",
"diag_node",
"=",
"host_input",
".",
"owner",
"return",
"[",
"nlinalg",
".",
"extract_diag",
"(",
"as_cuda_ndarray_variable",
"(",
"diag_node",
".",
"inputs",
"[",
"0",
"]",
")",
")",
"]",
"return",
"False"
] | extract_diagonal(host_from_gpu()) -> host_from_gpu gpu_from_host -> extract_diagonal . | train | false |
37,992 | def assert_array_almost_equal_nulp(x, y, nulp=1):
numpy.testing.assert_array_almost_equal_nulp(cupy.asnumpy(x), cupy.asnumpy(y), nulp=nulp)
| [
"def",
"assert_array_almost_equal_nulp",
"(",
"x",
",",
"y",
",",
"nulp",
"=",
"1",
")",
":",
"numpy",
".",
"testing",
".",
"assert_array_almost_equal_nulp",
"(",
"cupy",
".",
"asnumpy",
"(",
"x",
")",
",",
"cupy",
".",
"asnumpy",
"(",
"y",
")",
",",
"nulp",
"=",
"nulp",
")"
] | compare two arrays relatively to their spacing . | train | false |
37,993 | def _filter_unstarted_categories(category_map, course):
now = datetime.now(UTC())
result_map = {}
unfiltered_queue = [category_map]
filtered_queue = [result_map]
while unfiltered_queue:
unfiltered_map = unfiltered_queue.pop()
filtered_map = filtered_queue.pop()
filtered_map['children'] = []
filtered_map['entries'] = {}
filtered_map['subcategories'] = {}
for (child, c_type) in unfiltered_map['children']:
if ((child in unfiltered_map['entries']) and (c_type == TYPE_ENTRY)):
if (course.self_paced or (unfiltered_map['entries'][child]['start_date'] <= now)):
filtered_map['children'].append((child, c_type))
filtered_map['entries'][child] = {}
for key in unfiltered_map['entries'][child]:
if (key != 'start_date'):
filtered_map['entries'][child][key] = unfiltered_map['entries'][child][key]
else:
log.debug(u'Filtering out:%s with start_date: %s', child, unfiltered_map['entries'][child]['start_date'])
elif (course.self_paced or (unfiltered_map['subcategories'][child]['start_date'] < now)):
filtered_map['children'].append((child, c_type))
filtered_map['subcategories'][child] = {}
unfiltered_queue.append(unfiltered_map['subcategories'][child])
filtered_queue.append(filtered_map['subcategories'][child])
return result_map
| [
"def",
"_filter_unstarted_categories",
"(",
"category_map",
",",
"course",
")",
":",
"now",
"=",
"datetime",
".",
"now",
"(",
"UTC",
"(",
")",
")",
"result_map",
"=",
"{",
"}",
"unfiltered_queue",
"=",
"[",
"category_map",
"]",
"filtered_queue",
"=",
"[",
"result_map",
"]",
"while",
"unfiltered_queue",
":",
"unfiltered_map",
"=",
"unfiltered_queue",
".",
"pop",
"(",
")",
"filtered_map",
"=",
"filtered_queue",
".",
"pop",
"(",
")",
"filtered_map",
"[",
"'children'",
"]",
"=",
"[",
"]",
"filtered_map",
"[",
"'entries'",
"]",
"=",
"{",
"}",
"filtered_map",
"[",
"'subcategories'",
"]",
"=",
"{",
"}",
"for",
"(",
"child",
",",
"c_type",
")",
"in",
"unfiltered_map",
"[",
"'children'",
"]",
":",
"if",
"(",
"(",
"child",
"in",
"unfiltered_map",
"[",
"'entries'",
"]",
")",
"and",
"(",
"c_type",
"==",
"TYPE_ENTRY",
")",
")",
":",
"if",
"(",
"course",
".",
"self_paced",
"or",
"(",
"unfiltered_map",
"[",
"'entries'",
"]",
"[",
"child",
"]",
"[",
"'start_date'",
"]",
"<=",
"now",
")",
")",
":",
"filtered_map",
"[",
"'children'",
"]",
".",
"append",
"(",
"(",
"child",
",",
"c_type",
")",
")",
"filtered_map",
"[",
"'entries'",
"]",
"[",
"child",
"]",
"=",
"{",
"}",
"for",
"key",
"in",
"unfiltered_map",
"[",
"'entries'",
"]",
"[",
"child",
"]",
":",
"if",
"(",
"key",
"!=",
"'start_date'",
")",
":",
"filtered_map",
"[",
"'entries'",
"]",
"[",
"child",
"]",
"[",
"key",
"]",
"=",
"unfiltered_map",
"[",
"'entries'",
"]",
"[",
"child",
"]",
"[",
"key",
"]",
"else",
":",
"log",
".",
"debug",
"(",
"u'Filtering out:%s with start_date: %s'",
",",
"child",
",",
"unfiltered_map",
"[",
"'entries'",
"]",
"[",
"child",
"]",
"[",
"'start_date'",
"]",
")",
"elif",
"(",
"course",
".",
"self_paced",
"or",
"(",
"unfiltered_map",
"[",
"'subcategories'",
"]",
"[",
"child",
"]",
"[",
"'start_date'",
"]",
"<",
"now",
")",
")",
":",
"filtered_map",
"[",
"'children'",
"]",
".",
"append",
"(",
"(",
"child",
",",
"c_type",
")",
")",
"filtered_map",
"[",
"'subcategories'",
"]",
"[",
"child",
"]",
"=",
"{",
"}",
"unfiltered_queue",
".",
"append",
"(",
"unfiltered_map",
"[",
"'subcategories'",
"]",
"[",
"child",
"]",
")",
"filtered_queue",
".",
"append",
"(",
"filtered_map",
"[",
"'subcategories'",
"]",
"[",
"child",
"]",
")",
"return",
"result_map"
] | returns a subset of categories from the provided map which have not yet met the start date includes information about category children . | train | false |
37,994 | def find_points_of_interest(geotag, location):
area_found = False
area = ''
min_dist = None
near_bart = False
bart_dist = 'N/A'
bart = ''
for (a, coords) in settings.BOXES.items():
if in_box(geotag, coords):
area = a
area_found = True
for (station, coords) in settings.TRANSIT_STATIONS.items():
dist = coord_distance(coords[0], coords[1], geotag[0], geotag[1])
if (((min_dist is None) or (dist < min_dist)) and (dist < settings.MAX_TRANSIT_DIST)):
bart = station
near_bart = True
if ((min_dist is None) or (dist < min_dist)):
bart_dist = dist
if (len(area) == 0):
for hood in settings.NEIGHBORHOODS:
if (hood in location.lower()):
area = hood
return {'area_found': area_found, 'area': area, 'near_bart': near_bart, 'bart_dist': bart_dist, 'bart': bart}
| [
"def",
"find_points_of_interest",
"(",
"geotag",
",",
"location",
")",
":",
"area_found",
"=",
"False",
"area",
"=",
"''",
"min_dist",
"=",
"None",
"near_bart",
"=",
"False",
"bart_dist",
"=",
"'N/A'",
"bart",
"=",
"''",
"for",
"(",
"a",
",",
"coords",
")",
"in",
"settings",
".",
"BOXES",
".",
"items",
"(",
")",
":",
"if",
"in_box",
"(",
"geotag",
",",
"coords",
")",
":",
"area",
"=",
"a",
"area_found",
"=",
"True",
"for",
"(",
"station",
",",
"coords",
")",
"in",
"settings",
".",
"TRANSIT_STATIONS",
".",
"items",
"(",
")",
":",
"dist",
"=",
"coord_distance",
"(",
"coords",
"[",
"0",
"]",
",",
"coords",
"[",
"1",
"]",
",",
"geotag",
"[",
"0",
"]",
",",
"geotag",
"[",
"1",
"]",
")",
"if",
"(",
"(",
"(",
"min_dist",
"is",
"None",
")",
"or",
"(",
"dist",
"<",
"min_dist",
")",
")",
"and",
"(",
"dist",
"<",
"settings",
".",
"MAX_TRANSIT_DIST",
")",
")",
":",
"bart",
"=",
"station",
"near_bart",
"=",
"True",
"if",
"(",
"(",
"min_dist",
"is",
"None",
")",
"or",
"(",
"dist",
"<",
"min_dist",
")",
")",
":",
"bart_dist",
"=",
"dist",
"if",
"(",
"len",
"(",
"area",
")",
"==",
"0",
")",
":",
"for",
"hood",
"in",
"settings",
".",
"NEIGHBORHOODS",
":",
"if",
"(",
"hood",
"in",
"location",
".",
"lower",
"(",
")",
")",
":",
"area",
"=",
"hood",
"return",
"{",
"'area_found'",
":",
"area_found",
",",
"'area'",
":",
"area",
",",
"'near_bart'",
":",
"near_bart",
",",
"'bart_dist'",
":",
"bart_dist",
",",
"'bart'",
":",
"bart",
"}"
] | find points of interest . | train | false |
37,995 | def new(rsa_key):
return PKCS115_SigScheme(rsa_key)
| [
"def",
"new",
"(",
"rsa_key",
")",
":",
"return",
"PKCS115_SigScheme",
"(",
"rsa_key",
")"
] | create a new cast-128 cipher . | train | false |
37,996 | def _sd_version():
return salt.utils.systemd.version(__context__)
| [
"def",
"_sd_version",
"(",
")",
":",
"return",
"salt",
".",
"utils",
".",
"systemd",
".",
"version",
"(",
"__context__",
")"
] | returns __context__ . | train | false |
37,997 | def translate_file(input_path, output_path):
js = get_file_contents(input_path)
py_code = translate_js(js)
lib_name = os.path.basename(output_path).split('.')[0]
head = ("__all__ = [%s]\n\n# Don't look below, you will not understand this Python code :) I don't.\n\n" % repr(lib_name))
tail = ('\n\n# Add lib to the module scope\n%s = var.to_python()' % lib_name)
out = ((head + py_code) + tail)
write_file_contents(output_path, out)
| [
"def",
"translate_file",
"(",
"input_path",
",",
"output_path",
")",
":",
"js",
"=",
"get_file_contents",
"(",
"input_path",
")",
"py_code",
"=",
"translate_js",
"(",
"js",
")",
"lib_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"output_path",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"head",
"=",
"(",
"\"__all__ = [%s]\\n\\n# Don't look below, you will not understand this Python code :) I don't.\\n\\n\"",
"%",
"repr",
"(",
"lib_name",
")",
")",
"tail",
"=",
"(",
"'\\n\\n# Add lib to the module scope\\n%s = var.to_python()'",
"%",
"lib_name",
")",
"out",
"=",
"(",
"(",
"head",
"+",
"py_code",
")",
"+",
"tail",
")",
"write_file_contents",
"(",
"output_path",
",",
"out",
")"
] | translates input js file to python and saves the it to the output path . | train | true |
37,998 | def rule_expand(component, text):
global rline_mpstate
if ((component[0] == '<') and (component[(-1)] == '>')):
return component[1:(-1)].split('|')
if (component in rline_mpstate.completion_functions):
return rline_mpstate.completion_functions[component](text)
return [component]
| [
"def",
"rule_expand",
"(",
"component",
",",
"text",
")",
":",
"global",
"rline_mpstate",
"if",
"(",
"(",
"component",
"[",
"0",
"]",
"==",
"'<'",
")",
"and",
"(",
"component",
"[",
"(",
"-",
"1",
")",
"]",
"==",
"'>'",
")",
")",
":",
"return",
"component",
"[",
"1",
":",
"(",
"-",
"1",
")",
"]",
".",
"split",
"(",
"'|'",
")",
"if",
"(",
"component",
"in",
"rline_mpstate",
".",
"completion_functions",
")",
":",
"return",
"rline_mpstate",
".",
"completion_functions",
"[",
"component",
"]",
"(",
"text",
")",
"return",
"[",
"component",
"]"
] | expand one rule component . | train | true |
38,000 | def unset_mount(module, args):
to_write = []
changed = False
escaped_name = _escape_fstab(args['name'])
for line in open(args['fstab'], 'r').readlines():
if (not line.strip()):
to_write.append(line)
continue
if line.strip().startswith('#'):
to_write.append(line)
continue
if (((get_platform() == 'SunOS') and (len(line.split()) != 7)) or ((get_platform() != 'SunOS') and (len(line.split()) != 6))):
to_write.append(line)
continue
ld = {}
if (get_platform() == 'SunOS'):
(ld['src'], dash, ld['name'], ld['fstype'], ld['passno'], ld['boot'], ld['opts']) = line.split()
else:
(ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno']) = line.split()
if (ld['name'] != escaped_name):
to_write.append(line)
continue
changed = True
if (changed and (not module.check_mode)):
write_fstab(to_write, args['fstab'])
return (args['name'], changed)
| [
"def",
"unset_mount",
"(",
"module",
",",
"args",
")",
":",
"to_write",
"=",
"[",
"]",
"changed",
"=",
"False",
"escaped_name",
"=",
"_escape_fstab",
"(",
"args",
"[",
"'name'",
"]",
")",
"for",
"line",
"in",
"open",
"(",
"args",
"[",
"'fstab'",
"]",
",",
"'r'",
")",
".",
"readlines",
"(",
")",
":",
"if",
"(",
"not",
"line",
".",
"strip",
"(",
")",
")",
":",
"to_write",
".",
"append",
"(",
"line",
")",
"continue",
"if",
"line",
".",
"strip",
"(",
")",
".",
"startswith",
"(",
"'#'",
")",
":",
"to_write",
".",
"append",
"(",
"line",
")",
"continue",
"if",
"(",
"(",
"(",
"get_platform",
"(",
")",
"==",
"'SunOS'",
")",
"and",
"(",
"len",
"(",
"line",
".",
"split",
"(",
")",
")",
"!=",
"7",
")",
")",
"or",
"(",
"(",
"get_platform",
"(",
")",
"!=",
"'SunOS'",
")",
"and",
"(",
"len",
"(",
"line",
".",
"split",
"(",
")",
")",
"!=",
"6",
")",
")",
")",
":",
"to_write",
".",
"append",
"(",
"line",
")",
"continue",
"ld",
"=",
"{",
"}",
"if",
"(",
"get_platform",
"(",
")",
"==",
"'SunOS'",
")",
":",
"(",
"ld",
"[",
"'src'",
"]",
",",
"dash",
",",
"ld",
"[",
"'name'",
"]",
",",
"ld",
"[",
"'fstype'",
"]",
",",
"ld",
"[",
"'passno'",
"]",
",",
"ld",
"[",
"'boot'",
"]",
",",
"ld",
"[",
"'opts'",
"]",
")",
"=",
"line",
".",
"split",
"(",
")",
"else",
":",
"(",
"ld",
"[",
"'src'",
"]",
",",
"ld",
"[",
"'name'",
"]",
",",
"ld",
"[",
"'fstype'",
"]",
",",
"ld",
"[",
"'opts'",
"]",
",",
"ld",
"[",
"'dump'",
"]",
",",
"ld",
"[",
"'passno'",
"]",
")",
"=",
"line",
".",
"split",
"(",
")",
"if",
"(",
"ld",
"[",
"'name'",
"]",
"!=",
"escaped_name",
")",
":",
"to_write",
".",
"append",
"(",
"line",
")",
"continue",
"changed",
"=",
"True",
"if",
"(",
"changed",
"and",
"(",
"not",
"module",
".",
"check_mode",
")",
")",
":",
"write_fstab",
"(",
"to_write",
",",
"args",
"[",
"'fstab'",
"]",
")",
"return",
"(",
"args",
"[",
"'name'",
"]",
",",
"changed",
")"
] | remove a mount point from fstab . | train | false |
38,001 | def test_history_import_abspath_in_metadata():
with HistoryArchive() as history_archive:
history_archive.write_metafiles(dataset_file_name=os.path.join(history_archive.temp_directory, 'outside.txt'))
history_archive.write_file('datasets/Pasted_Entry_1.txt', 'foo')
history_archive.write_outside()
_run_jihaw_cleanup(history_archive, 'Absolute path in datasets_attrs.txt allowed')
| [
"def",
"test_history_import_abspath_in_metadata",
"(",
")",
":",
"with",
"HistoryArchive",
"(",
")",
"as",
"history_archive",
":",
"history_archive",
".",
"write_metafiles",
"(",
"dataset_file_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"history_archive",
".",
"temp_directory",
",",
"'outside.txt'",
")",
")",
"history_archive",
".",
"write_file",
"(",
"'datasets/Pasted_Entry_1.txt'",
",",
"'foo'",
")",
"history_archive",
".",
"write_outside",
"(",
")",
"_run_jihaw_cleanup",
"(",
"history_archive",
",",
"'Absolute path in datasets_attrs.txt allowed'",
")"
] | ensure that dataset_attrs . | train | false |
38,003 | def safeExpandUser(filepath):
retVal = filepath
try:
retVal = os.path.expanduser(filepath)
except UnicodeError:
_ = locale.getdefaultlocale()
encoding = (_[1] if (_ and (len(_) > 1)) else UNICODE_ENCODING)
retVal = getUnicode(os.path.expanduser(filepath.encode(encoding)), encoding=encoding)
return retVal
| [
"def",
"safeExpandUser",
"(",
"filepath",
")",
":",
"retVal",
"=",
"filepath",
"try",
":",
"retVal",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"filepath",
")",
"except",
"UnicodeError",
":",
"_",
"=",
"locale",
".",
"getdefaultlocale",
"(",
")",
"encoding",
"=",
"(",
"_",
"[",
"1",
"]",
"if",
"(",
"_",
"and",
"(",
"len",
"(",
"_",
")",
">",
"1",
")",
")",
"else",
"UNICODE_ENCODING",
")",
"retVal",
"=",
"getUnicode",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"filepath",
".",
"encode",
"(",
"encoding",
")",
")",
",",
"encoding",
"=",
"encoding",
")",
"return",
"retVal"
] | patch for a python issue18171 . | train | false |
38,004 | def quota_get(context, project_id, resource, user_id=None):
return IMPL.quota_get(context, project_id, resource, user_id=user_id)
| [
"def",
"quota_get",
"(",
"context",
",",
"project_id",
",",
"resource",
",",
"user_id",
"=",
"None",
")",
":",
"return",
"IMPL",
".",
"quota_get",
"(",
"context",
",",
"project_id",
",",
"resource",
",",
"user_id",
"=",
"user_id",
")"
] | retrieve a quota or raise if it does not exist . | train | false |
38,005 | @treeio_login_required
def ajax_agent_lookup(request, response_format='html'):
agents = []
if (request.GET and ('term' in request.GET)):
agents = ServiceAgent.objects.filter((Q(related_user__name__icontains=request.GET['term']) | Q(related_user__contact__name__icontains=request.GET['term'])))
return render_to_response('services/ajax_agent_lookup', {'agents': agents}, context_instance=RequestContext(request), response_format=response_format)
| [
"@",
"treeio_login_required",
"def",
"ajax_agent_lookup",
"(",
"request",
",",
"response_format",
"=",
"'html'",
")",
":",
"agents",
"=",
"[",
"]",
"if",
"(",
"request",
".",
"GET",
"and",
"(",
"'term'",
"in",
"request",
".",
"GET",
")",
")",
":",
"agents",
"=",
"ServiceAgent",
".",
"objects",
".",
"filter",
"(",
"(",
"Q",
"(",
"related_user__name__icontains",
"=",
"request",
".",
"GET",
"[",
"'term'",
"]",
")",
"|",
"Q",
"(",
"related_user__contact__name__icontains",
"=",
"request",
".",
"GET",
"[",
"'term'",
"]",
")",
")",
")",
"return",
"render_to_response",
"(",
"'services/ajax_agent_lookup'",
",",
"{",
"'agents'",
":",
"agents",
"}",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
",",
"response_format",
"=",
"response_format",
")"
] | returns a list of matching agents . | train | false |
38,006 | def convertToTransformedPaths(dictionary):
if ((dictionary.__class__ == Vector3) or (dictionary.__class__.__name__ == 'Vector3Index')):
return
keys = getKeys(dictionary)
if (keys == None):
return
for key in keys:
value = dictionary[key]
if (value.__class__.__name__ == 'XMLElement'):
if (value.object != None):
dictionary[key] = value.object.getTransformedPaths()
else:
convertToTransformedPaths(dictionary[key])
| [
"def",
"convertToTransformedPaths",
"(",
"dictionary",
")",
":",
"if",
"(",
"(",
"dictionary",
".",
"__class__",
"==",
"Vector3",
")",
"or",
"(",
"dictionary",
".",
"__class__",
".",
"__name__",
"==",
"'Vector3Index'",
")",
")",
":",
"return",
"keys",
"=",
"getKeys",
"(",
"dictionary",
")",
"if",
"(",
"keys",
"==",
"None",
")",
":",
"return",
"for",
"key",
"in",
"keys",
":",
"value",
"=",
"dictionary",
"[",
"key",
"]",
"if",
"(",
"value",
".",
"__class__",
".",
"__name__",
"==",
"'XMLElement'",
")",
":",
"if",
"(",
"value",
".",
"object",
"!=",
"None",
")",
":",
"dictionary",
"[",
"key",
"]",
"=",
"value",
".",
"object",
".",
"getTransformedPaths",
"(",
")",
"else",
":",
"convertToTransformedPaths",
"(",
"dictionary",
"[",
"key",
"]",
")"
] | recursively convert any xmlelements to paths . | train | false |
38,007 | def create_html_filename(coord_filename, name_ending):
outpath = (coord_filename.split('/')[(-1)] + name_ending)
return outpath
| [
"def",
"create_html_filename",
"(",
"coord_filename",
",",
"name_ending",
")",
":",
"outpath",
"=",
"(",
"coord_filename",
".",
"split",
"(",
"'/'",
")",
"[",
"(",
"-",
"1",
")",
"]",
"+",
"name_ending",
")",
"return",
"outpath"
] | generate html filename using the given coord filename . | train | false |
38,009 | def filter_pathdir(val):
return os.path.dirname((val or u''))
| [
"def",
"filter_pathdir",
"(",
"val",
")",
":",
"return",
"os",
".",
"path",
".",
"dirname",
"(",
"(",
"val",
"or",
"u''",
")",
")"
] | directory containing the given path . | train | false |
38,011 | def in_parallel(changes, sleep_when_empty=timedelta(seconds=60)):
if all((isinstance(c, NoOp) for c in changes)):
sleep = (min((c.sleep for c in changes)) if changes else sleep_when_empty)
return NoOp(sleep=sleep)
return _InParallel(changes=changes)
| [
"def",
"in_parallel",
"(",
"changes",
",",
"sleep_when_empty",
"=",
"timedelta",
"(",
"seconds",
"=",
"60",
")",
")",
":",
"if",
"all",
"(",
"(",
"isinstance",
"(",
"c",
",",
"NoOp",
")",
"for",
"c",
"in",
"changes",
")",
")",
":",
"sleep",
"=",
"(",
"min",
"(",
"(",
"c",
".",
"sleep",
"for",
"c",
"in",
"changes",
")",
")",
"if",
"changes",
"else",
"sleep_when_empty",
")",
"return",
"NoOp",
"(",
"sleep",
"=",
"sleep",
")",
"return",
"_InParallel",
"(",
"changes",
"=",
"changes",
")"
] | run a series of changes in parallel . | train | false |
38,012 | def test_nm1_wrong_nn_obj():
ratio = 'auto'
nn = 'rnd'
nm1 = NearMiss(ratio=ratio, random_state=RND_SEED, version=VERSION_NEARMISS, return_indices=True, n_neighbors=nn)
assert_raises(ValueError, nm1.fit_sample, X, Y)
| [
"def",
"test_nm1_wrong_nn_obj",
"(",
")",
":",
"ratio",
"=",
"'auto'",
"nn",
"=",
"'rnd'",
"nm1",
"=",
"NearMiss",
"(",
"ratio",
"=",
"ratio",
",",
"random_state",
"=",
"RND_SEED",
",",
"version",
"=",
"VERSION_NEARMISS",
",",
"return_indices",
"=",
"True",
",",
"n_neighbors",
"=",
"nn",
")",
"assert_raises",
"(",
"ValueError",
",",
"nm1",
".",
"fit_sample",
",",
"X",
",",
"Y",
")"
] | test either if an error is raised with wrong nn object . | train | false |
38,013 | def _process_null_records(table, col_name, check_fkeys, delete=False):
records = 0
if (col_name in table.columns):
if check_fkeys:
fkey_found = False
fkeys = (table.c[col_name].foreign_keys or [])
for fkey in fkeys:
if (fkey.column.table.name == 'instances'):
fkey_found = True
if (not fkey_found):
return 0
if delete:
records = table.delete().where((table.c[col_name] == null())).execute().rowcount
else:
records = len(list(table.select().where((table.c[col_name] == null())).execute()))
return records
| [
"def",
"_process_null_records",
"(",
"table",
",",
"col_name",
",",
"check_fkeys",
",",
"delete",
"=",
"False",
")",
":",
"records",
"=",
"0",
"if",
"(",
"col_name",
"in",
"table",
".",
"columns",
")",
":",
"if",
"check_fkeys",
":",
"fkey_found",
"=",
"False",
"fkeys",
"=",
"(",
"table",
".",
"c",
"[",
"col_name",
"]",
".",
"foreign_keys",
"or",
"[",
"]",
")",
"for",
"fkey",
"in",
"fkeys",
":",
"if",
"(",
"fkey",
".",
"column",
".",
"table",
".",
"name",
"==",
"'instances'",
")",
":",
"fkey_found",
"=",
"True",
"if",
"(",
"not",
"fkey_found",
")",
":",
"return",
"0",
"if",
"delete",
":",
"records",
"=",
"table",
".",
"delete",
"(",
")",
".",
"where",
"(",
"(",
"table",
".",
"c",
"[",
"col_name",
"]",
"==",
"null",
"(",
")",
")",
")",
".",
"execute",
"(",
")",
".",
"rowcount",
"else",
":",
"records",
"=",
"len",
"(",
"list",
"(",
"table",
".",
"select",
"(",
")",
".",
"where",
"(",
"(",
"table",
".",
"c",
"[",
"col_name",
"]",
"==",
"null",
"(",
")",
")",
")",
".",
"execute",
"(",
")",
")",
")",
"return",
"records"
] | queries the database and optionally deletes the null records . | train | false |
38,017 | @task
def check_spam_account(instance_id, **kwargs):
from mozillians.users.models import AbuseReport, UserProfile
spam = akismet_spam_check(**kwargs)
profile = get_object_or_none(UserProfile, id=instance_id)
if (spam and profile):
kwargs = {'type': AbuseReport.TYPE_SPAM, 'profile': profile, 'reporter': None, 'is_akismet': True}
AbuseReport.objects.get_or_create(**kwargs)
| [
"@",
"task",
"def",
"check_spam_account",
"(",
"instance_id",
",",
"**",
"kwargs",
")",
":",
"from",
"mozillians",
".",
"users",
".",
"models",
"import",
"AbuseReport",
",",
"UserProfile",
"spam",
"=",
"akismet_spam_check",
"(",
"**",
"kwargs",
")",
"profile",
"=",
"get_object_or_none",
"(",
"UserProfile",
",",
"id",
"=",
"instance_id",
")",
"if",
"(",
"spam",
"and",
"profile",
")",
":",
"kwargs",
"=",
"{",
"'type'",
":",
"AbuseReport",
".",
"TYPE_SPAM",
",",
"'profile'",
":",
"profile",
",",
"'reporter'",
":",
"None",
",",
"'is_akismet'",
":",
"True",
"}",
"AbuseReport",
".",
"objects",
".",
"get_or_create",
"(",
"**",
"kwargs",
")"
] | task to check if profile is spam according to akismet . | train | false |
38,019 | def retry_until_ok(func, *args, **kwargs):
count = 0
while True:
try:
return func(*args, **kwargs)
except RuntimeError:
raise
except Exception:
count += 1
if (count > 120):
LOG.exception('Call to %s failed too many times in retry_until_ok', func)
raise
time.sleep(1)
| [
"def",
"retry_until_ok",
"(",
"func",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"count",
"=",
"0",
"while",
"True",
":",
"try",
":",
"return",
"func",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"except",
"RuntimeError",
":",
"raise",
"except",
"Exception",
":",
"count",
"+=",
"1",
"if",
"(",
"count",
">",
"120",
")",
":",
"LOG",
".",
"exception",
"(",
"'Call to %s failed too many times in retry_until_ok'",
",",
"func",
")",
"raise",
"time",
".",
"sleep",
"(",
"1",
")"
] | retry code block until it succeeds . | train | false |
38,020 | def getStrokeRadiusByPrefix(prefix, xmlElement):
strokeRadius = getFloatByPrefixBeginEnd((prefix + 'strokeRadius'), (prefix + 'strokeWidth'), 1.0, xmlElement)
return getFloatByPrefixBeginEnd((prefix + 'radius'), (prefix + 'diameter'), strokeRadius, xmlElement)
| [
"def",
"getStrokeRadiusByPrefix",
"(",
"prefix",
",",
"xmlElement",
")",
":",
"strokeRadius",
"=",
"getFloatByPrefixBeginEnd",
"(",
"(",
"prefix",
"+",
"'strokeRadius'",
")",
",",
"(",
"prefix",
"+",
"'strokeWidth'",
")",
",",
"1.0",
",",
"xmlElement",
")",
"return",
"getFloatByPrefixBeginEnd",
"(",
"(",
"prefix",
"+",
"'radius'",
")",
",",
"(",
"prefix",
"+",
"'diameter'",
")",
",",
"strokeRadius",
",",
"xmlElement",
")"
] | get strokeradius by prefix . | train | false |
38,021 | def is_soon(dt, window):
soon = (utcnow() + datetime.timedelta(seconds=window))
return (normalize_time(dt) <= soon)
| [
"def",
"is_soon",
"(",
"dt",
",",
"window",
")",
":",
"soon",
"=",
"(",
"utcnow",
"(",
")",
"+",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"window",
")",
")",
"return",
"(",
"normalize_time",
"(",
"dt",
")",
"<=",
"soon",
")"
] | determines if time is going to happen in the next window seconds . | train | true |
38,022 | def tuple_operator_in(model_pk, ids):
l = []
for id in ids:
k = []
for i in range(len(model_pk)):
k.append(eq(model_pk[i], id[i]))
l.append(and_(*k))
if (len(l) >= 1):
return or_(*l)
else:
return None
| [
"def",
"tuple_operator_in",
"(",
"model_pk",
",",
"ids",
")",
":",
"l",
"=",
"[",
"]",
"for",
"id",
"in",
"ids",
":",
"k",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"model_pk",
")",
")",
":",
"k",
".",
"append",
"(",
"eq",
"(",
"model_pk",
"[",
"i",
"]",
",",
"id",
"[",
"i",
"]",
")",
")",
"l",
".",
"append",
"(",
"and_",
"(",
"*",
"k",
")",
")",
"if",
"(",
"len",
"(",
"l",
")",
">=",
"1",
")",
":",
"return",
"or_",
"(",
"*",
"l",
")",
"else",
":",
"return",
"None"
] | the tuple_ operator only works on certain engines like mysql or postgresql . | train | false |
38,023 | def cartesian_product(G, H):
if (not (G.is_directed() == H.is_directed())):
raise nx.NetworkXError('G and H must be both directed or', 'both undirected')
GH = _init_product_graph(G, H)
GH.add_nodes_from(_node_product(G, H))
GH.add_edges_from(_edges_cross_nodes(G, H))
GH.add_edges_from(_nodes_cross_edges(G, H))
GH.name = (((('Cartesian product(' + G.name) + ',') + H.name) + ')')
return GH
| [
"def",
"cartesian_product",
"(",
"G",
",",
"H",
")",
":",
"if",
"(",
"not",
"(",
"G",
".",
"is_directed",
"(",
")",
"==",
"H",
".",
"is_directed",
"(",
")",
")",
")",
":",
"raise",
"nx",
".",
"NetworkXError",
"(",
"'G and H must be both directed or'",
",",
"'both undirected'",
")",
"GH",
"=",
"_init_product_graph",
"(",
"G",
",",
"H",
")",
"GH",
".",
"add_nodes_from",
"(",
"_node_product",
"(",
"G",
",",
"H",
")",
")",
"GH",
".",
"add_edges_from",
"(",
"_edges_cross_nodes",
"(",
"G",
",",
"H",
")",
")",
"GH",
".",
"add_edges_from",
"(",
"_nodes_cross_edges",
"(",
"G",
",",
"H",
")",
")",
"GH",
".",
"name",
"=",
"(",
"(",
"(",
"(",
"'Cartesian product('",
"+",
"G",
".",
"name",
")",
"+",
"','",
")",
"+",
"H",
".",
"name",
")",
"+",
"')'",
")",
"return",
"GH"
] | numpy version of itertools . | train | false |
38,025 | def daemon(path='.', address=None, port=None):
backend = FileSystemBackend(path)
server = TCPGitServer(backend, address, port)
server.serve_forever()
| [
"def",
"daemon",
"(",
"path",
"=",
"'.'",
",",
"address",
"=",
"None",
",",
"port",
"=",
"None",
")",
":",
"backend",
"=",
"FileSystemBackend",
"(",
"path",
")",
"server",
"=",
"TCPGitServer",
"(",
"backend",
",",
"address",
",",
"port",
")",
"server",
".",
"serve_forever",
"(",
")"
] | run a daemon serving git requests over tcp/ip . | train | false |
38,026 | def std_spec(batch_size, isotropic=True):
return DataSpec(batch_size=batch_size, scale_size=256, crop_size=224, isotropic=isotropic)
| [
"def",
"std_spec",
"(",
"batch_size",
",",
"isotropic",
"=",
"True",
")",
":",
"return",
"DataSpec",
"(",
"batch_size",
"=",
"batch_size",
",",
"scale_size",
"=",
"256",
",",
"crop_size",
"=",
"224",
",",
"isotropic",
"=",
"isotropic",
")"
] | parameters commonly used by "post-alexnet" architectures . | train | false |
38,027 | def generate_help_text():
def generate_cmds_with_explanations(summary, cmds):
text = '{0}:\n'.format(summary)
for (cmd, explanation) in cmds:
text += ' DCTB {0:<10} DCTB {1:<20}\n'.format(cmd, explanation)
return (text + '\n')
text = generate_cmds_with_explanations('Commands', ROOT_COMMANDS.items())
text += generate_cmds_with_explanations('Options', OPTION_NAMES.items())
text += generate_cmds_with_explanations('Actions', ACTIONS.items())
text += generate_cmds_with_explanations('Headers', HEADER_NAMES.items())
return text
| [
"def",
"generate_help_text",
"(",
")",
":",
"def",
"generate_cmds_with_explanations",
"(",
"summary",
",",
"cmds",
")",
":",
"text",
"=",
"'{0}:\\n'",
".",
"format",
"(",
"summary",
")",
"for",
"(",
"cmd",
",",
"explanation",
")",
"in",
"cmds",
":",
"text",
"+=",
"' DCTB {0:<10} DCTB {1:<20}\\n'",
".",
"format",
"(",
"cmd",
",",
"explanation",
")",
"return",
"(",
"text",
"+",
"'\\n'",
")",
"text",
"=",
"generate_cmds_with_explanations",
"(",
"'Commands'",
",",
"ROOT_COMMANDS",
".",
"items",
"(",
")",
")",
"text",
"+=",
"generate_cmds_with_explanations",
"(",
"'Options'",
",",
"OPTION_NAMES",
".",
"items",
"(",
")",
")",
"text",
"+=",
"generate_cmds_with_explanations",
"(",
"'Actions'",
",",
"ACTIONS",
".",
"items",
"(",
")",
")",
"text",
"+=",
"generate_cmds_with_explanations",
"(",
"'Headers'",
",",
"HEADER_NAMES",
".",
"items",
"(",
")",
")",
"return",
"text"
] | return a formatted string listing commands . | train | true |
@require_context
@contextlib.contextmanager
def pinned(*arylist):
    """Context manager that temporarily page-locks (pins) a sequence of
    host ndarrays for the duration of the ``with`` block.

    Parameters
    ----------
    *arylist
        Host ndarrays to pin.  The pin handles are dropped when the block
        exits, whether it finishes normally or raises.
    """
    pmlist = []
    for ary in arylist:
        pm = current_context().mempin(ary, driver.host_pointer(ary),
                                      driver.host_memory_size(ary),
                                      mapped=False)
        pmlist.append(pm)
    try:
        yield
    finally:
        # Release the pins deterministically even when the managed block
        # raises; the original only reached this ``del`` on the normal
        # path and relied on generator-frame teardown otherwise.
        del pmlist
| [
"@",
"require_context",
"@",
"contextlib",
".",
"contextmanager",
"def",
"pinned",
"(",
"*",
"arylist",
")",
":",
"pmlist",
"=",
"[",
"]",
"for",
"ary",
"in",
"arylist",
":",
"pm",
"=",
"current_context",
"(",
")",
".",
"mempin",
"(",
"ary",
",",
"driver",
".",
"host_pointer",
"(",
"ary",
")",
",",
"driver",
".",
"host_memory_size",
"(",
"ary",
")",
",",
"mapped",
"=",
"False",
")",
"pmlist",
".",
"append",
"(",
"pm",
")",
"(",
"yield",
")",
"del",
"pmlist"
] | a context manager for temporary pinning a sequence of host ndarrays . | train | false |
def check_window_qual_scores(qual_scores, window=50, min_average=25):
    """Check that every sliding window of ``window`` quality scores has an
    average of at least ``min_average``.

    Parameters
    ----------
    qual_scores : sequence of numbers
        Per-position quality scores.
    window : int
        Sliding-window length; clamped to ``len(qual_scores)``.
    min_average : number
        Minimum acceptable mean score per window.

    Returns
    -------
    tuple
        ``(passed, idx)`` where ``passed`` is True when all windows meet
        the threshold and ``idx`` is the start of the first failing window
        (or of the last window examined when all pass).
    """
    l = len(qual_scores)
    window = min(window, l)
    if window == 0:
        # Nothing to check: vacuously true.  Return a tuple so the return
        # type is consistent (the original returned a bare bool here while
        # every other path returned (bool, idx)).
        return (True, 0)
    window_score = sum(qual_scores[:window])
    idx = 0
    while (window_score / float(window)) >= min_average and idx < (l - window):
        # Slide the window one position: add the entering score and drop
        # the leaving one, so each step is O(1) instead of re-summing.
        window_score += qual_scores[idx + window] - qual_scores[idx]
        idx += 1
    # The loop can also stop at the final position because that last window
    # fails, so re-verify its average before declaring success (the original
    # skipped this check and could report a failing last window as passing).
    if idx == (l - window) and (window_score / float(window)) >= min_average:
        return (True, idx)
    return (False, idx)
| [
"def",
"check_window_qual_scores",
"(",
"qual_scores",
",",
"window",
"=",
"50",
",",
"min_average",
"=",
"25",
")",
":",
"l",
"=",
"len",
"(",
"qual_scores",
")",
"window",
"=",
"min",
"(",
"window",
",",
"l",
")",
"if",
"(",
"window",
"==",
"0",
")",
":",
"return",
"True",
"window_score",
"=",
"sum",
"(",
"qual_scores",
"[",
":",
"window",
"]",
")",
"idx",
"=",
"0",
"while",
"(",
"(",
"(",
"window_score",
"/",
"float",
"(",
"window",
")",
")",
">=",
"min_average",
")",
"and",
"(",
"idx",
"<",
"(",
"l",
"-",
"window",
")",
")",
")",
":",
"window_score",
"+=",
"(",
"qual_scores",
"[",
"(",
"idx",
"+",
"window",
")",
"]",
"-",
"qual_scores",
"[",
"idx",
"]",
")",
"idx",
"+=",
"1",
"if",
"(",
"idx",
"==",
"(",
"l",
"-",
"window",
")",
")",
":",
"return",
"(",
"True",
",",
"idx",
")",
"else",
":",
"return",
"(",
"False",
",",
"idx",
")"
] | check that all windows have ave qual score > threshold . | train | false |
def low_index_subgroups(G, N, Y=None):
    """Implement the Low Index Subgroups algorithm: collect coset tables
    for subgroups of ``G`` of index at most ``N``.

    Parameters
    ----------
    G
        A finitely presented group.
    N : int
        Maximum subgroup index to search for.
    Y : list, optional
        Words each subgroup is required to contain (passed through to
        ``descendant_subgroups``).

    Returns
    -------
    list
        CosetTable instances, one per subgroup found.
    """
    if Y is None:
        # Fresh list per call; the original used a mutable default
        # argument (Y=[]), which is shared across invocations.
        Y = []
    C = CosetTable(G, [])
    R = G.relators()
    len_short_rel = 5
    # Partition the relators: long ones (R2) are handled lazily, short ones
    # (R1) are cyclically reduced and closed under conjugation up front.
    R2 = set([rel for rel in R if (len(rel) > len_short_rel)])
    R1 = set([rel.identity_cyclic_reduction() for rel in (set(R) - R2)])
    R1_c_list = C.conjugates(R1)
    S = []
    descendant_subgroups(S, C, R1_c_list, C.A[0], R2, N, Y)
    return S
| [
"def",
"low_index_subgroups",
"(",
"G",
",",
"N",
",",
"Y",
"=",
"[",
"]",
")",
":",
"C",
"=",
"CosetTable",
"(",
"G",
",",
"[",
"]",
")",
"R",
"=",
"G",
".",
"relators",
"(",
")",
"len_short_rel",
"=",
"5",
"R2",
"=",
"set",
"(",
"[",
"rel",
"for",
"rel",
"in",
"R",
"if",
"(",
"len",
"(",
"rel",
")",
">",
"len_short_rel",
")",
"]",
")",
"R1",
"=",
"set",
"(",
"[",
"rel",
".",
"identity_cyclic_reduction",
"(",
")",
"for",
"rel",
"in",
"(",
"set",
"(",
"R",
")",
"-",
"R2",
")",
"]",
")",
"R1_c_list",
"=",
"C",
".",
"conjugates",
"(",
"R1",
")",
"S",
"=",
"[",
"]",
"descendant_subgroups",
"(",
"S",
",",
"C",
",",
"R1_c_list",
",",
"C",
".",
"A",
"[",
"0",
"]",
",",
"R2",
",",
"N",
",",
"Y",
")",
"return",
"S"
] | implements the low index subgroups algorithm . | train | false |
38,032 | def copy_tree(src, dst, preserve_mode=1, preserve_times=1, preserve_symlinks=0, update=0, verbose=1, dry_run=0):
    """Copy the directory tree rooted at ``src`` into ``dst`` and return the
    list of destination paths produced (or that would be produced when
    ``dry_run`` is set).

    ``preserve_mode``, ``preserve_times`` and ``update`` are forwarded to
    ``distutils.file_util.copy_file`` for each regular file — see that
    function for their exact semantics.  When ``preserve_symlinks`` is true,
    symlinks are recreated rather than having their targets copied.

    Raises DistutilsFileError when ``src`` is not a directory, or when it
    cannot be listed (unless ``dry_run`` is set, in which case a listing
    error is treated as an empty tree).
    """
    from distutils.file_util import copy_file
    if ((not dry_run) and (not os.path.isdir(src))):
        raise DistutilsFileError(("cannot copy tree '%s': not a directory" % src))
    try:
        names = os.listdir(src)
    except OSError as e:
        if dry_run:
            # Under dry_run the source may legitimately not exist yet.
            names = []
        else:
            raise DistutilsFileError(("error listing files in '%s': %s" % (src, e.strerror)))
    if (not dry_run):
        # Ensure the destination directory (and parents) exist.
        mkpath(dst, verbose=verbose)
    outputs = []
    for n in names:
        src_name = os.path.join(src, n)
        dst_name = os.path.join(dst, n)
        if n.startswith('.nfs'):
            # Skip NFS rename files -- transient artifacts left behind when
            # an open file is deleted on an NFS mount.
            continue
        if (preserve_symlinks and os.path.islink(src_name)):
            # Recreate the symlink rather than copying what it points at.
            link_dest = os.readlink(src_name)
            if (verbose >= 1):
                log.info('linking %s -> %s', dst_name, link_dest)
            if (not dry_run):
                os.symlink(link_dest, dst_name)
            outputs.append(dst_name)
        elif os.path.isdir(src_name):
            # Recurse into subdirectories, accumulating their outputs.
            outputs.extend(copy_tree(src_name, dst_name, preserve_mode, preserve_times, preserve_symlinks, update, verbose=verbose, dry_run=dry_run))
        else:
            copy_file(src_name, dst_name, preserve_mode, preserve_times, update, verbose=verbose, dry_run=dry_run)
            outputs.append(dst_name)
    return outputs
| [
"def",
"copy_tree",
"(",
"src",
",",
"dst",
",",
"preserve_mode",
"=",
"1",
",",
"preserve_times",
"=",
"1",
",",
"preserve_symlinks",
"=",
"0",
",",
"update",
"=",
"0",
",",
"verbose",
"=",
"1",
",",
"dry_run",
"=",
"0",
")",
":",
"from",
"distutils",
".",
"file_util",
"import",
"copy_file",
"if",
"(",
"(",
"not",
"dry_run",
")",
"and",
"(",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"src",
")",
")",
")",
":",
"raise",
"DistutilsFileError",
"(",
"(",
"\"cannot copy tree '%s': not a directory\"",
"%",
"src",
")",
")",
"try",
":",
"names",
"=",
"os",
".",
"listdir",
"(",
"src",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"dry_run",
":",
"names",
"=",
"[",
"]",
"else",
":",
"raise",
"DistutilsFileError",
"(",
"(",
"\"error listing files in '%s': %s\"",
"%",
"(",
"src",
",",
"e",
".",
"strerror",
")",
")",
")",
"if",
"(",
"not",
"dry_run",
")",
":",
"mkpath",
"(",
"dst",
",",
"verbose",
"=",
"verbose",
")",
"outputs",
"=",
"[",
"]",
"for",
"n",
"in",
"names",
":",
"src_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"src",
",",
"n",
")",
"dst_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dst",
",",
"n",
")",
"if",
"n",
".",
"startswith",
"(",
"'.nfs'",
")",
":",
"continue",
"if",
"(",
"preserve_symlinks",
"and",
"os",
".",
"path",
".",
"islink",
"(",
"src_name",
")",
")",
":",
"link_dest",
"=",
"os",
".",
"readlink",
"(",
"src_name",
")",
"if",
"(",
"verbose",
">=",
"1",
")",
":",
"log",
".",
"info",
"(",
"'linking %s -> %s'",
",",
"dst_name",
",",
"link_dest",
")",
"if",
"(",
"not",
"dry_run",
")",
":",
"os",
".",
"symlink",
"(",
"link_dest",
",",
"dst_name",
")",
"outputs",
".",
"append",
"(",
"dst_name",
")",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"src_name",
")",
":",
"outputs",
".",
"extend",
"(",
"copy_tree",
"(",
"src_name",
",",
"dst_name",
",",
"preserve_mode",
",",
"preserve_times",
",",
"preserve_symlinks",
",",
"update",
",",
"verbose",
"=",
"verbose",
",",
"dry_run",
"=",
"dry_run",
")",
")",
"else",
":",
"copy_file",
"(",
"src_name",
",",
"dst_name",
",",
"preserve_mode",
",",
"preserve_times",
",",
"update",
",",
"verbose",
"=",
"verbose",
",",
"dry_run",
"=",
"dry_run",
")",
"outputs",
".",
"append",
"(",
"dst_name",
")",
"return",
"outputs"
] | copy a src tree to the dst folder . | train | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.