| id_within_dataset (int64, 1 to 55.5k) | snippet (string, lengths 19 to 14.2k) | tokens (list, lengths 6 to 1.63k) | nl (string, lengths 6 to 352) | split_within_dataset (1 class) | is_duplicated (bool, 2 classes) |
|---|---|---|---|---|---|
51,031
|
def inception_arg_scope(weight_decay=4e-05, use_batch_norm=True, batch_norm_decay=0.9997, batch_norm_epsilon=0.001):
    batch_norm_params = {'decay': batch_norm_decay, 'epsilon': batch_norm_epsilon, 'updates_collections': tf.GraphKeys.UPDATE_OPS}
    if use_batch_norm:
        normalizer_fn = slim.batch_norm
        normalizer_params = batch_norm_params
    else:
        normalizer_fn = None
        normalizer_params = {}
    with slim.arg_scope([slim.conv2d, slim.fully_connected], weights_regularizer=slim.l2_regularizer(weight_decay)):
        with slim.arg_scope([slim.conv2d], weights_initializer=slim.variance_scaling_initializer(), activation_fn=tf.nn.relu, normalizer_fn=normalizer_fn, normalizer_params=normalizer_params) as sc:
            return sc
|
[
"def",
"inception_arg_scope",
"(",
"weight_decay",
"=",
"4e-05",
",",
"use_batch_norm",
"=",
"True",
",",
"batch_norm_decay",
"=",
"0.9997",
",",
"batch_norm_epsilon",
"=",
"0.001",
")",
":",
"batch_norm_params",
"=",
"{",
"'decay'",
":",
"batch_norm_decay",
",",
"'epsilon'",
":",
"batch_norm_epsilon",
",",
"'updates_collections'",
":",
"tf",
".",
"GraphKeys",
".",
"UPDATE_OPS",
"}",
"if",
"use_batch_norm",
":",
"normalizer_fn",
"=",
"slim",
".",
"batch_norm",
"normalizer_params",
"=",
"batch_norm_params",
"else",
":",
"normalizer_fn",
"=",
"None",
"normalizer_params",
"=",
"{",
"}",
"with",
"slim",
".",
"arg_scope",
"(",
"[",
"slim",
".",
"conv2d",
",",
"slim",
".",
"fully_connected",
"]",
",",
"weights_regularizer",
"=",
"slim",
".",
"l2_regularizer",
"(",
"weight_decay",
")",
")",
":",
"with",
"slim",
".",
"arg_scope",
"(",
"[",
"slim",
".",
"conv2d",
"]",
",",
"weights_initializer",
"=",
"slim",
".",
"variance_scaling_initializer",
"(",
")",
",",
"activation_fn",
"=",
"tf",
".",
"nn",
".",
"relu",
",",
"normalizer_fn",
"=",
"normalizer_fn",
",",
"normalizer_params",
"=",
"normalizer_params",
")",
"as",
"sc",
":",
"return",
"sc"
] |
defines the default arg scope for inception models .
|
train
| false
|
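A minimal usage sketch for the snippet above, assuming TensorFlow 1.x with `tf.contrib.slim`; the `images` placeholder is invented for illustration:

```python
import tensorflow as tf
slim = tf.contrib.slim  # assumes the TF 1.x contrib API

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
# every conv2d opened under this scope inherits the regularizer,
# initializer, ReLU activation and batch-norm settings from the arg scope
with slim.arg_scope(inception_arg_scope(weight_decay=1e-4)):
    net = slim.conv2d(images, 32, [3, 3], scope='conv1')
```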
51,032
|
def save_processed_files(path, processed_files):
    try:
        path_dir = os.path.dirname(path)
        if (not os.path.exists(path_dir)):
            os.makedirs(path_dir)
    except OSError as exc:
        raise IOError(exc)
    with open(path, 'w') as output_file:
        for (path, timestamp) in list(processed_files.items()):
            if (not os.path.isabs(path)):
                raise TypeError(('Only absolute paths are acceptable: %s' % path))
            output_file.write(('%s %i\n' % (path, timestamp)))
|
[
"def",
"save_processed_files",
"(",
"path",
",",
"processed_files",
")",
":",
"try",
":",
"path_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"if",
"(",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path_dir",
")",
")",
":",
"os",
".",
"makedirs",
"(",
"path_dir",
")",
"except",
"OSError",
"as",
"exc",
":",
"raise",
"IOError",
"(",
"exc",
")",
"with",
"open",
"(",
"path",
",",
"'w'",
")",
"as",
"output_file",
":",
"for",
"(",
"path",
",",
"timestamp",
")",
"in",
"list",
"(",
"processed_files",
".",
"items",
"(",
")",
")",
":",
"if",
"(",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"path",
")",
")",
":",
"raise",
"TypeError",
"(",
"(",
"'Only absolute paths are acceptable: %s'",
"%",
"path",
")",
")",
"output_file",
".",
"write",
"(",
"(",
"'%s %i\\n'",
"%",
"(",
"path",
",",
"timestamp",
")",
")",
")"
] |
persists a dictionary of path => last modified timestamp mappings so that they can be loaded later and applied to another :class:~stem .
|
train
| false
|
51,033
|
def _prior_running_states(jid):
    ret = []
    active = __salt__['saltutil.is_running']('state.*')
    for data in active:
        try:
            data_jid = int(data['jid'])
        except ValueError:
            continue
        if (data_jid < int(jid)):
            ret.append(data)
    return ret
|
[
"def",
"_prior_running_states",
"(",
"jid",
")",
":",
"ret",
"=",
"[",
"]",
"active",
"=",
"__salt__",
"[",
"'saltutil.is_running'",
"]",
"(",
"'state.*'",
")",
"for",
"data",
"in",
"active",
":",
"try",
":",
"data_jid",
"=",
"int",
"(",
"data",
"[",
"'jid'",
"]",
")",
"except",
"ValueError",
":",
"continue",
"if",
"(",
"data_jid",
"<",
"int",
"(",
"jid",
")",
")",
":",
"ret",
".",
"append",
"(",
"data",
")",
"return",
"ret"
] |
return a list of dicts of prior calls to state functions .
|
train
| true
|
51,034
|
def call_effect(*args, **kwargs):
    res = dict()
    devices = _get_lights()
    for dev_id in ((('id' not in kwargs) and sorted(devices.keys())) or _get_devices(kwargs)):
        res[dev_id] = _set(dev_id, {'effect': kwargs.get('type', 'none')})
    return res
|
[
"def",
"call_effect",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"res",
"=",
"dict",
"(",
")",
"devices",
"=",
"_get_lights",
"(",
")",
"for",
"dev_id",
"in",
"(",
"(",
"(",
"'id'",
"not",
"in",
"kwargs",
")",
"and",
"sorted",
"(",
"devices",
".",
"keys",
"(",
")",
")",
")",
"or",
"_get_devices",
"(",
"kwargs",
")",
")",
":",
"res",
"[",
"dev_id",
"]",
"=",
"_set",
"(",
"dev_id",
",",
"{",
"'effect'",
":",
"kwargs",
".",
"get",
"(",
"'type'",
",",
"'none'",
")",
"}",
")",
"return",
"res"
] |
set an effect to the lamp .
|
train
| true
|
51,035
|
def _can_set_locale(lc):
    try:
        with set_locale(lc):
            pass
    except locale.Error:
        return False
    else:
        return True
|
[
"def",
"_can_set_locale",
"(",
"lc",
")",
":",
"try",
":",
"with",
"set_locale",
"(",
"lc",
")",
":",
"pass",
"except",
"locale",
".",
"Error",
":",
"return",
"False",
"else",
":",
"return",
"True"
] |
check to see if we can set a locale without throwing an exception .
|
train
| false
|
51,036
|
def string_to_timestamp(timestring):
    ts = None
    try:
        delta = durationpy.from_str(timestring)
        past = (datetime.datetime.utcnow() - delta)
        ts = calendar.timegm(past.timetuple())
        return ts
    except Exception as e:
        pass
    if ts:
        return ts
    return 0
|
[
"def",
"string_to_timestamp",
"(",
"timestring",
")",
":",
"ts",
"=",
"None",
"try",
":",
"delta",
"=",
"durationpy",
".",
"from_str",
"(",
"timestring",
")",
"past",
"=",
"(",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"-",
"delta",
")",
"ts",
"=",
"calendar",
".",
"timegm",
"(",
"past",
".",
"timetuple",
"(",
")",
")",
"return",
"ts",
"except",
"Exception",
"as",
"e",
":",
"pass",
"if",
"ts",
":",
"return",
"ts",
"return",
"0"
] |
accepts a duration str and returns the unix timestamp that far in the past, or 0 if parsing fails .
|
train
| true
|
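A quick sketch of how the function above behaves, assuming the `durationpy` package is installed and the snippet is importable:

```python
# '3d' parses as a three-day duration, so the result is the unix
# timestamp from three days ago; unparseable input falls through to 0
print(string_to_timestamp('3d'))
print(string_to_timestamp('not-a-duration'))  # 0
```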
51,037
|
def swift_acl_translate(acl, group='', user='', xml=False):
    swift_acl = {}
    swift_acl['public-read'] = [['HTTP_X_CONTAINER_READ', '.r:*,.rlistings']]
    swift_acl['public-read-write'] = [['HTTP_X_CONTAINER_WRITE', '.r:*'], ['HTTP_X_CONTAINER_READ', '.r:*,.rlistings']]
    swift_acl['private'] = [['HTTP_X_CONTAINER_WRITE', '.'], ['HTTP_X_CONTAINER_READ', '.']]
    if xml:
        dom = parseString(acl)
        acl = 'unknown'
        for grant in dom.getElementsByTagName('Grant'):
            permission = grant.getElementsByTagName('Permission')[0].firstChild.data
            grantee = grant.getElementsByTagName('Grantee')[0].getAttributeNode('xsi:type').nodeValue
            if ((permission == 'FULL_CONTROL') and (grantee == 'CanonicalUser') and (acl != 'public-read') and (acl != 'public-read-write')):
                acl = 'private'
            elif ((permission == 'READ') and (grantee == 'Group') and (acl != 'public-read-write')):
                acl = 'public-read'
            elif ((permission == 'WRITE') and (grantee == 'Group')):
                acl = 'public-read-write'
            else:
                acl = 'unsupported'
    if (acl == 'authenticated-read'):
        return 'Unsupported'
    elif (acl not in swift_acl):
        return 'InvalidArgument'
    return swift_acl[acl]
|
[
"def",
"swift_acl_translate",
"(",
"acl",
",",
"group",
"=",
"''",
",",
"user",
"=",
"''",
",",
"xml",
"=",
"False",
")",
":",
"swift_acl",
"=",
"{",
"}",
"swift_acl",
"[",
"'public-read'",
"]",
"=",
"[",
"[",
"'HTTP_X_CONTAINER_READ'",
",",
"'.r:*,.rlistings'",
"]",
"]",
"swift_acl",
"[",
"'public-read-write'",
"]",
"=",
"[",
"[",
"'HTTP_X_CONTAINER_WRITE'",
",",
"'.r:*'",
"]",
",",
"[",
"'HTTP_X_CONTAINER_READ'",
",",
"'.r:*,.rlistings'",
"]",
"]",
"swift_acl",
"[",
"'private'",
"]",
"=",
"[",
"[",
"'HTTP_X_CONTAINER_WRITE'",
",",
"'.'",
"]",
",",
"[",
"'HTTP_X_CONTAINER_READ'",
",",
"'.'",
"]",
"]",
"if",
"xml",
":",
"dom",
"=",
"parseString",
"(",
"acl",
")",
"acl",
"=",
"'unknown'",
"for",
"grant",
"in",
"dom",
".",
"getElementsByTagName",
"(",
"'Grant'",
")",
":",
"permission",
"=",
"grant",
".",
"getElementsByTagName",
"(",
"'Permission'",
")",
"[",
"0",
"]",
".",
"firstChild",
".",
"data",
"grantee",
"=",
"grant",
".",
"getElementsByTagName",
"(",
"'Grantee'",
")",
"[",
"0",
"]",
".",
"getAttributeNode",
"(",
"'xsi:type'",
")",
".",
"nodeValue",
"if",
"(",
"(",
"permission",
"==",
"'FULL_CONTROL'",
")",
"and",
"(",
"grantee",
"==",
"'CanonicalUser'",
")",
"and",
"(",
"acl",
"!=",
"'public-read'",
")",
"and",
"(",
"acl",
"!=",
"'public-read-write'",
")",
")",
":",
"acl",
"=",
"'private'",
"elif",
"(",
"(",
"permission",
"==",
"'READ'",
")",
"and",
"(",
"grantee",
"==",
"'Group'",
")",
"and",
"(",
"acl",
"!=",
"'public-read-write'",
")",
")",
":",
"acl",
"=",
"'public-read'",
"elif",
"(",
"(",
"permission",
"==",
"'WRITE'",
")",
"and",
"(",
"grantee",
"==",
"'Group'",
")",
")",
":",
"acl",
"=",
"'public-read-write'",
"else",
":",
"acl",
"=",
"'unsupported'",
"if",
"(",
"acl",
"==",
"'authenticated-read'",
")",
":",
"return",
"'Unsupported'",
"elif",
"(",
"acl",
"not",
"in",
"swift_acl",
")",
":",
"return",
"'InvalidArgument'",
"return",
"swift_acl",
"[",
"acl",
"]"
] |
takes an s3 style acl and returns a list of header/value pairs that implement that acl in swift .
|
train
| false
|
51,038
|
@raises(ValueError)
def test_raises_value_error_non_square():
    gth_solve(np.array([[0.4, 0.6]]))
|
[
"@",
"raises",
"(",
"ValueError",
")",
"def",
"test_raises_value_error_non_square",
"(",
")",
":",
"gth_solve",
"(",
"np",
".",
"array",
"(",
"[",
"[",
"0.4",
",",
"0.6",
"]",
"]",
")",
")"
] |
test with non square input .
|
train
| false
|
51,042
|
def get_migrate_snapshot_name(volume):
    return ('cinder-migrate-snapshot-%(id)s' % volume)
|
[
"def",
"get_migrate_snapshot_name",
"(",
"volume",
")",
":",
"return",
"(",
"'cinder-migrate-snapshot-%(id)s'",
"%",
"volume",
")"
] |
return name for snapshot that will be used to migrate the volume .
|
train
| false
|
51,044
|
def delete_sqlite_database(database=None, verbosity='1'):
    try:
        router = getattr(settings, 'SCREENSHOTS_ROUTER', 'default')
        db_engine = settings.DATABASES[router]['ENGINE']
        if (db_engine == settings.SQLITE3_ENGINE):
            if (not database):
                database = settings.DATABASES[router]['NAME']
            if os.path.exists(database):
                (log.info(('==> Removing database %s ...' % database)) if (int(verbosity) > 0) else None)
                os.remove(database)
                (log.info('====> Successfully removed database.') if (int(verbosity) > 0) else None)
    except Exception as exc:
        log.error(('====> EXCEPTION: %s' % exc))
|
[
"def",
"delete_sqlite_database",
"(",
"database",
"=",
"None",
",",
"verbosity",
"=",
"'1'",
")",
":",
"try",
":",
"router",
"=",
"getattr",
"(",
"settings",
",",
"'SCREENSHOTS_ROUTER'",
",",
"'default'",
")",
"db_engine",
"=",
"settings",
".",
"DATABASES",
"[",
"router",
"]",
"[",
"'ENGINE'",
"]",
"if",
"(",
"db_engine",
"==",
"settings",
".",
"SQLITE3_ENGINE",
")",
":",
"if",
"(",
"not",
"database",
")",
":",
"database",
"=",
"settings",
".",
"DATABASES",
"[",
"router",
"]",
"[",
"'NAME'",
"]",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"database",
")",
":",
"(",
"log",
".",
"info",
"(",
"(",
"'==> Removing database %s ...'",
"%",
"database",
")",
")",
"if",
"(",
"int",
"(",
"verbosity",
")",
">",
"0",
")",
"else",
"None",
")",
"os",
".",
"remove",
"(",
"database",
")",
"(",
"log",
".",
"info",
"(",
"'====> Successfully removed database.'",
")",
"if",
"(",
"int",
"(",
"verbosity",
")",
">",
"0",
")",
"else",
"None",
")",
"except",
"Exception",
"as",
"exc",
":",
"log",
".",
"error",
"(",
"(",
"'====> EXCEPTION: %s'",
"%",
"exc",
")",
")"
] |
delete the specified sqlite database or, if none is given, the default database from settings .
|
train
| false
|
51,046
|
def iter_minibatches(doc_iter, minibatch_size):
    (X_text, y) = get_minibatch(doc_iter, minibatch_size)
    while len(X_text):
        (yield (X_text, y))
        (X_text, y) = get_minibatch(doc_iter, minibatch_size)
|
[
"def",
"iter_minibatches",
"(",
"doc_iter",
",",
"minibatch_size",
")",
":",
"(",
"X_text",
",",
"y",
")",
"=",
"get_minibatch",
"(",
"doc_iter",
",",
"minibatch_size",
")",
"while",
"len",
"(",
"X_text",
")",
":",
"(",
"yield",
"(",
"X_text",
",",
"y",
")",
")",
"(",
"X_text",
",",
"y",
")",
"=",
"get_minibatch",
"(",
"doc_iter",
",",
"minibatch_size",
")"
] |
generator of minibatches .
|
train
| false
|
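The generator above depends on a `get_minibatch` helper defined elsewhere; a self-contained sketch with a stand-in implementation:

```python
import itertools

def get_minibatch(doc_iter, size):
    # stand-in for the real helper: drain up to `size` (text, label) pairs
    batch = list(itertools.islice(doc_iter, size))
    X_text = [text for text, _ in batch]
    y = [label for _, label in batch]
    return X_text, y

docs = iter([('doc-%d' % i, i % 2) for i in range(10)])
for X_text, y in iter_minibatches(docs, minibatch_size=4):
    print(len(X_text), y)  # batches of 4, 4, then 2
```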
51,047
|
def set_sysctl(key, value):
    run_as_root(('/sbin/sysctl -n -e -w %(key)s=%(value)s' % locals()))
|
[
"def",
"set_sysctl",
"(",
"key",
",",
"value",
")",
":",
"run_as_root",
"(",
"(",
"'/sbin/sysctl -n -e -w %(key)s=%(value)s'",
"%",
"locals",
"(",
")",
")",
")"
] |
set a kernel parameter .
|
train
| false
|
51,048
|
def chrange(start, stop):
    return list(map(chr, range(ord(start), (ord(stop) + 1))))
|
[
"def",
"chrange",
"(",
"start",
",",
"stop",
")",
":",
"return",
"list",
"(",
"map",
"(",
"chr",
",",
"range",
"(",
"ord",
"(",
"start",
")",
",",
"(",
"ord",
"(",
"stop",
")",
"+",
"1",
")",
")",
")",
")"
] |
construct an iterable of length-1 strings beginning with start and ending with stop .
|
train
| false
|
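For example, both endpoints are included:

```python
print(chrange('a', 'e'))  # ['a', 'b', 'c', 'd', 'e']
print(chrange('0', '9'))  # the ten ASCII digits
```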
51,049
|
def document():
    return s3_rest_controller()
|
[
"def",
"document",
"(",
")",
":",
"return",
"s3_rest_controller",
"(",
")"
] |
rest controller .
|
train
| false
|
51,050
|
def _is_cluster_bootstrapping(cluster_summary):
    return ((cluster_summary.status.state != 'STARTING') and (not hasattr(cluster_summary.status.timeline, 'readydatetime')))
|
[
"def",
"_is_cluster_bootstrapping",
"(",
"cluster_summary",
")",
":",
"return",
"(",
"(",
"cluster_summary",
".",
"status",
".",
"state",
"!=",
"'STARTING'",
")",
"and",
"(",
"not",
"hasattr",
"(",
"cluster_summary",
".",
"status",
".",
"timeline",
",",
"'readydatetime'",
")",
")",
")"
] |
return true if *cluster_summary* is currently bootstrapping .
|
train
| false
|
51,052
|
def test_folder_locks(tmpdir):
    package_name = 'dir_1'
    tmpfile = join(tmpdir.strpath, package_name)
    with DirectoryLock(tmpfile) as lock1:
        assert (exists(lock1.lock_file_path) and isfile(lock1.lock_file_path))
        with pytest.raises(LockError) as execinfo:
            with DirectoryLock(tmpfile, retries=1) as lock2:
                assert False
        assert (exists(lock1.lock_file_path) and isfile(lock1.lock_file_path))
    assert (not exists(lock1.lock_file_path))
|
[
"def",
"test_folder_locks",
"(",
"tmpdir",
")",
":",
"package_name",
"=",
"'dir_1'",
"tmpfile",
"=",
"join",
"(",
"tmpdir",
".",
"strpath",
",",
"package_name",
")",
"with",
"DirectoryLock",
"(",
"tmpfile",
")",
"as",
"lock1",
":",
"assert",
"(",
"exists",
"(",
"lock1",
".",
"lock_file_path",
")",
"and",
"isfile",
"(",
"lock1",
".",
"lock_file_path",
")",
")",
"with",
"pytest",
".",
"raises",
"(",
"LockError",
")",
"as",
"execinfo",
":",
"with",
"DirectoryLock",
"(",
"tmpfile",
",",
"retries",
"=",
"1",
")",
"as",
"lock2",
":",
"assert",
"False",
"assert",
"(",
"exists",
"(",
"lock1",
".",
"lock_file_path",
")",
"and",
"isfile",
"(",
"lock1",
".",
"lock_file_path",
")",
")",
"assert",
"(",
"not",
"exists",
"(",
"lock1",
".",
"lock_file_path",
")",
")"
] |
test on directory lock .
|
train
| false
|
51,054
|
def B(attr):
    return range_property(attr, 0, 255)
|
[
"def",
"B",
"(",
"attr",
")",
":",
"return",
"range_property",
"(",
"attr",
",",
"0",
",",
"255",
")"
] |
unsigned byte .
|
train
| false
|
51,055
|
def read_raw_nicolet(input_fname, ch_type, montage=None, eog=(), ecg=(), emg=(), misc=(), preload=False, verbose=None):
    return RawNicolet(input_fname, ch_type, montage=montage, eog=eog, ecg=ecg, emg=emg, misc=misc, preload=preload, verbose=verbose)
|
[
"def",
"read_raw_nicolet",
"(",
"input_fname",
",",
"ch_type",
",",
"montage",
"=",
"None",
",",
"eog",
"=",
"(",
")",
",",
"ecg",
"=",
"(",
")",
",",
"emg",
"=",
"(",
")",
",",
"misc",
"=",
"(",
")",
",",
"preload",
"=",
"False",
",",
"verbose",
"=",
"None",
")",
":",
"return",
"RawNicolet",
"(",
"input_fname",
",",
"ch_type",
",",
"montage",
"=",
"montage",
",",
"eog",
"=",
"eog",
",",
"ecg",
"=",
"ecg",
",",
"emg",
"=",
"emg",
",",
"misc",
"=",
"misc",
",",
"preload",
"=",
"preload",
",",
"verbose",
"=",
"verbose",
")"
] |
read nicolet data as raw object .
|
train
| false
|
51,056
|
@shell.deprecated_network
def do_tenant_network_list(cs, args):
    networks = cs.tenant_networks.list()
    utils.print_list(networks, ['ID', 'Label', 'CIDR'])
|
[
"@",
"shell",
".",
"deprecated_network",
"def",
"do_tenant_network_list",
"(",
"cs",
",",
"args",
")",
":",
"networks",
"=",
"cs",
".",
"tenant_networks",
".",
"list",
"(",
")",
"utils",
".",
"print_list",
"(",
"networks",
",",
"[",
"'ID'",
",",
"'Label'",
",",
"'CIDR'",
"]",
")"
] |
list tenant networks .
|
train
| false
|
51,057
|
@public
def cofactors(f, g, *gens, **args):
    options.allowed_flags(args, ['polys'])
    try:
        ((F, G), opt) = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:
        (domain, (a, b)) = construct_domain(exc.exprs)
        try:
            (h, cff, cfg) = domain.cofactors(a, b)
        except NotImplementedError:
            raise ComputationFailed('cofactors', 2, exc)
        else:
            return (domain.to_sympy(h), domain.to_sympy(cff), domain.to_sympy(cfg))
    (h, cff, cfg) = F.cofactors(G)
    if (not opt.polys):
        return (h.as_expr(), cff.as_expr(), cfg.as_expr())
    else:
        return (h, cff, cfg)
|
[
"@",
"public",
"def",
"cofactors",
"(",
"f",
",",
"g",
",",
"*",
"gens",
",",
"**",
"args",
")",
":",
"options",
".",
"allowed_flags",
"(",
"args",
",",
"[",
"'polys'",
"]",
")",
"try",
":",
"(",
"(",
"F",
",",
"G",
")",
",",
"opt",
")",
"=",
"parallel_poly_from_expr",
"(",
"(",
"f",
",",
"g",
")",
",",
"*",
"gens",
",",
"**",
"args",
")",
"except",
"PolificationFailed",
"as",
"exc",
":",
"(",
"domain",
",",
"(",
"a",
",",
"b",
")",
")",
"=",
"construct_domain",
"(",
"exc",
".",
"exprs",
")",
"try",
":",
"(",
"h",
",",
"cff",
",",
"cfg",
")",
"=",
"domain",
".",
"cofactors",
"(",
"a",
",",
"b",
")",
"except",
"NotImplementedError",
":",
"raise",
"ComputationFailed",
"(",
"'cofactors'",
",",
"2",
",",
"exc",
")",
"else",
":",
"return",
"(",
"domain",
".",
"to_sympy",
"(",
"h",
")",
",",
"domain",
".",
"to_sympy",
"(",
"cff",
")",
",",
"domain",
".",
"to_sympy",
"(",
"cfg",
")",
")",
"(",
"h",
",",
"cff",
",",
"cfg",
")",
"=",
"F",
".",
"cofactors",
"(",
"G",
")",
"if",
"(",
"not",
"opt",
".",
"polys",
")",
":",
"return",
"(",
"h",
".",
"as_expr",
"(",
")",
",",
"cff",
".",
"as_expr",
"(",
")",
",",
"cfg",
".",
"as_expr",
"(",
")",
")",
"else",
":",
"return",
"(",
"h",
",",
"cff",
",",
"cfg",
")"
] |
compute gcd and cofactors of f and g .
|
train
| false
|
51,058
|
@hgcommand
def pending(ui, repo, *pats, **opts):
    if codereview_disabled:
        raise hg_util.Abort(codereview_disabled)
    quick = opts.get('quick', False)
    short = opts.get('short', False)
    m = LoadAllCL(ui, repo, web=((not quick) and (not short)))
    names = m.keys()
    names.sort()
    for name in names:
        cl = m[name]
        if short:
            ui.write((((name + ' DCTB ') + line1(cl.desc)) + '\n'))
        else:
            ui.write((cl.PendingText(quick=quick) + '\n'))
    if short:
        return 0
    files = DefaultFiles(ui, repo, [])
    if (len(files) > 0):
        s = 'Changed files not in any CL:\n'
        for f in files:
            s += ((' DCTB ' + f) + '\n')
        ui.write(s)
|
[
"@",
"hgcommand",
"def",
"pending",
"(",
"ui",
",",
"repo",
",",
"*",
"pats",
",",
"**",
"opts",
")",
":",
"if",
"codereview_disabled",
":",
"raise",
"hg_util",
".",
"Abort",
"(",
"codereview_disabled",
")",
"quick",
"=",
"opts",
".",
"get",
"(",
"'quick'",
",",
"False",
")",
"short",
"=",
"opts",
".",
"get",
"(",
"'short'",
",",
"False",
")",
"m",
"=",
"LoadAllCL",
"(",
"ui",
",",
"repo",
",",
"web",
"=",
"(",
"(",
"not",
"quick",
")",
"and",
"(",
"not",
"short",
")",
")",
")",
"names",
"=",
"m",
".",
"keys",
"(",
")",
"names",
".",
"sort",
"(",
")",
"for",
"name",
"in",
"names",
":",
"cl",
"=",
"m",
"[",
"name",
"]",
"if",
"short",
":",
"ui",
".",
"write",
"(",
"(",
"(",
"(",
"name",
"+",
"' DCTB '",
")",
"+",
"line1",
"(",
"cl",
".",
"desc",
")",
")",
"+",
"'\\n'",
")",
")",
"else",
":",
"ui",
".",
"write",
"(",
"(",
"cl",
".",
"PendingText",
"(",
"quick",
"=",
"quick",
")",
"+",
"'\\n'",
")",
")",
"if",
"short",
":",
"return",
"0",
"files",
"=",
"DefaultFiles",
"(",
"ui",
",",
"repo",
",",
"[",
"]",
")",
"if",
"(",
"len",
"(",
"files",
")",
">",
"0",
")",
":",
"s",
"=",
"'Changed files not in any CL:\\n'",
"for",
"f",
"in",
"files",
":",
"s",
"+=",
"(",
"(",
"' DCTB '",
"+",
"f",
")",
"+",
"'\\n'",
")",
"ui",
".",
"write",
"(",
"s",
")"
] |
show pending changes: lists pending changes followed by a list of unassigned but modified files .
|
train
| false
|
51,059
|
def secure_compare(string1, string2):
    if (len(string1) != len(string2)):
        return False
    result = True
    for (c1, c2) in izip(string1, string2):
        result &= (c1 == c2)
    return result
|
[
"def",
"secure_compare",
"(",
"string1",
",",
"string2",
")",
":",
"if",
"(",
"len",
"(",
"string1",
")",
"!=",
"len",
"(",
"string2",
")",
")",
":",
"return",
"False",
"result",
"=",
"True",
"for",
"(",
"c1",
",",
"c2",
")",
"in",
"izip",
"(",
"string1",
",",
"string2",
")",
":",
"result",
"&=",
"(",
"c1",
"==",
"c2",
")",
"return",
"result"
] |
compare two strings while protecting against timing attacks .
|
train
| true
|
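The snippet is Python 2 (`izip`); on Python 3 the same constant-time comparison is available from the standard library, which is the safer choice:

```python
import hmac

# compares in time independent of where the strings first differ
print(hmac.compare_digest('secret-token', 'secret-token'))   # True
print(hmac.compare_digest('secret-token', 'guessed-token'))  # False
```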
51,060
|
def _get_role_name(role):
    if isinstance(role, dict):
        return role['role']
    elif isinstance(role, basestring):
        return role
    else:
        LOGGER.warning(('role %s could not be resolved to a role name.' % role))
        return None
|
[
"def",
"_get_role_name",
"(",
"role",
")",
":",
"if",
"isinstance",
"(",
"role",
",",
"dict",
")",
":",
"return",
"role",
"[",
"'role'",
"]",
"elif",
"isinstance",
"(",
"role",
",",
"basestring",
")",
":",
"return",
"role",
"else",
":",
"LOGGER",
".",
"warning",
"(",
"(",
"'role %s could not be resolved to a role name.'",
"%",
"role",
")",
")",
"return",
"None"
] |
resolves a role name from either a simple declaration or a dictionary style declaration .
|
train
| false
|
51,061
|
def _read_w(filename):
    with open(filename, 'rb', buffering=0) as fid:
        fid.read(2)
        vertices_n = int(_read_3(fid))
        vertices = np.zeros(vertices_n, dtype=np.int32)
        data = np.zeros(vertices_n, dtype=np.float32)
        for i in range(vertices_n):
            vertices[i] = _read_3(fid)
            data[i] = np.fromfile(fid, dtype='>f4', count=1)[0]
        w = dict()
        w['vertices'] = vertices
        w['data'] = data
    return w
|
[
"def",
"_read_w",
"(",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'rb'",
",",
"buffering",
"=",
"0",
")",
"as",
"fid",
":",
"fid",
".",
"read",
"(",
"2",
")",
"vertices_n",
"=",
"int",
"(",
"_read_3",
"(",
"fid",
")",
")",
"vertices",
"=",
"np",
".",
"zeros",
"(",
"vertices_n",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"data",
"=",
"np",
".",
"zeros",
"(",
"vertices_n",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"for",
"i",
"in",
"range",
"(",
"vertices_n",
")",
":",
"vertices",
"[",
"i",
"]",
"=",
"_read_3",
"(",
"fid",
")",
"data",
"[",
"i",
"]",
"=",
"np",
".",
"fromfile",
"(",
"fid",
",",
"dtype",
"=",
"'>f4'",
",",
"count",
"=",
"1",
")",
"[",
"0",
"]",
"w",
"=",
"dict",
"(",
")",
"w",
"[",
"'vertices'",
"]",
"=",
"vertices",
"w",
"[",
"'data'",
"]",
"=",
"data",
"return",
"w"
] |
read a w file .
|
train
| false
|
51,063
|
def average_node_connectivity(G, flow_func=None):
    if G.is_directed():
        iter_func = itertools.permutations
    else:
        iter_func = itertools.combinations
    H = build_auxiliary_node_connectivity(G)
    R = build_residual_network(H, 'capacity')
    kwargs = dict(flow_func=flow_func, auxiliary=H, residual=R)
    (num, den) = (0, 0)
    for (u, v) in iter_func(G, 2):
        num += local_node_connectivity(G, u, v, **kwargs)
        den += 1
    if (den == 0):
        return 0
    return (num / den)
|
[
"def",
"average_node_connectivity",
"(",
"G",
",",
"flow_func",
"=",
"None",
")",
":",
"if",
"G",
".",
"is_directed",
"(",
")",
":",
"iter_func",
"=",
"itertools",
".",
"permutations",
"else",
":",
"iter_func",
"=",
"itertools",
".",
"combinations",
"H",
"=",
"build_auxiliary_node_connectivity",
"(",
"G",
")",
"R",
"=",
"build_residual_network",
"(",
"H",
",",
"'capacity'",
")",
"kwargs",
"=",
"dict",
"(",
"flow_func",
"=",
"flow_func",
",",
"auxiliary",
"=",
"H",
",",
"residual",
"=",
"R",
")",
"(",
"num",
",",
"den",
")",
"=",
"(",
"0",
",",
"0",
")",
"for",
"(",
"u",
",",
"v",
")",
"in",
"iter_func",
"(",
"G",
",",
"2",
")",
":",
"num",
"+=",
"local_node_connectivity",
"(",
"G",
",",
"u",
",",
"v",
",",
"**",
"kwargs",
")",
"den",
"+=",
"1",
"if",
"(",
"den",
"==",
"0",
")",
":",
"return",
"0",
"return",
"(",
"num",
"/",
"den",
")"
] |
returns the average connectivity of a graph g .
|
train
| false
|
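The same computation is exposed through the public networkx API; in a cycle every node pair is joined by exactly two disjoint paths, so the average is 2:

```python
import networkx as nx

G = nx.cycle_graph(5)
print(nx.average_node_connectivity(G))  # 2.0
```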
51,064
|
def _GetNumNonVisibleFiles(file_list):
    return len([f for f in file_list if (not BufferIsVisible(GetBufferNumberForFilename(f, False)))])
|
[
"def",
"_GetNumNonVisibleFiles",
"(",
"file_list",
")",
":",
"return",
"len",
"(",
"[",
"f",
"for",
"f",
"in",
"file_list",
"if",
"(",
"not",
"BufferIsVisible",
"(",
"GetBufferNumberForFilename",
"(",
"f",
",",
"False",
")",
")",
")",
"]",
")"
] |
returns the number of files in the iterable list of files |file_list| which are not currently open in visible windows .
|
train
| false
|
51,065
|
@contextfunction
def identities_contact_list(context, contacts, skip_group=''):
    request = context['request']
    response_format = 'html'
    if ('response_format' in context):
        response_format = context['response_format']
    contact_fields = ContactField.objects.all().filter(trash=False)
    return Markup(render_to_string('identities/tags/contact_list', {'contacts': contacts, 'skip_group': skip_group, 'contact_fields': contact_fields}, context_instance=RequestContext(request), response_format=response_format))
|
[
"@",
"contextfunction",
"def",
"identities_contact_list",
"(",
"context",
",",
"contacts",
",",
"skip_group",
"=",
"''",
")",
":",
"request",
"=",
"context",
"[",
"'request'",
"]",
"response_format",
"=",
"'html'",
"if",
"(",
"'response_format'",
"in",
"context",
")",
":",
"response_format",
"=",
"context",
"[",
"'response_format'",
"]",
"contact_fields",
"=",
"ContactField",
".",
"objects",
".",
"all",
"(",
")",
".",
"filter",
"(",
"trash",
"=",
"False",
")",
"return",
"Markup",
"(",
"render_to_string",
"(",
"'identities/tags/contact_list'",
",",
"{",
"'contacts'",
":",
"contacts",
",",
"'skip_group'",
":",
"skip_group",
",",
"'contact_fields'",
":",
"contact_fields",
"}",
",",
"context_instance",
"=",
"RequestContext",
"(",
"request",
")",
",",
"response_format",
"=",
"response_format",
")",
")"
] |
print a list of contacts .
|
train
| false
|
51,066
|
def dup_isolate_all_roots_sqf(f, K, eps=None, inf=None, sup=None, fast=False, blackbox=False):
    return (dup_isolate_real_roots_sqf(f, K, eps=eps, inf=inf, sup=sup, fast=fast, blackbox=blackbox), dup_isolate_complex_roots_sqf(f, K, eps=eps, inf=inf, sup=sup, blackbox=blackbox))
|
[
"def",
"dup_isolate_all_roots_sqf",
"(",
"f",
",",
"K",
",",
"eps",
"=",
"None",
",",
"inf",
"=",
"None",
",",
"sup",
"=",
"None",
",",
"fast",
"=",
"False",
",",
"blackbox",
"=",
"False",
")",
":",
"return",
"(",
"dup_isolate_real_roots_sqf",
"(",
"f",
",",
"K",
",",
"eps",
"=",
"eps",
",",
"inf",
"=",
"inf",
",",
"sup",
"=",
"sup",
",",
"fast",
"=",
"fast",
",",
"blackbox",
"=",
"blackbox",
")",
",",
"dup_isolate_complex_roots_sqf",
"(",
"f",
",",
"K",
",",
"eps",
"=",
"eps",
",",
"inf",
"=",
"inf",
",",
"sup",
"=",
"sup",
",",
"blackbox",
"=",
"blackbox",
")",
")"
] |
isolate real and complex roots of a square-free polynomial f .
|
train
| false
|
51,067
|
def sys_v_init_command_generator(command):
    command_name = 'service'
    if (command == 'is_enabled'):
        command_name = 'chkconfig'
        command = ''
    elif (command == 'enable'):
        command_name = 'chkconfig'
        command = 'on'
    elif (command == 'disable'):
        command_name = 'chkconfig'
        command = 'off'
    elif (command == 'list'):
        def list_command(service_name):
            return ['chkconfig', '--list']
        return list_command
    elif (command == 'set_target'):
        def set_target_command(target):
            target = convert_systemd_target_to_runlevel(target)
            return ['telinit', target]
        return set_target_command
    def method(service_name):
        return [command_name, service_name, command]
    return method
|
[
"def",
"sys_v_init_command_generator",
"(",
"command",
")",
":",
"command_name",
"=",
"'service'",
"if",
"(",
"command",
"==",
"'is_enabled'",
")",
":",
"command_name",
"=",
"'chkconfig'",
"command",
"=",
"''",
"elif",
"(",
"command",
"==",
"'enable'",
")",
":",
"command_name",
"=",
"'chkconfig'",
"command",
"=",
"'on'",
"elif",
"(",
"command",
"==",
"'disable'",
")",
":",
"command_name",
"=",
"'chkconfig'",
"command",
"=",
"'off'",
"elif",
"(",
"command",
"==",
"'list'",
")",
":",
"def",
"list_command",
"(",
"service_name",
")",
":",
"return",
"[",
"'chkconfig'",
",",
"'--list'",
"]",
"return",
"list_command",
"elif",
"(",
"command",
"==",
"'set_target'",
")",
":",
"def",
"set_target_command",
"(",
"target",
")",
":",
"target",
"=",
"convert_systemd_target_to_runlevel",
"(",
"target",
")",
"return",
"[",
"'telinit'",
",",
"target",
"]",
"return",
"set_target_command",
"def",
"method",
"(",
"service_name",
")",
":",
"return",
"[",
"command_name",
",",
"service_name",
",",
"command",
"]",
"return",
"method"
] |
generate lists of command arguments for sys_v style inits .
|
train
| false
|
51,068
|
def demographic_aggregate():
    def clear_aggregates(r, **attr):
        if (not s3_has_role(ADMIN)):
            auth.permission.fail()
        s3db.stats_demographic_rebuild_all_aggregates()
        redirect(URL(c='stats', f='demographic_aggregate', args=''))
    s3db.set_method('stats', 'demographic_aggregate', method='clear', action=clear_aggregates)
    output = s3_rest_controller()
    return output
|
[
"def",
"demographic_aggregate",
"(",
")",
":",
"def",
"clear_aggregates",
"(",
"r",
",",
"**",
"attr",
")",
":",
"if",
"(",
"not",
"s3_has_role",
"(",
"ADMIN",
")",
")",
":",
"auth",
".",
"permission",
".",
"fail",
"(",
")",
"s3db",
".",
"stats_demographic_rebuild_all_aggregates",
"(",
")",
"redirect",
"(",
"URL",
"(",
"c",
"=",
"'stats'",
",",
"f",
"=",
"'demographic_aggregate'",
",",
"args",
"=",
"''",
")",
")",
"s3db",
".",
"set_method",
"(",
"'stats'",
",",
"'demographic_aggregate'",
",",
"method",
"=",
"'clear'",
",",
"action",
"=",
"clear_aggregates",
")",
"output",
"=",
"s3_rest_controller",
"(",
")",
"return",
"output"
] |
rest controller .
|
train
| false
|
51,069
|
def GetInstance(region, instance_id):
    try:
        matches = ListInstances(region, instances=[instance_id])
    except EC2ResponseError as e:
        if (e.error_code == 'InvalidInstanceID.NotFound'):
            return None
        raise
    if (len(matches) == 0):
        return None
    assert (len(matches) == 1)
    return matches[0]
|
[
"def",
"GetInstance",
"(",
"region",
",",
"instance_id",
")",
":",
"try",
":",
"matches",
"=",
"ListInstances",
"(",
"region",
",",
"instances",
"=",
"[",
"instance_id",
"]",
")",
"except",
"EC2ResponseError",
"as",
"e",
":",
"if",
"(",
"e",
".",
"error_code",
"==",
"'InvalidInstanceID.NotFound'",
")",
":",
"return",
"None",
"raise",
"if",
"(",
"len",
"(",
"matches",
")",
"==",
"0",
")",
":",
"return",
"None",
"assert",
"(",
"len",
"(",
"matches",
")",
"==",
"1",
")",
"return",
"matches",
"[",
"0",
"]"
] |
find a specific instance given its id .
|
train
| false
|
51,070
|
def xavier_weight(nin, nout=None):
    if (nout == None):
        nout = nin
    r = (numpy.sqrt(6.0) / numpy.sqrt((nin + nout)))
    W = (((numpy.random.rand(nin, nout) * 2) * r) - r)
    return W.astype('float32')
|
[
"def",
"xavier_weight",
"(",
"nin",
",",
"nout",
"=",
"None",
")",
":",
"if",
"(",
"nout",
"==",
"None",
")",
":",
"nout",
"=",
"nin",
"r",
"=",
"(",
"numpy",
".",
"sqrt",
"(",
"6.0",
")",
"/",
"numpy",
".",
"sqrt",
"(",
"(",
"nin",
"+",
"nout",
")",
")",
")",
"W",
"=",
"(",
"(",
"(",
"numpy",
".",
"random",
".",
"rand",
"(",
"nin",
",",
"nout",
")",
"*",
"2",
")",
"*",
"r",
")",
"-",
"r",
")",
"return",
"W",
".",
"astype",
"(",
"'float32'",
")"
] |
xavier init .
|
train
| false
|
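A small check of the snippet above, assuming numpy is available: samples are uniform in the interval ±sqrt(6/(nin+nout)):

```python
import numpy

W = xavier_weight(256, 128)
r = numpy.sqrt(6.0) / numpy.sqrt(256 + 128)
print(W.shape, W.dtype)         # (256, 128) float32
print(bool(abs(W).max() <= r))  # True: all samples lie inside [-r, r]
```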
51,071
|
@sopel.module.commands(u'safety')
def toggle_safety(bot, trigger):
    if ((not trigger.admin) and (bot.privileges[trigger.sender][trigger.nick] < OP)):
        bot.reply(u'Only channel operators can change safety settings')
        return
    allowed_states = [u'strict', u'on', u'off', u'local', u'local strict']
    if ((not trigger.group(2)) or (trigger.group(2).lower() not in allowed_states)):
        options = u' / '.join(allowed_states)
        bot.reply((u'Available options: %s' % options))
        return
    channel = trigger.sender.lower()
    bot.db.set_channel_value(channel, u'safety', trigger.group(2).lower())
    bot.reply((u'Safety is now set to "%s" on this channel' % trigger.group(2)))
|
[
"@",
"sopel",
".",
"module",
".",
"commands",
"(",
"u'safety'",
")",
"def",
"toggle_safety",
"(",
"bot",
",",
"trigger",
")",
":",
"if",
"(",
"(",
"not",
"trigger",
".",
"admin",
")",
"and",
"(",
"bot",
".",
"privileges",
"[",
"trigger",
".",
"sender",
"]",
"[",
"trigger",
".",
"nick",
"]",
"<",
"OP",
")",
")",
":",
"bot",
".",
"reply",
"(",
"u'Only channel operators can change safety settings'",
")",
"return",
"allowed_states",
"=",
"[",
"u'strict'",
",",
"u'on'",
",",
"u'off'",
",",
"u'local'",
",",
"u'local strict'",
"]",
"if",
"(",
"(",
"not",
"trigger",
".",
"group",
"(",
"2",
")",
")",
"or",
"(",
"trigger",
".",
"group",
"(",
"2",
")",
".",
"lower",
"(",
")",
"not",
"in",
"allowed_states",
")",
")",
":",
"options",
"=",
"u' / '",
".",
"join",
"(",
"allowed_states",
")",
"bot",
".",
"reply",
"(",
"(",
"u'Available options: %s'",
"%",
"options",
")",
")",
"return",
"channel",
"=",
"trigger",
".",
"sender",
".",
"lower",
"(",
")",
"bot",
".",
"db",
".",
"set_channel_value",
"(",
"channel",
",",
"u'safety'",
",",
"trigger",
".",
"group",
"(",
"2",
")",
".",
"lower",
"(",
")",
")",
"bot",
".",
"reply",
"(",
"(",
"u'Safety is now set to \"%s\" on this channel'",
"%",
"trigger",
".",
"group",
"(",
"2",
")",
")",
")"
] |
set safety setting for channel .
|
train
| false
|
51,072
|
def _verbosity_filter(index, verbose):
    if (not verbose):
        return True
    elif (verbose > 10):
        return False
    if (index == 0):
        return False
    verbose = (0.5 * ((11 - verbose) ** 2))
    scale = sqrt((index / verbose))
    next_scale = sqrt(((index + 1) / verbose))
    return (int(next_scale) == int(scale))
|
[
"def",
"_verbosity_filter",
"(",
"index",
",",
"verbose",
")",
":",
"if",
"(",
"not",
"verbose",
")",
":",
"return",
"True",
"elif",
"(",
"verbose",
">",
"10",
")",
":",
"return",
"False",
"if",
"(",
"index",
"==",
"0",
")",
":",
"return",
"False",
"verbose",
"=",
"(",
"0.5",
"*",
"(",
"(",
"11",
"-",
"verbose",
")",
"**",
"2",
")",
")",
"scale",
"=",
"sqrt",
"(",
"(",
"index",
"/",
"verbose",
")",
")",
"next_scale",
"=",
"sqrt",
"(",
"(",
"(",
"index",
"+",
"1",
")",
"/",
"verbose",
")",
")",
"return",
"(",
"int",
"(",
"next_scale",
")",
"==",
"int",
"(",
"scale",
")",
")"
] |
returns false for indices increasingly apart .
|
train
| false
|
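A demonstration of the schedule (the snippet relies on a bare `sqrt`, presumably `math.sqrt`, imported before the definition). Indices where the filter returns False would be reported, and they spread apart quadratically:

```python
from math import sqrt

reported = [i for i in range(100) if not _verbosity_filter(i, verbose=5)]
print(reported)  # [0, 17, 71]: each report lands where sqrt(i/18) crosses an integer
```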
51,073
|
def validate_template(html):
    import frappe
    from jinja2 import TemplateSyntaxError
    jenv = get_jenv()
    try:
        jenv.from_string(html)
    except TemplateSyntaxError as e:
        frappe.msgprint(u'Line {}: {}'.format(e.lineno, e.message))
        frappe.throw(frappe._(u'Syntax error in template'))
|
[
"def",
"validate_template",
"(",
"html",
")",
":",
"import",
"frappe",
"from",
"jinja2",
"import",
"TemplateSyntaxError",
"jenv",
"=",
"get_jenv",
"(",
")",
"try",
":",
"jenv",
".",
"from_string",
"(",
"html",
")",
"except",
"TemplateSyntaxError",
"as",
"e",
":",
"frappe",
".",
"msgprint",
"(",
"u'Line {}: {}'",
".",
"format",
"(",
"e",
".",
"lineno",
",",
"e",
".",
"message",
")",
")",
"frappe",
".",
"throw",
"(",
"frappe",
".",
"_",
"(",
"u'Syntax error in template'",
")",
")"
] |
throws exception if there is a syntax error in the jinja template .
|
train
| false
|
51,077
|
def test_no_matches(keyhint, key_config_stub):
    key_config_stub.set_bindings_for('normal', OrderedDict([('aa', 'cmd-aa'), ('ab', 'cmd-ab')]))
    keyhint.update_keyhint('normal', 'z')
    assert (not keyhint.text())
    assert (not keyhint.isVisible())
|
[
"def",
"test_no_matches",
"(",
"keyhint",
",",
"key_config_stub",
")",
":",
"key_config_stub",
".",
"set_bindings_for",
"(",
"'normal'",
",",
"OrderedDict",
"(",
"[",
"(",
"'aa'",
",",
"'cmd-aa'",
")",
",",
"(",
"'ab'",
",",
"'cmd-ab'",
")",
"]",
")",
")",
"keyhint",
".",
"update_keyhint",
"(",
"'normal'",
",",
"'z'",
")",
"assert",
"(",
"not",
"keyhint",
".",
"text",
"(",
")",
")",
"assert",
"(",
"not",
"keyhint",
".",
"isVisible",
"(",
")",
")"
] |
ensure the widget isn't visible if there are no keystrings to show .
|
train
| false
|
51,078
|
def getRegistry():
    return globalRegistry
|
[
"def",
"getRegistry",
"(",
")",
":",
"return",
"globalRegistry"
] |
returns the twisted global c{zope .
|
train
| false
|
51,079
|
def assign_value(x, y, z, values, save_file):
    if (y > 127):
        return None
    result = save_file.set_block(x, y, z, values)
    if LIGHTINGFIX:
        relight_master.add(x, z)
    return result
|
[
"def",
"assign_value",
"(",
"x",
",",
"y",
",",
"z",
",",
"values",
",",
"save_file",
")",
":",
"if",
"(",
"y",
">",
"127",
")",
":",
"return",
"None",
"result",
"=",
"save_file",
".",
"set_block",
"(",
"x",
",",
"y",
",",
"z",
",",
"values",
")",
"if",
"LIGHTINGFIX",
":",
"relight_master",
".",
"add",
"(",
"x",
",",
"z",
")",
"return",
"result"
] |
assign an index value to a location in mcmap .
|
train
| false
|
51,080
|
def public_methods(obj):
    return [name for name in all_methods(obj) if (name[0] != '_')]
|
[
"def",
"public_methods",
"(",
"obj",
")",
":",
"return",
"[",
"name",
"for",
"name",
"in",
"all_methods",
"(",
"obj",
")",
"if",
"(",
"name",
"[",
"0",
"]",
"!=",
"'_'",
")",
"]"
] |
return a list of names of methods of obj which do not start with _ .
|
train
| false
|
51,081
|
def addVertexToAttributes(attributes, vertex):
    if (vertex.x != 0.0):
        attributes['x'] = str(vertex.x)
    if (vertex.y != 0.0):
        attributes['y'] = str(vertex.y)
    if (vertex.z != 0.0):
        attributes['z'] = str(vertex.z)
|
[
"def",
"addVertexToAttributes",
"(",
"attributes",
",",
"vertex",
")",
":",
"if",
"(",
"vertex",
".",
"x",
"!=",
"0.0",
")",
":",
"attributes",
"[",
"'x'",
"]",
"=",
"str",
"(",
"vertex",
".",
"x",
")",
"if",
"(",
"vertex",
".",
"y",
"!=",
"0.0",
")",
":",
"attributes",
"[",
"'y'",
"]",
"=",
"str",
"(",
"vertex",
".",
"y",
")",
"if",
"(",
"vertex",
".",
"z",
"!=",
"0.0",
")",
":",
"attributes",
"[",
"'z'",
"]",
"=",
"str",
"(",
"vertex",
".",
"z",
")"
] |
add to the attribute dictionary .
|
train
| false
|
51,082
|
def _getDisplayableArguments(obj, alwaysShow, fieldNames):
    displayableArgs = []
    if _PY3:
        signature = inspect.signature(obj.__class__.__init__)
        for name in fieldNames:
            defaultValue = signature.parameters[name].default
            fieldValue = getattr(obj, name, defaultValue)
            if ((name in alwaysShow) or (fieldValue != defaultValue)):
                displayableArgs.append((' %s=%r' % (name, fieldValue)))
    else:
        argspec = inspect.getargspec(obj.__class__.__init__)
        defaults = dict(zip(reversed(argspec.args), reversed(argspec.defaults)))
        for name in fieldNames:
            defaultValue = defaults.get(name)
            fieldValue = getattr(obj, name, defaultValue)
            if ((name in alwaysShow) or (fieldValue != defaultValue)):
                displayableArgs.append((' %s=%r' % (name, fieldValue)))
    return displayableArgs
|
[
"def",
"_getDisplayableArguments",
"(",
"obj",
",",
"alwaysShow",
",",
"fieldNames",
")",
":",
"displayableArgs",
"=",
"[",
"]",
"if",
"_PY3",
":",
"signature",
"=",
"inspect",
".",
"signature",
"(",
"obj",
".",
"__class__",
".",
"__init__",
")",
"for",
"name",
"in",
"fieldNames",
":",
"defaultValue",
"=",
"signature",
".",
"parameters",
"[",
"name",
"]",
".",
"default",
"fieldValue",
"=",
"getattr",
"(",
"obj",
",",
"name",
",",
"defaultValue",
")",
"if",
"(",
"(",
"name",
"in",
"alwaysShow",
")",
"or",
"(",
"fieldValue",
"!=",
"defaultValue",
")",
")",
":",
"displayableArgs",
".",
"append",
"(",
"(",
"' %s=%r'",
"%",
"(",
"name",
",",
"fieldValue",
")",
")",
")",
"else",
":",
"argspec",
"=",
"inspect",
".",
"getargspec",
"(",
"obj",
".",
"__class__",
".",
"__init__",
")",
"defaults",
"=",
"dict",
"(",
"zip",
"(",
"reversed",
"(",
"argspec",
".",
"args",
")",
",",
"reversed",
"(",
"argspec",
".",
"defaults",
")",
")",
")",
"for",
"name",
"in",
"fieldNames",
":",
"defaultValue",
"=",
"defaults",
".",
"get",
"(",
"name",
")",
"fieldValue",
"=",
"getattr",
"(",
"obj",
",",
"name",
",",
"defaultValue",
")",
"if",
"(",
"(",
"name",
"in",
"alwaysShow",
")",
"or",
"(",
"fieldValue",
"!=",
"defaultValue",
")",
")",
":",
"displayableArgs",
".",
"append",
"(",
"(",
"' %s=%r'",
"%",
"(",
"name",
",",
"fieldValue",
")",
")",
")",
"return",
"displayableArgs"
] |
inspect the function signature of c{obj}'s constructor .
|
train
| false
|
51,084
|
def create_large_course(store, load_factor):
    course = CourseFactory.create(modulestore=store, start=datetime(2015, 3, 1, tzinfo=UTC))
    with store.bulk_operations(course.id):
        child_count = create_children(store, course, COURSE_CHILD_STRUCTURE['course'], load_factor)
    return (course, child_count)
|
[
"def",
"create_large_course",
"(",
"store",
",",
"load_factor",
")",
":",
"course",
"=",
"CourseFactory",
".",
"create",
"(",
"modulestore",
"=",
"store",
",",
"start",
"=",
"datetime",
"(",
"2015",
",",
"3",
",",
"1",
",",
"tzinfo",
"=",
"UTC",
")",
")",
"with",
"store",
".",
"bulk_operations",
"(",
"course",
".",
"id",
")",
":",
"child_count",
"=",
"create_children",
"(",
"store",
",",
"course",
",",
"COURSE_CHILD_STRUCTURE",
"[",
"'course'",
"]",
",",
"load_factor",
")",
"return",
"(",
"course",
",",
"child_count",
")"
] |
create a large course .
|
train
| false
|
51,085
|
def sanitize_log_data(secret, data=None, leave_characters=LEAVE_CHARS):
    replace_secret = (secret[:leave_characters] + ((len(secret) - leave_characters) * '*'))
    if data:
        return data.replace(secret, replace_secret)
    return replace_secret
|
[
"def",
"sanitize_log_data",
"(",
"secret",
",",
"data",
"=",
"None",
",",
"leave_characters",
"=",
"LEAVE_CHARS",
")",
":",
"replace_secret",
"=",
"(",
"secret",
"[",
":",
"leave_characters",
"]",
"+",
"(",
"(",
"len",
"(",
"secret",
")",
"-",
"leave_characters",
")",
"*",
"'*'",
")",
")",
"if",
"data",
":",
"return",
"data",
".",
"replace",
"(",
"secret",
",",
"replace_secret",
")",
"return",
"replace_secret"
] |
clean private/secret data from log statements and other data .
|
train
| false
|
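`LEAVE_CHARS` is a module-level default not shown in the row; a sketch assuming a value of 4 defined before the function:

```python
LEAVE_CHARS = 4  # assumed default; must be in scope when the snippet is defined

print(sanitize_log_data('s3cr3tkey'))
# s3cr*****
print(sanitize_log_data('s3cr3tkey', data='token=s3cr3tkey sent'))
# token=s3cr***** sent
```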
51,086
|
def album_for_mbid(release_id):
    try:
        return mb.album_for_id(release_id)
    except mb.MusicBrainzAPIError as exc:
        exc.log(log)
|
[
"def",
"album_for_mbid",
"(",
"release_id",
")",
":",
"try",
":",
"return",
"mb",
".",
"album_for_id",
"(",
"release_id",
")",
"except",
"mb",
".",
"MusicBrainzAPIError",
"as",
"exc",
":",
"exc",
".",
"log",
"(",
"log",
")"
] |
get an albuminfo object for a musicbrainz release id .
|
train
| false
|
51,087
|
def convert_coords(chunkx, chunkz):
    return ((chunkx + chunkz), (chunkz - chunkx))
|
[
"def",
"convert_coords",
"(",
"chunkx",
",",
"chunkz",
")",
":",
"return",
"(",
"(",
"chunkx",
"+",
"chunkz",
")",
",",
"(",
"chunkz",
"-",
"chunkx",
")",
")"
] |
takes a coordinate where chunkx and chunkz are in the chunk coordinate system and converts it to diagonal column and row coordinates .
|
train
| false
|
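The transform is a 45-degree rotation of the chunk grid into diagonal column/row space:

```python
print(convert_coords(3, 5))  # (8, 2): col = chunkx + chunkz, row = chunkz - chunkx
print(convert_coords(0, 0))  # (0, 0): the origin is fixed
```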
51,089
|
def put_multi(entities, **ctx_options):
    return [future.get_result() for future in put_multi_async(entities, **ctx_options)]
|
[
"def",
"put_multi",
"(",
"entities",
",",
"**",
"ctx_options",
")",
":",
"return",
"[",
"future",
".",
"get_result",
"(",
")",
"for",
"future",
"in",
"put_multi_async",
"(",
"entities",
",",
"**",
"ctx_options",
")",
"]"
] |
stores a sequence of model instances .
|
train
| false
|
51,091
|
def volumes_update(context, values_list):
    return IMPL.volumes_update(context, values_list)
|
[
"def",
"volumes_update",
"(",
"context",
",",
"values_list",
")",
":",
"return",
"IMPL",
".",
"volumes_update",
"(",
"context",
",",
"values_list",
")"
] |
set the given properties on a list of volumes and update them .
|
train
| false
|
51,092
|
def _generate_scala_jar(target, sources, resources, env):
    scalac = env['SCALAC']
    java = env['JAVA']
    jar = env['JAR']
    options = ' '.join(env['SCALACFLAGS'])
    classpath = ':'.join(env['JAVACLASSPATH'])
    if (not classpath):
        classpath = blade_util.get_cwd()
    cmd = ('JAVACMD=%s %s -d %s -classpath %s %s %s' % (java, scalac, target, classpath, options, ' '.join(sources)))
    if echospawn(args=[cmd], env=os.environ, sh=None, cmd=None, escape=None):
        return 1
    if resources:
        resources_dir = target.replace('.jar', '.resources')
        if os.path.exists(resources_dir):
            cmd = [('%s uf %s' % (jar, target))]
            for resource in resources:
                cmd.append(("-C '%s' '%s'" % (resources_dir, os.path.relpath(resource, resources_dir))))
            return echospawn(args=cmd, env=os.environ, sh=None, cmd=None, escape=None)
    return None
|
[
"def",
"_generate_scala_jar",
"(",
"target",
",",
"sources",
",",
"resources",
",",
"env",
")",
":",
"scalac",
"=",
"env",
"[",
"'SCALAC'",
"]",
"java",
"=",
"env",
"[",
"'JAVA'",
"]",
"jar",
"=",
"env",
"[",
"'JAR'",
"]",
"options",
"=",
"' '",
".",
"join",
"(",
"env",
"[",
"'SCALACFLAGS'",
"]",
")",
"classpath",
"=",
"':'",
".",
"join",
"(",
"env",
"[",
"'JAVACLASSPATH'",
"]",
")",
"if",
"(",
"not",
"classpath",
")",
":",
"classpath",
"=",
"blade_util",
".",
"get_cwd",
"(",
")",
"cmd",
"=",
"(",
"'JAVACMD=%s %s -d %s -classpath %s %s %s'",
"%",
"(",
"java",
",",
"scalac",
",",
"target",
",",
"classpath",
",",
"options",
",",
"' '",
".",
"join",
"(",
"sources",
")",
")",
")",
"if",
"echospawn",
"(",
"args",
"=",
"[",
"cmd",
"]",
",",
"env",
"=",
"os",
".",
"environ",
",",
"sh",
"=",
"None",
",",
"cmd",
"=",
"None",
",",
"escape",
"=",
"None",
")",
":",
"return",
"1",
"if",
"resources",
":",
"resources_dir",
"=",
"target",
".",
"replace",
"(",
"'.jar'",
",",
"'.resources'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"resources_dir",
")",
":",
"cmd",
"=",
"[",
"(",
"'%s uf %s'",
"%",
"(",
"jar",
",",
"target",
")",
")",
"]",
"for",
"resource",
"in",
"resources",
":",
"cmd",
".",
"append",
"(",
"(",
"\"-C '%s' '%s'\"",
"%",
"(",
"resources_dir",
",",
"os",
".",
"path",
".",
"relpath",
"(",
"resource",
",",
"resources_dir",
")",
")",
")",
")",
"return",
"echospawn",
"(",
"args",
"=",
"cmd",
",",
"env",
"=",
"os",
".",
"environ",
",",
"sh",
"=",
"None",
",",
"cmd",
"=",
"None",
",",
"escape",
"=",
"None",
")",
"return",
"None"
] |
compile scala sources and generate a jar containing the classes and resources .
|
train
| false
|
51,093
|
def upload_view(request):
    return HttpResponse(', '.join(request.FILES.keys()))
|
[
"def",
"upload_view",
"(",
"request",
")",
":",
"return",
"HttpResponse",
"(",
"', '",
".",
"join",
"(",
"request",
".",
"FILES",
".",
"keys",
"(",
")",
")",
")"
] |
returns the keys of request.FILES joined into the response .
|
train
| false
|
51,094
|
def parse_topic(topic, pretty=False):
    parts = topic.split('/')
    dev_id_format = ''
    if pretty:
        dev_id_format = '{} {}'
    else:
        dev_id_format = '{}_{}'
    dev_id = slugify(dev_id_format.format(parts[1], parts[2]))
    host_name = parts[1]
    return (host_name, dev_id)
|
[
"def",
"parse_topic",
"(",
"topic",
",",
"pretty",
"=",
"False",
")",
":",
"parts",
"=",
"topic",
".",
"split",
"(",
"'/'",
")",
"dev_id_format",
"=",
"''",
"if",
"pretty",
":",
"dev_id_format",
"=",
"'{} {}'",
"else",
":",
"dev_id_format",
"=",
"'{}_{}'",
"dev_id",
"=",
"slugify",
"(",
"dev_id_format",
".",
"format",
"(",
"parts",
"[",
"1",
"]",
",",
"parts",
"[",
"2",
"]",
")",
")",
"host_name",
"=",
"parts",
"[",
"1",
"]",
"return",
"(",
"host_name",
",",
"dev_id",
")"
] |
parse an mqtt topic owntracks/user/dev .
|
train
| false
|
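A sketch with a stand-in `slugify` (the real helper comes from the surrounding project):

```python
import re

def slugify(text):
    # stand-in: lowercase and collapse non-word characters to underscores
    return re.sub(r'\W+', '_', text.strip().lower())

print(parse_topic('owntracks/alice/phone'))        # ('alice', 'alice_phone')
print(parse_topic('owntracks/alice/phone', True))  # pretty builds 'alice phone' before slugify
```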
51,096
|
def ConvertToString(value):
    if (type(value) in (int, long)):
        return str(value)
    elif (type(value) == float):
        return repr(value)
    else:
        return value
|
[
"def",
"ConvertToString",
"(",
"value",
")",
":",
"if",
"(",
"type",
"(",
"value",
")",
"in",
"(",
"int",
",",
"long",
")",
")",
":",
"return",
"str",
"(",
"value",
")",
"elif",
"(",
"type",
"(",
"value",
")",
"==",
"float",
")",
":",
"return",
"repr",
"(",
"value",
")",
"else",
":",
"return",
"value"
] |
converts an int or float value to a string; other values are returned unchanged .
|
train
| false
|
51,097
|
def get_introspection_module(namespace):
    mod = sys.modules[('pgi.repository.' + namespace)]
    return getattr(mod, '_introspection_module', mod)
|
[
"def",
"get_introspection_module",
"(",
"namespace",
")",
":",
"mod",
"=",
"sys",
".",
"modules",
"[",
"(",
"'pgi.repository.'",
"+",
"namespace",
")",
"]",
"return",
"getattr",
"(",
"mod",
",",
"'_introspection_module'",
",",
"mod",
")"
] |
raises importerror .
|
train
| false
|
51,098
|
def _read_from_url(url, config=None):
    r = requests.get(url, stream=True, config=config, timeout=config.intersphinx_timeout)
    r.raise_for_status()
    r.raw.url = r.url
    r.raw.read = functools.partial(r.raw.read, decode_content=True)
    return r.raw
|
[
"def",
"_read_from_url",
"(",
"url",
",",
"config",
"=",
"None",
")",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
",",
"config",
"=",
"config",
",",
"timeout",
"=",
"config",
".",
"intersphinx_timeout",
")",
"r",
".",
"raise_for_status",
"(",
")",
"r",
".",
"raw",
".",
"url",
"=",
"r",
".",
"url",
"r",
".",
"raw",
".",
"read",
"=",
"functools",
".",
"partial",
"(",
"r",
".",
"raw",
".",
"read",
",",
"decode_content",
"=",
"True",
")",
"return",
"r",
".",
"raw"
] |
reads data from *url* with an http *get* .
|
train
| false
|
51,100
|
def drop_retention_policy(database, name, **client_args):
    client = _client(**client_args)
    client.drop_retention_policy(name, database)
    return True
|
[
"def",
"drop_retention_policy",
"(",
"database",
",",
"name",
",",
"**",
"client_args",
")",
":",
"client",
"=",
"_client",
"(",
"**",
"client_args",
")",
"client",
".",
"drop_retention_policy",
"(",
"name",
",",
"database",
")",
"return",
"True"
] |
drop a retention policy .
|
train
| true
|
51,101
|
def getIsIntersectingWithinLoop(loop, otherLoop, outsetLoop):
    if euclidean.isLoopIntersectingLoop(loop, otherLoop):
        return True
    return (euclidean.isPathInsideLoop(otherLoop, loop) != euclidean.isPathInsideLoop(otherLoop, outsetLoop))
|
[
"def",
"getIsIntersectingWithinLoop",
"(",
"loop",
",",
"otherLoop",
",",
"outsetLoop",
")",
":",
"if",
"euclidean",
".",
"isLoopIntersectingLoop",
"(",
"loop",
",",
"otherLoop",
")",
":",
"return",
"True",
"return",
"(",
"euclidean",
".",
"isPathInsideLoop",
"(",
"otherLoop",
",",
"loop",
")",
"!=",
"euclidean",
".",
"isPathInsideLoop",
"(",
"otherLoop",
",",
"outsetLoop",
")",
")"
] |
determine if the loop is intersecting or is within the other loop .
|
train
| false
|
51,102
|
@verbose
def _setup_bem(bem, bem_extra, neeg, mri_head_t, verbose=None):
    logger.info('')
    if isinstance(bem, string_types):
        logger.info(('Setting up the BEM model using %s...\n' % bem_extra))
        bem = read_bem_solution(bem)
    if (not isinstance(bem, ConductorModel)):
        raise TypeError('bem must be a string or ConductorModel')
    if bem['is_sphere']:
        logger.info('Using the sphere model.\n')
        if ((len(bem['layers']) == 0) and (neeg > 0)):
            raise RuntimeError('Spherical model has zero shells, cannot use with EEG data')
        if (bem['coord_frame'] != FIFF.FIFFV_COORD_HEAD):
            raise RuntimeError('Spherical model is not in head coordinates')
    else:
        if ((neeg > 0) and (len(bem['surfs']) == 1)):
            raise RuntimeError('Cannot use a homogeneous model in EEG calculations')
        logger.info('Employing the head->MRI coordinate transform with the BEM model.')
        bem['head_mri_t'] = _ensure_trans(mri_head_t, 'head', 'mri')
        logger.info(('BEM model %s is now set up' % op.split(bem_extra)[1]))
    logger.info('')
    return bem
|
[
"@",
"verbose",
"def",
"_setup_bem",
"(",
"bem",
",",
"bem_extra",
",",
"neeg",
",",
"mri_head_t",
",",
"verbose",
"=",
"None",
")",
":",
"logger",
".",
"info",
"(",
"''",
")",
"if",
"isinstance",
"(",
"bem",
",",
"string_types",
")",
":",
"logger",
".",
"info",
"(",
"(",
"'Setting up the BEM model using %s...\\n'",
"%",
"bem_extra",
")",
")",
"bem",
"=",
"read_bem_solution",
"(",
"bem",
")",
"if",
"(",
"not",
"isinstance",
"(",
"bem",
",",
"ConductorModel",
")",
")",
":",
"raise",
"TypeError",
"(",
"'bem must be a string or ConductorModel'",
")",
"if",
"bem",
"[",
"'is_sphere'",
"]",
":",
"logger",
".",
"info",
"(",
"'Using the sphere model.\\n'",
")",
"if",
"(",
"(",
"len",
"(",
"bem",
"[",
"'layers'",
"]",
")",
"==",
"0",
")",
"and",
"(",
"neeg",
">",
"0",
")",
")",
":",
"raise",
"RuntimeError",
"(",
"'Spherical model has zero shells, cannot use with EEG data'",
")",
"if",
"(",
"bem",
"[",
"'coord_frame'",
"]",
"!=",
"FIFF",
".",
"FIFFV_COORD_HEAD",
")",
":",
"raise",
"RuntimeError",
"(",
"'Spherical model is not in head coordinates'",
")",
"else",
":",
"if",
"(",
"(",
"neeg",
">",
"0",
")",
"and",
"(",
"len",
"(",
"bem",
"[",
"'surfs'",
"]",
")",
"==",
"1",
")",
")",
":",
"raise",
"RuntimeError",
"(",
"'Cannot use a homogeneous model in EEG calculations'",
")",
"logger",
".",
"info",
"(",
"'Employing the head->MRI coordinate transform with the BEM model.'",
")",
"bem",
"[",
"'head_mri_t'",
"]",
"=",
"_ensure_trans",
"(",
"mri_head_t",
",",
"'head'",
",",
"'mri'",
")",
"logger",
".",
"info",
"(",
"(",
"'BEM model %s is now set up'",
"%",
"op",
".",
"split",
"(",
"bem_extra",
")",
"[",
"1",
"]",
")",
")",
"logger",
".",
"info",
"(",
"''",
")",
"return",
"bem"
] |
set up a bem for forward computation .
|
train
| false
|
51,103
|
def _acquireLock():
    if _lock:
        _lock.acquire()
|
[
"def",
"_acquireLock",
"(",
")",
":",
"if",
"_lock",
":",
"_lock",
".",
"acquire",
"(",
")"
] |
acquire the module-level lock for serializing access to shared data .
|
train
| false
|
51,106
|
def atomic_method(method):
    def wrapper(*args, **kwargs):
        try:
            method(*args, **kwargs)
        except Exception:
            args[0].exceptionq.put(('A worker thread crashed:\n' + traceback.format_exc()))
    return wrapper
|
[
"def",
"atomic_method",
"(",
"method",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"try",
":",
"method",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"except",
"Exception",
":",
"args",
"[",
"0",
"]",
".",
"exceptionq",
".",
"put",
"(",
"(",
"'A worker thread crashed:\\n'",
"+",
"traceback",
".",
"format_exc",
"(",
")",
")",
")",
"return",
"wrapper"
] |
decorator to catch the exceptions that happen in detached thread atomic tasks and display them in the logs .
|
train
| false
|
51,107
|
def activeCount():
    with _active_limbo_lock:
        return (len(_active) + len(_limbo))
|
[
"def",
"activeCount",
"(",
")",
":",
"with",
"_active_limbo_lock",
":",
"return",
"(",
"len",
"(",
"_active",
")",
"+",
"len",
"(",
"_limbo",
")",
")"
] |
return the number of thread objects currently alive .
|
train
| false
|
51,111
|
def construct_yaml_str(self, node):
    return self.construct_scalar(node)
|
[
"def",
"construct_yaml_str",
"(",
"self",
",",
"node",
")",
":",
"return",
"self",
".",
"construct_scalar",
"(",
"node",
")"
] |
override the default string handling function to always return unicode objects .
|
train
| false
|
51,112
|
def deaccent(text):
    if (not isinstance(text, unicode)):
        text = text.decode('utf8')
    norm = unicodedata.normalize('NFD', text)
    result = u('').join((ch for ch in norm if (unicodedata.category(ch) != 'Mn')))
    return unicodedata.normalize('NFC', result)
|
[
"def",
"deaccent",
"(",
"text",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"text",
",",
"unicode",
")",
")",
":",
"text",
"=",
"text",
".",
"decode",
"(",
"'utf8'",
")",
"norm",
"=",
"unicodedata",
".",
"normalize",
"(",
"'NFD'",
",",
"text",
")",
"result",
"=",
"u",
"(",
"''",
")",
".",
"join",
"(",
"(",
"ch",
"for",
"ch",
"in",
"norm",
"if",
"(",
"unicodedata",
".",
"category",
"(",
"ch",
")",
"!=",
"'Mn'",
")",
")",
")",
"return",
"unicodedata",
".",
"normalize",
"(",
"'NFC'",
",",
"result",
")"
] |
remove accentuation from the given string .
|
train
| true
|
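The snippet is Python 2-flavoured (`unicode`, a `u()` helper); the NFD-decompose, strip, NFC-recompose round trip removes combining marks:

```python
# -*- coding: utf-8 -*-
print(deaccent(u'Šéf chomutovských komunistů'))
# Sef chomutovskych komunistu
```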
51,113
|
def setup_view(view, request, *args, **kwargs):
    view.request = request
    view.args = args
    view.kwargs = kwargs
    return view
|
[
"def",
"setup_view",
"(",
"view",
",",
"request",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"view",
".",
"request",
"=",
"request",
"view",
".",
"args",
"=",
"args",
"view",
".",
"kwargs",
"=",
"kwargs",
"return",
"view"
] |
mimic the callable returned by as_view() .
|
train
| false
|
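Typical use in a Django test, pairing the helper with `RequestFactory` so view methods can be exercised directly; the template name and kwarg are illustrative:

```python
from django.test import RequestFactory
from django.views.generic import TemplateView

request = RequestFactory().get('/fake-path')
view = setup_view(TemplateView(template_name='home.html'), request, pk=1)
# view.request, view.args and view.kwargs are populated as as_view() would do,
# so methods like view.get_context_data() can be unit-tested in isolation
```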
51,114
|
def connect_to_redis():
    global _connection_pool
    if (_connection_pool is None):
        url = config.get(REDIS_URL_SETTING_NAME, REDIS_URL_DEFAULT_VALUE)
        log.debug(u'Using Redis at {}'.format(url))
        _connection_pool = ConnectionPool.from_url(url)
    return Redis(connection_pool=_connection_pool)
|
[
"def",
"connect_to_redis",
"(",
")",
":",
"global",
"_connection_pool",
"if",
"(",
"_connection_pool",
"is",
"None",
")",
":",
"url",
"=",
"config",
".",
"get",
"(",
"REDIS_URL_SETTING_NAME",
",",
"REDIS_URL_DEFAULT_VALUE",
")",
"log",
".",
"debug",
"(",
"u'Using Redis at {}'",
".",
"format",
"(",
"url",
")",
")",
"_connection_pool",
"=",
"ConnectionPool",
".",
"from_url",
"(",
"url",
")",
"return",
"Redis",
"(",
"connection_pool",
"=",
"_connection_pool",
")"
] |
connect to redis .
|
train
| false
|
51,115
|
def _restrict_gain_matrix(G, info):
    if (not (len(info['chs']) == G.shape[0])):
        raise ValueError(("G.shape[0] and length of info['chs'] do not match: %d != %d" % (G.shape[0], len(info['chs']))))
    sel = pick_types(info, meg='grad', ref_meg=False, exclude=[])
    if (len(sel) > 0):
        G = G[sel]
        logger.info((' %d planar channels' % len(sel)))
    else:
        sel = pick_types(info, meg='mag', ref_meg=False, exclude=[])
        if (len(sel) > 0):
            G = G[sel]
            logger.info((' %d magnetometer or axial gradiometer channels' % len(sel)))
        else:
            sel = pick_types(info, meg=False, eeg=True, exclude=[])
            if (len(sel) > 0):
                G = G[sel]
                logger.info((' %d EEG channels' % len(sel)))
            else:
                warn('Could not find MEG or EEG channels')
    return G
|
[
"def",
"_restrict_gain_matrix",
"(",
"G",
",",
"info",
")",
":",
"if",
"(",
"not",
"(",
"len",
"(",
"info",
"[",
"'chs'",
"]",
")",
"==",
"G",
".",
"shape",
"[",
"0",
"]",
")",
")",
":",
"raise",
"ValueError",
"(",
"(",
"\"G.shape[0] and length of info['chs'] do not match: %d != %d\"",
"%",
"(",
"G",
".",
"shape",
"[",
"0",
"]",
",",
"len",
"(",
"info",
"[",
"'chs'",
"]",
")",
")",
")",
")",
"sel",
"=",
"pick_types",
"(",
"info",
",",
"meg",
"=",
"'grad'",
",",
"ref_meg",
"=",
"False",
",",
"exclude",
"=",
"[",
"]",
")",
"if",
"(",
"len",
"(",
"sel",
")",
">",
"0",
")",
":",
"G",
"=",
"G",
"[",
"sel",
"]",
"logger",
".",
"info",
"(",
"(",
"' %d planar channels'",
"%",
"len",
"(",
"sel",
")",
")",
")",
"else",
":",
"sel",
"=",
"pick_types",
"(",
"info",
",",
"meg",
"=",
"'mag'",
",",
"ref_meg",
"=",
"False",
",",
"exclude",
"=",
"[",
"]",
")",
"if",
"(",
"len",
"(",
"sel",
")",
">",
"0",
")",
":",
"G",
"=",
"G",
"[",
"sel",
"]",
"logger",
".",
"info",
"(",
"(",
"' %d magnetometer or axial gradiometer channels'",
"%",
"len",
"(",
"sel",
")",
")",
")",
"else",
":",
"sel",
"=",
"pick_types",
"(",
"info",
",",
"meg",
"=",
"False",
",",
"eeg",
"=",
"True",
",",
"exclude",
"=",
"[",
"]",
")",
"if",
"(",
"len",
"(",
"sel",
")",
">",
"0",
")",
":",
"G",
"=",
"G",
"[",
"sel",
"]",
"logger",
".",
"info",
"(",
"(",
"' %d EEG channels'",
"%",
"len",
"(",
"sel",
")",
")",
")",
"else",
":",
"warn",
"(",
"'Could not find MEG or EEG channels'",
")",
"return",
"G"
] |
restrict gain matrix entries for optimal depth weighting .
|
train
| false
|
51,116
|
def _get_host_ref(service_instance, host, host_name=None):
search_index = salt.utils.vmware.get_inventory(service_instance).searchIndex
if host_name:
host_ref = search_index.FindByDnsName(dnsName=host_name, vmSearch=False)
else:
host_ref = search_index.FindByDnsName(dnsName=host, vmSearch=False)
if (host_ref is None):
host_ref = search_index.FindByIp(ip=host, vmSearch=False)
return host_ref
|
[
"def",
"_get_host_ref",
"(",
"service_instance",
",",
"host",
",",
"host_name",
"=",
"None",
")",
":",
"search_index",
"=",
"salt",
".",
"utils",
".",
"vmware",
".",
"get_inventory",
"(",
"service_instance",
")",
".",
"searchIndex",
"if",
"host_name",
":",
"host_ref",
"=",
"search_index",
".",
"FindByDnsName",
"(",
"dnsName",
"=",
"host_name",
",",
"vmSearch",
"=",
"False",
")",
"else",
":",
"host_ref",
"=",
"search_index",
".",
"FindByDnsName",
"(",
"dnsName",
"=",
"host",
",",
"vmSearch",
"=",
"False",
")",
"if",
"(",
"host_ref",
"is",
"None",
")",
":",
"host_ref",
"=",
"search_index",
".",
"FindByIp",
"(",
"ip",
"=",
"host",
",",
"vmSearch",
"=",
"False",
")",
"return",
"host_ref"
] |
helper function that returns a host object either from the host location or the host_name .
|
train
| true
|
51,117
|
def simulate_get(app, path, **kwargs):
return simulate_request(app, 'GET', path, **kwargs)
|
[
"def",
"simulate_get",
"(",
"app",
",",
"path",
",",
"**",
"kwargs",
")",
":",
"return",
"simulate_request",
"(",
"app",
",",
"'GET'",
",",
"path",
",",
"**",
"kwargs",
")"
] |
simulates a get request to a wsgi application .
|
train
| false
|
51,118
|
def get_id_pairs(track_list):
return [(t[u'id'], t.get(u'playlistEntryId')) for t in track_list]
|
[
"def",
"get_id_pairs",
"(",
"track_list",
")",
":",
"return",
"[",
"(",
"t",
"[",
"u'id'",
"]",
",",
"t",
".",
"get",
"(",
"u'playlistEntryId'",
")",
")",
"for",
"t",
"in",
"track_list",
"]"
] |
create a list of tuples from a list of tracks .
|
train
| false
|
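Example — a quick sketch with a hypothetical track list; the playlistEntryId key is optional, so .get() fills in None:

tracks = [
    {u'id': u'abc123', u'playlistEntryId': u'pe-1'},
    {u'id': u'def456'},
]
print(get_id_pairs(tracks))
# -> [(u'abc123', u'pe-1'), (u'def456', None)]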
51,119
|
def startKeepingErrors():
warnings.warn('log.startKeepingErrors is deprecated since Twisted 2.5', category=DeprecationWarning, stacklevel=2)
global _keepErrors
_keepErrors = 1
|
[
"def",
"startKeepingErrors",
"(",
")",
":",
"warnings",
".",
"warn",
"(",
"'log.startKeepingErrors is deprecated since Twisted 2.5'",
",",
"category",
"=",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
")",
"global",
"_keepErrors",
"_keepErrors",
"=",
"1"
] |
deprecated in twisted 2.5 .
|
train
| false
|
51,120
|
def add_configuration_error(name, message):
errors = cache.get(u'configuration-errors', [])
errors.append({u'name': name, u'message': message})
cache.set(u'configuration-errors', errors)
|
[
"def",
"add_configuration_error",
"(",
"name",
",",
"message",
")",
":",
"errors",
"=",
"cache",
".",
"get",
"(",
"u'configuration-errors'",
",",
"[",
"]",
")",
"errors",
".",
"append",
"(",
"{",
"u'name'",
":",
"name",
",",
"u'message'",
":",
"message",
"}",
")",
"cache",
".",
"set",
"(",
"u'configuration-errors'",
",",
"errors",
")"
] |
logs configuration error .
|
train
| false
|
51,121
|
def get_all_networks(cluster, tenant_id, networks):
uri = ('/ws.v1/lswitch?fields=*&tag=%s&tag_scope=os_tid' % tenant_id)
try:
resp_obj = do_single_request(HTTP_GET, uri, cluster=cluster)
except NvpApiClient.NvpApiException:
raise exception.QuantumException()
if (not resp_obj):
return []
networks_result = copy(networks)
return networks_result
|
[
"def",
"get_all_networks",
"(",
"cluster",
",",
"tenant_id",
",",
"networks",
")",
":",
"uri",
"=",
"(",
"'/ws.v1/lswitch?fields=*&tag=%s&tag_scope=os_tid'",
"%",
"tenant_id",
")",
"try",
":",
"resp_obj",
"=",
"do_single_request",
"(",
"HTTP_GET",
",",
"uri",
",",
"cluster",
"=",
"cluster",
")",
"except",
"NvpApiClient",
".",
"NvpApiException",
":",
"raise",
"exception",
".",
"QuantumException",
"(",
")",
"if",
"(",
"not",
"resp_obj",
")",
":",
"return",
"[",
"]",
"networks_result",
"=",
"copy",
"(",
"networks",
")",
"return",
"networks_result"
] |
append the quantum network uuids we can find in the given cluster to "networks" .
|
train
| false
|
51,122
|
def getNewRepository():
return ExportRepository()
|
[
"def",
"getNewRepository",
"(",
")",
":",
"return",
"ExportRepository",
"(",
")"
] |
get the repository constructor .
|
train
| false
|
51,124
|
def data_type():
if FLAGS.use_fp16:
return tf.float16
else:
return tf.float32
|
[
"def",
"data_type",
"(",
")",
":",
"if",
"FLAGS",
".",
"use_fp16",
":",
"return",
"tf",
".",
"float16",
"else",
":",
"return",
"tf",
".",
"float32"
] |
return the type of the activations .
|
train
| false
|
51,125
|
def _wait_select(conn):
while 1:
try:
state = conn.poll()
if (state == POLL_OK):
break
elif (state == POLL_READ):
select.select([conn.fileno()], [], [], _WAIT_SELECT_TIMEOUT)
elif (state == POLL_WRITE):
select.select([], [conn.fileno()], [], _WAIT_SELECT_TIMEOUT)
else:
raise conn.OperationalError(('bad state from poll: %s' % state))
except KeyboardInterrupt:
conn.cancel()
continue
|
[
"def",
"_wait_select",
"(",
"conn",
")",
":",
"while",
"1",
":",
"try",
":",
"state",
"=",
"conn",
".",
"poll",
"(",
")",
"if",
"(",
"state",
"==",
"POLL_OK",
")",
":",
"break",
"elif",
"(",
"state",
"==",
"POLL_READ",
")",
":",
"select",
".",
"select",
"(",
"[",
"conn",
".",
"fileno",
"(",
")",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"_WAIT_SELECT_TIMEOUT",
")",
"elif",
"(",
"state",
"==",
"POLL_WRITE",
")",
":",
"select",
".",
"select",
"(",
"[",
"]",
",",
"[",
"conn",
".",
"fileno",
"(",
")",
"]",
",",
"[",
"]",
",",
"_WAIT_SELECT_TIMEOUT",
")",
"else",
":",
"raise",
"conn",
".",
"OperationalError",
"(",
"(",
"'bad state from poll: %s'",
"%",
"state",
")",
")",
"except",
"KeyboardInterrupt",
":",
"conn",
".",
"cancel",
"(",
")",
"continue"
] |
copy-pasted from psycopg2 .
|
train
| false
|
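Example — a sketch of installing such a callback; psycopg2.extensions.set_wait_callback is a real hook, while the POLL_* names and _WAIT_SELECT_TIMEOUT are assumed to live in the snippet's module scope:

import select
import psycopg2.extensions
from psycopg2.extensions import POLL_OK, POLL_READ, POLL_WRITE

_WAIT_SELECT_TIMEOUT = 5.0  # assumed module-level constant

# Once registered, psycopg2 runs in "green" mode and drives every
# blocking operation through _wait_select:
psycopg2.extensions.set_wait_callback(_wait_select)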
51,126
|
def _plot_boot_traces(ax, x, boot_data, color, err_kws, **kwargs):
err_kws.setdefault('alpha', 0.25)
err_kws.setdefault('linewidth', 0.25)
if ('lw' in err_kws):
err_kws['linewidth'] = err_kws.pop('lw')
ax.plot(x, boot_data.T, color=color, label='_nolegend_', **err_kws)
|
[
"def",
"_plot_boot_traces",
"(",
"ax",
",",
"x",
",",
"boot_data",
",",
"color",
",",
"err_kws",
",",
"**",
"kwargs",
")",
":",
"err_kws",
".",
"setdefault",
"(",
"'alpha'",
",",
"0.25",
")",
"err_kws",
".",
"setdefault",
"(",
"'linewidth'",
",",
"0.25",
")",
"if",
"(",
"'lw'",
"in",
"err_kws",
")",
":",
"err_kws",
"[",
"'linewidth'",
"]",
"=",
"err_kws",
".",
"pop",
"(",
"'lw'",
")",
"ax",
".",
"plot",
"(",
"x",
",",
"boot_data",
".",
"T",
",",
"color",
"=",
"color",
",",
"label",
"=",
"'_nolegend_'",
",",
"**",
"err_kws",
")"
] |
plot 250 traces from bootstrap .
|
train
| false
|
51,127
|
def _ips_get_pkgname(line):
return line.split()[0].split('@')[0].strip()
|
[
"def",
"_ips_get_pkgname",
"(",
"line",
")",
":",
"return",
"line",
".",
"split",
"(",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'@'",
")",
"[",
"0",
"]",
".",
"strip",
"(",
")"
] |
extracts package name from "pkg list -v" output .
|
train
| false
|
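Example — a sketch against an illustrative line of Solaris "pkg list -v" output (the FMRI shown is made up):

line = 'pkg://solaris/text/less@458,5.11-0.175.1.0.0.24.0:20120904T172128Z i--'
print(_ips_get_pkgname(line))
# -> 'pkg://solaris/text/less'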
51,128
|
def libvlc_vlm_release(p_instance):
f = (_Cfunctions.get('libvlc_vlm_release', None) or _Cfunction('libvlc_vlm_release', ((1,),), None, None, Instance))
return f(p_instance)
|
[
"def",
"libvlc_vlm_release",
"(",
"p_instance",
")",
":",
"f",
"=",
"(",
"_Cfunctions",
".",
"get",
"(",
"'libvlc_vlm_release'",
",",
"None",
")",
"or",
"_Cfunction",
"(",
"'libvlc_vlm_release'",
",",
"(",
"(",
"1",
",",
")",
",",
")",
",",
"None",
",",
"None",
",",
"Instance",
")",
")",
"return",
"f",
"(",
"p_instance",
")"
] |
release the vlm instance related to the given instance .
|
train
| false
|
51,129
|
def _fix_fill(fill):
if (LooseVersion(scipy.__version__) < LooseVersion('0.12')):
fill = fill[:, np.newaxis]
return fill
|
[
"def",
"_fix_fill",
"(",
"fill",
")",
":",
"if",
"(",
"LooseVersion",
"(",
"scipy",
".",
"__version__",
")",
"<",
"LooseVersion",
"(",
"'0.12'",
")",
")",
":",
"fill",
"=",
"fill",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"return",
"fill"
] |
helper to fix bug on old scipy .
|
train
| false
|
51,132
|
@calculator(65536)
def calculate_perf_counter_rawcount(previous, current, property_name):
return current[property_name]
|
[
"@",
"calculator",
"(",
"65536",
")",
"def",
"calculate_perf_counter_rawcount",
"(",
"previous",
",",
"current",
",",
"property_name",
")",
":",
"return",
"current",
"[",
"property_name",
"]"
] |
calculate a perf_counter_rawcount value .
|
train
| false
|
51,133
|
def get_course_blocks(user, starting_block_usage_key, transformers=None, collected_block_structure=None):
if (not transformers):
transformers = BlockStructureTransformers(COURSE_BLOCK_ACCESS_TRANSFORMERS)
transformers.usage_info = CourseUsageInfo(starting_block_usage_key.course_key, user)
return get_block_structure_manager(starting_block_usage_key.course_key).get_transformed(transformers, starting_block_usage_key, collected_block_structure)
|
[
"def",
"get_course_blocks",
"(",
"user",
",",
"starting_block_usage_key",
",",
"transformers",
"=",
"None",
",",
"collected_block_structure",
"=",
"None",
")",
":",
"if",
"(",
"not",
"transformers",
")",
":",
"transformers",
"=",
"BlockStructureTransformers",
"(",
"COURSE_BLOCK_ACCESS_TRANSFORMERS",
")",
"transformers",
".",
"usage_info",
"=",
"CourseUsageInfo",
"(",
"starting_block_usage_key",
".",
"course_key",
",",
"user",
")",
"return",
"get_block_structure_manager",
"(",
"starting_block_usage_key",
".",
"course_key",
")",
".",
"get_transformed",
"(",
"transformers",
",",
"starting_block_usage_key",
",",
"collected_block_structure",
")"
] |
a higher order function implemented on top of the block_structure .
|
train
| false
|
51,134
|
def namegen_filename(obj, file_data):
return secure_filename(file_data.filename)
|
[
"def",
"namegen_filename",
"(",
"obj",
",",
"file_data",
")",
":",
"return",
"secure_filename",
"(",
"file_data",
".",
"filename",
")"
] |
generate secure filename for uploaded file .
|
train
| false
|
51,135
|
def load_user_configuration(config_path=None):
user_defined_config = dict()
user_config_file = file_find('openstack_user_config.yml', preferred_path=config_path, raise_if_missing=False)
if (user_config_file is not False):
with open(user_config_file, 'rb') as f:
user_defined_config.update((yaml.safe_load(f.read()) or {}))
base_dir = dir_find(config_path, 'conf.d', raise_if_missing=False)
if (base_dir is not False):
_extra_config(user_defined_config, base_dir)
if (not user_defined_config):
raise MissingDataSource((_get_search_paths(config_path) + _get_search_paths(config_path, 'conf.d')))
logger.debug('User configuration loaded from: {}'.format(user_config_file))
return user_defined_config
|
[
"def",
"load_user_configuration",
"(",
"config_path",
"=",
"None",
")",
":",
"user_defined_config",
"=",
"dict",
"(",
")",
"user_config_file",
"=",
"file_find",
"(",
"'openstack_user_config.yml'",
",",
"preferred_path",
"=",
"config_path",
",",
"raise_if_missing",
"=",
"False",
")",
"if",
"(",
"user_config_file",
"is",
"not",
"False",
")",
":",
"with",
"open",
"(",
"user_config_file",
",",
"'rb'",
")",
"as",
"f",
":",
"user_defined_config",
".",
"update",
"(",
"(",
"yaml",
".",
"safe_load",
"(",
"f",
".",
"read",
"(",
")",
")",
"or",
"{",
"}",
")",
")",
"base_dir",
"=",
"dir_find",
"(",
"config_path",
",",
"'conf.d'",
",",
"raise_if_missing",
"=",
"False",
")",
"if",
"(",
"base_dir",
"is",
"not",
"False",
")",
":",
"_extra_config",
"(",
"user_defined_config",
",",
"base_dir",
")",
"if",
"(",
"not",
"user_defined_config",
")",
":",
"raise",
"MissingDataSource",
"(",
"(",
"_get_search_paths",
"(",
"config_path",
")",
"+",
"_get_search_paths",
"(",
"config_path",
",",
"'conf.d'",
")",
")",
")",
"logger",
".",
"debug",
"(",
"'User configuration loaded from: {}'",
".",
"format",
"(",
"user_config_file",
")",
")",
"return",
"user_defined_config"
] |
create a user configuration dictionary from config files .
|
train
| false
|
51,136
|
def immediateAssignmentReject():
a = L2PseudoLength(l2pLength=19)
b = TpPd(pd=6)
c = MessageType(mesType=58)
d = PageModeAndSpareHalfOctets()
f = RequestReference()
g = WaitIndication()
h = RequestReference()
i = WaitIndication()
j = RequestReference()
k = WaitIndication()
l = RequestReference()
m = WaitIndication()
n = IraRestOctets()
packet = ((((((((((((a / b) / c) / d) / f) / g) / h) / i) / j) / k) / l) / m) / n)
return packet
|
[
"def",
"immediateAssignmentReject",
"(",
")",
":",
"a",
"=",
"L2PseudoLength",
"(",
"l2pLength",
"=",
"19",
")",
"b",
"=",
"TpPd",
"(",
"pd",
"=",
"6",
")",
"c",
"=",
"MessageType",
"(",
"mesType",
"=",
"58",
")",
"d",
"=",
"PageModeAndSpareHalfOctets",
"(",
")",
"f",
"=",
"RequestReference",
"(",
")",
"g",
"=",
"WaitIndication",
"(",
")",
"h",
"=",
"RequestReference",
"(",
")",
"i",
"=",
"WaitIndication",
"(",
")",
"j",
"=",
"RequestReference",
"(",
")",
"k",
"=",
"WaitIndication",
"(",
")",
"l",
"=",
"RequestReference",
"(",
")",
"m",
"=",
"WaitIndication",
"(",
")",
"n",
"=",
"IraRestOctets",
"(",
")",
"packet",
"=",
"(",
"(",
"(",
"(",
"(",
"(",
"(",
"(",
"(",
"(",
"(",
"(",
"a",
"/",
"b",
")",
"/",
"c",
")",
"/",
"d",
")",
"/",
"f",
")",
"/",
"g",
")",
"/",
"h",
")",
"/",
"i",
")",
"/",
"j",
")",
"/",
"k",
")",
"/",
"l",
")",
"/",
"m",
")",
"/",
"n",
")",
"return",
"packet"
] |
immediate assignment reject section 9 .
|
train
| true
|
51,138
|
def rand_number(length=0, exclude_numbers=()):
_digits = digits[:]
for excluded_number in set(exclude_numbers):
_digits = _digits.replace(str(excluded_number), '')
if (not _digits):
raise ValueError('Failed return random number.')
ru = ''.join((choice(_digits) for _ in xrange((length or randint(10, 30)))))
return ru
|
[
"def",
"rand_number",
"(",
"length",
"=",
"0",
",",
"exclude_numbers",
"=",
"(",
")",
")",
":",
"_digits",
"=",
"digits",
"[",
":",
"]",
"for",
"excluded_number",
"in",
"set",
"(",
"exclude_numbers",
")",
":",
"_digits",
"=",
"_digits",
".",
"replace",
"(",
"str",
"(",
"excluded_number",
")",
",",
"''",
")",
"if",
"(",
"not",
"_digits",
")",
":",
"raise",
"ValueError",
"(",
"'Failed return random number.'",
")",
"ru",
"=",
"''",
".",
"join",
"(",
"(",
"choice",
"(",
"_digits",
")",
"for",
"_",
"in",
"xrange",
"(",
"(",
"length",
"or",
"randint",
"(",
"10",
",",
"30",
")",
")",
")",
")",
")",
"return",
"ru"
] |
create a random string only with numbers :return: a random string only composed by numbers .
|
train
| false
|
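Example — a usage sketch; the snippet assumes digits from the string module plus choice/randint/xrange in scope (Python 2):

from string import digits
from random import choice, randint

print(rand_number(6))                        # e.g. '302481'
print(rand_number(4, exclude_numbers=(0,)))  # four digits, never containing '0'
print(rand_number())                         # random length between 10 and 30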
51,139
|
def _find_network_id_novanet(cs, net_name):
network_id = None
for net_info in cs.networks.list():
if (net_name == net_info.label):
if (network_id is not None):
msg = (_("Multiple network name matches found for name '%s', use network ID to be more specific.") % net_name)
raise exceptions.NoUniqueMatch(msg)
else:
network_id = net_info.id
if (network_id is None):
msg = (_("No network name match for name '%s'") % net_name)
raise exceptions.ResourceNotFound((msg % {'network': net_name}))
else:
return network_id
|
[
"def",
"_find_network_id_novanet",
"(",
"cs",
",",
"net_name",
")",
":",
"network_id",
"=",
"None",
"for",
"net_info",
"in",
"cs",
".",
"networks",
".",
"list",
"(",
")",
":",
"if",
"(",
"net_name",
"==",
"net_info",
".",
"label",
")",
":",
"if",
"(",
"network_id",
"is",
"not",
"None",
")",
":",
"msg",
"=",
"(",
"_",
"(",
"\"Multiple network name matches found for name '%s', use network ID to be more specific.\"",
")",
"%",
"net_name",
")",
"raise",
"exceptions",
".",
"NoUniqueMatch",
"(",
"msg",
")",
"else",
":",
"network_id",
"=",
"net_info",
".",
"id",
"if",
"(",
"network_id",
"is",
"None",
")",
":",
"msg",
"=",
"(",
"_",
"(",
"\"No network name match for name '%s'\"",
")",
"%",
"net_name",
")",
"raise",
"exceptions",
".",
"ResourceNotFound",
"(",
"(",
"msg",
"%",
"{",
"'network'",
":",
"net_name",
"}",
")",
")",
"else",
":",
"return",
"network_id"
] |
get unique network id from network name .
|
train
| false
|
51,140
|
def generate_sharable_link_for_repository_in_tool_shed(repository, changeset_revision=None):
base_url = web.url_for('/', qualified=True).rstrip('/')
(protocol, base) = base_url.split('://')
sharable_url = ('%s://%s/view/%s/%s' % (protocol, base, repository.user.username, repository.name))
if changeset_revision:
sharable_url += ('/%s' % changeset_revision)
return sharable_url
|
[
"def",
"generate_sharable_link_for_repository_in_tool_shed",
"(",
"repository",
",",
"changeset_revision",
"=",
"None",
")",
":",
"base_url",
"=",
"web",
".",
"url_for",
"(",
"'/'",
",",
"qualified",
"=",
"True",
")",
".",
"rstrip",
"(",
"'/'",
")",
"(",
"protocol",
",",
"base",
")",
"=",
"base_url",
".",
"split",
"(",
"'://'",
")",
"sharable_url",
"=",
"(",
"'%s://%s/view/%s/%s'",
"%",
"(",
"protocol",
",",
"base",
",",
"repository",
".",
"user",
".",
"username",
",",
"repository",
".",
"name",
")",
")",
"if",
"changeset_revision",
":",
"sharable_url",
"+=",
"(",
"'/%s'",
"%",
"changeset_revision",
")",
"return",
"sharable_url"
] |
generate the url for sharing a repository that is in the tool shed .
|
train
| false
|
51,141
|
def get_recommendations(srs, count=10, source=SRC_MULTIREDDITS, to_omit=None, match_set=True, over18=False):
srs = tup(srs)
to_omit = (tup(to_omit) if to_omit else [])
rec_id36s = SRRecommendation.for_srs([sr._id36 for sr in srs], to_omit, (count * 2), source, match_set=match_set)
rec_srs = Subreddit._byID36(rec_id36s, return_dict=False)
filtered = [sr for sr in rec_srs if is_visible(sr)]
if ((not over18) and (not any((sr.over_18 for sr in srs)))):
filtered = [sr for sr in filtered if (not sr.over_18)]
return filtered[:count]
|
[
"def",
"get_recommendations",
"(",
"srs",
",",
"count",
"=",
"10",
",",
"source",
"=",
"SRC_MULTIREDDITS",
",",
"to_omit",
"=",
"None",
",",
"match_set",
"=",
"True",
",",
"over18",
"=",
"False",
")",
":",
"srs",
"=",
"tup",
"(",
"srs",
")",
"to_omit",
"=",
"(",
"tup",
"(",
"to_omit",
")",
"if",
"to_omit",
"else",
"[",
"]",
")",
"rec_id36s",
"=",
"SRRecommendation",
".",
"for_srs",
"(",
"[",
"sr",
".",
"_id36",
"for",
"sr",
"in",
"srs",
"]",
",",
"to_omit",
",",
"(",
"count",
"*",
"2",
")",
",",
"source",
",",
"match_set",
"=",
"match_set",
")",
"rec_srs",
"=",
"Subreddit",
".",
"_byID36",
"(",
"rec_id36s",
",",
"return_dict",
"=",
"False",
")",
"filtered",
"=",
"[",
"sr",
"for",
"sr",
"in",
"rec_srs",
"if",
"is_visible",
"(",
"sr",
")",
"]",
"if",
"(",
"(",
"not",
"over18",
")",
"and",
"(",
"not",
"any",
"(",
"(",
"sr",
".",
"over_18",
"for",
"sr",
"in",
"srs",
")",
")",
")",
")",
":",
"filtered",
"=",
"[",
"sr",
"for",
"sr",
"in",
"filtered",
"if",
"(",
"not",
"sr",
".",
"over_18",
")",
"]",
"return",
"filtered",
"[",
":",
"count",
"]"
] |
return subreddits recommended if you like the given subreddits .
|
train
| false
|
51,144
|
def process_sentence(sentence, start_word='<S>', end_word='</S>'):
try:
import nltk
except:
raise Exception('Hint : NLTK is required.')
if (start_word is not None):
process_sentence = [start_word]
else:
process_sentence = []
process_sentence.extend(nltk.tokenize.word_tokenize(sentence.lower()))
if (end_word is not None):
process_sentence.append(end_word)
return process_sentence
|
[
"def",
"process_sentence",
"(",
"sentence",
",",
"start_word",
"=",
"'<S>'",
",",
"end_word",
"=",
"'</S>'",
")",
":",
"try",
":",
"import",
"nltk",
"except",
":",
"raise",
"Exception",
"(",
"'Hint : NLTK is required.'",
")",
"if",
"(",
"start_word",
"is",
"not",
"None",
")",
":",
"process_sentence",
"=",
"[",
"start_word",
"]",
"else",
":",
"process_sentence",
"=",
"[",
"]",
"process_sentence",
".",
"extend",
"(",
"nltk",
".",
"tokenize",
".",
"word_tokenize",
"(",
"sentence",
".",
"lower",
"(",
")",
")",
")",
"if",
"(",
"end_word",
"is",
"not",
"None",
")",
":",
"process_sentence",
".",
"append",
"(",
"end_word",
")",
"return",
"process_sentence"
] |
converts a sentence string into a list of string words .
|
train
| true
|
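Example — a usage sketch; NLTK and its 'punkt' tokenizer data must be installed for word_tokenize to succeed:

print(process_sentence('Hello, world!'))
# -> ['<S>', 'hello', ',', 'world', '!', '</S>']
print(process_sentence('Hello, world!', start_word=None, end_word=None))
# -> ['hello', ',', 'world', '!']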
51,146
|
def test_existing_path_FileLink():
tf = NamedTemporaryFile()
fl = display.FileLink(tf.name)
actual = fl._repr_html_()
expected = ("<a href='%s' target='_blank'>%s</a><br>" % (tf.name, tf.name))
nt.assert_equal(actual, expected)
|
[
"def",
"test_existing_path_FileLink",
"(",
")",
":",
"tf",
"=",
"NamedTemporaryFile",
"(",
")",
"fl",
"=",
"display",
".",
"FileLink",
"(",
"tf",
".",
"name",
")",
"actual",
"=",
"fl",
".",
"_repr_html_",
"(",
")",
"expected",
"=",
"(",
"\"<a href='%s' target='_blank'>%s</a><br>\"",
"%",
"(",
"tf",
".",
"name",
",",
"tf",
".",
"name",
")",
")",
"nt",
".",
"assert_equal",
"(",
"actual",
",",
"expected",
")"
] |
filelink: calling _repr_html_ functions as expected on existing filepath .
|
train
| false
|
51,147
|
def to_e164(name, origin=public_enum_domain, want_plus_prefix=True):
if (not (origin is None)):
name = name.relativize(origin)
dlabels = [d for d in name.labels if (d.isdigit() and (len(d) == 1))]
if (len(dlabels) != len(name.labels)):
raise dns.exception.SyntaxError('non-digit labels in ENUM domain name')
dlabels.reverse()
text = ''.join(dlabels)
if want_plus_prefix:
text = ('+' + text)
return text
|
[
"def",
"to_e164",
"(",
"name",
",",
"origin",
"=",
"public_enum_domain",
",",
"want_plus_prefix",
"=",
"True",
")",
":",
"if",
"(",
"not",
"(",
"origin",
"is",
"None",
")",
")",
":",
"name",
"=",
"name",
".",
"relativize",
"(",
"origin",
")",
"dlabels",
"=",
"[",
"d",
"for",
"d",
"in",
"name",
".",
"labels",
"if",
"(",
"d",
".",
"isdigit",
"(",
")",
"and",
"(",
"len",
"(",
"d",
")",
"==",
"1",
")",
")",
"]",
"if",
"(",
"len",
"(",
"dlabels",
")",
"!=",
"len",
"(",
"name",
".",
"labels",
")",
")",
":",
"raise",
"dns",
".",
"exception",
".",
"SyntaxError",
"(",
"'non-digit labels in ENUM domain name'",
")",
"dlabels",
".",
"reverse",
"(",
")",
"text",
"=",
"''",
".",
"join",
"(",
"dlabels",
")",
"if",
"want_plus_prefix",
":",
"text",
"=",
"(",
"'+'",
"+",
"text",
")",
"return",
"text"
] |
convert an enum domain name into an e.164 number .
|
train
| true
|
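Example — a usage sketch with dnspython, mirroring the classic ENUM round trip:

import dns.name

n = dns.name.from_text('2.1.2.1.5.5.5.0.0.8.1.e164.arpa.')
print(to_e164(n))                          # -> '+18005551212'
print(to_e164(n, want_plus_prefix=False))  # -> '18005551212'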
51,148
|
def result_from_tuple(r, app=None):
app = app_or_default(app)
Result = app.AsyncResult
if (not isinstance(r, ResultBase)):
(res, nodes) = r
if nodes:
return app.GroupResult(res, [result_from_tuple(child, app) for child in nodes])
(id, parent) = (res if isinstance(res, (list, tuple)) else (res, None))
if parent:
parent = result_from_tuple(parent, app)
return Result(id, parent=parent)
return r
|
[
"def",
"result_from_tuple",
"(",
"r",
",",
"app",
"=",
"None",
")",
":",
"app",
"=",
"app_or_default",
"(",
"app",
")",
"Result",
"=",
"app",
".",
"AsyncResult",
"if",
"(",
"not",
"isinstance",
"(",
"r",
",",
"ResultBase",
")",
")",
":",
"(",
"res",
",",
"nodes",
")",
"=",
"r",
"if",
"nodes",
":",
"return",
"app",
".",
"GroupResult",
"(",
"res",
",",
"[",
"result_from_tuple",
"(",
"child",
",",
"app",
")",
"for",
"child",
"in",
"nodes",
"]",
")",
"(",
"id",
",",
"parent",
")",
"=",
"(",
"res",
"if",
"isinstance",
"(",
"res",
",",
"(",
"list",
",",
"tuple",
")",
")",
"else",
"(",
"res",
",",
"None",
")",
")",
"if",
"parent",
":",
"parent",
"=",
"result_from_tuple",
"(",
"parent",
",",
"app",
")",
"return",
"Result",
"(",
"id",
",",
"parent",
"=",
"parent",
")",
"return",
"r"
] |
deserialize result from tuple .
|
train
| false
|
51,149
|
def parse_custom_settings(sections, custom_settings_list, origin, line_parser):
for setting_definition in custom_settings_list:
(_, key_tuples, value, _) = line_parser.parse(setting_definition)
for key_tuple in key_tuples:
append_to_sections(sections, key=key_tuple[1], value=value, origin=origin, section_name=key_tuple[0], from_cli=True)
|
[
"def",
"parse_custom_settings",
"(",
"sections",
",",
"custom_settings_list",
",",
"origin",
",",
"line_parser",
")",
":",
"for",
"setting_definition",
"in",
"custom_settings_list",
":",
"(",
"_",
",",
"key_tuples",
",",
"value",
",",
"_",
")",
"=",
"line_parser",
".",
"parse",
"(",
"setting_definition",
")",
"for",
"key_tuple",
"in",
"key_tuples",
":",
"append_to_sections",
"(",
"sections",
",",
"key",
"=",
"key_tuple",
"[",
"1",
"]",
",",
"value",
"=",
"value",
",",
"origin",
"=",
"origin",
",",
"section_name",
"=",
"key_tuple",
"[",
"0",
"]",
",",
"from_cli",
"=",
"True",
")"
] |
parses the custom settings given to coala via -s something=value .
|
train
| false
|
51,151
|
def assert3xx(response, expected_url, status_code=302, target_status_code=200):
if hasattr(response, 'redirect_chain'):
assert (len(response.redirect_chain) > 0), ("Response didn't redirect as expected: Response code was %d (expected %d)" % (response.status_code, status_code))
(url, status_code) = response.redirect_chain[(-1)]
assert (response.status_code == target_status_code), ("Response didn't redirect as expected: Final Response code was %d (expected %d)" % (response.status_code, target_status_code))
else:
assert (response.status_code == status_code), ("Response didn't redirect as expected: Response code was %d (expected %d)" % (response.status_code, status_code))
url = response['Location']
(scheme, netloc, path, query, fragment) = urlsplit(url)
(e_scheme, e_netloc, e_path, e_query, e_fragment) = urlsplit(expected_url)
if ((scheme and (not e_scheme)) and (netloc and (not e_netloc))):
expected_url = urlunsplit(('http', 'testserver', e_path, e_query, e_fragment))
msg = ("Response redirected to '%s', expected '%s'" % (url, expected_url))
assert (url == expected_url), msg
|
[
"def",
"assert3xx",
"(",
"response",
",",
"expected_url",
",",
"status_code",
"=",
"302",
",",
"target_status_code",
"=",
"200",
")",
":",
"if",
"hasattr",
"(",
"response",
",",
"'redirect_chain'",
")",
":",
"assert",
"(",
"len",
"(",
"response",
".",
"redirect_chain",
")",
">",
"0",
")",
",",
"(",
"\"Response didn't redirect as expected: Response code was %d (expected %d)\"",
"%",
"(",
"response",
".",
"status_code",
",",
"status_code",
")",
")",
"(",
"url",
",",
"status_code",
")",
"=",
"response",
".",
"redirect_chain",
"[",
"(",
"-",
"1",
")",
"]",
"assert",
"(",
"response",
".",
"status_code",
"==",
"target_status_code",
")",
",",
"(",
"\"Response didn't redirect as expected: Final Response code was %d (expected %d)\"",
"%",
"(",
"response",
".",
"status_code",
",",
"target_status_code",
")",
")",
"else",
":",
"assert",
"(",
"response",
".",
"status_code",
"==",
"status_code",
")",
",",
"(",
"\"Response didn't redirect as expected: Response code was %d (expected %d)\"",
"%",
"(",
"response",
".",
"status_code",
",",
"status_code",
")",
")",
"url",
"=",
"response",
"[",
"'Location'",
"]",
"(",
"scheme",
",",
"netloc",
",",
"path",
",",
"query",
",",
"fragment",
")",
"=",
"urlsplit",
"(",
"url",
")",
"(",
"e_scheme",
",",
"e_netloc",
",",
"e_path",
",",
"e_query",
",",
"e_fragment",
")",
"=",
"urlsplit",
"(",
"expected_url",
")",
"if",
"(",
"(",
"scheme",
"and",
"(",
"not",
"e_scheme",
")",
")",
"and",
"(",
"netloc",
"and",
"(",
"not",
"e_netloc",
")",
")",
")",
":",
"expected_url",
"=",
"urlunsplit",
"(",
"(",
"'http'",
",",
"'testserver'",
",",
"e_path",
",",
"e_query",
",",
"e_fragment",
")",
")",
"msg",
"=",
"(",
"\"Response redirected to '%s', expected '%s'\"",
"%",
"(",
"url",
",",
"expected_url",
")",
")",
"assert",
"(",
"url",
"==",
"expected_url",
")",
",",
"msg"
] |
asserts that the redirect and the final redirect match the expected url .
|
train
| false
|
51,152
|
def countOf(a, b):
count = 0
for i in a:
if (i == b):
count += 1
return count
|
[
"def",
"countOf",
"(",
"a",
",",
"b",
")",
":",
"count",
"=",
"0",
"for",
"i",
"in",
"a",
":",
"if",
"(",
"i",
"==",
"b",
")",
":",
"count",
"+=",
"1",
"return",
"count"
] |
return the number of times b occurs in a .
|
train
| true
|
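Example — a quick sketch (this mirrors operator.countOf from the stdlib):

print(countOf([1, 2, 2, 3, 2], 2))  # -> 3
print(countOf('banana', 'a'))       # -> 3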
51,153
|
def barycentric_interpolate(xi, yi, x, axis=0):
return BarycentricInterpolator(xi, yi, axis=axis)(x)
|
[
"def",
"barycentric_interpolate",
"(",
"xi",
",",
"yi",
",",
"x",
",",
"axis",
"=",
"0",
")",
":",
"return",
"BarycentricInterpolator",
"(",
"xi",
",",
"yi",
",",
"axis",
"=",
"axis",
")",
"(",
"x",
")"
] |
convenience function for polynomial interpolation .
|
train
| false
|
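Example — a usage sketch matching the scipy.interpolate API; it evaluates the degree-6 interpolating polynomial through seven samples of sin:

import numpy as np

xi = np.linspace(0, np.pi, 7)
yi = np.sin(xi)
x = np.linspace(0, np.pi, 50)
y = barycentric_interpolate(xi, yi, x)
print(np.max(np.abs(y - np.sin(x))))  # small interpolation error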
51,156
|
def create_factory_from_config(table, config):
args = config.copy()
del args['name']
key = args.pop('type')
try:
factory = table[key]
except KeyError:
return None
return partial(factory, **args)
|
[
"def",
"create_factory_from_config",
"(",
"table",
",",
"config",
")",
":",
"args",
"=",
"config",
".",
"copy",
"(",
")",
"del",
"args",
"[",
"'name'",
"]",
"key",
"=",
"args",
".",
"pop",
"(",
"'type'",
")",
"try",
":",
"factory",
"=",
"table",
"[",
"key",
"]",
"except",
"KeyError",
":",
"return",
"None",
"return",
"partial",
"(",
"factory",
",",
"**",
"args",
")"
] |
create a benchmark parameter factory from a configuration stanza .
|
train
| false
|
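Example — a sketch with a hypothetical factory table and config stanza; the snippet itself assumes functools.partial is in scope:

from functools import partial

def uniform(low, high):
    return (low, high)

table = {'uniform': uniform}
config = {'name': 'latency', 'type': 'uniform', 'low': 0.1, 'high': 2.0}

factory = create_factory_from_config(table, config)
print(factory())  # -> (0.1, 2.0)
print(create_factory_from_config(table, {'name': 'x', 'type': 'missing'}))  # -> None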
51,157
|
def urlizetrunc(value, limit):
from django.utils.html import urlize
return urlize(value, trim_url_limit=int(limit), nofollow=True)
|
[
"def",
"urlizetrunc",
"(",
"value",
",",
"limit",
")",
":",
"from",
"django",
".",
"utils",
".",
"html",
"import",
"urlize",
"return",
"urlize",
"(",
"value",
",",
"trim_url_limit",
"=",
"int",
"(",
"limit",
")",
",",
"nofollow",
"=",
"True",
")"
] |
converts urls into clickable links .
|
train
| false
|
51,160
|
def generate_translations(item):
fr_prefix = u'(fran\xe7ais) '
es_prefix = u'(espa\xf1ol) '
oldname = unicode(item.name)
item.name = {'en': oldname, 'fr': (fr_prefix + oldname), 'es': (es_prefix + oldname)}
item.save()
|
[
"def",
"generate_translations",
"(",
"item",
")",
":",
"fr_prefix",
"=",
"u'(fran\\xe7ais) '",
"es_prefix",
"=",
"u'(espa\\xf1ol) '",
"oldname",
"=",
"unicode",
"(",
"item",
".",
"name",
")",
"item",
".",
"name",
"=",
"{",
"'en'",
":",
"oldname",
",",
"'fr'",
":",
"(",
"fr_prefix",
"+",
"oldname",
")",
",",
"'es'",
":",
"(",
"es_prefix",
"+",
"oldname",
")",
"}",
"item",
".",
"save",
"(",
")"
] |
generate french and spanish translations for the given item .
|
train
| false
|
51,162
|
def extract_tables(sql):
parsed = sqlparse.parse(sql)
if (not parsed):
return ()
insert_stmt = (parsed[0].token_first().value.lower() == 'insert')
stream = extract_from_part(parsed[0], stop_at_punctuation=insert_stmt)
identifiers = extract_table_identifiers(stream, allow_functions=(not insert_stmt))
return tuple((i for i in identifiers if i.name))
|
[
"def",
"extract_tables",
"(",
"sql",
")",
":",
"parsed",
"=",
"sqlparse",
".",
"parse",
"(",
"sql",
")",
"if",
"(",
"not",
"parsed",
")",
":",
"return",
"(",
")",
"insert_stmt",
"=",
"(",
"parsed",
"[",
"0",
"]",
".",
"token_first",
"(",
")",
".",
"value",
".",
"lower",
"(",
")",
"==",
"'insert'",
")",
"stream",
"=",
"extract_from_part",
"(",
"parsed",
"[",
"0",
"]",
",",
"stop_at_punctuation",
"=",
"insert_stmt",
")",
"identifiers",
"=",
"extract_table_identifiers",
"(",
"stream",
",",
"allow_functions",
"=",
"(",
"not",
"insert_stmt",
")",
")",
"return",
"tuple",
"(",
"(",
"i",
"for",
"i",
"in",
"identifiers",
"if",
"i",
".",
"name",
")",
")"
] |
extract the table names from an sql statement .
|
train
| false
|
51,163
|
def register_drainer(name):
def _inner(cls):
drainers[name] = cls
return cls
return _inner
|
[
"def",
"register_drainer",
"(",
"name",
")",
":",
"def",
"_inner",
"(",
"cls",
")",
":",
"drainers",
"[",
"name",
"]",
"=",
"cls",
"return",
"cls",
"return",
"_inner"
] |
decorator used to register a new result drainer type .
|
train
| false
|
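Example — a usage sketch; drainers is the module-level registry dict the decorator writes into:

drainers = {}

@register_drainer('default')
class Drainer(object):
    pass

print(drainers['default'] is Drainer)  # -> True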
51,164
|
def get_content_length(environ):
content_length = environ.get('CONTENT_LENGTH')
if (content_length is not None):
try:
return max(0, int(content_length))
except (ValueError, TypeError):
pass
|
[
"def",
"get_content_length",
"(",
"environ",
")",
":",
"content_length",
"=",
"environ",
".",
"get",
"(",
"'CONTENT_LENGTH'",
")",
"if",
"(",
"content_length",
"is",
"not",
"None",
")",
":",
"try",
":",
"return",
"max",
"(",
"0",
",",
"int",
"(",
"content_length",
")",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"pass"
] |
returns the content length from the wsgi environment as an integer .
|
train
| true
|
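Example — a usage sketch with hand-rolled WSGI environ dicts:

print(get_content_length({'CONTENT_LENGTH': '42'}))     # -> 42
print(get_content_length({'CONTENT_LENGTH': '-5'}))     # -> 0 (clamped)
print(get_content_length({'CONTENT_LENGTH': 'bogus'}))  # -> None (falls through)
print(get_content_length({}))                           # -> None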