| id_within_dataset (int64, 1–55.5k) | snippet (string, 19–14.2k chars) | nl (string, 6–352 chars) | split_within_dataset (string, 1 class) | is_duplicated (bool, 2 classes) |
|---|---|---|---|---|
52,005
|
def RequiresPackage(package_name):
def Decorator(test_function):
@functools.wraps(test_function)
def Wrapper(*args, **kwargs):
try:
pkg_resources.get_distribution(package_name)
except pkg_resources.DistributionNotFound:
raise unittest.SkipTest(('Skipping, package %s not installed' % package_name))
return test_function(*args, **kwargs)
return Wrapper
return Decorator
|
skip this test if required package isn't present .
|
train
| false
|
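a minimal usage sketch for the RequiresPackage decorator above, assuming functools, pkg_resources and unittest are importable; the test case and the 'numpy' package name are hypothetical:

import unittest

class NumpyDependentTest(unittest.TestCase):

    @RequiresPackage('numpy')
    def test_array_sum(self):
        # runs only when numpy is installed; otherwise unittest reports a skip
        import numpy
        self.assertEqual(int(numpy.array([1, 2]).sum()), 3)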
52,006
|
@not_implemented_for('directed')
def fiedler_vector(G, weight='weight', normalized=False, tol=1e-08, method='tracemin'):
if (len(G) < 2):
raise nx.NetworkXError('graph has less than two nodes.')
G = _preprocess_graph(G, weight)
if (not nx.is_connected(G)):
raise nx.NetworkXError('graph is not connected.')
if (len(G) == 2):
return array([1.0, (-1.0)])
find_fiedler = _get_fiedler_func(method)
L = nx.laplacian_matrix(G)
x = (None if (method != 'lobpcg') else _rcm_estimate(G, G))
return find_fiedler(L, x, normalized, tol)[1]
|
return the fiedler vector of a connected undirected graph .
|
train
| false
|
52,010
|
def edgelist_for_workflow_steps(steps):
edges = []
steps_to_index = dict(((step, i) for (i, step) in enumerate(steps)))
for step in steps:
edges.append((steps_to_index[step], steps_to_index[step]))
for conn in step.input_connections:
edges.append((steps_to_index[conn.output_step], steps_to_index[conn.input_step]))
return edges
|
create a list of tuples representing edges between workflowsteps based on associated workflowstepconnections .
|
train
| false
|
52,012
|
def dup_lshift(f, n, K):
if (not f):
return f
else:
return (f + ([K.zero] * n))
|
efficiently multiply f by x**n in k[x] .
|
train
| false
|
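a small worked example for dup_lshift above, assuming sympy's dense 'dup' convention of descending coefficients and ZZ from sympy.polys.domains:

from sympy.polys.domains import ZZ

# [1, 0, 2] encodes x**2 + 2; appending n zeros multiplies by x**n
assert dup_lshift([1, 0, 2], 3, ZZ) == [1, 0, 2, 0, 0, 0]  # x**5 + 2*x**3
assert dup_lshift([], 3, ZZ) == []  # the zero polynomial is returned unchanged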
52,013
|
def freqs(b, a, worN=None, plot=None):
if (worN is None):
w = findfreqs(b, a, 200)
elif isinstance(worN, int):
N = worN
w = findfreqs(b, a, N)
else:
w = worN
w = atleast_1d(w)
s = (1j * w)
h = (polyval(b, s) / polyval(a, s))
if (plot is not None):
plot(w, h)
return (w, h)
|
compute frequency response of analog filter .
|
train
| false
|
52,014
|
def getChainMatrixSVG(matrixSVG, xmlElement):
matrixSVG = matrixSVG.getOtherTimesSelf(getMatrixSVG(xmlElement).tricomplex)
if (xmlElement.parent != None):
matrixSVG = getChainMatrixSVG(matrixSVG, xmlElement.parent)
return matrixSVG
|
get chain matrixsvg by svgelement .
|
train
| false
|
52,015
|
def ctrl_c(signum, frame):
SHUTDOWN_EVENT.set()
print_('\nCancelling...')
sys.exit(0)
|
catch ctrl-c key sequence and set a shutdown_event for our threaded operations .
|
train
| false
|
52,016
|
def denoms(eq, symbols=None):
pot = preorder_traversal(eq)
dens = set()
for p in pot:
den = denom(p)
if (den is S.One):
continue
for d in Mul.make_args(den):
dens.add(d)
if (not symbols):
return dens
rv = []
for d in dens:
free = d.free_symbols
if any(((s in free) for s in symbols)):
rv.append(d)
return set(rv)
|
return set of all denominators that appear in eq that contain any symbol in iterable symbols; if symbols is none then all denominators will be returned .
|
train
| false
|
52,017
|
def Unicode2Str(s, d):
return s.encode()
|
convert a unicode object to a string using the default encoding .
|
train
| false
|
52,019
|
@conf.commands.register
def p0f(pkt):
(db, sig) = packet2p0f(pkt)
if db:
pb = db.get_base()
else:
pb = []
if (not pb):
warning('p0f base empty.')
return []
r = []
max = (len(sig[4].split(',')) + 5)
for b in pb:
d = p0f_correl(sig, b)
if (d == max):
r.append((b[6], b[7], (b[1] - pkt[IP].ttl)))
return r
|
passive os fingerprinting: which os emitted this tcp packet ? p0f -> accuracy .
|
train
| true
|
52,020
|
def load_ptb_dataset(path='data/ptb/'):
print 'Load or Download Penn TreeBank (PTB) dataset > {}'.format(path)
filename = 'simple-examples.tgz'
url = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/'
maybe_download_and_extract(filename, path, url, extract=True)
data_path = os.path.join(path, 'simple-examples', 'data')
train_path = os.path.join(data_path, 'ptb.train.txt')
valid_path = os.path.join(data_path, 'ptb.valid.txt')
test_path = os.path.join(data_path, 'ptb.test.txt')
word_to_id = nlp.build_vocab(nlp.read_words(train_path))
train_data = nlp.words_to_word_ids(nlp.read_words(train_path), word_to_id)
valid_data = nlp.words_to_word_ids(nlp.read_words(valid_path), word_to_id)
test_data = nlp.words_to_word_ids(nlp.read_words(test_path), word_to_id)
vocabulary = len(word_to_id)
return (train_data, valid_data, test_data, vocabulary)
|
penn treebank dataset is used in many language modeling papers .
|
train
| true
|
52,021
|
def test_successful_update(config_stub, basedir, download_stub, data_tmpdir, tmpdir, win_registry, caplog):
config_stub.data = {'content': {'host-block-lists': generic_blocklists(tmpdir), 'host-blocking-enabled': True, 'host-blocking-whitelist': None}}
host_blocker = adblock.HostBlocker()
host_blocker.adblock_update()
while host_blocker._in_progress:
current_download = host_blocker._in_progress[0]
with caplog.at_level(logging.ERROR):
current_download.finished.emit()
host_blocker.read_hosts()
assert_urls(host_blocker, whitelisted=[])
|
ensure hosts from host-block-lists are blocked after an update .
|
train
| false
|
52,023
|
def set_clipboard(text):
if (not text):
return
clipboard = QtWidgets.QApplication.clipboard()
clipboard.setText(text, QtGui.QClipboard.Clipboard)
clipboard.setText(text, QtGui.QClipboard.Selection)
persist_clipboard()
|
sets the copy/paste buffer to text .
|
train
| false
|
52,024
|
def cancelRepository(repository):
getReadRepository(repository)
for setting in repository.displayEntities:
if (setting in repository.preferences):
setting.setStateToValue()
|
read the repository then set all the entities to the read repository values .
|
train
| false
|
52,025
|
def _filter_activity_by_user(activity_list, users=[]):
if (not len(users)):
return activity_list
new_list = []
for activity in activity_list:
if (activity.user_id not in users):
new_list.append(activity)
return new_list
|
return the given activity_list with activities from the specified users removed .
|
train
| false
|
52,027
|
def create_router(name, ext_network=None, admin_state_up=True, profile=None):
conn = _auth(profile)
return conn.create_router(name, ext_network, admin_state_up)
|
creates a new router . cli example: .
|
train
| true
|
52,028
|
def raise_http_conflict_for_instance_invalid_state(exc, action, server_id):
attr = exc.kwargs.get('attr')
state = exc.kwargs.get('state')
if ((attr is not None) and (state is not None)):
msg = (_("Cannot '%(action)s' instance %(server_id)s while it is in %(attr)s %(state)s") % {'action': action, 'attr': attr, 'state': state, 'server_id': server_id})
else:
msg = (_("Instance %(server_id)s is in an invalid state for '%(action)s'") % {'action': action, 'server_id': server_id})
raise webob.exc.HTTPConflict(explanation=msg)
|
return a webob .
|
train
| false
|
52,029
|
def random_genome():
alphabet = TestAlphabet()
new_genome = ''
for letter in range(3):
new_genome += random.choice(alphabet.letters)
return MutableSeq(new_genome, alphabet)
|
return a random genome string .
|
train
| false
|
52,030
|
def chconfig(cmd, *args, **kwargs):
for k in kwargs.keys():
if k.startswith('__pub_'):
kwargs.pop(k)
if (('dracr.' + cmd) not in __salt__):
ret = {'retcode': (-1), 'message': (('dracr.' + cmd) + ' is not available')}
else:
ret = __salt__[('dracr.' + cmd)](*args, **kwargs)
if (cmd == 'change_password'):
if ('username' in kwargs):
__opts__['proxy']['admin_username'] = kwargs['username']
DETAILS['admin_username'] = kwargs['username']
if ('password' in kwargs):
__opts__['proxy']['admin_password'] = kwargs['password']
DETAILS['admin_password'] = kwargs['password']
return ret
|
this function is called by the :mod:salt .
|
train
| true
|
52,031
|
def pythonpath():
return {'pythonpath': sys.path}
|
return the python path .
|
train
| false
|
52,034
|
@register.filter
def docs_url(path_name):
return get_docs_url(path_name)
|
returns the absolute url to path_name in the rtd docs .
|
train
| false
|
52,035
|
def demo_grid_with_each_cbar_labelled(fig):
grid = ImageGrid(fig, 144, nrows_ncols=(2, 2), axes_pad=(0.45, 0.15), label_mode='1', share_all=True, cbar_location='right', cbar_mode='each', cbar_size='7%', cbar_pad='2%')
(Z, extent) = get_demo_image()
limits = ((0, 1), ((-2), 2), ((-1.7), 1.4), ((-1.5), 1))
for i in range(4):
im = grid[i].imshow(Z, extent=extent, interpolation='nearest', vmin=limits[i][0], vmax=limits[i][1])
grid.cbar_axes[i].colorbar(im)
for (i, cax) in enumerate(grid.cbar_axes):
cax.set_yticks((limits[i][0], limits[i][1]))
grid.axes_llc.set_xticks([(-2), 0, 2])
grid.axes_llc.set_yticks([(-2), 0, 2])
|
a grid of 2x2 images .
|
train
| false
|
52,036
|
def depart_snippet_latex(self, node):
pass
|
latex document generator depart handler .
|
train
| false
|
52,037
|
def start_request(request_id):
with _request_states_lock:
_request_states[request_id] = RequestState(request_id)
|
starts a request with the provided request id .
|
train
| false
|
52,038
|
def revrt(X, m=None):
if (m is None):
m = len(X)
i = int(((m // 2) + 1))
y = (X[:i] + (np.r_[(0, X[i:], 0)] * 1j))
return (np.fft.irfft(y) * m)
|
inverse of forrt .
|
train
| false
|
52,039
|
def wait_started(name, path=None, timeout=300):
if (not exists(name, path=path)):
        raise CommandExecutionError('Container {0} does not exist'.format(name))
if (not (state(name, path=path) == 'running')):
raise CommandExecutionError('Container {0} is not running'.format(name))
ret = False
if running_systemd(name, path=path):
test_started = test_sd_started_state
logger = log.error
else:
test_started = test_bare_started_state
logger = log.debug
now = time.time()
expire = (now + timeout)
now = time.time()
started = test_started(name, path=path)
while ((time.time() < expire) and (not started)):
time.sleep(0.3)
started = test_started(name, path=path)
if (started is None):
        logger('Assuming {0} is started, although we failed to detect that it is fully started correctly'.format(name))
ret = True
else:
ret = started
return ret
|
check that the system has fully inited; this is actually very important for systemd-based containers (see URL). path: path to the container parent, default: /var/lib/lxc .
|
train
| true
|
52,040
|
def three_to_one(s):
i = d3_to_index[s]
return dindex_to_1[i]
|
three letter code to one letter code .
|
train
| false
|
52,041
|
def serialize_type(name, accessor, types):
name = name.strip()
description = type_description(name, types)
if ('struct' in description):
return serialize_struct_type(name, accessor, types)
elif ('template' in description):
return serialize_type_with_template(name, accessor, types)
else:
cast = description.get('cast', dereference_type(name))
return serialize_atomic_type(name, cast, accessor)
|
returns a serialization statement for the given type .
|
train
| false
|
52,042
|
def roles_trans():
roles = {}
for role in ROLE_PERMISSIONS:
roles[role] = trans_role(role)
return roles
|
return dict of roles with translation .
|
train
| false
|
52,043
|
def _check_pandas_index_arguments(index, defaults):
if (not any((isinstance(index, k) for k in (list, tuple)))):
index = [index]
invalid_choices = [e for e in index if (e not in defaults)]
if invalid_choices:
options = [', '.join(e) for e in [invalid_choices, defaults]]
raise ValueError(("[%s] is not an valid option. Valid indexvalues are 'None' or %s" % tuple(options)))
|
check pandas index arguments .
|
train
| false
|
52,045
|
def create_update_gitdir():
if (not os.path.exists(gitdname)):
retcode = subprocess.call(('git clone ' + repo), shell=True, stdout=sys.stdout, stderr=sys.stderr)
if (retcode != 0):
msg = 'There was a problem cloning the repo'
raise Exception(msg)
else:
shutil.rmtree(gitdname)
create_update_gitdir()
|
creates a directory for the local repo if it doesn't exist .
|
train
| false
|
52,046
|
def zk_group_path(key):
project = key.app()
if key.name_space():
namespace = key.name_space()
else:
namespace = ':default'
first_element = key.path().element(0)
kind = first_element.type()
if first_element.has_id():
suffix = ':{}'.format(first_element.id())
else:
suffix = '::{}'.format(first_element.name())
return LOCK_PATH_TEMPLATE.format(project=project, namespace=namespace, group=(kind + suffix))
|
retrieve the zookeeper lock path for a given entity key .
|
train
| false
|
52,048
|
def create_linear_initializer(input_size):
stddev = (1 / math.sqrt(input_size))
return tf.truncated_normal_initializer(stddev=stddev)
|
returns a default initializer for weights or bias of a linear module .
|
train
| false
|
52,049
|
def getProgramsMenuPath():
if (not platform.isWinNT()):
return 'C:\\Windows\\Start Menu\\Programs'
keyname = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders'
hShellFolders = win32api.RegOpenKeyEx(win32con.HKEY_LOCAL_MACHINE, keyname, 0, win32con.KEY_READ)
return win32api.RegQueryValueEx(hShellFolders, 'Common Programs')[0]
|
get the path to the programs menu .
|
train
| false
|
52,050
|
def get_cpi():
rdint = vs.random()
request = Request((vs.MACRO_URL % (vs.P_TYPE['http'], vs.DOMAINS['sina'], rdint, vs.MACRO_TYPE[1], 0, 600, rdint)))
text = urlopen(request, timeout=10).read()
text = (text.decode('gbk') if ct.PY3 else text)
regSym = re.compile('\\,count:(.*?)\\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.CPI_COLS)
df['cpi'] = df['cpi'].astype(float)
return df
|
return dataframe with columns month (reporting month) and cpi (consumer price index) .
|
train
| false
|
52,051
|
def tree_lookup_path(lookup_obj, root_sha, path):
tree = lookup_obj(root_sha)
if (not isinstance(tree, Tree)):
raise NotTreeError(root_sha)
return tree.lookup_path(lookup_obj, path)
|
look up an object in a git tree .
|
train
| false
|
52,052
|
def album_for_id(album_id):
out = []
for plugin in find_plugins():
res = plugin.album_for_id(album_id)
if res:
out.append(res)
return out
|
get albuminfo objects for a given id string .
|
train
| false
|
52,053
|
def imsave(fname, arr, format_str=None):
return _imread.imsave(fname, arr, formatstr=format_str)
|
save an image to disk .
|
train
| false
|
52,054
|
def _fixupRandomEncoderParams(params, minVal, maxVal, minResolution):
encodersDict = params['modelConfig']['modelParams']['sensorParams']['encoders']
for encoder in encodersDict.itervalues():
if (encoder is not None):
if (encoder['type'] == 'RandomDistributedScalarEncoder'):
resolution = max(minResolution, ((maxVal - minVal) / encoder.pop('numBuckets')))
encodersDict['c1']['resolution'] = resolution
|
given model params .
|
train
| true
|
52,055
|
def openstack(registry, xml_parent, data):
tag_prefix = 'jenkins.plugins.openstack.compute.'
if ('instances' in data):
clouds_build_wrapper = XML.SubElement(xml_parent, (tag_prefix + 'JCloudsBuildWrapper'))
instances_wrapper = XML.SubElement(clouds_build_wrapper, 'instancesToRun')
for instance in data['instances']:
instances_to_run = XML.SubElement(instances_wrapper, (tag_prefix + 'InstancesToRun'))
try:
cloud_name = instance['cloud-name']
template_name = instance['template-name']
except KeyError as exception:
raise MissingAttributeError(exception.args[0])
XML.SubElement(instances_to_run, 'cloudName').text = cloud_name
if instance.get('manual-template', False):
XML.SubElement(instances_to_run, 'manualTemplateName').text = template_name
else:
XML.SubElement(instances_to_run, 'templateName').text = template_name
XML.SubElement(instances_to_run, 'count').text = str(instance.get('count', 1))
if data.get('single-use', False):
XML.SubElement(xml_parent, (tag_prefix + 'JCloudsOneOffSlave'))
|
yaml: openstack provision slaves from openstack on demand .
|
train
| false
|
52,056
|
def is_cache_stale(cache_file, cache_expiration_time, refresh=False):
if refresh:
return True
if (os.path.isfile(cache_file) and (os.path.getsize(cache_file) > 0)):
mod_time = os.path.getmtime(cache_file)
current_time = time.time()
if ((mod_time + cache_expiration_time) > current_time):
return False
return True
|
determines if cache file has expired .
|
train
| false
|
52,057
|
def sp(value):
return dpi2px(value, 'sp')
|
convert from scale-independent pixels to pixels .
|
train
| false
|
52,058
|
def selector(module):
if (module.params['target'] == 'host'):
target = Host(module.params, module)
target.site_facts()
elif (module.params['target'] == 'hostgroup'):
if (module.params['fullpath'] is not None):
target = Hostgroup(module.params, module)
target.site_facts()
else:
module.fail_json(msg="Parameter 'fullpath' required for target 'hostgroup'")
else:
module.fail_json(msg=(('Error: Unexpected target "' + module.params['target']) + '" was specified.'))
|
figure out which object and which actions to take given the right parameters .
|
train
| false
|
52,059
|
def iter_token_lines(tokenlist):
line = []
for (token, c) in explode_tokens(tokenlist):
line.append((token, c))
if (c == u'\n'):
(yield line)
line = []
(yield line)
|
iterator that yields tokenlists for each line .
|
train
| true
|
52,061
|
def _check_orphans(cursor):
orphans = cursor.all('\n select username\n from participants\n where not exists (select * from elsewhere where elsewhere.participant=username)\n and not exists (select * from absorptions where archived_as=username)\n ')
assert (len(orphans) == 0), 'missing elsewheres: {}'.format(list(orphans))
|
finds participants that * do not have a corresponding elsewhere account * have not been absorbed by another participant . these are broken because new participants arise from elsewhere, and elsewhere is detached only by take over, which makes a note in absorptions if it removes the last elsewhere account .
|
train
| false
|
52,062
|
def get_first_day(dt, d_years=0, d_months=0):
dt = getdate(dt)
(overflow_years, month) = divmod(((dt.month + d_months) - 1), 12)
year = ((dt.year + d_years) + overflow_years)
return datetime.date(year, (month + 1), 1)
|
returns the first day of the month for the date specified by date object also adds d_years and d_months if specified .
|
train
| false
|
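a quick worked example for get_first_day above, assuming getdate passes a datetime.date through unchanged:

import datetime

# (3 + 2) - 1 = 4; divmod(4, 12) == (0, 4), so no year overflow
assert get_first_day(datetime.date(2023, 3, 15), d_months=2) == datetime.date(2023, 5, 1)
# (12 + 1) - 1 = 12; divmod(12, 12) == (1, 0), so the year rolls over
assert get_first_day(datetime.date(2023, 12, 31), d_months=1) == datetime.date(2024, 1, 1)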
52,063
|
def hex_decoding(t):
nums = t[1:].split('%')
return ''.join((chr(int(n, 16)) for n in nums))
|
hex decoding method .
|
train
| false
|
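two quick checks for hex_decoding above: t[1:] drops the leading '%', the remainder is split on '%', and each two-digit hex chunk becomes one character:

assert hex_decoding('%41%42%43') == 'ABC'
assert hex_decoding('%68%65%6c%6c%6f') == 'hello'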
52,064
|
def transport_channel_id(transport, is_server, channel_id_type):
if (channel_id_type is None):
return None
if (channel_id_type not in [u'tls-unique']):
raise Exception('invalid channel ID type {}'.format(channel_id_type))
if hasattr(transport, '_tlsConnection'):
if is_server:
tls_finished_msg = transport._tlsConnection.get_peer_finished()
else:
tls_finished_msg = transport._tlsConnection.get_finished()
m = hashlib.sha256()
m.update(tls_finished_msg)
return m.digest()
else:
return None
|
application-layer user authentication protocols are vulnerable to generic credential forwarding attacks .
|
train
| false
|
52,066
|
def _a_encode_bool(value, mapping):
return [('0T' if value else '0F')]
|
true --> [0 .
|
train
| false
|
52,067
|
def expand_action(data):
if isinstance(data, string_types):
return (u'{"index":{}}', data)
data = data.copy()
op_type = data.pop(u'_op_type', u'index')
action = {op_type: {}}
for key in (u'_index', u'_parent', u'_percolate', u'_routing', u'_timestamp', u'_ttl', u'_type', u'_version', u'_version_type', u'_id', u'_retry_on_conflict'):
if (key in data):
action[op_type][key] = data.pop(key)
if (op_type == u'delete'):
return (action, None)
return (action, data.get(u'_source', data))
|
from one document or action definition passed in by the user, extract the action/data lines needed for elasticsearch's :meth:~elasticsearch .
|
train
| false
|
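a worked example for expand_action above, assuming six is importable: metadata keys are lifted into the action line and the rest becomes the document body:

doc = {'_op_type': 'index', '_index': 'articles', '_id': 42, 'title': 'hi'}
action, body = expand_action(doc)
assert action == {'index': {'_index': 'articles', '_id': 42}}
assert body == {'title': 'hi'}
# delete actions carry no body
assert expand_action({'_op_type': 'delete', '_id': 42}) == ({'delete': {'_id': 42}}, None)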
52,068
|
def blue(text, attrib=None):
return colorize(text, 'blue', attrib)
|
wrapper for colorize .
|
train
| false
|
52,069
|
def copy_volume(src, dest, size_in_m, blocksize, sync=False, execute=utils.execute, ionice=None, throttle=None, sparse=False):
if (isinstance(src, six.string_types) and isinstance(dest, six.string_types)):
if (not throttle):
throttle = throttling.Throttle.get_default()
with throttle.subcommand(src, dest) as throttle_cmd:
_copy_volume_with_path(throttle_cmd['prefix'], src, dest, size_in_m, blocksize, sync=sync, execute=execute, ionice=ionice, sparse=sparse)
else:
_copy_volume_with_file(src, dest, size_in_m)
|
copy data from the source volume to the destination volume .
|
train
| false
|
52,070
|
def quote_query(string):
parsed = urlparse(string)
string = string.replace(parsed.query, quote(parsed.query, u'/=&'), 1)
return string
|
quotes the query parameters .
|
train
| false
|
52,071
|
def mutate_image_dict_to_v1(image):
visibility = image.pop('visibility')
is_image_public = ('public' == visibility)
image['is_public'] = is_image_public
return image
|
replaces a v2-style image dictionary's visibility member with the equivalent v1-style is_public member .
|
train
| false
|
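a short check for mutate_image_dict_to_v1 above; note that the input dict is mutated in place and returned:

image = {'name': 'cirros', 'visibility': 'public'}
assert mutate_image_dict_to_v1(image) == {'name': 'cirros', 'is_public': True}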
52,072
|
def same(*values):
if (not values):
return True
(first, rest) = (values[0], values[1:])
return all(((value == first) for value in rest))
|
check if all values in a sequence are equal .
|
train
| true
|
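quick checks for same above; with no arguments it is vacuously true:

assert same()
assert same('a', 'a', 'a')
assert not same(1, 1, 2)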
52,073
|
def lscsum0(lx):
lx = numpy.asarray(lx)
bases = lx.max(0)
x = numpy.exp((lx - bases))
ssum = x.sum(0)
result = (numpy.log(ssum) + bases)
try:
conventional = numpy.log(numpy.exp(lx).sum(0))
if (not similar(result, conventional)):
if (numpy.isinf(conventional).any() and (not numpy.isinf(result).any())):
pass
else:
import sys
print >>sys.stderr, 'Warning: scaled log sum down axis 0 did not match.'
print >>sys.stderr, 'Scaled log result:'
print >>sys.stderr, result
print >>sys.stderr, 'Conventional result:'
print >>sys.stderr, conventional
except FloatingPointError as e:
pass
return result
|
accepts log-values as input .
|
train
| true
|
52,074
|
def shanks(A, k, n, m=1):
table = [A.subs(k, Integer(j)).doit() for j in range(((n + m) + 2))]
table2 = table[:]
for i in range(1, (m + 1)):
for j in range(i, ((n + m) + 1)):
(x, y, z) = (table[(j - 1)], table[j], table[(j + 1)])
table2[j] = (((z * x) - (y ** 2)) / ((z + x) - (2 * y)))
table = table2[:]
return table[n]
|
calculate an approximation for lim k->oo a(k) using the n-term shanks transformation s(a)(n) .
|
train
| false
|
52,075
|
def extract_question_values(question, required_fields, is_reviewer):
response = {'value': {'type': 'string'}, 'comments': COMMENTS_SCHEMA, 'extra': {'type': 'array'}}
if (question.get('type') == 'object'):
response['value'] = get_object_jsonschema(question, required_fields, is_reviewer)
elif (question.get('type') == 'choose'):
options = question.get('options')
if options:
response['value'] = get_options_jsonschema(options)
elif (question.get('type') == 'osf-upload'):
response['extra'] = OSF_UPLOAD_EXTRA_SCHEMA
if is_reviewer:
del response['extra']
if (not (question.get('type') == 'object')):
del response['value']
return response
|
pulls structure for value .
|
train
| false
|
52,076
|
def sanitize_separators(value):
if settings.USE_L10N:
decimal_separator = get_format('DECIMAL_SEPARATOR')
if isinstance(value, six.string_types):
parts = []
if (decimal_separator in value):
(value, decimals) = value.split(decimal_separator, 1)
parts.append(decimals)
if settings.USE_THOUSAND_SEPARATOR:
parts.append(value.replace(get_format('THOUSAND_SEPARATOR'), ''))
else:
parts.append(value)
value = '.'.join(reversed(parts))
return value
|
sanitizes a value according to the current decimal and thousand separator settings.
|
train
| false
|
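A minimal sketch of the behaviour, assuming hypothetical Django settings (USE_L10N = True, USE_THOUSAND_SEPARATOR = True) and a locale where DECIMAL_SEPARATOR is ',' and THOUSAND_SEPARATOR is '.':

sanitize_separators('1.234,56')  # -> '1234.56' (thousand marks stripped, decimal normalised)
sanitize_separators('7,5')       # -> '7.5'
sanitize_separators(42)          # non-strings pass through unchanged -> 42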
52,077
|
def connected_server_and_client(case, server_factory, client_factory):
def connect_client(listening_port):
return TCP4ClientEndpoint(reactor, '127.0.0.1', listening_port.getHost().port).connect(client_factory)
return listen(case, TCP4ServerEndpoint(reactor, 0), server_factory).addCallback(connect_client)
|
create a server and a client connected to that server.
|
train
| false
|
52,078
|
def _prepare_colorarray(arr):
arr = np.asanyarray(arr)
if ((arr.ndim not in [3, 4]) or (arr.shape[(-1)] != 3)):
        msg = ('the input array must have a shape == (.., ..,[ ..,] 3), got (' + ', '.join(map(str, arr.shape)) + ')')
raise ValueError(msg)
return dtype.img_as_float(arr)
|
check the shape of the array and convert it to floating-point representation.
|
train
| false
|
52,079
|
def image_from_x(x):
if isinstance(x, type(u'')):
return image_from_path(x)
if hasattr(x, u'read'):
return image_from_data(x.read())
if isinstance(x, (bytes, QImage)):
return image_from_data(x)
if isinstance(x, bytearray):
return image_from_data(bytes(x))
if isinstance(x, QPixmap):
return x.toImage()
raise TypeError((u'Unknown image src type: %s' % type(x)))
|
create an image from a bytestring, a path, or a file-like object.
|
train
| false
|
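Sketch of the dispatch above; the file name is hypothetical, raw_png_bytes is a stand-in for image bytes already in memory, and the helpers image_from_path/image_from_data are assumed to be defined alongside this function:

img1 = image_from_x(u'cover.jpg')             # a path on disk
img2 = image_from_x(open('cover.jpg', 'rb'))  # anything with a read() method
img3 = image_from_x(raw_png_bytes)            # bytes/bytearray input
# all three calls return a QImage (a QPixmap input would be converted via toImage())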
52,080
|
def test_nospace():
entry = tokenize('(foo(one two))')[0]
assert (entry.start_line == 1)
assert (entry.start_column == 1)
assert (entry.end_line == 1)
assert (entry.end_column == 14)
entry = entry[1]
assert (entry.start_line == 1)
assert (entry.start_column == 5)
assert (entry.end_line == 1)
assert (entry.end_column == 13)
|
ensure we can tokenize without spaces if we have to.
|
train
| false
|
52,082
|
def add_to_path(path):
if (not os.path.isdir(path)):
raise RuntimeError('Tried to add nonexisting path')
def _samefile(x, y):
if (x == y):
return True
try:
return os.path.samefile(x, y)
except (IOError, OSError, AttributeError):
return False
sys.path[:] = [x for x in sys.path if (not _samefile(path, x))]
sys.path.insert(0, path)
|
adds an entry to sys.path, removing any existing duplicate entries first.
|
train
| false
|
52,083
|
def _get_info_from_caches(app, env, account, container=None):
info = _get_info_from_infocache(env, account, container)
if (info is None):
info = _get_info_from_memcache(app, env, account, container)
return info
|
get the cached info from env or memcache, in that order.
|
train
| false
|
52,084
|
def _prompt_choice(var_name, options):
choice_map = OrderedDict(((u'{0}'.format(i), value) for (i, value) in enumerate(options, 1) if (value[0] != 'test')))
choices = choice_map.keys()
default = u'1'
choice_lines = [u'{0} - {1} - {2}'.format(c[0], c[1][0], c[1][1]) for c in choice_map.items()]
prompt = u'\n'.join((u'Select {0}:'.format(var_name), u'\n'.join(choice_lines), u'Choose from {0}'.format(u', '.join(choices))))
user_choice = click.prompt(prompt, type=click.Choice(choices), default=default)
return choice_map[user_choice]
|
prompt the user to choose from a list of options.
|
train
| false
|
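Illustrative call (the option pairs are made up): each option is a (value, description) tuple, entries whose value is 'test' are skipped, and the chosen tuple is returned:

framework = _prompt_choice('framework', [
    ('django', 'full-featured web framework'),
    ('flask', 'lightweight WSGI framework'),
])
# the terminal shows a numbered menu; entering 2 returns ('flask', 'lightweight WSGI framework')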
52,085
|
def flatten_dict(data, flattened=None, old_key=None):
flattened = (flattened or {})
old_key = (old_key or [])
for (key, value) in data.iteritems():
new_key = (old_key + [key])
if (isinstance(value, list) and value and isinstance(value[0], dict)):
flattened = flatten_list(value, flattened, new_key)
else:
flattened[tuple(new_key)] = value
return flattened
|
flatten a dict, keying the result by tuples of keys.
|
train
| false
|
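Worked example of the snippet (Python 2, given the iteritems call); note that plain nested dicts are not recursed -- only non-empty lists of dicts are, and those are handed to a separately defined flatten_list helper not shown here:

flatten_dict({'a': 1, 'x': {'y': 2}})
# -> {('a',): 1, ('x',): {'y': 2}}   (keys become 1-tuples; the dict value is stored as-is)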
52,086
|
@flake8ext
def check_assert_called_once_with(logical_line, filename):
if ('neutron/tests/' in filename):
if ('.assert_called_once_with(' in logical_line):
return
uncased_line = logical_line.lower().replace('_', '')
check_calls = ['.assertcalledonce', '.calledoncewith']
if any((x for x in check_calls if (x in uncased_line))):
msg = 'N322: Possible use of no-op mock method. please use assert_called_once_with.'
(yield (0, msg))
if ('.asserthascalled' in uncased_line):
msg = 'N322: Possible use of no-op mock method. please use assert_has_calls.'
(yield (0, msg))
|
N322 - try to detect unintended calls of nonexistent mock methods like assert_called_once, assertcalledoncewith, assert_has_called, and called_once_with.
|
train
| false
|
52,087
|
def test_ada_sample_wt_fit():
ada = ADASYN(random_state=RND_SEED)
assert_raises(RuntimeError, ada.sample, X, Y)
|
test whether an error is raised when sample is called before fitting.
|
train
| false
|
52,089
|
@task.task(ignore_result=True)
def retry_open_graph_shares_for_user(user):
from django_facebook.models import OpenGraphShare
shares = OpenGraphShare.objects.recently_failed().filter(user=user)[:1000]
shares = list(shares)
logger.info('retrying %s shares for user %s', len(shares), user)
for share in shares:
retry_open_graph_share(share, reset_retries=True)
|
we retry the open graph shares for a user when they get a new access token.
|
train
| false
|
52,090
|
@contextmanager
def inside_transaction():
savepoint = transaction.savepoint()
try:
(yield)
except Exception:
transaction.savepoint_rollback(savepoint)
raise
else:
transaction.savepoint_commit(savepoint)
|
|
train
| false
|
52,091
|
def poly_between(x, ylower, yupper):
if any((isinstance(var, np.ma.MaskedArray) for var in [ylower, yupper, x])):
numpy = np.ma
else:
numpy = np
Nx = len(x)
if (not cbook.iterable(ylower)):
ylower = (ylower * numpy.ones(Nx))
if (not cbook.iterable(yupper)):
yupper = (yupper * numpy.ones(Nx))
x = numpy.concatenate((x, x[::(-1)]))
y = numpy.concatenate((yupper, ylower[::(-1)]))
return (x, y)
|
given a sequence of *x*, *ylower* and *yupper*, return the polygon that fills the region between them.
|
train
| false
|
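Sketch of the typical matplotlib pairing (the figure setup is illustrative): the returned px/py trace the closed boundary of the band between the two curves, ready for fill():

import numpy as np
import matplotlib.pyplot as plt

xs = np.linspace(0, 2 * np.pi, 100)
px, py = poly_between(xs, np.sin(xs) - 0.2, np.sin(xs) + 0.2)
plt.fill(px, py, alpha=0.3)  # shades the band between the lower and upper curves
plt.show()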
52,092
|
def lookupPointer(name, timeout=None):
return getResolver().lookupPointer(name, timeout)
|
perform a PTR record lookup.
|
train
| false
|
52,094
|
def convert_DateProperty(model, prop, kwargs):
if (prop.auto_now or prop.auto_now_add):
return None
kwargs.setdefault('format', '%Y-%m-%d')
return f.DateField(**kwargs)
|
returns a form field for a db.DateProperty.
|
train
| false
|
52,095
|
def path_wrapper(func):
def wrapped(node, context=None, _func=func, **kwargs):
'wrapper function handling context'
if (context is None):
context = InferenceContext()
context.push(node)
yielded = set()
for res in _func(node, context, **kwargs):
if (res.__class__ is Instance):
ares = res._proxied
else:
ares = res
if (not (ares in yielded)):
(yield res)
yielded.add(ares)
return wrapped
|
return the given infer function wrapped to handle the path.
|
train
| false
|
52,096
|
def rrelu(layer, **kwargs):
nonlinearity = getattr(layer, 'nonlinearity', None)
if (nonlinearity is not None):
layer.nonlinearity = nonlinearities.identity
return RandomizedRectifierLayer(layer, **kwargs)
|
convenience function to apply a randomized rectifier to a given layer's output.
|
train
| false
|
52,098
|
def isPosInf(value):
return ((_sign(value) == 0) and (_exponent(value) == 2047) and _zero_mantissa(value))
|
determine if the argument is an IEEE 754 positive infinity value.
|
train
| false
|
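The bit layout this predicate relies on, checked directly (a self-contained sketch): an IEEE 754 double is +inf exactly when the sign bit is 0, the 11 exponent bits are all ones (2047), and the 52 mantissa bits are zero:

import struct

bits = struct.unpack('>Q', struct.pack('>d', float('inf')))[0]
sign = bits >> 63
exponent = (bits >> 52) & 0x7FF
mantissa = bits & ((1 << 52) - 1)
assert (sign, exponent, mantissa) == (0, 2047, 0)  # matches the three tests above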
52,100
|
def lookupFor(typeCode):
return _fieldsRegistry[typeCode]
|
return the field definition class for the given type code.
|
train
| false
|
52,101
|
@testing.requires_testing_data
def test_label_io_and_time_course_estimates():
stc = read_source_estimate(stc_fname)
label = read_label(real_label_fname)
stc_label = stc.in_label(label)
assert_true((len(stc_label.times) == stc_label.data.shape[1]))
assert_true((len(stc_label.vertices[0]) == stc_label.data.shape[0]))
|
test I/O for label + stc files.
|
train
| false
|
52,102
|
def _calc_beta(rk, rk_norm, rk1, rk1_norm):
rkk1 = (rk1[0] - rk[0])
size = np.sqrt(np.dot(rkk1, rkk1))
rkk1 /= size
num = (rk_norm + np.dot(rk, rkk1))
den = (rk1_norm + np.dot(rk1, rkk1))
res = (np.log((num / den)) / size)
return res
|
these coefficients are used to calculate the magic vector omega.
|
train
| false
|
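A reconstruction of what _calc_beta computes, read off the code rather than quoted from any documentation: writing the unit vector $\hat{u} = (r_{k+1} - r_k)/\lVert r_{k+1} - r_k\rVert$ and taking rk_norm, rk1_norm as $\lVert r_k\rVert$ and $\lVert r_{k+1}\rVert$,

$$\beta_k = \frac{1}{\lVert r_{k+1} - r_k \rVert}\,\ln\!\left(\frac{\lVert r_k \rVert + r_k \cdot \hat{u}}{\lVert r_{k+1} \rVert + r_{k+1} \cdot \hat{u}}\right)$$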
52,103
|
def base64encode(value):
return base64.b64encode(value)
|
encodes a string value from plain to base64 format.
|
train
| false
|
52,104
|
def topological_sort_as_sets(dependency_graph):
todo = dependency_graph.copy()
while todo:
current = {node for (node, deps) in todo.items() if (len(deps) == 0)}
if (not current):
raise ValueError('Cyclic dependency in graph: {}'.format(', '.join((repr(x) for x in todo.items()))))
(yield current)
todo = {node: (dependencies - current) for (node, dependencies) in todo.items() if (node not in current)}
|
variation of Kahn's algorithm that returns sets.
|
train
| false
|
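Worked example (the dependency graph is made up): each yielded set contains nodes whose dependencies have all been emitted, so the members of one batch can be processed in parallel:

deps = {
    'settings': set(),
    'db': {'settings'},
    'cache': {'settings'},
    'app': {'db', 'cache'},
}
list(topological_sort_as_sets(deps))
# -> [{'settings'}, {'db', 'cache'}, {'app'}]
# a graph with a cycle (e.g. 'a' -> 'b' -> 'a') raises the ValueError above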
52,105
|
def create_buffer(length):
return array.array('B', (_dummy_s * length))
|
create a buffer to be passed to a read function.
|
train
| false
|
52,106
|
def np_datetime64_compat(s, *args, **kwargs):
if (not _np_version_under1p11):
s = tz_replacer(s)
return np.datetime64(s, *args, **kwargs)
|
provide compat for construction of strings to numpy datetime64s with tz changes in numpy 1.11.
|
train
| true
|
52,108
|
def _assert_complete_surface(surf):
from .source_space import _get_solids
tot_angle = 0.0
cm = surf['rr'].mean(axis=0)
logger.info(('%s CM is %6.2f %6.2f %6.2f mm' % (_surf_name[surf['id']], (1000 * cm[0]), (1000 * cm[1]), (1000 * cm[2]))))
tot_angle = _get_solids(surf['rr'][surf['tris']], cm[np.newaxis, :])[0]
if (np.abs(((tot_angle / (2 * np.pi)) - 1.0)) > 1e-05):
raise RuntimeError(('Surface %s is not complete (sum of solid angles = %g * 4*PI instead).' % (_surf_name[surf['id']], tot_angle)))
|
check the sum of solid angles as seen from inside.
|
train
| false
|
52,109
|
def GetFeeds(client):
feed_service = client.GetService('FeedService', 'v201609')
feeds = []
more_pages = True
selector = {'fields': ['Id', 'Name', 'Attributes'], 'predicates': [{'field': 'Origin', 'operator': 'EQUALS', 'values': ['USER']}, {'field': 'FeedStatus', 'operator': 'EQUALS', 'values': ['ENABLED']}], 'paging': {'startIndex': 0, 'numberResults': PAGE_SIZE}}
while more_pages:
page = feed_service.get(selector)
if ('entries' in page):
feeds.extend(page['entries'])
selector['paging']['startIndex'] += PAGE_SIZE
more_pages = (selector['paging']['startIndex'] < int(page['totalNumEntries']))
return feeds
|
returns a list of all enabled feeds.
|
train
| true
|
52,110
|
@_define_event
def post_execute():
pass
|
fires after code is executed in response to user/frontend action.
|
train
| false
|
52,112
|
def cooperate(iterator):
return _theCooperator.cooperate(iterator)
|
start running the given iterator as a long-running cooperative task.
|
train
| false
|
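Usage sketch, assuming a running Twisted reactor; the per-step work is hypothetical. The iterator is advanced a slice at a time so it never monopolises the event loop, and the returned CooperativeTask exposes whenDone() for completion:

def slow_work():
    for chunk in range(1000):
        process(chunk)  # hypothetical per-step work
        yield None      # hand control back to the cooperator between steps

task = cooperate(slow_work())
task.whenDone().addCallback(lambda ignored: print('finished'))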
52,114
|
def local_repo_to_remote():
with lcd(LOGDIR):
put(local_path='le.tar.gz', remote_path='')
run('tar xzf le.tar.gz')
|
copies a local tarball of the repo to the remote.
|
train
| false
|
52,115
|
def get_full_dict(lang):
if (not lang):
return {}
if (getattr(frappe.local, u'lang_full_dict', None) is not None):
return frappe.local.lang_full_dict
frappe.local.lang_full_dict = load_lang(lang)
try:
user_translations = get_user_translations(lang)
except Exception:
user_translations = None
if user_translations:
frappe.local.lang_full_dict.update(user_translations)
return frappe.local.lang_full_dict
|
load and return the entire translations dictionary for a language, merging in the user's own translations.
|
train
| false
|
52,116
|
def unsafe(f):
f.unsafe_callable = True
return f
|
mark a function or method as unsafe, e.g. decorating "def delete(): pass" with @unsafe.
|
train
| false
|
52,118
|
def filter_fastq(input_seqs_f, output_seqs_f, seqs_to_keep, negate=False, seqid_f=None):
if (seqid_f is None):
seqs_to_keep_lookup = {}.fromkeys([seq_id.split()[0] for seq_id in seqs_to_keep])
if (not negate):
def keep_seq(seq_id):
return (seq_id.split()[0] in seqs_to_keep_lookup)
else:
def keep_seq(seq_id):
return (seq_id.split()[0] not in seqs_to_keep_lookup)
elif (not negate):
keep_seq = seqid_f
else:
keep_seq = (lambda x: (not seqid_f(x)))
for (seq_id, seq, qual) in parse_fastq(input_seqs_f, enforce_qual_range=False):
if keep_seq(seq_id):
output_seqs_f.write(format_fastq_record(seq_id, seq, qual))
output_seqs_f.close()
|
write filtered input_seqs to output_seqs_f, keeping only seqs_to_keep. input_seqs can be the output of parse_fasta or parse_fastq.
|
train
| false
|
52,123
|
def ar2lhs(ar):
return (- ar[1:])
|
convert a full lag polynomial into a reduced, left-hand-side lag polynomial array.
|
train
| false
|
52,124
|
def config_from_file(filename, config=None):
if config:
try:
with open(filename, 'w') as fdesc:
fdesc.write(json.dumps(config))
except IOError as error:
_LOGGER.error('Saving config file failed: %s', error)
return False
return config
elif os.path.isfile(filename):
try:
with open(filename, 'r') as fdesc:
return json.loads(fdesc.read())
except IOError as error:
_LOGGER.error('Reading config file failed: %s', error)
return False
else:
return {}
|
small configuration file management function.
|
train
| true
|
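Sketch of the two calling modes (the path is made up): passing a config dict writes it and returns it; omitting it reads the file back, with {} for a missing file and False on I/O errors:

config_from_file('/tmp/demo.json', {'host': 'example.org'})  # write -> returns the dict
config_from_file('/tmp/demo.json')                           # read  -> {'host': 'example.org'}
config_from_file('/tmp/missing.json')                        # absent file -> {}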
52,125
|
def read_rels(archive):
xml_source = archive.read(ARC_WORKBOOK_RELS)
tree = fromstring(xml_source)
for element in safe_iterator(tree, ('{%s}Relationship' % PKG_REL_NS)):
rId = element.get('Id')
pth = element.get('Target')
typ = element.get('Type')
if pth.startswith('/xl'):
pth = pth.replace('/xl', 'xl')
elif ((not pth.startswith('xl')) and (not pth.startswith('..'))):
pth = ('xl/' + pth)
(yield (rId, {'path': pth, 'type': typ}))
|
read relationships for a workbook.
|
train
| true
|
52,126
|
@transaction.non_atomic_requests
@require_POST
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_level('staff')
def get_problem_responses(request, course_id):
course_key = CourseKey.from_string(course_id)
problem_location = request.POST.get('problem_location', '')
try:
problem_key = UsageKey.from_string(problem_location)
run = problem_key.run
if (not run):
problem_key = course_key.make_usage_key_from_deprecated_string(problem_location)
if (problem_key.course_key != course_key):
raise InvalidKeyError(type(problem_key), problem_key)
except InvalidKeyError:
return JsonResponseBadRequest(_('Could not find problem with this location.'))
try:
lms.djangoapps.instructor_task.api.submit_calculate_problem_responses_csv(request, course_key, problem_location)
success_status = _('The problem responses report is being created. To view the status of the report, see Pending Tasks below.')
return JsonResponse({'status': success_status})
except AlreadyRunningError:
already_running_status = _("A problem responses report generation task is already in progress. Check the 'Pending Tasks' table for the status of the task. When completed, the report will be available for download in the table below.")
return JsonResponse({'status': already_running_status})
|
initiate generation of a CSV file containing all student answers to a given problem.
|
train
| false
|
52,127
|
def _arity(f):
if (sys.version_info < (3,)):
return len(inspect.getargspec(f)[0])
else:
param = inspect.signature(f).parameters.values()
return len([p for p in param if (p.kind == p.POSITIONAL_OR_KEYWORD)])
|
python 2 and 3 compatible version that does not raise a deprecation warning.
|
train
| false
|
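A small check of what gets counted (sketch, Python 3 semantics): only POSITIONAL_OR_KEYWORD parameters contribute, so *args, keyword-only arguments and **kwargs are excluded:

def f(a, b, *args, c=None, **kwargs):
    pass

_arity(f)  # -> 2 (just a and b)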
52,128
|
def _format_for_table(plugins):
return [[data[u'name'], data[u'version'], data[u'description'], data[u'authors'], data[u'home']] for data in plugins]
|
take a list of plugins and format it for the table on the UI.
|
train
| false
|
52,129
|
def showFPS(texw, texh):
global frames, t0
t = time.time()
if ((t - t0) > 1.0):
fps = (frames / (t - t0))
titlestr = ('%sx%s texture, %.1f FPS' % (texw, texh, fps))
glfw.glfwSetWindowTitle(window, titlestr)
t0 = t
frames = 0
frames += 1
|
showFPS - calculate and report texture size and frames per second in the window title bar.
|
train
| false
|
52,130
|
def _validate_device_list(data, valid_values=None):
if (not data):
msg = _('Cannot create a gateway with an empty device list')
return msg
try:
for device in data:
err_msg = attributes._validate_dict(device, key_specs={DEVICE_ID_ATTR: {'type:regex': attributes.UUID_PATTERN, 'required': True}, IFACE_NAME_ATTR: {'type:string': None, 'required': False}})
if err_msg:
return err_msg
except TypeError:
return (_('%s: provided data are not iterable') % _validate_device_list.__name__)
|
validate the list of gateway devices.
|
train
| false
|