Dataset schema: id_within_dataset (int64, 1 to 55.5k), snippet (string, 19 to 14.2k characters), tokens (list of 6 to 1.63k tokens; the snippet's token sequence), nl (string, 6 to 352 characters), split_within_dataset (string, 1 class), is_duplicated (bool, 2 classes). Each record below shows its id, snippet, nl summary, split, and is_duplicated flag.
3,487
def _api_pause_pp(name, output, kwargs):
    PostProcessor.do.paused = True
    return report(output)
api: pauses post-processing and returns a report of the given output .
train
false
3,488
def tuple_key(tup):
    def generator():
        for item in tup:
            try:
                (yield (1, int(item)))
            except ValueError:
                (yield (0, item))
    return tuple(generator())
return a sort key for mixed int/string tuples .
train
false
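A quick usage sketch (added illustration; the rows are made-up data): fields that parse as integers compare numerically, while string fields sort ahead of numeric ones in the same position.

rows = [('img', '10'), ('img', '2'), ('a', 'b')]
print(sorted(rows, key=tuple_key))
# [('a', 'b'), ('img', '2'), ('img', '10')]  -- '2' sorts before '10' numerically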
3,489
def choose_task_name(app_name, queue_name, user_chosen=None):
    if (not user_chosen):
        user_chosen = _get_random_string()
    return ('task_%s_%s_%s' % (app_name, queue_name, user_chosen))
creates a task name that the system can use to address tasks from different apps and queues .
train
false
3,490
def _clean_tags(tags):
    def clean(tagstr):
        if isinstance(tagstr, str):
            return tagstr.replace('|', '_')[:200]
        return unicode(tagstr).replace('|', '_')[:200].encode('utf-8')
    return [clean(t) for t in tags]
helper method that does the actual cleaning of tags for sending to statsd .
train
false
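Illustrative call (my addition; the statsd wire format uses '|' as a field separator, hence the substitution, and each tag is capped at 200 characters):

print(_clean_tags(['env|prod', 'region|us-east-1']))
# ['env_prod', 'region_us-east-1']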
3,492
def list_recursively(dir):
    if (not _os.path.isdir(dir)):
        raise ValueError, ('%s is not a directory.' % dir)
    for f in _os.listdir(dir):
        if _os.path.isdir(f):
            # note: the recursive results are discarded, and f is not joined
            # with dir, so subdirectory contents are never actually yielded
            list_recursively(f)
        else:
            (yield f)
recursively list the contents of a directory .
train
false
3,494
def make_html_patterns():
    tags = any('builtin', ['<', '[\\?/]?>', '(?<=<).*?(?=[ >])'])
    keywords = any('keyword', [' [\\w:-]*?(?==)'])
    string = any('string', ['".*?"'])
    comment = any('comment', ['<!--.*?-->'])
    multiline_comment_start = any('multiline_comment_start', ['<!--'])
    multiline_comment_end = any('multiline_comment_end', ['-->'])
    return '|'.join([comment, multiline_comment_start, multiline_comment_end, tags, keywords, string])
strongly inspired by idlelib .
train
false
3,495
def unpack_bitstr(rev_cur_bit, bitstr):
    bstr_len = len(bitstr)
    return ''.join([rev_cur_bit[bitstr[i:(i + 2)]] for i in range(0, bstr_len, 2)])
unpack a bitstring into an nt (nucleotide) sequence .
train
false
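Usage sketch (added; the two-bit nucleotide encoding below is an assumed example, not taken from the source):

rev_cur_bit = {'00': 'A', '01': 'C', '10': 'G', '11': 'T'}
print(unpack_bitstr(rev_cur_bit, '000110'))  # 'ACG'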
3,496
def _inflate_g(g, n):
    def inflate(params, n):
        ' (a1, .., ak) -> (a1/n, (a1+1)/n, ..., (ak + n-1)/n) '
        res = []
        for a in params:
            for i in range(n):
                res.append(((a + i) / n))
        return res
    v = S((len(g.ap) - len(g.bq)))
    C = (n ** ((1 + g.nu) + (v / 2)))
    C /= ((2 * pi) ** ((n - 1) * g.delta))
    return (C, meijerg(inflate(g.an, n), inflate(g.aother, n), inflate(g.bm, n), inflate(g.bother, n), ((g.argument ** n) * (n ** (n * v)))))
return c , h such that h is a g function of argument z**n and g = c*h .
train
false
3,497
@app.route('/_add_numbers')
def add_numbers():
    a = request.args.get('a', 0, type=int)
    b = request.args.get('b', 0, type=int)
    return jsonify(result=(a + b))
add two numbers server side .
train
false
3,498
@contextmanager
def signals_disabled(qobject):
    old_state = qobject.signalsBlocked()
    qobject.blockSignals(True)
    try:
        (yield)
    finally:
        qobject.blockSignals(old_state)
disables signals on an instance of qobject .
train
false
3,499
def varcorrection_pairs_unbalanced(nobs_all, srange=False):
    (n1, n2) = np.meshgrid(nobs_all, nobs_all)
    if (not srange):
        return ((1.0 / n1) + (1.0 / n2))
    else:
        return (((1.0 / n1) + (1.0 / n2)) / 2.0)
correction factor for variance with unequal sample sizes, for all pairs; this is just a harmonic mean . parameters: nobs_all (array_like) : the number of observations for each sample ; srange (bool) : if true, the correction is halved .
train
false
3,500
def _millis(when):
    micros = _microseconds_from_datetime(when)
    return (micros // 1000)
convert a zone-aware datetime to integer milliseconds .
train
false
3,503
def hankel(c, r=None):
    c = np.asarray(c).ravel()
    if (r is None):
        r = np.zeros_like(c)
    else:
        r = np.asarray(r).ravel()
    vals = np.concatenate((c, r[1:]))
    (a, b) = np.ogrid[0:len(c), 0:len(r)]
    indx = (a + b)
    return vals[indx]
construct a hankel matrix .
train
false
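A worked call (my addition): the result has c as its first column and constant anti-diagonals, zero-padded when r is omitted.

import numpy as np
print(hankel([1, 2, 3]))
# [[1 2 3]
#  [2 3 0]
#  [3 0 0]]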
3,506
def _unify_gens(f_gens, g_gens):
    f_gens = list(f_gens)
    g_gens = list(g_gens)
    if (f_gens == g_gens):
        return tuple(f_gens)
    (gens, common, k) = ([], [], 0)
    for gen in f_gens:
        if (gen in g_gens):
            common.append(gen)
    for (i, gen) in enumerate(g_gens):
        if (gen in common):
            (g_gens[i], k) = (common[k], (k + 1))
    for gen in common:
        i = f_gens.index(gen)
        gens.extend(f_gens[:i])
        f_gens = f_gens[(i + 1):]
        i = g_gens.index(gen)
        gens.extend(g_gens[:i])
        g_gens = g_gens[(i + 1):]
        gens.append(gen)
    gens.extend(f_gens)
    gens.extend(g_gens)
    return tuple(gens)
unify generators in a reasonably intelligent way .
train
false
3,508
def maximum_line_length(physical_line):
    length = len(physical_line.rstrip())
    if (length > 79):
        return (79, ('E501 line too long (%d characters)' % length))
limit all lines to a maximum of 79 characters .
train
false
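Behavior sketch (my addition): the checker is silent for conforming lines and returns an (offset, message) pair otherwise.

print(maximum_line_length('x = 1'))   # None
print(maximum_line_length('#' * 90))  # (79, 'E501 line too long (90 characters)')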
3,509
def mv(src_path, dest_path):
    try:
        os.rename(src_path, dest_path)
    except OSError:
        os.remove(dest_path)
        os.rename(src_path, dest_path)
a shell-like mv .
train
false
3,514
@slicer.after_request
def add_cors_headers(response):
    origin = current_app.slicer.allow_cors_origin
    if (origin and len(origin)):
        if (request.method == 'OPTIONS'):
            response.headers['Access-Control-Allow-Headers'] = 'X-Requested-With'
        if (origin == '*'):
            response.headers['Access-Control-Allow-Origin'] = request.headers.get('Origin', origin)
        else:
            response.headers['Access-Control-Allow-Origin'] = origin
        response.headers['Access-Control-Allow-Credentials'] = 'true'
        response.headers['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'
        response.headers['Access-Control-Max-Age'] = CORS_MAX_AGE
    return response
add cross-origin resource sharing headers .
train
false
3,515
def _urlunsplit(scheme=None, netloc=None, path=None, query=None, fragment=None):
    if ((not scheme) or (not netloc)):
        scheme = None
        netloc = None
    if path:
        path = urllib.quote(_to_utf8(path))
    if (query and (not isinstance(query, basestring))):
        if isinstance(query, dict):
            query = query.iteritems()
        query = urllib.urlencode(sorted(query))
    if fragment:
        fragment = urllib.quote(_to_utf8(fragment))
    return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
like urlparse.urlunsplit, but quotes the path and fragment and urlencodes sorted query arguments .
train
false
3,516
def run_correlation_test(data_generator, test, test_choices, pval_assignment_method, permutations=None):
    (corr_coefs, pvals) = ([], [])
    test_fn = test_choices[test]
    for (otu_vals, md_vals) in data_generator:
        r = test_fn(otu_vals, md_vals)
        if (pval_assignment_method == 'bootstrapped'):
            pval = assign_correlation_pval(r, len(otu_vals), pval_assignment_method, permutations, test_fn, otu_vals, md_vals)
        else:
            pval = assign_correlation_pval(r, len(otu_vals), pval_assignment_method)
        corr_coefs.append(r)
        pvals.append(pval)
    return (corr_coefs, pvals)
run correlation tests .
train
false
3,517
def getMsPerFrame(myWin, nFrames=60, showVisual=False, msg='', msDelay=0.0):
    # note: the nFrames and msDelay arguments are ignored; the delegated
    # call hard-codes nFrames=60 and msDelay=0.0
    return myWin.getMsPerFrame(nFrames=60, showVisual=showVisual, msg=msg, msDelay=0.0)
deprecated: please use the getMsPerFrame method of the psychopy window object instead .
train
false
3,518
def chuang_f3(individual):
    total = 0
    if (individual[(-1)] == 0):
        for i in xrange(0, (len(individual) - 1), 4):
            total += inv_trap(individual[i:(i + 4)])
    else:
        for i in xrange(2, (len(individual) - 3), 4):
            total += inv_trap(individual[i:(i + 4)])
        total += trap((individual[(-2):] + individual[:2]))
    return (total,)
binary deceptive function from : multivariate multi-model approach for globally multimodal problems by chung-yao chuang and wen-lian hsu .
train
false
3,519
def parse_storage_string(value):
    byte_multipliers = {u'K': 1024, u'M': 1048576, u'G': 1073741824, u'T': 1099511627776}
    if (not isinstance(value, types.StringTypes)):
        raise ValueError(u'Value must be string, got {type}.'.format(type=type(value).__name__))
    pattern = re.compile(u'^(\\d+\\.?\\d*)(K|M|G|T)?$', (re.I | re.U))
    parsed = pattern.match(value)
    if (not parsed):
        raise ValueError(u"Value '{value}' could not be parsed as a storage quantity.".format(value=value))
    (quantity, unit) = parsed.groups()
    quantity = float(quantity)
    if (unit is not None):
        unit = unit.upper()
        quantity = (quantity * byte_multipliers[unit])
    quantity = int(math.ceil(quantity))
    return quantity
converts a string representing a quantity and a unit identifier in to an integer value representing the number of bytes in that quantity .
train
false
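A worked example (my addition; Python 2, given types.StringTypes): '1.5K' is ceil(1.5 * 1024) bytes, and unit letters match case-insensitively.

print(parse_storage_string(u'1.5K'))  # 1536
print(parse_storage_string(u'2g'))    # 2147483648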
3,522
def find_web_xml(app_name):
    app_dir = '/var/apps/{}/app'.format(app_name)
    file_name = 'appengine-web.xml'
    matches = []
    for (root, dirs, files) in os.walk(app_dir):
        if ((file_name in files) and root.endswith('/WEB-INF')):
            matches.append(os.path.join(root, file_name))
    if (len(matches) < 1):
        raise BadConfigurationException('Unable to find {} file for {}'.format(file_name, app_name))
    if (len(matches) > 1):
        matches.sort()
        match_to_use = matches[0]
        for match in matches:
            if (len(match) < len(match_to_use)):
                match_to_use = match
        return match_to_use
    return matches[0]
returns the location of a java application's appengine-web.xml file .
train
false
3,524
def help_option(*param_decls, **attrs):
    def decorator(f):
        def callback(ctx, param, value):
            if (value and (not ctx.resilient_parsing)):
                echo(ctx.get_help(), color=ctx.color)
                ctx.exit()
        attrs.setdefault('is_flag', True)
        attrs.setdefault('expose_value', False)
        attrs.setdefault('help', 'Show this message and exit.')
        attrs.setdefault('is_eager', True)
        attrs['callback'] = callback
        return option(*(param_decls or ('--help',)), **attrs)(f)
    return decorator
adds a --help option which immediately ends the program printing out the help page .
train
true
3,525
def need_ext():
    sys.stdout.write('{0}\next_mods\n'.format(OPTIONS.delimiter))
    sys.exit(EX_MOD_DEPLOY)
signal that external modules need to be deployed .
train
true
3,526
def extract_filters(term, opts=None):
    opts = (opts or {})
    filters = {}
    params = {}
    (term, addon_type) = extract_from_query(term, 'type', '\\w+')
    addon_type = (addon_type or opts.get('addon_type'))
    if addon_type:
        try:
            atype = int(addon_type)
            if (atype in amo.ADDON_SEARCH_TYPES):
                filters['type'] = atype
        except ValueError:
            atype = amo.ADDON_SEARCH_SLUGS.get(addon_type.lower())
            if atype:
                filters['type'] = atype
    (term, platform) = extract_from_query(term, 'platform', '\\w+')
    params['platform'] = (platform or opts.get('platform'))
    (term, version) = extract_from_query(term, 'version', '[0-9.]+')
    params['version'] = (version or opts.get('version'))
    (term, tag) = extract_from_query(term, 'tag', '\\w+')
    if tag:
        tag = Tag.objects.filter(tag_text=tag).values_list('tag_text', flat=True)
        if tag:
            filters['tags__in'] = list(tag)
    return (term, filters, params)
pulls all the filtering options out of the term and returns a cleaned term and a dictionary of filter names and filter values .
train
false
3,527
def _scale_mpl_figure(fig, scale):
    fig.set_size_inches((fig.get_size_inches() * scale))
    fig.set_dpi((fig.get_dpi() * scale))
    import matplotlib as mpl
    if (scale >= 1):
        sfactor = (scale ** 2)
    elif (scale < 1):
        sfactor = (- ((1.0 / scale) ** 2))
    for text in fig.findobj(mpl.text.Text):
        fs = text.get_fontsize()
        new_size = (fs + sfactor)
        if (new_size <= 0):
            raise ValueError('could not rescale matplotlib fonts, consider increasing "scale"')
        text.set_fontsize(new_size)
    fig.canvas.draw()
magic scaling helper .
train
false
3,529
def specific_iterator(qs):
    pks_and_types = qs.values_list(u'pk', u'content_type')
    pks_by_type = defaultdict(list)
    for (pk, content_type) in pks_and_types:
        pks_by_type[content_type].append(pk)
    content_types = {pk: ContentType.objects.get_for_id(pk) for (_, pk) in pks_and_types}
    pages_by_type = {}
    for (content_type, pks) in pks_by_type.items():
        model = content_types[content_type].model_class()
        pages = model.objects.filter(pk__in=pks)
        pages_by_type[content_type] = {page.pk: page for page in pages}
    for (pk, content_type) in pks_and_types:
        (yield pages_by_type[content_type][pk])
this efficiently iterates all the specific pages in a queryset .
train
false
3,530
def flush_rttable(rt_table):
    if (rt_table in ['local', 'main', 'default']):
        return
    run(settings.ip, 'route', 'flush', 'table', rt_table)
flushes specified routing table entries .
train
false
3,531
def validate_hook(shell_cmd, hook_name):
    if shell_cmd:
        cmd = shell_cmd.split(None, 1)[0]
        if (not _prog(cmd)):
            path = os.environ['PATH']
            msg = 'Unable to find {2}-hook command {0} in the PATH.\n(PATH is {1})'.format(cmd, path, hook_name)
            raise errors.HookCommandNotFound(msg)
check that a command provided as a hook is plausibly executable .
train
false
3,532
@library.filter
def round_percent(num):
    return (round(num, 1) if (num < 10) else int(round(num, 0)))
return a customercare-format percentage from a number .
train
false
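Behavior sketch (my addition, calling the function the filter decorator registers): one decimal place below ten percent, whole numbers from ten up.

print(round_percent(9.46))  # 9.5
print(round_percent(45.6))  # 46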
3,534
def relative_wildcard_glob(dirname, pattern):
    if (not dirname):
        dirname = os.curdir
    try:
        if ('**' in pattern):
            names = list(_iter_relative_dirs(dirname))
        else:
            names = os.listdir(dirname)
    except OSError:
        return []
    result = []
    pattern = os.path.normcase(pattern)
    match = re.compile(translate(pattern)).match
    for name in names:
        if match(os.path.normcase(name)):
            result.append(name)
    return result
non-recursive glob for one directory .
train
false
3,535
def _compute_signature(parameters, access_key_secret):
    def percent_encode(line):
        if (not isinstance(line, str)):
            return line
        s = line
        if (sys.stdin.encoding is None):
            s = line.decode().encode('utf8')
        else:
            s = line.decode(sys.stdin.encoding).encode('utf8')
        res = _quote(s, '')
        res = res.replace('+', '%20')
        res = res.replace('*', '%2A')
        res = res.replace('%7E', '~')
        return res
    sortedParameters = sorted(list(parameters.items()), key=(lambda items: items[0]))
    canonicalizedQueryString = ''
    for (k, v) in sortedParameters:
        canonicalizedQueryString += ((('&' + percent_encode(k)) + '=') + percent_encode(v))
    stringToSign = ('GET&%2F&' + percent_encode(canonicalizedQueryString[1:]))
    h = hmac.new((access_key_secret + '&'), stringToSign, sha1)
    signature = base64.encodestring(h.digest()).strip()
    return signature
generate an api request signature .
train
true
3,537
def _theano_single_leapfrog(H, q, p, **theano_kwargs):
    epsilon = tt.dscalar('epsilon')
    epsilon.tag.test_value = 1
    p_new = (p + ((0.5 * epsilon) * H.dlogp(q)))
    q_new = (q + (epsilon * H.pot.velocity(p_new)))
    p_new += ((0.5 * epsilon) * H.dlogp(q_new))
    energy_new = energy(H, q_new, p_new)
    f = theano.function(inputs=[q, p, epsilon], outputs=[q_new, p_new, energy_new], **theano_kwargs)
    f.trust_input = True
    return f
leapfrog integrator for a single step .
train
false
3,539
def stopcron():
    global _cron_stopping
    _cron_stopping = True
    while _cron_subprocs:
        proc = _cron_subprocs.pop()
        if (proc.poll() is None):
            try:
                proc.terminate()
            except:
                import traceback
                traceback.print_exc()
graceful shutdown of cron .
train
false
3,540
def to_id(s):
    if (s == '+'):
        return 11
    if (s == '*'):
        return 12
    return (int(s) + 1)
convert text to ids .
train
false
3,541
def _add_loss_summaries(total_loss):
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply((losses + [total_loss]))
    for l in (losses + [total_loss]):
        tf.summary.scalar((l.op.name + ' (raw)'), l)
        tf.summary.scalar(l.op.name, loss_averages.average(l))
    return loss_averages_op
add summaries for losses in cifar-10 model .
train
true
3,542
def get_foreign_module(namespace):
    if (namespace not in _MODULES):
        try:
            module = importlib.import_module(('.' + namespace), __package__)
        except ImportError:
            module = None
        _MODULES[namespace] = module
    module = _MODULES.get(namespace)
    if (module is None):
        raise ForeignError(('Foreign %r structs not supported' % namespace))
    return module
returns the module or raises foreignerror .
train
true
3,543
def do_setup():
    root = get_root()
    try:
        cfg = get_config_from_root(root)
    except (EnvironmentError, configparser.NoSectionError, configparser.NoOptionError) as e:
        if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
            print('Adding sample versioneer config to setup.cfg', file=sys.stderr)
            with open(os.path.join(root, 'setup.cfg'), 'a') as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1
    print((' creating %s' % cfg.versionfile_source))
    with open(cfg.versionfile_source, 'w') as f:
        LONG = LONG_VERSION_PY[cfg.VCS]
        f.write((LONG % {'DOLLAR': '$', 'STYLE': cfg.style, 'TAG_PREFIX': cfg.tag_prefix, 'PARENTDIR_PREFIX': cfg.parentdir_prefix, 'VERSIONFILE_SOURCE': cfg.versionfile_source}))
    ipy = os.path.join(os.path.dirname(cfg.versionfile_source), '__init__.py')
    if os.path.exists(ipy):
        try:
            with open(ipy, 'r') as f:
                old = f.read()
        except EnvironmentError:
            old = ''
        if (INIT_PY_SNIPPET not in old):
            print((' appending to %s' % ipy))
            with open(ipy, 'a') as f:
                f.write(INIT_PY_SNIPPET)
        else:
            print((' %s unmodified' % ipy))
    else:
        print((" %s doesn't exist, ok" % ipy))
        ipy = None
    manifest_in = os.path.join(root, 'MANIFEST.in')
    simple_includes = set()
    try:
        with open(manifest_in, 'r') as f:
            for line in f:
                if line.startswith('include '):
                    for include in line.split()[1:]:
                        simple_includes.add(include)
    except EnvironmentError:
        pass
    if ('versioneer.py' not in simple_includes):
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, 'a') as f:
            f.write('include versioneer.py\n')
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if (cfg.versionfile_source not in simple_includes):
        print((" appending versionfile_source ('%s') to MANIFEST.in" % cfg.versionfile_source))
        with open(manifest_in, 'a') as f:
            f.write(('include %s\n' % cfg.versionfile_source))
    else:
        print(' versionfile_source already in MANIFEST.in')
    do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
    return 0
run the setup after we have everything configured .
train
true
3,544
def build_database():
    if snapshot:
        uc = upgrade_code_snapshot
    else:
        uc = upgrade_code
    if msilib.Win64:
        productsuffix = ' (64-bit)'
    else:
        productsuffix = ''
    db = msilib.init_database(('python-%s%s.msi' % (full_current_version, msilib.arch_ext)), schema, ProductName=(('Python ' + full_current_version) + productsuffix), ProductCode=product_code, ProductVersion=current_version, Manufacturer=u'Python Software Foundation', request_uac=True)
    msilib.change_sequence(sequence.InstallExecuteSequence, 'RemoveExistingProducts', 1510)
    msilib.add_tables(db, sequence)
    add_data(db, 'Property', [('UpgradeCode', uc), ('WhichUsers', 'ALL'), ('ProductLine', ('Python%s%s' % (major, minor)))])
    db.Commit()
    return db
generate an empty database .
train
false
3,545
@utils.arg('host', metavar='<hostname>', help=_('Name of host.'))
@utils.arg('binary', metavar='<binary>', help=_('Service binary.'))
def do_service_enable(cs, args):
    result = cs.services.enable(args.host, args.binary)
    utils.print_list([result], ['Host', 'Binary', 'Status'])
enable the service .
train
false
3,548
@register.filter
def has_permissions(user, component):
    return user.has_perms(getattr(component, 'permissions', set()))
checks if the given user meets the permissions requirements for the component .
train
false
3,549
@scope.define_pure
def idxs_take(idxs, vals, which):
    assert (len(idxs) == len(vals))
    table = dict(list(zip(idxs, vals)))
    return np.asarray([table[w] for w in which])
return vals[which] where which is a subset of idxs .
train
false
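The lookup semantics, sketched directly (my addition; under hyperopt's scope decorator a call would build a graph node, so this replays the undecorated body):

import numpy as np
idxs, vals = [5, 3, 9], ['a', 'b', 'c']
table = dict(zip(idxs, vals))
print(np.asarray([table[w] for w in [9, 5]]))  # ['c' 'a']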
3,550
def SGD_final_layer(self, training_data, epochs, mini_batch_size, eta, lmbda):
    encoded_training_data = [(self.feedforward(x, start=0, end=(self.num_layers - 2)), y) for (x, y) in training_data]
    net = Network(self.sizes[(-2):])
    net.biases[0] = self.biases[(-1)]
    net.weights[0] = self.weights[(-1)]
    net.SGD(encoded_training_data, epochs, mini_batch_size, eta, lmbda)
    self.biases[(-1)] = net.biases[0]
    self.weights[(-1)] = net.weights[0]
run sgd on the final layer of the network self .
train
false
3,551
def _tan1(p, x, prec):
    R = p.ring
    p1 = R(0)
    for precx in _giant_steps(prec):
        tmp = (p - rs_atan(p1, x, precx))
        tmp = rs_mul(tmp, (1 + rs_square(p1, x, precx)), x, precx)
        p1 += tmp
    return p1
helper function of rs_tan .
train
false
3,552
def get_view(request):
    return HttpResponse('Hello world')
a simple login protected view .
train
false
3,554
def some_action(post):
    print post['created_time']
here you might want to do something with each post .
train
false
3,556
def testopendocx():
    if isinstance(opendocx(TEST_FILE), lxml.etree._Element):
        pass
    else:
        assert False
ensure an etree element is returned .
train
false
3,557
def runNetwork(network, writer):
    sensorRegion = network.regions['sensor']
    spatialPoolerRegion = network.regions['spatialPoolerRegion']
    temporalPoolerRegion = network.regions['temporalPoolerRegion']
    anomalyLikelihoodRegion = network.regions['anomalyLikelihoodRegion']
    prevPredictedColumns = []
    for i in xrange(_NUM_RECORDS):
        network.run(1)
        consumption = sensorRegion.getOutputData('sourceOut')[0]
        anomalyScore = temporalPoolerRegion.getOutputData('anomalyScore')[0]
        anomalyLikelihood = anomalyLikelihoodRegion.getOutputData('anomalyLikelihood')[0]
        writer.writerow((i, consumption, anomalyScore, anomalyLikelihood))
run the network and write output to writer .
train
true
3,558
def remove_extension(template):
    return template[:(- len('.template'))]
remove the .template extension from the name .
train
false
3,559
@require_context
def volume_get_all_active_by_window(context, begin, end=None, project_id=None):
    query = model_query(context, models.Volume, read_deleted='yes')
    query = query.filter(or_((models.Volume.deleted_at == None), (models.Volume.deleted_at > begin)))
    if end:
        query = query.filter((models.Volume.created_at < end))
    if project_id:
        query = query.filter_by(project_id=project_id)
    query = query.options(joinedload('volume_metadata')).options(joinedload('volume_type')).options(joinedload('volume_attachment')).options(joinedload('consistencygroup')).options(joinedload('group'))
    if is_admin_context(context):
        query = query.options(joinedload('volume_admin_metadata'))
    return query.all()
get all the volumes inside the window .
train
false
3,560
def cache_property(key, empty, type):
    return property((lambda x: x._get_cache_value(key, empty, type)),
                    (lambda x, v: x._set_cache_value(key, v, type)),
                    (lambda x: x._del_cache_value(key)),
                    ('accessor for %r' % key))
return a new property object for a cache header .
train
true
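A minimal sketch of how the property factory above plugs into a class; the dict-backed accessor methods are invented for illustration, and only cache_property is taken from the record:

# minimal sketch, assuming cache_property (above) is in scope
class CacheControl(object):
    def __init__(self):
        self._values = {}  # hypothetical backing store for header values

    def _get_cache_value(self, key, empty, type):
        return type(self._values[key]) if key in self._values else empty

    def _set_cache_value(self, key, value, type):
        self._values[key] = type(value)

    def _del_cache_value(self, key):
        self._values.pop(key, None)

    max_age = cache_property('max-age', -1, int)

cc = CacheControl()
assert cc.max_age == -1     # the 'empty' default
cc.max_age = '3600'         # coerced through int by the setter
assert cc.max_age == 3600
del cc.max_age
assert cc.max_age == -1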
3,563
def filter_partition_list(partitions, devnames): filtered_list = [] for p in partitions: for d in devnames: if ((p.device == d) and (p not in filtered_list)): filtered_list.append(p) return filtered_list
[ "def", "filter_partition_list", "(", "partitions", ",", "devnames", ")", ":", "filtered_list", "=", "[", "]", "for", "p", "in", "partitions", ":", "for", "d", "in", "devnames", ":", "if", "(", "(", "p", ".", "device", "==", "d", ")", "and", "(", "p", "not", "in", "filtered_list", ")", ")", ":", "filtered_list", ".", "append", "(", "p", ")", "return", "filtered_list" ]
filter a partition list, keeping only partitions whose device appears in devnames .
train
false
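The filtering above only needs objects with a device attribute, so a namedtuple is enough to demonstrate it (assuming filter_partition_list is in scope):

from collections import namedtuple
Partition = namedtuple('Partition', 'device mountpoint')
parts = [Partition('/dev/sda1', '/'),
         Partition('/dev/sdb1', '/data'),
         Partition('/dev/sda2', '/home')]
# keeps input order and drops partitions whose device is not listed
assert filter_partition_list(parts, ['/dev/sda1', '/dev/sda2']) == [parts[0], parts[2]]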
3,564
def active_contributors(from_date, to_date=None, locale=None, product=None): return User.objects.filter(id__in=_active_contributors_id(from_date, to_date, locale, product)).order_by('username')
[ "def", "active_contributors", "(", "from_date", ",", "to_date", "=", "None", ",", "locale", "=", "None", ",", "product", "=", "None", ")", ":", "return", "User", ".", "objects", ".", "filter", "(", "id__in", "=", "_active_contributors_id", "(", "from_date", ",", "to_date", ",", "locale", ",", "product", ")", ")", ".", "order_by", "(", "'username'", ")" ]
return active kb contributors for the specified parameters .
train
false
3,566
def only_squares(*matrices): if (matrices[0].rows != matrices[(-1)].cols): raise RuntimeError('Invalid matrices being multiplied') out = [] start = 0 for (i, M) in enumerate(matrices): if (M.cols == matrices[start].rows): out.append(MatMul(*matrices[start:(i + 1)]).doit()) start = (i + 1) return out
[ "def", "only_squares", "(", "*", "matrices", ")", ":", "if", "(", "matrices", "[", "0", "]", ".", "rows", "!=", "matrices", "[", "(", "-", "1", ")", "]", ".", "cols", ")", ":", "raise", "RuntimeError", "(", "'Invalid matrices being multiplied'", ")", "out", "=", "[", "]", "start", "=", "0", "for", "(", "i", ",", "M", ")", "in", "enumerate", "(", "matrices", ")", ":", "if", "(", "M", ".", "cols", "==", "matrices", "[", "start", "]", ".", "rows", ")", ":", "out", ".", "append", "(", "MatMul", "(", "*", "matrices", "[", "start", ":", "(", "i", "+", "1", ")", "]", ")", ".", "doit", "(", ")", ")", "start", "=", "(", "i", "+", "1", ")", "return", "out" ]
factor matrices only if they are square .
train
false
3,567
def compute_eta(start_time, current_value, final_value): elapsed = (time.time() - start_time) completion = ((float(current_value) / final_value) or 1e-05) return get_time_units((((1.0 / completion) * elapsed) - elapsed))
[ "def", "compute_eta", "(", "start_time", ",", "current_value", ",", "final_value", ")", ":", "elapsed", "=", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", "completion", "=", "(", "(", "float", "(", "current_value", ")", "/", "final_value", ")", "or", "1e-05", ")", "return", "get_time_units", "(", "(", "(", "(", "1.0", "/", "completion", ")", "*", "elapsed", ")", "-", "elapsed", ")", ")" ]
compute an eta from the elapsed time and the fraction completed .
train
false
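get_time_units is not shown in the record, so here is a standalone sketch of just the eta arithmetic (it returns raw seconds instead of formatted units):

import time
start_time = time.time() - 30.0        # pretend 30 s have elapsed
current_value, final_value = 25, 100   # 25 % done
elapsed = time.time() - start_time
completion = float(current_value) / final_value
remaining = (1.0 / completion) * elapsed - elapsed
# 30 s bought 25 %, so the remaining 75 % should cost about 90 s
assert 89.0 < remaining < 91.0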
3,571
def fdatasync(fd): try: os.fdatasync(fd) except AttributeError: fsync(fd)
[ "def", "fdatasync", "(", "fd", ")", ":", "try", ":", "os", ".", "fdatasync", "(", "fd", ")", "except", "AttributeError", ":", "fsync", "(", "fd", ")" ]
sync modified file data to disk .
train
false
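Usage sketch for the fallback wrapper above (assuming fdatasync and its fsync fallback are in scope); on platforms without os.fdatasync, such as macOS, the AttributeError branch is taken:

import os, tempfile
fd, path = tempfile.mkstemp()
os.write(fd, b'payload')
fdatasync(fd)   # os.fdatasync where available, otherwise the fsync fallback
os.close(fd)
os.remove(path)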
3,572
def registerNamespaceAlias(namespace_uri, alias): global registered_aliases if (registered_aliases.get(alias) == namespace_uri): return if (namespace_uri in registered_aliases.values()): raise NamespaceAliasRegistrationError, ('Namespace uri %r already registered' % (namespace_uri,)) if (alias in registered_aliases): raise NamespaceAliasRegistrationError, ('Alias %r already registered' % (alias,)) registered_aliases[alias] = namespace_uri
[ "def", "registerNamespaceAlias", "(", "namespace_uri", ",", "alias", ")", ":", "global", "registered_aliases", "if", "(", "registered_aliases", ".", "get", "(", "alias", ")", "==", "namespace_uri", ")", ":", "return", "if", "(", "namespace_uri", "in", "registered_aliases", ".", "values", "(", ")", ")", ":", "raise", "NamespaceAliasRegistrationError", ",", "(", "'Namespace uri %r already registered'", "%", "(", "namespace_uri", ",", ")", ")", "if", "(", "alias", "in", "registered_aliases", ")", ":", "raise", "NamespaceAliasRegistrationError", ",", "(", "'Alias %r already registered'", "%", "(", "alias", ",", ")", ")", "registered_aliases", "[", "alias", "]", "=", "namespace_uri" ]
registers a mapping in a global namespace alias map .
train
true
3,573
def _should_include_path(path, includes, excludes): from os.path import basename from fnmatch import fnmatch base = basename(path) if includes: for include in includes: if fnmatch(base, include): try: log.debug("include `%s' (matches `%s')", path, include) except (NameError, AttributeError): pass break else: try: log.debug("exclude `%s' (matches no includes)", path) except (NameError, AttributeError): pass return False for exclude in excludes: if fnmatch(base, exclude): try: log.debug("exclude `%s' (matches `%s')", path, exclude) except (NameError, AttributeError): pass return False return True
[ "def", "_should_include_path", "(", "path", ",", "includes", ",", "excludes", ")", ":", "from", "os", ".", "path", "import", "basename", "from", "fnmatch", "import", "fnmatch", "base", "=", "basename", "(", "path", ")", "if", "includes", ":", "for", "include", "in", "includes", ":", "if", "fnmatch", "(", "base", ",", "include", ")", ":", "try", ":", "log", ".", "debug", "(", "\"include `%s' (matches `%s')\"", ",", "path", ",", "include", ")", "except", "(", "NameError", ",", "AttributeError", ")", ":", "pass", "break", "else", ":", "try", ":", "log", ".", "debug", "(", "\"exclude `%s' (matches no includes)\"", ",", "path", ")", "except", "(", "NameError", ",", "AttributeError", ")", ":", "pass", "return", "False", "for", "exclude", "in", "excludes", ":", "if", "fnmatch", "(", "base", ",", "exclude", ")", ":", "try", ":", "log", ".", "debug", "(", "\"exclude `%s' (matches `%s')\"", ",", "path", ",", "exclude", ")", "except", "(", "NameError", ",", "AttributeError", ")", ":", "pass", "return", "False", "return", "True" ]
return true iff the given path should be included .
train
false
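Behavior sketch for the include/exclude logic above (assuming _should_include_path is in scope; a missing log object is harmless because the log.debug calls are wrapped in try/except):

assert _should_include_path('/src/app.py', ['*.py'], []) is True
assert _should_include_path('/src/app.pyc', ['*.py'], []) is False               # matches no include
assert _should_include_path('/src/test_app.py', ['*.py'], ['test_*']) is False   # excluded
assert _should_include_path('/src/readme.txt', [], ['*.tmp']) is True            # empty includes = include all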
3,575
def libvlc_vlm_set_loop(p_instance, psz_name, b_loop): f = (_Cfunctions.get('libvlc_vlm_set_loop', None) or _Cfunction('libvlc_vlm_set_loop', ((1,), (1,), (1,)), None, ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)) return f(p_instance, psz_name, b_loop)
[ "def", "libvlc_vlm_set_loop", "(", "p_instance", ",", "psz_name", ",", "b_loop", ")", ":", "f", "=", "(", "_Cfunctions", ".", "get", "(", "'libvlc_vlm_set_loop'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_vlm_set_loop'", ",", "(", "(", "1", ",", ")", ",", "(", "1", ",", ")", ",", "(", "1", ",", ")", ")", ",", "None", ",", "ctypes", ".", "c_int", ",", "Instance", ",", "ctypes", ".", "c_char_p", ",", "ctypes", ".", "c_int", ")", ")", "return", "f", "(", "p_instance", ",", "psz_name", ",", "b_loop", ")" ]
set a media's loop status .
train
true
3,578
@contextmanager def check_environ(unset=True): with mock.patch.dict(os.environ): old_fds = os.environ.get('LISTEN_FDS', None) old_pid = os.environ.get('LISTEN_PID', None) (yield) if unset: assert ('LISTEN_FDS' not in os.environ), 'LISTEN_FDS should have been unset' assert ('LISTEN_PID' not in os.environ), 'LISTEN_PID should have been unset' else: new_fds = os.environ.get('LISTEN_FDS', None) new_pid = os.environ.get('LISTEN_PID', None) assert (new_fds == old_fds), 'LISTEN_FDS should not have been changed' assert (new_pid == old_pid), 'LISTEN_PID should not have been changed'
[ "@", "contextmanager", "def", "check_environ", "(", "unset", "=", "True", ")", ":", "with", "mock", ".", "patch", ".", "dict", "(", "os", ".", "environ", ")", ":", "old_fds", "=", "os", ".", "environ", ".", "get", "(", "'LISTEN_FDS'", ",", "None", ")", "old_pid", "=", "os", ".", "environ", ".", "get", "(", "'LISTEN_PID'", ",", "None", ")", "(", "yield", ")", "if", "unset", ":", "assert", "(", "'LISTEN_FDS'", "not", "in", "os", ".", "environ", ")", ",", "'LISTEN_FDS should have been unset'", "assert", "(", "'LISTEN_PID'", "not", "in", "os", ".", "environ", ")", ",", "'LISTEN_PID should have been unset'", "else", ":", "new_fds", "=", "os", ".", "environ", ".", "get", "(", "'LISTEN_FDS'", ",", "None", ")", "new_pid", "=", "os", ".", "environ", ".", "get", "(", "'LISTEN_PID'", ",", "None", ")", "assert", "(", "new_fds", "==", "old_fds", ")", ",", "'LISTEN_FDS should not have been changed'", "assert", "(", "new_pid", "==", "old_pid", ")", ",", "'LISTEN_PID should not have been changed'" ]
ensure that os.environ ends up with LISTEN_FDS and LISTEN_PID either unset or unchanged .
train
false
3,580
def show_name_details(book, name, show_contents=0, f=sys.stdout): name_lcase = name.lower() nobj_list = book.name_map.get(name_lcase) if (not nobj_list): print(('%r: unknown name' % name), file=f) return for nobj in nobj_list: show_name_object(book, nobj, show_contents, f)
[ "def", "show_name_details", "(", "book", ",", "name", ",", "show_contents", "=", "0", ",", "f", "=", "sys", ".", "stdout", ")", ":", "name_lcase", "=", "name", ".", "lower", "(", ")", "nobj_list", "=", "book", ".", "name_map", ".", "get", "(", "name_lcase", ")", "if", "(", "not", "nobj_list", ")", ":", "print", "(", "(", "'%r: unknown name'", "%", "name", ")", ",", "file", "=", "f", ")", "return", "for", "nobj", "in", "nobj_list", ":", "show_name_object", "(", "book", ",", "nobj", ",", "show_contents", ",", "f", ")" ]
show the details of a defined name in a book object obtained from xlrd .
train
false
3,581
def parse_server(server_string): host = DefaultHost port = DefaultPort if ((server_string != None) and (server_string.find(':') == (-1))): host = server_string elif (server_string != None): (host, port) = server_string.split(':', 1) return (socket.gethostbyname(host), int(port))
[ "def", "parse_server", "(", "server_string", ")", ":", "host", "=", "DefaultHost", "port", "=", "DefaultPort", "if", "(", "(", "server_string", "!=", "None", ")", "and", "(", "server_string", ".", "find", "(", "':'", ")", "==", "(", "-", "1", ")", ")", ")", ":", "host", "=", "server_string", "elif", "(", "server_string", "!=", "None", ")", ":", "(", "host", ",", "port", ")", "=", "server_string", ".", "split", "(", "':'", ",", "1", ")", "return", "(", "socket", ".", "gethostbyname", "(", "host", ")", ",", "int", "(", "port", ")", ")" ]
decode the server endpoint parameters .
train
false
3,582
def _swap(f, i): ring = f.ring k = ring.ngens fswap = ring.zero for (monom, coeff) in f.iterterms(): monomswap = (((monom[i],) + monom[:i]) + monom[(i + 1):]) fswap[monomswap] = coeff return fswap
[ "def", "_swap", "(", "f", ",", "i", ")", ":", "ring", "=", "f", ".", "ring", "k", "=", "ring", ".", "ngens", "fswap", "=", "ring", ".", "zero", "for", "(", "monom", ",", "coeff", ")", "in", "f", ".", "iterterms", "(", ")", ":", "monomswap", "=", "(", "(", "(", "monom", "[", "i", "]", ",", ")", "+", "monom", "[", ":", "i", "]", ")", "+", "monom", "[", "(", "i", "+", "1", ")", ":", "]", ")", "fswap", "[", "monomswap", "]", "=", "coeff", "return", "fswap" ]
make the variable x_i the leading one in a multivariate polynomial f .
train
false
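A standalone sketch of the monomial reordering done by _swap, using plain dicts of exponent tuples instead of a polynomial ring:

def swap_monom(monom, i):
    # move the exponent of x_i to the front, keeping the rest in order
    return (monom[i],) + monom[:i] + monom[i + 1:]

f = {(1, 2, 3): 7, (0, 1, 0): 5}   # 7*x0*x1**2*x2**3 + 5*x1
fswap = dict((swap_monom(m, 2), c) for m, c in f.items())
assert fswap == {(3, 1, 2): 7, (0, 0, 1): 5}   # x2 is now the leading variable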
3,583
def countby(key, seq): if (not callable(key)): key = getter(key) return frequencies(map(key, seq))
[ "def", "countby", "(", "key", ",", "seq", ")", ":", "if", "(", "not", "callable", "(", "key", ")", ")", ":", "key", "=", "getter", "(", "key", ")", "return", "frequencies", "(", "map", "(", "key", ",", "seq", ")", ")" ]
count elements of a collection by a key function .
train
false
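This record matches the implementation of toolz.countby, so it can be exercised directly from that library:

from toolz import countby
assert countby(len, ['cat', 'mouse', 'dog']) == {3: 2, 5: 1}
# non-callable keys go through getter(), i.e. item lookup
assert countby('weight', [{'weight': 1}, {'weight': 1}, {'weight': 3}]) == {1: 2, 3: 1}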
3,584
def get_markup_filter_name(): name = get_markup_filter()[0] return ('html' if (name is None) else name)
[ "def", "get_markup_filter_name", "(", ")", ":", "name", "=", "get_markup_filter", "(", ")", "[", "0", "]", "return", "(", "'html'", "if", "(", "name", "is", "None", ")", "else", "name", ")" ]
returns the current markup filter's name .
train
false
3,585
def send_alert(request, message=None, url=None, code='soft-eol'): if (url is None): url = request.registry.settings['project_docs'] request.response.headers['Alert'] = encode_header(json.dumps({'code': code, 'message': message, 'url': url}))
[ "def", "send_alert", "(", "request", ",", "message", "=", "None", ",", "url", "=", "None", ",", "code", "=", "'soft-eol'", ")", ":", "if", "(", "url", "is", "None", ")", ":", "url", "=", "request", ".", "registry", ".", "settings", "[", "'project_docs'", "]", "request", ".", "response", ".", "headers", "[", "'Alert'", "]", "=", "encode_header", "(", "json", ".", "dumps", "(", "{", "'code'", ":", "code", ",", "'message'", ":", "message", ",", "'url'", ":", "url", "}", ")", ")" ]
helper to add an alert header to the response .
train
false
3,588
def make_valuation(concepts, read=False, lexicon=False): vals = [] for c in concepts: vals.append((c.prefLabel, c.extension)) if lexicon: read = True if read: from nltk.sem import Valuation val = Valuation({}) val.update(vals) val = label_indivs(val, lexicon=lexicon) return val else: return vals
[ "def", "make_valuation", "(", "concepts", ",", "read", "=", "False", ",", "lexicon", "=", "False", ")", ":", "vals", "=", "[", "]", "for", "c", "in", "concepts", ":", "vals", ".", "append", "(", "(", "c", ".", "prefLabel", ",", "c", ".", "extension", ")", ")", "if", "lexicon", ":", "read", "=", "True", "if", "read", ":", "from", "nltk", ".", "sem", "import", "Valuation", "val", "=", "Valuation", "(", "{", "}", ")", "val", ".", "update", "(", "vals", ")", "val", "=", "label_indivs", "(", "val", ",", "lexicon", "=", "lexicon", ")", "return", "val", "else", ":", "return", "vals" ]
convert a list of concept objects into a list of pairs; optionally create a valuation object .
train
false
3,589
def put_lifecycle_configuration(Bucket, Rules, region=None, key=None, keyid=None, profile=None): try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if ((Rules is not None) and isinstance(Rules, six.string_types)): Rules = json.loads(Rules) conn.put_bucket_lifecycle_configuration(Bucket=Bucket, LifecycleConfiguration={'Rules': Rules}) return {'updated': True, 'name': Bucket} except ClientError as e: return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
[ "def", "put_lifecycle_configuration", "(", "Bucket", ",", "Rules", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "(", "(", "Rules", "is", "not", "None", ")", "and", "isinstance", "(", "Rules", ",", "six", ".", "string_types", ")", ")", ":", "Rules", "=", "json", ".", "loads", "(", "Rules", ")", "conn", ".", "put_bucket_lifecycle_configuration", "(", "Bucket", "=", "Bucket", ",", "LifecycleConfiguration", "=", "{", "'Rules'", ":", "Rules", "}", ")", "return", "{", "'updated'", ":", "True", ",", "'name'", ":", "Bucket", "}", "except", "ClientError", "as", "e", ":", "return", "{", "'updated'", ":", "False", ",", "'error'", ":", "__utils__", "[", "'boto3.get_error'", "]", "(", "e", ")", "}" ]
given a valid config, update the lifecycle configuration for a bucket .
train
true
3,590
def _wrapx(input, output, repeat): output[...] = 0 nbytes = (((repeat - 1) // 8) + 1) unused = ((nbytes * 8) - repeat) for i in range(nbytes): _min = (i * 8) _max = min(((i + 1) * 8), repeat) for j in range(_min, _max): if (j != _min): np.left_shift(output[..., i], 1, output[..., i]) np.add(output[..., i], input[..., j], output[..., i]) np.left_shift(output[..., i], unused, output[..., i])
[ "def", "_wrapx", "(", "input", ",", "output", ",", "repeat", ")", ":", "output", "[", "...", "]", "=", "0", "nbytes", "=", "(", "(", "(", "repeat", "-", "1", ")", "//", "8", ")", "+", "1", ")", "unused", "=", "(", "(", "nbytes", "*", "8", ")", "-", "repeat", ")", "for", "i", "in", "range", "(", "nbytes", ")", ":", "_min", "=", "(", "i", "*", "8", ")", "_max", "=", "min", "(", "(", "(", "i", "+", "1", ")", "*", "8", ")", ",", "repeat", ")", "for", "j", "in", "range", "(", "_min", ",", "_max", ")", ":", "if", "(", "j", "!=", "_min", ")", ":", "np", ".", "left_shift", "(", "output", "[", "...", ",", "i", "]", ",", "1", ",", "output", "[", "...", ",", "i", "]", ")", "np", ".", "add", "(", "output", "[", "...", ",", "i", "]", ",", "input", "[", "...", ",", "j", "]", ",", "output", "[", "...", ",", "i", "]", ")", "np", ".", "left_shift", "(", "output", "[", "...", ",", "i", "]", ",", "unused", ",", "output", "[", "...", ",", "i", "]", ")" ]
wrap the x format column boolean array into a uint8 array .
train
false
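Behavior sketch for _wrapx, assuming it is in scope and that the final left_shift pads only the last byte (as in astropy's version of this function); it packs booleans MSB-first, row by row:

import numpy as np
bits = np.array([[1, 0, 1, 1, 0, 0, 1, 0, 1, 1]], dtype=np.uint8)  # one row, repeat = 10
out = np.zeros((1, 2), dtype=np.uint8)
_wrapx(bits, out, 10)
# 0b10110010 = 178, and the trailing '11' shifted into the high bits: 0b11000000 = 192
assert out.tolist() == [[178, 192]]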
3,592
def ValidateVfsPath(path): components = (path or '').lstrip('/').split('/') if (not components): raise ValueError(('Empty path is not a valid path: %s.' % utils.SmartStr(path))) if (components[0] not in ROOT_FILES_WHITELIST): raise ValueError(("First path component was '%s', but has to be one of %s" % (utils.SmartStr(components[0]), ', '.join(ROOT_FILES_WHITELIST)))) return True
[ "def", "ValidateVfsPath", "(", "path", ")", ":", "components", "=", "(", "path", "or", "''", ")", ".", "lstrip", "(", "'/'", ")", ".", "split", "(", "'/'", ")", "if", "(", "not", "components", ")", ":", "raise", "ValueError", "(", "(", "'Empty path is not a valid path: %s.'", "%", "utils", ".", "SmartStr", "(", "path", ")", ")", ")", "if", "(", "components", "[", "0", "]", "not", "in", "ROOT_FILES_WHITELIST", ")", ":", "raise", "ValueError", "(", "(", "\"First path component was '%s', but has to be one of %s\"", "%", "(", "utils", ".", "SmartStr", "(", "components", "[", "0", "]", ")", ",", "', '", ".", "join", "(", "ROOT_FILES_WHITELIST", ")", ")", ")", ")", "return", "True" ]
validates a vfs path .
train
true
3,593
@validate('form') def valid_type_in_colspan(arch): return all((attrib.isdigit() for attrib in arch.xpath('//@colspan')))
[ "@", "validate", "(", "'form'", ")", "def", "valid_type_in_colspan", "(", "arch", ")", ":", "return", "all", "(", "(", "attrib", ".", "isdigit", "(", ")", "for", "attrib", "in", "arch", ".", "xpath", "(", "'//@colspan'", ")", ")", ")" ]
a colspan attribute must be an integer type .
train
false
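The validator above only checks that every colspan attribute is all digits; a quick lxml sketch of the same xpath test:

from lxml import etree
ok = etree.fromstring('<form><field name="a" colspan="2"/></form>')
bad = etree.fromstring('<form><field name="a" colspan="two"/></form>')
assert all(a.isdigit() for a in ok.xpath('//@colspan'))
assert not all(a.isdigit() for a in bad.xpath('//@colspan'))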
3,594
def gf_irred_p_rabin(f, p, K): n = gf_degree(f) if (n <= 1): return True (_, f) = gf_monic(f, p, K) x = [K.one, K.zero] indices = {(n // d) for d in factorint(n)} b = gf_frobenius_monomial_base(f, p, K) h = b[1] for i in range(1, n): if (i in indices): g = gf_sub(h, x, p, K) if (gf_gcd(f, g, p, K) != [K.one]): return False h = gf_frobenius_map(h, f, b, p, K) return (h == x)
[ "def", "gf_irred_p_rabin", "(", "f", ",", "p", ",", "K", ")", ":", "n", "=", "gf_degree", "(", "f", ")", "if", "(", "n", "<=", "1", ")", ":", "return", "True", "(", "_", ",", "f", ")", "=", "gf_monic", "(", "f", ",", "p", ",", "K", ")", "x", "=", "[", "K", ".", "one", ",", "K", ".", "zero", "]", "indices", "=", "{", "(", "n", "//", "d", ")", "for", "d", "in", "factorint", "(", "n", ")", "}", "b", "=", "gf_frobenius_monomial_base", "(", "f", ",", "p", ",", "K", ")", "h", "=", "b", "[", "1", "]", "for", "i", "in", "range", "(", "1", ",", "n", ")", ":", "if", "(", "i", "in", "indices", ")", ":", "g", "=", "gf_sub", "(", "h", ",", "x", ",", "p", ",", "K", ")", "if", "(", "gf_gcd", "(", "f", ",", "g", ",", "p", ",", "K", ")", "!=", "[", "K", ".", "one", "]", ")", ":", "return", "False", "h", "=", "gf_frobenius_map", "(", "h", ",", "f", ",", "b", ",", "p", ",", "K", ")", "return", "(", "h", "==", "x", ")" ]
rabin's polynomial irreducibility test over finite fields .
train
false
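This matches sympy's low-level galois tools, so the record can be exercised through that module (import paths as in recent sympy versions):

from sympy.polys.galoistools import gf_irred_p_rabin
from sympy.polys.domains import ZZ
assert gf_irred_p_rabin([1, 1, 1], 2, ZZ)        # x**2 + x + 1 is irreducible over GF(2)
assert not gf_irred_p_rabin([1, 0, 1], 2, ZZ)    # x**2 + 1 == (x + 1)**2 over GF(2)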
3,595
def gen3(): for i in (0, 1, 2): (yield i)
[ "def", "gen3", "(", ")", ":", "for", "i", "in", "(", "0", ",", "1", ",", "2", ")", ":", "(", "yield", "i", ")" ]
non-restartable source sequence .
train
false
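"Non-restartable" in practice: the generator object is exhausted after one pass, while each fresh call yields a new sequence (assuming gen3 is in scope):

g = gen3()
assert list(g) == [0, 1, 2]
assert list(g) == []               # same object: nothing left
assert list(gen3()) == [0, 1, 2]   # new object: fresh sequence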
3,597
def def_unit(s, represents=None, doc=None, format=None, prefixes=False, exclude_prefixes=[], namespace=None): if (represents is not None): result = Unit(s, represents, namespace=namespace, doc=doc, format=format) else: result = IrreducibleUnit(s, namespace=namespace, doc=doc, format=format) if prefixes: _add_prefixes(result, excludes=exclude_prefixes, namespace=namespace, prefixes=prefixes) return result
[ "def", "def_unit", "(", "s", ",", "represents", "=", "None", ",", "doc", "=", "None", ",", "format", "=", "None", ",", "prefixes", "=", "False", ",", "exclude_prefixes", "=", "[", "]", ",", "namespace", "=", "None", ")", ":", "if", "(", "represents", "is", "not", "None", ")", ":", "result", "=", "Unit", "(", "s", ",", "represents", ",", "namespace", "=", "namespace", ",", "doc", "=", "doc", ",", "format", "=", "format", ")", "else", ":", "result", "=", "IrreducibleUnit", "(", "s", ",", "namespace", "=", "namespace", ",", "doc", "=", "doc", ",", "format", "=", "format", ")", "if", "prefixes", ":", "_add_prefixes", "(", "result", ",", "excludes", "=", "exclude_prefixes", ",", "namespace", "=", "namespace", ",", "prefixes", "=", "prefixes", ")", "return", "result" ]
factory function for defining new units .
train
false
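astropy.units exposes def_unit with this shape, so the record can be exercised there (the unit names are the toy ones from the astropy docs):

from astropy import units as u
titter = u.def_unit('titter')                  # new irreducible unit
chuckle = u.def_unit('chuckle', 5 * titter)    # derived: 1 chuckle == 5 titter
assert (3 * chuckle).to(titter).value == 15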
3,598
def create_pipeline(name, unique_id, description='', region=None, key=None, keyid=None, profile=None): client = _get_client(region, key, keyid, profile) r = {} try: response = client.create_pipeline(name=name, uniqueId=unique_id, description=description) r['result'] = response['pipelineId'] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: r['error'] = str(e) return r
[ "def", "create_pipeline", "(", "name", ",", "unique_id", ",", "description", "=", "''", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "client", "=", "_get_client", "(", "region", ",", "key", ",", "keyid", ",", "profile", ")", "r", "=", "{", "}", "try", ":", "response", "=", "client", ".", "create_pipeline", "(", "name", "=", "name", ",", "uniqueId", "=", "unique_id", ",", "description", "=", "description", ")", "r", "[", "'result'", "]", "=", "response", "[", "'pipelineId'", "]", "except", "(", "botocore", ".", "exceptions", ".", "BotoCoreError", ",", "botocore", ".", "exceptions", ".", "ClientError", ")", "as", "e", ":", "r", "[", "'error'", "]", "=", "str", "(", "e", ")", "return", "r" ]
create a new, empty data pipeline .
train
true
3,599
def config_gen(dic): sio = StringIO() print >>sio, '<?xml version="1.0" encoding="UTF-8"?>' print >>sio, '<configuration>' for (k, v) in dic.iteritems(): print >>sio, ('<property>\n <name>%s</name>\n <value><![CDATA[%s]]></value>\n</property>\n' % (escape(k), (v.replace(']]>', '') if isinstance(v, basestring) else v))) print >>sio, '</configuration>' sio.flush() sio.seek(0) return sio.read()
[ "def", "config_gen", "(", "dic", ")", ":", "sio", "=", "StringIO", "(", ")", "print", ">>", "sio", ",", "'<?xml version=\"1.0\" encoding=\"UTF-8\"?>'", "print", ">>", "sio", ",", "'<configuration>'", "for", "(", "k", ",", "v", ")", "in", "dic", ".", "iteritems", "(", ")", ":", "print", ">>", "sio", ",", "(", "'<property>\\n <name>%s</name>\\n <value><![CDATA[%s]]></value>\\n</property>\\n'", "%", "(", "escape", "(", "k", ")", ",", "(", "v", ".", "replace", "(", "']]>'", ",", "''", ")", "if", "isinstance", "(", "v", ",", "basestring", ")", "else", "v", ")", ")", ")", "print", ">>", "sio", ",", "'</configuration>'", "sio", ".", "flush", "(", ")", "sio", ".", "seek", "(", "0", ")", "return", "sio", ".", "read", "(", ")" ]
config_gen -> xml for oozie workflow configuration .
train
false
3,600
def balanced_eq(want, to_balance): expander = ForParser(to_balance) eq_(want, expander.to_unicode())
[ "def", "balanced_eq", "(", "want", ",", "to_balance", ")", ":", "expander", "=", "ForParser", "(", "to_balance", ")", "eq_", "(", "want", ",", "expander", ".", "to_unicode", "(", ")", ")" ]
run to_balance through the expander to get its tags balanced .
train
false
3,602
def makeelement(tagname, tagtext=None, nsprefix='w', attributes=None, attrnsprefix=None): namespacemap = None if isinstance(nsprefix, list): namespacemap = {} for prefix in nsprefix: namespacemap[prefix] = nsprefixes[prefix] nsprefix = nsprefix[0] if nsprefix: namespace = ('{%s}' % nsprefixes[nsprefix]) else: namespace = '' newelement = etree.Element((namespace + tagname), nsmap=namespacemap) if attributes: if (not attrnsprefix): if (nsprefix == 'w'): attributenamespace = namespace else: attributenamespace = '' else: attributenamespace = (('{' + nsprefixes[attrnsprefix]) + '}') for tagattribute in attributes: newelement.set((attributenamespace + tagattribute), attributes[tagattribute]) if tagtext: newelement.text = tagtext return newelement
[ "def", "makeelement", "(", "tagname", ",", "tagtext", "=", "None", ",", "nsprefix", "=", "'w'", ",", "attributes", "=", "None", ",", "attrnsprefix", "=", "None", ")", ":", "namespacemap", "=", "None", "if", "isinstance", "(", "nsprefix", ",", "list", ")", ":", "namespacemap", "=", "{", "}", "for", "prefix", "in", "nsprefix", ":", "namespacemap", "[", "prefix", "]", "=", "nsprefixes", "[", "prefix", "]", "nsprefix", "=", "nsprefix", "[", "0", "]", "if", "nsprefix", ":", "namespace", "=", "(", "'{%s}'", "%", "nsprefixes", "[", "nsprefix", "]", ")", "else", ":", "namespace", "=", "''", "newelement", "=", "etree", ".", "Element", "(", "(", "namespace", "+", "tagname", ")", ",", "nsmap", "=", "namespacemap", ")", "if", "attributes", ":", "if", "(", "not", "attrnsprefix", ")", ":", "if", "(", "nsprefix", "==", "'w'", ")", ":", "attributenamespace", "=", "namespace", "else", ":", "attributenamespace", "=", "''", "else", ":", "attributenamespace", "=", "(", "(", "'{'", "+", "nsprefixes", "[", "attrnsprefix", "]", ")", "+", "'}'", ")", "for", "tagattribute", "in", "attributes", ":", "newelement", ".", "set", "(", "(", "attributenamespace", "+", "tagattribute", ")", ",", "attributes", "[", "tagattribute", "]", ")", "if", "tagtext", ":", "newelement", ".", "text", "=", "tagtext", "return", "newelement" ]
create an element & return it .
train
true
3,603
def get_mock_hdfs_root(environ=None): return get_mock_dir('hdfs', environ=environ)
[ "def", "get_mock_hdfs_root", "(", "environ", "=", "None", ")", ":", "return", "get_mock_dir", "(", "'hdfs'", ",", "environ", "=", "environ", ")" ]
get the path of the mock hdfs root .
train
false
3,604
def get_mirrors(hostname=None): if (hostname is None): hostname = DEFAULT_MIRROR_URL try: hostname = socket.gethostbyname_ex(hostname)[0] except socket.gaierror: return [] end_letter = hostname.split('.', 1) return [('%s.%s' % (s, end_letter[1])) for s in string_range(end_letter[0])]
[ "def", "get_mirrors", "(", "hostname", "=", "None", ")", ":", "if", "(", "hostname", "is", "None", ")", ":", "hostname", "=", "DEFAULT_MIRROR_URL", "try", ":", "hostname", "=", "socket", ".", "gethostbyname_ex", "(", "hostname", ")", "[", "0", "]", "except", "socket", ".", "gaierror", ":", "return", "[", "]", "end_letter", "=", "hostname", ".", "split", "(", "'.'", ",", "1", ")", "return", "[", "(", "'%s.%s'", "%", "(", "s", ",", "end_letter", "[", "1", "]", ")", ")", "for", "s", "in", "string_range", "(", "end_letter", "[", "0", "]", ")", "]" ]
get a repository mirror list from gitosis .
train
true
3,605
def is_safe_for_update(block_device_dict): fields = set(block_device_dict.keys()) return (fields <= ((bdm_new_fields | bdm_db_inherited_fields) | bdm_db_only_fields))
[ "def", "is_safe_for_update", "(", "block_device_dict", ")", ":", "fields", "=", "set", "(", "block_device_dict", ".", "keys", "(", ")", ")", "return", "(", "fields", "<=", "(", "(", "bdm_new_fields", "|", "bdm_db_inherited_fields", ")", "|", "bdm_db_only_fields", ")", ")" ]
determine if passed dict is a safe subset for update .
train
false
3,606
def clone_get_equiv(inputs, outputs, copy_inputs_and_orphans=True, memo=None): if (memo is None): memo = {} for input in inputs: if copy_inputs_and_orphans: cpy = input.clone() cpy.owner = None cpy.index = None memo.setdefault(input, cpy) else: memo.setdefault(input, input) for apply in io_toposort(inputs, outputs): for input in apply.inputs: if (input not in memo): if copy_inputs_and_orphans: cpy = input.clone() memo[input] = cpy else: memo[input] = input new_apply = apply.clone_with_new_inputs([memo[i] for i in apply.inputs]) memo.setdefault(apply, new_apply) for (output, new_output) in zip(apply.outputs, new_apply.outputs): memo.setdefault(output, new_output) for output in outputs: if (output not in memo): memo[output] = output.clone() return memo
[ "def", "clone_get_equiv", "(", "inputs", ",", "outputs", ",", "copy_inputs_and_orphans", "=", "True", ",", "memo", "=", "None", ")", ":", "if", "(", "memo", "is", "None", ")", ":", "memo", "=", "{", "}", "for", "input", "in", "inputs", ":", "if", "copy_inputs_and_orphans", ":", "cpy", "=", "input", ".", "clone", "(", ")", "cpy", ".", "owner", "=", "None", "cpy", ".", "index", "=", "None", "memo", ".", "setdefault", "(", "input", ",", "cpy", ")", "else", ":", "memo", ".", "setdefault", "(", "input", ",", "input", ")", "for", "apply", "in", "io_toposort", "(", "inputs", ",", "outputs", ")", ":", "for", "input", "in", "apply", ".", "inputs", ":", "if", "(", "input", "not", "in", "memo", ")", ":", "if", "copy_inputs_and_orphans", ":", "cpy", "=", "input", ".", "clone", "(", ")", "memo", "[", "input", "]", "=", "cpy", "else", ":", "memo", "[", "input", "]", "=", "input", "new_apply", "=", "apply", ".", "clone_with_new_inputs", "(", "[", "memo", "[", "i", "]", "for", "i", "in", "apply", ".", "inputs", "]", ")", "memo", ".", "setdefault", "(", "apply", ",", "new_apply", ")", "for", "(", "output", ",", "new_output", ")", "in", "zip", "(", "apply", ".", "outputs", ",", "new_apply", ".", "outputs", ")", ":", "memo", ".", "setdefault", "(", "output", ",", "new_output", ")", "for", "output", "in", "outputs", ":", "if", "(", "output", "not", "in", "memo", ")", ":", "memo", "[", "output", "]", "=", "output", ".", "clone", "(", ")", "return", "memo" ]
return a dictionary that maps from variable and apply nodes in the original graph to a new node in a new graph .
train
false
3,607
def config_14(key, salt, string_list): if ('email' in string_list[18]): dup = True elif ('email' in string_list[19]): dup = False config_dict = {} config_dict['Version'] = 'Predator Pain v14' config_dict['Email Address'] = decrypt_string(key, salt, string_list[4]) config_dict['Email Password'] = decrypt_string(key, salt, string_list[5]) config_dict['SMTP Server'] = decrypt_string(key, salt, string_list[6]) config_dict['SMTP Port'] = string_list[7] config_dict['Interval Timer'] = string_list[8] config_dict['FTP Host'] = decrypt_string(key, salt, string_list[12]) config_dict['FTP User'] = decrypt_string(key, salt, string_list[13]) config_dict['FTP Pass'] = decrypt_string(key, salt, string_list[14]) config_dict['PHP Link'] = decrypt_string(key, salt, string_list[15]) if dup: config_dict['PHP Link'] = decrypt_string(key, salt, string_list[15]) config_dict['Use Email'] = string_list[18] config_dict['Use FTP'] = string_list[19] config_dict['Use PHP'] = string_list[20] config_dict['Download & Exec'] = string_list[25] if (string_list[24] == 'bindfiles'): config_dict['Bound Files'] = 'False' else: config_dict['Bound Files'] = 'True' else: config_dict['Use Email'] = string_list[19] config_dict['Use FTP'] = string_list[20] config_dict['Use PHP'] = string_list[21] config_dict['Download & Exec'] = string_list[26] if (string_list[25] == 'bindfiles'): config_dict['Bound Files'] = 'False' else: config_dict['Bound Files'] = 'True' return config_dict
[ "def", "config_14", "(", "key", ",", "salt", ",", "string_list", ")", ":", "if", "(", "'email'", "in", "string_list", "[", "18", "]", ")", ":", "dup", "=", "True", "elif", "(", "'email'", "in", "string_list", "[", "19", "]", ")", ":", "dup", "=", "False", "config_dict", "=", "{", "}", "config_dict", "[", "'Version'", "]", "=", "'Predator Pain v14'", "config_dict", "[", "'Email Address'", "]", "=", "decrypt_string", "(", "key", ",", "salt", ",", "string_list", "[", "4", "]", ")", "config_dict", "[", "'Email Password'", "]", "=", "decrypt_string", "(", "key", ",", "salt", ",", "string_list", "[", "5", "]", ")", "config_dict", "[", "'SMTP Server'", "]", "=", "decrypt_string", "(", "key", ",", "salt", ",", "string_list", "[", "6", "]", ")", "config_dict", "[", "'SMTP Port'", "]", "=", "string_list", "[", "7", "]", "config_dict", "[", "'Interval Timer'", "]", "=", "string_list", "[", "8", "]", "config_dict", "[", "'FTP Host'", "]", "=", "decrypt_string", "(", "key", ",", "salt", ",", "string_list", "[", "12", "]", ")", "config_dict", "[", "'FTP User'", "]", "=", "decrypt_string", "(", "key", ",", "salt", ",", "string_list", "[", "13", "]", ")", "config_dict", "[", "'FTP Pass'", "]", "=", "decrypt_string", "(", "key", ",", "salt", ",", "string_list", "[", "14", "]", ")", "config_dict", "[", "'PHP Link'", "]", "=", "decrypt_string", "(", "key", ",", "salt", ",", "string_list", "[", "15", "]", ")", "if", "dup", ":", "config_dict", "[", "'PHP Link'", "]", "=", "decrypt_string", "(", "key", ",", "salt", ",", "string_list", "[", "15", "]", ")", "config_dict", "[", "'Use Email'", "]", "=", "string_list", "[", "18", "]", "config_dict", "[", "'Use FTP'", "]", "=", "string_list", "[", "19", "]", "config_dict", "[", "'Use PHP'", "]", "=", "string_list", "[", "20", "]", "config_dict", "[", "'Download & Exec'", "]", "=", "string_list", "[", "25", "]", "if", "(", "string_list", "[", "24", "]", "==", "'bindfiles'", ")", ":", "config_dict", "[", "'Bound Files'", "]", "=", "'False'", "else", ":", "config_dict", "[", "'Bound Files'", "]", "=", "'True'", "else", ":", "config_dict", "[", "'Use Email'", "]", "=", "string_list", "[", "19", "]", "config_dict", "[", "'Use FTP'", "]", "=", "string_list", "[", "20", "]", "config_dict", "[", "'Use PHP'", "]", "=", "string_list", "[", "21", "]", "config_dict", "[", "'Download & Exec'", "]", "=", "string_list", "[", "26", "]", "if", "(", "string_list", "[", "25", "]", "==", "'bindfiles'", ")", ":", "config_dict", "[", "'Bound Files'", "]", "=", "'False'", "else", ":", "config_dict", "[", "'Bound Files'", "]", "=", "'True'", "return", "config_dict" ]
extract the predator pain v14 config from the decrypted string list, adjusting indices because identical strings are not stored multiple times .
train
false
3,608
def get_local_size(*args, **kargs): raise _stub_error
[ "def", "get_local_size", "(", "*", "args", ",", "**", "kargs", ")", ":", "raise", "_stub_error" ]
opencl get_local_size() .
train
false
3,609
def external_first_login_authenticate(user, response): data = (session.data if session._get_current_object() else {}) data.update({'auth_user_external_id_provider': user['external_id_provider'], 'auth_user_external_id': user['external_id'], 'auth_user_fullname': user['fullname'], 'auth_user_access_token': user['access_token'], 'auth_user_external_first_login': True, 'service_url': user['service_url']}) response = create_session(response, data=data) return response
[ "def", "external_first_login_authenticate", "(", "user", ",", "response", ")", ":", "data", "=", "(", "session", ".", "data", "if", "session", ".", "_get_current_object", "(", ")", "else", "{", "}", ")", "data", ".", "update", "(", "{", "'auth_user_external_id_provider'", ":", "user", "[", "'external_id_provider'", "]", ",", "'auth_user_external_id'", ":", "user", "[", "'external_id'", "]", ",", "'auth_user_fullname'", ":", "user", "[", "'fullname'", "]", ",", "'auth_user_access_token'", ":", "user", "[", "'access_token'", "]", ",", "'auth_user_external_first_login'", ":", "True", ",", "'service_url'", ":", "user", "[", "'service_url'", "]", "}", ")", "response", "=", "create_session", "(", "response", ",", "data", "=", "data", ")", "return", "response" ]
create a special unauthenticated session for user login through external identity provider for the first time .
train
false
3,610
def test_stacked_line_reverse(): stacked = StackedLine(stack_from_top=True) stacked.add('one_two', [1, 2]) stacked.add('ten_twelve', [10, 12]) q = stacked.render_pyquery() assert (set([v.text for v in q('desc.value')]) == set(('11 (+1)', '14 (+2)', '10', '12')))
[ "def", "test_stacked_line_reverse", "(", ")", ":", "stacked", "=", "StackedLine", "(", "stack_from_top", "=", "True", ")", "stacked", ".", "add", "(", "'one_two'", ",", "[", "1", ",", "2", "]", ")", "stacked", ".", "add", "(", "'ten_twelve'", ",", "[", "10", ",", "12", "]", ")", "q", "=", "stacked", ".", "render_pyquery", "(", ")", "assert", "(", "set", "(", "[", "v", ".", "text", "for", "v", "in", "q", "(", "'desc.value'", ")", "]", ")", "==", "set", "(", "(", "'11 (+1)'", ",", "'14 (+2)'", ",", "'10'", ",", "'12'", ")", ")", ")" ]
test a stacked line chart with stack_from_top enabled .
train
false
3,612
def max_return(nbarprice, islong): high = (-1000000) low = 1000000 maxdiffs = [] if islong: for ith_price in nbarprice: if (ith_price > high): high = ith_price low = 1000000 elif (ith_price < low): low = ith_price maxdiffs.append((high - low)) return (max(maxdiffs) if maxdiffs else 0) else: for ith_price in nbarprice: if (ith_price < low): low = ith_price high = (-1000000) elif (ith_price > high): high = ith_price maxdiffs.append((high - low)) return (max(maxdiffs) if maxdiffs else 0)
[ "def", "max_return", "(", "nbarprice", ",", "islong", ")", ":", "high", "=", "(", "-", "1000000", ")", "low", "=", "1000000", "maxdiffs", "=", "[", "]", "if", "islong", ":", "for", "ith_price", "in", "nbarprice", ":", "if", "(", "ith_price", ">", "high", ")", ":", "high", "=", "ith_price", "low", "=", "1000000", "elif", "(", "ith_price", "<", "low", ")", ":", "low", "=", "ith_price", "maxdiffs", ".", "append", "(", "(", "high", "-", "low", ")", ")", "return", "(", "max", "(", "maxdiffs", ")", "if", "maxdiffs", "else", "0", ")", "else", ":", "for", "ith_price", "in", "nbarprice", ":", "if", "(", "ith_price", "<", "low", ")", ":", "low", "=", "ith_price", "high", "=", "(", "-", "1000000", ")", "elif", "(", "ith_price", ">", "high", ")", ":", "high", "=", "ith_price", "maxdiffs", ".", "append", "(", "(", "high", "-", "low", ")", ")", "return", "(", "max", "(", "maxdiffs", ")", "if", "maxdiffs", "else", "0", ")" ]
compute the maximum price excursion over nbarprice for a long or short position .
train
false
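Worked example, assuming max_return (above) is in scope; the values follow the code's reset-on-new-extreme logic, traced by hand:

prices = [3, 1, 4, 1, 5]
# long: each new high resets the trough, recording drops 3-1=2 and 4-1=3
assert max_return(prices, True) == 3
# short: each new low resets the peak, recording rises 4-1=3 and 5-1=4
assert max_return(prices, False) == 4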
3,613
def show_ipsec_site_connection(ipsec_site_connection, profile=None): conn = _auth(profile) return conn.show_ipsec_site_connection(ipsec_site_connection)
[ "def", "show_ipsec_site_connection", "(", "ipsec_site_connection", ",", "profile", "=", "None", ")", ":", "conn", "=", "_auth", "(", "profile", ")", "return", "conn", ".", "show_ipsec_site_connection", "(", "ipsec_site_connection", ")" ]
fetch information about a specific ipsec site connection .
train
false
3,615
def get_user_id_for_username(user_name, allow_none=False): try: if (c.userobj and (c.userobj.name == user_name)): return c.userobj.id except TypeError: pass user = model.User.get(user_name) if user: return user.id if allow_none: return None raise Exception('Not logged in user')
[ "def", "get_user_id_for_username", "(", "user_name", ",", "allow_none", "=", "False", ")", ":", "try", ":", "if", "(", "c", ".", "userobj", "and", "(", "c", ".", "userobj", ".", "name", "==", "user_name", ")", ")", ":", "return", "c", ".", "userobj", ".", "id", "except", "TypeError", ":", "pass", "user", "=", "model", ".", "User", ".", "get", "(", "user_name", ")", "if", "user", ":", "return", "user", ".", "id", "if", "allow_none", ":", "return", "None", "raise", "Exception", "(", "'Not logged in user'", ")" ]
helper function to get user id .
train
false
3,616
def is_parfile(fn): PAR_ID = 'PAR2\x00PKT' try: with open(fn, 'rb') as f: buf = f.read(8) return buf.startswith(PAR_ID) except: pass return False
[ "def", "is_parfile", "(", "fn", ")", ":", "PAR_ID", "=", "'PAR2\\x00PKT'", "try", ":", "with", "open", "(", "fn", ",", "'rb'", ")", "as", "f", ":", "buf", "=", "f", ".", "read", "(", "8", ")", "return", "buf", ".", "startswith", "(", "PAR_ID", ")", "except", ":", "pass", "return", "False" ]
check quickly whether file has par2 signature .
train
false
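Quick check, assuming is_parfile (above) is in scope (the record is Python 2 code, where the 8-byte signature read compares as a plain str):

import os, tempfile
fd, path = tempfile.mkstemp()
os.write(fd, b'PAR2\x00PKT' + b'rest of packet')
os.close(fd)
assert is_parfile(path)
os.remove(path)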
3,617
def is_conflict_free(path): rgx = re.compile(u'^(<<<<<<<|\\|\\|\\|\\|\\|\\|\\||>>>>>>>) ') try: with core.xopen(path, u'r') as f: for line in f: line = core.decode(line, errors=u'ignore') if rgx.match(line): if should_stage_conflicts(path): return True else: return False except IOError: pass return True
[ "def", "is_conflict_free", "(", "path", ")", ":", "rgx", "=", "re", ".", "compile", "(", "u'^(<<<<<<<|\\\\|\\\\|\\\\|\\\\|\\\\|\\\\|\\\\||>>>>>>>) '", ")", "try", ":", "with", "core", ".", "xopen", "(", "path", ",", "u'r'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "line", "=", "core", ".", "decode", "(", "line", ",", "errors", "=", "u'ignore'", ")", "if", "rgx", ".", "match", "(", "line", ")", ":", "if", "should_stage_conflicts", "(", "path", ")", ":", "return", "True", "else", ":", "return", "False", "except", "IOError", ":", "pass", "return", "True" ]
return true if path contains no conflict markers .
train
false
3,618
@pytest.fixture(scope=u'session') def celery_parameters(): return {}
[ "@", "pytest", ".", "fixture", "(", "scope", "=", "u'session'", ")", "def", "celery_parameters", "(", ")", ":", "return", "{", "}" ]
redefine this fixture to change the init parameters of test celery app .
train
false
3,619
def make_readonly(path): mode = os.stat(path).st_mode os.chmod(path, (mode & (~ stat.S_IWRITE)))
[ "def", "make_readonly", "(", "path", ")", ":", "mode", "=", "os", ".", "stat", "(", "path", ")", ".", "st_mode", "os", ".", "chmod", "(", "path", ",", "(", "mode", "&", "(", "~", "stat", ".", "S_IWRITE", ")", ")", ")" ]
helper function that is called in the tests to change the access permissions of the given file .
train
false
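Quick check, assuming make_readonly (above) is in scope:

import os, stat, tempfile
fd, path = tempfile.mkstemp()
os.close(fd)
make_readonly(path)
assert not (os.stat(path).st_mode & stat.S_IWRITE)
os.chmod(path, stat.S_IWRITE)   # restore write permission so the file can be deleted
os.remove(path)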
3,621
def dgap_l21(M, G, X, active_set, alpha, n_orient): GX = np.dot(G[:, active_set], X) R = (M - GX) penalty = norm_l21(X, n_orient, copy=True) nR2 = sum_squared(R) pobj = ((0.5 * nR2) + (alpha * penalty)) dual_norm = norm_l2inf(np.dot(G.T, R), n_orient, copy=False) scaling = (alpha / dual_norm) scaling = min(scaling, 1.0) dobj = (((0.5 * (scaling ** 2)) * nR2) + (scaling * np.sum((R * GX)))) gap = (pobj - dobj) return (gap, pobj, dobj, R)
[ "def", "dgap_l21", "(", "M", ",", "G", ",", "X", ",", "active_set", ",", "alpha", ",", "n_orient", ")", ":", "GX", "=", "np", ".", "dot", "(", "G", "[", ":", ",", "active_set", "]", ",", "X", ")", "R", "=", "(", "M", "-", "GX", ")", "penalty", "=", "norm_l21", "(", "X", ",", "n_orient", ",", "copy", "=", "True", ")", "nR2", "=", "sum_squared", "(", "R", ")", "pobj", "=", "(", "(", "0.5", "*", "nR2", ")", "+", "(", "alpha", "*", "penalty", ")", ")", "dual_norm", "=", "norm_l2inf", "(", "np", ".", "dot", "(", "G", ".", "T", ",", "R", ")", ",", "n_orient", ",", "copy", "=", "False", ")", "scaling", "=", "(", "alpha", "/", "dual_norm", ")", "scaling", "=", "min", "(", "scaling", ",", "1.0", ")", "dobj", "=", "(", "(", "(", "0.5", "*", "(", "scaling", "**", "2", ")", ")", "*", "nR2", ")", "+", "(", "scaling", "*", "np", ".", "sum", "(", "(", "R", "*", "GX", ")", ")", ")", ")", "gap", "=", "(", "pobj", "-", "dobj", ")", "return", "(", "gap", ",", "pobj", ",", "dobj", ",", "R", ")" ]
duality gaps for the mixed norm inverse problem .
train
false
3,622
def _ll_geom(y, X, beta): ll = _ll_nbp(y, X, beta, alph=1, Q=0) return ll
[ "def", "_ll_geom", "(", "y", ",", "X", ",", "beta", ")", ":", "ll", "=", "_ll_nbp", "(", "y", ",", "X", ",", "beta", ",", "alph", "=", "1", ",", "Q", "=", "0", ")", "return", "ll" ]
geometric regression .
train
false
3,623
def _combine_rhs(rhs): series = {} if isinstance(rhs, Series): series['x'] = rhs elif isinstance(rhs, DataFrame): series = rhs.copy() elif isinstance(rhs, dict): for (name, value) in compat.iteritems(rhs): if isinstance(value, Series): _safe_update(series, {name: value}) elif isinstance(value, (dict, DataFrame)): _safe_update(series, value) else: raise Exception(('Invalid RHS data type: %s' % type(value))) else: raise Exception(('Invalid RHS type: %s' % type(rhs))) if (not isinstance(series, DataFrame)): series = DataFrame(series, dtype=float) return series
[ "def", "_combine_rhs", "(", "rhs", ")", ":", "series", "=", "{", "}", "if", "isinstance", "(", "rhs", ",", "Series", ")", ":", "series", "[", "'x'", "]", "=", "rhs", "elif", "isinstance", "(", "rhs", ",", "DataFrame", ")", ":", "series", "=", "rhs", ".", "copy", "(", ")", "elif", "isinstance", "(", "rhs", ",", "dict", ")", ":", "for", "(", "name", ",", "value", ")", "in", "compat", ".", "iteritems", "(", "rhs", ")", ":", "if", "isinstance", "(", "value", ",", "Series", ")", ":", "_safe_update", "(", "series", ",", "{", "name", ":", "value", "}", ")", "elif", "isinstance", "(", "value", ",", "(", "dict", ",", "DataFrame", ")", ")", ":", "_safe_update", "(", "series", ",", "value", ")", "else", ":", "raise", "Exception", "(", "(", "'Invalid RHS data type: %s'", "%", "type", "(", "value", ")", ")", ")", "else", ":", "raise", "Exception", "(", "(", "'Invalid RHS type: %s'", "%", "type", "(", "rhs", ")", ")", ")", "if", "(", "not", "isinstance", "(", "series", ",", "DataFrame", ")", ")", ":", "series", "=", "DataFrame", "(", "series", ",", "dtype", "=", "float", ")", "return", "series" ]
glue input x variables together while checking for potential duplicates .
train
false
3,624
def installReactor(shortName): for installer in getReactorTypes(): if (installer.shortName == shortName): return installer.install() raise NoSuchReactor(shortName)
[ "def", "installReactor", "(", "shortName", ")", ":", "for", "installer", "in", "getReactorTypes", "(", ")", ":", "if", "(", "installer", ".", "shortName", "==", "shortName", ")", ":", "return", "installer", ".", "install", "(", ")", "raise", "NoSuchReactor", "(", "shortName", ")" ]
install the reactor with the given short name .
train
false
3,625
def cgsnapshot_create(context, values): return IMPL.cgsnapshot_create(context, values)
[ "def", "cgsnapshot_create", "(", "context", ",", "values", ")", ":", "return", "IMPL", ".", "cgsnapshot_create", "(", "context", ",", "values", ")" ]
create a cgsnapshot from the values dictionary .
train
false
3,626
def set_form_control_value(control, val): if isinstance(control, ClientForm.CheckboxControl): try: checkbox = control.get() checkbox.selected = make_boolean(val) return except ClientForm.AmbiguityError: pass if isinstance(control, ClientForm.ListControl): if val.startswith('-'): val = val[1:] flag = False else: flag = True if val.startswith('+'): val = val[1:] try: item = control.get(name=val) except ClientForm.ItemNotFoundError: try: item = control.get(label=val) except ClientForm.AmbiguityError: raise ClientForm.ItemNotFoundError(('multiple matches to value/label "%s" in list control' % (val,))) except ClientForm.ItemNotFoundError: raise ClientForm.ItemNotFoundError(('cannot find value/label "%s" in list control' % (val,))) if flag: item.selected = 1 else: item.selected = 0 else: control.value = val
[ "def", "set_form_control_value", "(", "control", ",", "val", ")", ":", "if", "isinstance", "(", "control", ",", "ClientForm", ".", "CheckboxControl", ")", ":", "try", ":", "checkbox", "=", "control", ".", "get", "(", ")", "checkbox", ".", "selected", "=", "make_boolean", "(", "val", ")", "return", "except", "ClientForm", ".", "AmbiguityError", ":", "pass", "if", "isinstance", "(", "control", ",", "ClientForm", ".", "ListControl", ")", ":", "if", "val", ".", "startswith", "(", "'-'", ")", ":", "val", "=", "val", "[", "1", ":", "]", "flag", "=", "False", "else", ":", "flag", "=", "True", "if", "val", ".", "startswith", "(", "'+'", ")", ":", "val", "=", "val", "[", "1", ":", "]", "try", ":", "item", "=", "control", ".", "get", "(", "name", "=", "val", ")", "except", "ClientForm", ".", "ItemNotFoundError", ":", "try", ":", "item", "=", "control", ".", "get", "(", "label", "=", "val", ")", "except", "ClientForm", ".", "AmbiguityError", ":", "raise", "ClientForm", ".", "ItemNotFoundError", "(", "(", "'multiple matches to value/label \"%s\" in list control'", "%", "(", "val", ",", ")", ")", ")", "except", "ClientForm", ".", "ItemNotFoundError", ":", "raise", "ClientForm", ".", "ItemNotFoundError", "(", "(", "'cannot find value/label \"%s\" in list control'", "%", "(", "val", ",", ")", ")", ")", "if", "flag", ":", "item", ".", "selected", "=", "1", "else", ":", "item", ".", "selected", "=", "0", "else", ":", "control", ".", "value", "=", "val" ]
helper function to deal with setting form values on checkboxes and list controls .
train
false