Dataset columns:
  id_within_dataset     int64    values 1 to 55.5k
  snippet               string   lengths 19 to 14.2k
  tokens                list     lengths 6 to 1.63k
  nl                    string   lengths 6 to 352
  split_within_dataset  string   1 value
  is_duplicated         bool     2 classes
10,861
def _audit_changes(account, auditors, send_report, debug=True):
    try:
        for au in auditors:
            au.audit_all_objects()
            au.save_issues()
            if send_report:
                report = au.create_report()
                au.email_report(report)
            if jirasync:
                app.logger.info('Syncing {} issues on {} with Jira'.format(au.index, account))
                jirasync.sync_issues([account], au.index)
    except (OperationalError, InvalidRequestError, StatementError) as e:
        app.logger.exception('Database error processing accounts %s, cleaning up session.', account)
        db.session.remove()
        store_exception('scheduler-audit-changes', None, e)
[ "def", "_audit_changes", "(", "account", ",", "auditors", ",", "send_report", ",", "debug", "=", "True", ")", ":", "try", ":", "for", "au", "in", "auditors", ":", "au", ".", "audit_all_objects", "(", ")", "au", ".", "save_issues", "(", ")", "if", "send_report", ":", "report", "=", "au", ".", "create_report", "(", ")", "au", ".", "email_report", "(", "report", ")", "if", "jirasync", ":", "app", ".", "logger", ".", "info", "(", "'Syncing {} issues on {} with Jira'", ".", "format", "(", "au", ".", "index", ",", "account", ")", ")", "jirasync", ".", "sync_issues", "(", "[", "account", "]", ",", "au", ".", "index", ")", "except", "(", "OperationalError", ",", "InvalidRequestError", ",", "StatementError", ")", "as", "e", ":", "app", ".", "logger", ".", "exception", "(", "'Database error processing accounts %s, cleaning up session.'", ",", "account", ")", "db", ".", "session", ".", "remove", "(", ")", "store_exception", "(", "'scheduler-audit-changes'", ",", "None", ",", "e", ")" ]
runs auditors on all items .
train
false
10,862
def test_override_SIGNING_SERVER_setting(monkeypatch):
    assert (not settings.SIGNING_SERVER)

    def signing_server(ids, **kwargs):
        assert (settings.SIGNING_SERVER == 'http://example.com')

    monkeypatch.setattr(SIGN_ADDONS, signing_server)
    call_command('sign_addons', 123, signing_server='http://example.com')
[ "def", "test_override_SIGNING_SERVER_setting", "(", "monkeypatch", ")", ":", "assert", "(", "not", "settings", ".", "SIGNING_SERVER", ")", "def", "signing_server", "(", "ids", ",", "**", "kwargs", ")", ":", "assert", "(", "settings", ".", "SIGNING_SERVER", "==", "'http://example.com'", ")", "monkeypatch", ".", "setattr", "(", "SIGN_ADDONS", ",", "signing_server", ")", "call_command", "(", "'sign_addons'", ",", "123", ",", "signing_server", "=", "'http://example.com'", ")" ]
you can override the signing_server settings .
train
false
10,863
def test_commutative():
    assert (ask(Q.commutative(x)) is True)
    assert (ask(Q.commutative(x), (~ Q.commutative(x))) is False)
    assert (ask(Q.commutative(x), Q.complex(x)) is True)
    assert (ask(Q.commutative(x), Q.imaginary(x)) is True)
    assert (ask(Q.commutative(x), Q.real(x)) is True)
    assert (ask(Q.commutative(x), Q.positive(x)) is True)
    assert (ask(Q.commutative(x), (~ Q.commutative(y))) is True)
    assert (ask(Q.commutative((2 * x))) is True)
    assert (ask(Q.commutative((2 * x)), (~ Q.commutative(x))) is False)
    assert (ask(Q.commutative((x + 1))) is True)
    assert (ask(Q.commutative((x + 1)), (~ Q.commutative(x))) is False)
    assert (ask(Q.commutative((x ** 2))) is True)
    assert (ask(Q.commutative((x ** 2)), (~ Q.commutative(x))) is False)
    assert (ask(Q.commutative(log(x))) is True)
[ "def", "test_commutative", "(", ")", ":", "assert", "(", "ask", "(", "Q", ".", "commutative", "(", "x", ")", ")", "is", "True", ")", "assert", "(", "ask", "(", "Q", ".", "commutative", "(", "x", ")", ",", "(", "~", "Q", ".", "commutative", "(", "x", ")", ")", ")", "is", "False", ")", "assert", "(", "ask", "(", "Q", ".", "commutative", "(", "x", ")", ",", "Q", ".", "complex", "(", "x", ")", ")", "is", "True", ")", "assert", "(", "ask", "(", "Q", ".", "commutative", "(", "x", ")", ",", "Q", ".", "imaginary", "(", "x", ")", ")", "is", "True", ")", "assert", "(", "ask", "(", "Q", ".", "commutative", "(", "x", ")", ",", "Q", ".", "real", "(", "x", ")", ")", "is", "True", ")", "assert", "(", "ask", "(", "Q", ".", "commutative", "(", "x", ")", ",", "Q", ".", "positive", "(", "x", ")", ")", "is", "True", ")", "assert", "(", "ask", "(", "Q", ".", "commutative", "(", "x", ")", ",", "(", "~", "Q", ".", "commutative", "(", "y", ")", ")", ")", "is", "True", ")", "assert", "(", "ask", "(", "Q", ".", "commutative", "(", "(", "2", "*", "x", ")", ")", ")", "is", "True", ")", "assert", "(", "ask", "(", "Q", ".", "commutative", "(", "(", "2", "*", "x", ")", ")", ",", "(", "~", "Q", ".", "commutative", "(", "x", ")", ")", ")", "is", "False", ")", "assert", "(", "ask", "(", "Q", ".", "commutative", "(", "(", "x", "+", "1", ")", ")", ")", "is", "True", ")", "assert", "(", "ask", "(", "Q", ".", "commutative", "(", "(", "x", "+", "1", ")", ")", ",", "(", "~", "Q", ".", "commutative", "(", "x", ")", ")", ")", "is", "False", ")", "assert", "(", "ask", "(", "Q", ".", "commutative", "(", "(", "x", "**", "2", ")", ")", ")", "is", "True", ")", "assert", "(", "ask", "(", "Q", ".", "commutative", "(", "(", "x", "**", "2", ")", ")", ",", "(", "~", "Q", ".", "commutative", "(", "x", ")", ")", ")", "is", "False", ")", "assert", "(", "ask", "(", "Q", ".", "commutative", "(", "log", "(", "x", ")", ")", ")", "is", "True", ")" ]
test for commutativity of and and or .
train
false
10,864
def get_path_sortkey(path):
    if ((path == '') or path.endswith('/')):
        return path
    head = os.path.split(path)[0]
    return u'~'.join([head, path])
[ "def", "get_path_sortkey", "(", "path", ")", ":", "if", "(", "(", "path", "==", "''", ")", "or", "path", ".", "endswith", "(", "'/'", ")", ")", ":", "return", "path", "head", "=", "os", ".", "path", ".", "split", "(", "path", ")", "[", "0", "]", "return", "u'~'", ".", "join", "(", "[", "head", ",", "path", "]", ")" ]
returns the sortkey to use for a path .
train
false
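A quick illustration of the sortkey behaviour above (a sketch; it assumes the function and `import os` are in scope):

import os  # get_path_sortkey relies on os.path.split

print(get_path_sortkey('docs/readme.txt'))  # 'docs~docs/readme.txt' - files sort after their directory prefix
print(get_path_sortkey('docs/'))            # 'docs/' - directories are returned unchanged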
10,868
def idealfourths(data, axis=None):
    def _idf(data):
        x = data.compressed()
        n = len(x)
        if (n < 3):
            return [np.nan, np.nan]
        (j, h) = divmod(((n / 4.0) + (5 / 12.0)), 1)
        j = int(j)
        qlo = (((1 - h) * x[(j - 1)]) + (h * x[j]))
        k = (n - j)
        qup = (((1 - h) * x[k]) + (h * x[(k - 1)]))
        return [qlo, qup]
    data = ma.sort(data, axis=axis).view(MaskedArray)
    if (axis is None):
        return _idf(data)
    else:
        return ma.apply_along_axis(_idf, axis, data)
[ "def", "idealfourths", "(", "data", ",", "axis", "=", "None", ")", ":", "def", "_idf", "(", "data", ")", ":", "x", "=", "data", ".", "compressed", "(", ")", "n", "=", "len", "(", "x", ")", "if", "(", "n", "<", "3", ")", ":", "return", "[", "np", ".", "nan", ",", "np", ".", "nan", "]", "(", "j", ",", "h", ")", "=", "divmod", "(", "(", "(", "n", "/", "4.0", ")", "+", "(", "5", "/", "12.0", ")", ")", ",", "1", ")", "j", "=", "int", "(", "j", ")", "qlo", "=", "(", "(", "(", "1", "-", "h", ")", "*", "x", "[", "(", "j", "-", "1", ")", "]", ")", "+", "(", "h", "*", "x", "[", "j", "]", ")", ")", "k", "=", "(", "n", "-", "j", ")", "qup", "=", "(", "(", "(", "1", "-", "h", ")", "*", "x", "[", "k", "]", ")", "+", "(", "h", "*", "x", "[", "(", "k", "-", "1", ")", "]", ")", ")", "return", "[", "qlo", ",", "qup", "]", "data", "=", "ma", ".", "sort", "(", "data", ",", "axis", "=", "axis", ")", ".", "view", "(", "MaskedArray", ")", "if", "(", "axis", "is", "None", ")", ":", "return", "_idf", "(", "data", ")", "else", ":", "return", "ma", ".", "apply_along_axis", "(", "_idf", ",", "axis", ",", "data", ")" ]
this function returns an estimate of the lower and upper quartiles of the data along the given axis .
train
false
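This matches scipy's masked-stats quartile estimator; a minimal usage sketch, assuming the import path `scipy.stats.mstats.idealfourths`:

import numpy as np
import numpy.ma as ma
from scipy.stats.mstats import idealfourths  # assumed home of the function above

data = ma.masked_invalid(np.array([1., 2., 3., np.nan, 5., 8., 13., 21.]))
print(idealfourths(data))  # [lower quartile estimate, upper quartile estimate]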
10,869
def libvlc_media_get_user_data(p_md):
    f = (_Cfunctions.get('libvlc_media_get_user_data', None) or _Cfunction('libvlc_media_get_user_data', ((1,),), None, ctypes.c_void_p, Media))
    return f(p_md)
[ "def", "libvlc_media_get_user_data", "(", "p_md", ")", ":", "f", "=", "(", "_Cfunctions", ".", "get", "(", "'libvlc_media_get_user_data'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_media_get_user_data'", ",", "(", "(", "1", ",", ")", ",", ")", ",", "None", ",", "ctypes", ".", "c_void_p", ",", "Media", ")", ")", "return", "f", "(", "p_md", ")" ]
get media descriptors user_data .
train
true
10,870
def autumn():
    rc('image', cmap='autumn')
    im = gci()
    if (im is not None):
        im.set_cmap(cm.autumn)
    draw_if_interactive()
[ "def", "autumn", "(", ")", ":", "rc", "(", "'image'", ",", "cmap", "=", "'autumn'", ")", "im", "=", "gci", "(", ")", "if", "(", "im", "is", "not", "None", ")", ":", "im", ".", "set_cmap", "(", "cm", ".", "autumn", ")", "draw_if_interactive", "(", ")" ]
set the default colormap to autumn and apply to current image if any .
train
false
10,871
def __grab_lock():
    try:
        if LOCK_ENABLED:
            if (not os.path.exists('/var/lib/cobbler/lock')):
                fd = open('/var/lib/cobbler/lock', 'w+')
                fd.close()
            LOCK_HANDLE = open('/var/lib/cobbler/lock', 'r')
            fcntl.flock(LOCK_HANDLE.fileno(), fcntl.LOCK_EX)
    except:
        traceback.print_exc()
        sys.exit(7)
[ "def", "__grab_lock", "(", ")", ":", "try", ":", "if", "LOCK_ENABLED", ":", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "'/var/lib/cobbler/lock'", ")", ")", ":", "fd", "=", "open", "(", "'/var/lib/cobbler/lock'", ",", "'w+'", ")", "fd", ".", "close", "(", ")", "LOCK_HANDLE", "=", "open", "(", "'/var/lib/cobbler/lock'", ",", "'r'", ")", "fcntl", ".", "flock", "(", "LOCK_HANDLE", ".", "fileno", "(", ")", ",", "fcntl", ".", "LOCK_EX", ")", "except", ":", "traceback", ".", "print_exc", "(", ")", "sys", ".", "exit", "(", "7", ")" ]
dual purpose locking: (a) flock to avoid multiple process access (b) block signal handler to avoid ctrl+c while writing yaml .
train
false
10,873
def _cos_win(M, a, sym=True):
    if _len_guards(M):
        return np.ones(M)
    (M, needs_trunc) = _extend(M, sym)
    fac = np.linspace((- np.pi), np.pi, M)
    w = np.zeros(M)
    for k in range(len(a)):
        w += (a[k] * np.cos((k * fac)))
    return _truncate(w, needs_trunc)
[ "def", "_cos_win", "(", "M", ",", "a", ",", "sym", "=", "True", ")", ":", "if", "_len_guards", "(", "M", ")", ":", "return", "np", ".", "ones", "(", "M", ")", "(", "M", ",", "needs_trunc", ")", "=", "_extend", "(", "M", ",", "sym", ")", "fac", "=", "np", ".", "linspace", "(", "(", "-", "np", ".", "pi", ")", ",", "np", ".", "pi", ",", "M", ")", "w", "=", "np", ".", "zeros", "(", "M", ")", "for", "k", "in", "range", "(", "len", "(", "a", ")", ")", ":", "w", "+=", "(", "a", "[", "k", "]", "*", "np", ".", "cos", "(", "(", "k", "*", "fac", ")", ")", ")", "return", "_truncate", "(", "w", ",", "needs_trunc", ")" ]
generic weighted sum of cosine terms window parameters m : int number of points in the output window a : array_like sequence of weighting coefficients .
train
false
10,874
def is_pypy():
    import platform
    return (platform.python_implementation() == 'PyPy')
[ "def", "is_pypy", "(", ")", ":", "import", "platform", "return", "(", "platform", ".", "python_implementation", "(", ")", "==", "'PyPy'", ")" ]
test whether we are running pypy .
train
false
10,875
def rendering_info(obj):
    return ('<ul>%s</ul>' % ''.join((('<li>%s</li>' % (x % y)) for (x, y) in (('<img src="%s/admin/img/admin/icon-yes.gif" alt="%s"> Deferred rendering', (settings.STATIC_URL, obj.defer_rendering)), ('%s (last)', obj.last_rendered_at), ('%s (started)', obj.render_started_at), ('%s (scheduled)', obj.render_scheduled_at)) if y)))
[ "def", "rendering_info", "(", "obj", ")", ":", "return", "(", "'<ul>%s</ul>'", "%", "''", ".", "join", "(", "(", "(", "'<li>%s</li>'", "%", "(", "x", "%", "y", ")", ")", "for", "(", "x", ",", "y", ")", "in", "(", "(", "'<img src=\"%s/admin/img/admin/icon-yes.gif\" alt=\"%s\"> Deferred rendering'", ",", "(", "settings", ".", "STATIC_URL", ",", "obj", ".", "defer_rendering", ")", ")", ",", "(", "'%s (last)'", ",", "obj", ".", "last_rendered_at", ")", ",", "(", "'%s (started)'", ",", "obj", ".", "render_started_at", ")", ",", "(", "'%s (scheduled)'", ",", "obj", ".", "render_scheduled_at", ")", ")", "if", "y", ")", ")", ")" ]
combine the rendering times into one block .
train
false
10,877
def _inFilesystemNamespace(path):
    return (path[:1] not in ('\x00', u'\x00'))
[ "def", "_inFilesystemNamespace", "(", "path", ")", ":", "return", "(", "path", "[", ":", "1", "]", "not", "in", "(", "'\\x00'", ",", "u'\\x00'", ")", ")" ]
determine whether the given unix socket path is in a filesystem namespace .
train
false
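The check keys off Linux's abstract socket namespace, where addresses begin with a NUL byte; a sketch of the two cases:

print(_inFilesystemNamespace('/tmp/app.sock'))  # True - ordinary filesystem path
print(_inFilesystemNamespace('\x00abstract'))   # False - Linux abstract namespace address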
10,881
def do_single_request(*args, **kwargs):
    cluster = kwargs['cluster']
    try:
        req = cluster.api_client.request(*args)
    except NvpApiClient.ResourceNotFound:
        raise exception.NotFound()
    return req
[ "def", "do_single_request", "(", "*", "args", ",", "**", "kwargs", ")", ":", "cluster", "=", "kwargs", "[", "'cluster'", "]", "try", ":", "req", "=", "cluster", ".", "api_client", ".", "request", "(", "*", "args", ")", "except", "NvpApiClient", ".", "ResourceNotFound", ":", "raise", "exception", ".", "NotFound", "(", ")", "return", "req" ]
issue a request to a specified cluster if specified via kwargs .
train
false
10,882
def get_view_plugins(view_types):
    view_plugins = []
    for view_type in view_types:
        view_plugin = get_view_plugin(view_type)
        if view_plugin:
            view_plugins.append(view_plugin)
    return view_plugins
[ "def", "get_view_plugins", "(", "view_types", ")", ":", "view_plugins", "=", "[", "]", "for", "view_type", "in", "view_types", ":", "view_plugin", "=", "get_view_plugin", "(", "view_type", ")", "if", "view_plugin", ":", "view_plugins", ".", "append", "(", "view_plugin", ")", "return", "view_plugins" ]
returns a list of the view plugins associated with the given view_types .
train
false
10,883
def getFrontOverWidthAddYList(front, numberOfLines, xIntersectionIndexLists, width, yList):
    frontOverWidth = (front / width)
    for fillLine in xrange(numberOfLines):
        yList.append((front + (float(fillLine) * width)))
        xIntersectionIndexLists.append([])
    return frontOverWidth
[ "def", "getFrontOverWidthAddYList", "(", "front", ",", "numberOfLines", ",", "xIntersectionIndexLists", ",", "width", ",", "yList", ")", ":", "frontOverWidth", "=", "(", "front", "/", "width", ")", "for", "fillLine", "in", "xrange", "(", "numberOfLines", ")", ":", "yList", ".", "append", "(", "(", "front", "+", "(", "float", "(", "fillLine", ")", "*", "width", ")", ")", ")", "xIntersectionIndexLists", ".", "append", "(", "[", "]", ")", "return", "frontOverWidth" ]
get the front over width and add the x intersection index lists and ylist .
train
false
10,884
def clear_messages(identity):
    BackendMessage.objects.filter(identity=identity, name=BACKEND_NAME).delete()
[ "def", "clear_messages", "(", "identity", ")", ":", "BackendMessage", ".", "objects", ".", "filter", "(", "identity", "=", "identity", ",", "name", "=", "BACKEND_NAME", ")", ".", "delete", "(", ")" ]
forget messages to/from this identity .
train
false
10,885
def _strxor(s1, s2):
    return ''.join(map((lambda x, y: chr((ord(x) ^ ord(y)))), s1, s2))
[ "def", "_strxor", "(", "s1", ",", "s2", ")", ":", "return", "''", ".", "join", "(", "map", "(", "(", "lambda", "x", ",", "y", ":", "chr", "(", "(", "ord", "(", "x", ")", "^", "ord", "(", "y", ")", ")", ")", ")", ",", "s1", ",", "s2", ")", ")" ]
utility method .
train
true
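The snippet above is Python 2 (chr/ord over str); a hedged Python 3 equivalent over bytes would be:

def strxor(b1: bytes, b2: bytes) -> bytes:
    # XOR two equal-length byte strings, the bytes analogue of _strxor
    return bytes(x ^ y for x, y in zip(b1, b2))

assert strxor(b'\x0f\xf0', b'\xff\x0f') == b'\xf0\xff'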
10,886
@commands(u'lmgtfy', u'lmgify', u'gify', u'gtfy')
def googleit(bot, trigger):
    if (not trigger.group(2)):
        return bot.say(u'http://google.com/')
    bot.say((u'http://lmgtfy.com/?q=' + trigger.group(2).replace(u' ', u'+')))
[ "@", "commands", "(", "u'lmgtfy'", ",", "u'lmgify'", ",", "u'gify'", ",", "u'gtfy'", ")", "def", "googleit", "(", "bot", ",", "trigger", ")", ":", "if", "(", "not", "trigger", ".", "group", "(", "2", ")", ")", ":", "return", "bot", ".", "say", "(", "u'http://google.com/'", ")", "bot", ".", "say", "(", "(", "u'http://lmgtfy.com/?q='", "+", "trigger", ".", "group", "(", "2", ")", ".", "replace", "(", "u' '", ",", "u'+'", ")", ")", ")" ]
let me just .
train
false
10,887
def run_networks():
    random.seed(12345678)
    np.random.seed(12345678)
    (training_data, validation_data, test_data) = mnist_loader.load_data_wrapper()
    results = []
    for eta in LEARNING_RATES:
        print ('\nTrain a network using eta = ' + str(eta))
        net = network2.Network([784, 30, 10])
        results.append(net.SGD(training_data, NUM_EPOCHS, 10, eta, lmbda=5.0, evaluation_data=validation_data, monitor_training_cost=True))
    f = open('multiple_eta.json', 'w')
    json.dump(results, f)
    f.close()
[ "def", "run_networks", "(", ")", ":", "random", ".", "seed", "(", "12345678", ")", "np", ".", "random", ".", "seed", "(", "12345678", ")", "(", "training_data", ",", "validation_data", ",", "test_data", ")", "=", "mnist_loader", ".", "load_data_wrapper", "(", ")", "results", "=", "[", "]", "for", "eta", "in", "LEARNING_RATES", ":", "print", "(", "'\\nTrain a network using eta = '", "+", "str", "(", "eta", ")", ")", "net", "=", "network2", ".", "Network", "(", "[", "784", ",", "30", ",", "10", "]", ")", "results", ".", "append", "(", "net", ".", "SGD", "(", "training_data", ",", "NUM_EPOCHS", ",", "10", ",", "eta", ",", "lmbda", "=", "5.0", ",", "evaluation_data", "=", "validation_data", ",", "monitor_training_cost", "=", "True", ")", ")", "f", "=", "open", "(", "'multiple_eta.json'", ",", "'w'", ")", "json", ".", "dump", "(", "results", ",", "f", ")", "f", ".", "close", "(", ")" ]
train networks using three different values for the learning rate .
train
false
10,888
def update_installed(uuid=''):
    imgadm = _check_imgadm()
    if imgadm:
        cmd = '{0} update {1}'.format(imgadm, uuid).rstrip()
        __salt__['cmd.run'](cmd)
    return {}
[ "def", "update_installed", "(", "uuid", "=", "''", ")", ":", "imgadm", "=", "_check_imgadm", "(", ")", "if", "imgadm", ":", "cmd", "=", "'{0} update {1}'", ".", "format", "(", "imgadm", ",", "uuid", ")", ".", "rstrip", "(", ")", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ")", "return", "{", "}" ]
gather info on unknown image(s) uuid : string optional uuid of image cli example: .
train
false
10,889
def log_subprocess(func):
    @wraps(func)
    def wrapper(*params, **kwargs):
        fc = ('%s(%s)' % (func.__name__, ', '.join(([a.__repr__() for a in params] + [('%s = %s' % (a, b)) for (a, b) in kwargs.items()]))))
        log.debug(('%s called' % fc))
        return func(*params, **kwargs)
    return wrapper
[ "def", "log_subprocess", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "params", ",", "**", "kwargs", ")", ":", "fc", "=", "(", "'%s(%s)'", "%", "(", "func", ".", "__name__", ",", "', '", ".", "join", "(", "(", "[", "a", ".", "__repr__", "(", ")", "for", "a", "in", "params", "]", "+", "[", "(", "'%s = %s'", "%", "(", "a", ",", "b", ")", ")", "for", "(", "a", ",", "b", ")", "in", "kwargs", ".", "items", "(", ")", "]", ")", ")", ")", ")", "log", ".", "debug", "(", "(", "'%s called'", "%", "fc", ")", ")", "return", "func", "(", "*", "params", ",", "**", "kwargs", ")", "return", "wrapper" ]
wrapper around subprocess to log .
train
false
10,891
def __regex_err_msg(match, regex):
    err_msg = (StdioErrorLevel.desc(regex.error_level) + ': ')
    if (None is not regex.desc):
        err_msg += regex.desc
    else:
        mstart = match.start()
        mend = match.end()
        err_msg += 'Matched on '
        if ((mend - mstart) > 256):
            err_msg += (match.string[mstart:(mstart + 256)] + '...')
        else:
            err_msg += match.string[mstart:mend]
    return err_msg
[ "def", "__regex_err_msg", "(", "match", ",", "regex", ")", ":", "err_msg", "=", "(", "StdioErrorLevel", ".", "desc", "(", "regex", ".", "error_level", ")", "+", "': '", ")", "if", "(", "None", "is", "not", "regex", ".", "desc", ")", ":", "err_msg", "+=", "regex", ".", "desc", "else", ":", "mstart", "=", "match", ".", "start", "(", ")", "mend", "=", "match", ".", "end", "(", ")", "err_msg", "+=", "'Matched on '", "if", "(", "(", "mend", "-", "mstart", ")", ">", "256", ")", ":", "err_msg", "+=", "(", "match", ".", "string", "[", "mstart", ":", "(", "mstart", "+", "256", ")", "]", "+", "'...'", ")", "else", ":", "err_msg", "+=", "match", ".", "string", "[", "mstart", ":", "mend", "]", "return", "err_msg" ]
return a message about the match on tool output using the given toolstdioregex regex object .
train
false
10,892
def l2(x):
    return T.sum((x ** 2))
[ "def", "l2", "(", "x", ")", ":", "return", "T", ".", "sum", "(", "(", "x", "**", "2", ")", ")" ]
computes the squared l2 norm of a tensor parameters x : theano tensor returns theano scalar squared l2 norm .
train
false
10,894
def write_tadm_file(train_toks, encoding, stream):
    labels = encoding.labels()
    for (featureset, label) in train_toks:
        length_line = (u'%d\n' % len(labels))
        stream.write(length_line)
        for known_label in labels:
            v = encoding.encode(featureset, known_label)
            line = (u'%d %d %s\n' % (int((label == known_label)), len(v), u' '.join(((u'%d %d' % u) for u in v))))
            stream.write(line)
[ "def", "write_tadm_file", "(", "train_toks", ",", "encoding", ",", "stream", ")", ":", "labels", "=", "encoding", ".", "labels", "(", ")", "for", "(", "featureset", ",", "label", ")", "in", "train_toks", ":", "length_line", "=", "(", "u'%d\\n'", "%", "len", "(", "labels", ")", ")", "stream", ".", "write", "(", "length_line", ")", "for", "known_label", "in", "labels", ":", "v", "=", "encoding", ".", "encode", "(", "featureset", ",", "known_label", ")", "line", "=", "(", "u'%d %d %s\\n'", "%", "(", "int", "(", "(", "label", "==", "known_label", ")", ")", ",", "len", "(", "v", ")", ",", "u' '", ".", "join", "(", "(", "(", "u'%d %d'", "%", "u", ")", "for", "u", "in", "v", ")", ")", ")", ")", "stream", ".", "write", "(", "line", ")" ]
generate an input file for tadm based on the given corpus of classified tokens .
train
false
10,895
def override_flask_hello():
    html = u'<!DOCTYPE html>\n<html>\n <head>\n <title>Hello from Flask</title>\n </head>\n <body>\n Hello World, this is served from an extension, overriding the flask url.\n </body>\n</html>'
    return render_template_string(html)
[ "def", "override_flask_hello", "(", ")", ":", "html", "=", "u'<!DOCTYPE html>\\n<html>\\n <head>\\n <title>Hello from Flask</title>\\n </head>\\n <body>\\n Hello World, this is served from an extension, overriding the flask url.\\n </body>\\n</html>'", "return", "render_template_string", "(", "html", ")" ]
a simple replacement for the flask hello view function .
train
false
10,896
def parse_cli(arg_list=None, origin=os.getcwd(), arg_parser=None, key_value_delimiters=('=', ':'), comment_seperators=(), key_delimiters=(',',), section_override_delimiters=('.',)):
    arg_parser = (default_arg_parser() if (arg_parser is None) else arg_parser)
    origin += os.path.sep
    sections = OrderedDict(default=Section('Default'))
    line_parser = LineParser(key_value_delimiters, comment_seperators, key_delimiters, {}, section_override_delimiters)
    for (arg_key, arg_value) in sorted(vars(arg_parser.parse_args(arg_list)).items()):
        if ((arg_key == 'settings') and (arg_value is not None)):
            parse_custom_settings(sections, arg_value, origin, line_parser)
        else:
            if isinstance(arg_value, list):
                arg_value = ','.join([str(val) for val in arg_value])
            append_to_sections(sections, arg_key, arg_value, origin, from_cli=True)
    return sections
[ "def", "parse_cli", "(", "arg_list", "=", "None", ",", "origin", "=", "os", ".", "getcwd", "(", ")", ",", "arg_parser", "=", "None", ",", "key_value_delimiters", "=", "(", "'='", ",", "':'", ")", ",", "comment_seperators", "=", "(", ")", ",", "key_delimiters", "=", "(", "','", ",", ")", ",", "section_override_delimiters", "=", "(", "'.'", ",", ")", ")", ":", "arg_parser", "=", "(", "default_arg_parser", "(", ")", "if", "(", "arg_parser", "is", "None", ")", "else", "arg_parser", ")", "origin", "+=", "os", ".", "path", ".", "sep", "sections", "=", "OrderedDict", "(", "default", "=", "Section", "(", "'Default'", ")", ")", "line_parser", "=", "LineParser", "(", "key_value_delimiters", ",", "comment_seperators", ",", "key_delimiters", ",", "{", "}", ",", "section_override_delimiters", ")", "for", "(", "arg_key", ",", "arg_value", ")", "in", "sorted", "(", "vars", "(", "arg_parser", ".", "parse_args", "(", "arg_list", ")", ")", ".", "items", "(", ")", ")", ":", "if", "(", "(", "arg_key", "==", "'settings'", ")", "and", "(", "arg_value", "is", "not", "None", ")", ")", ":", "parse_custom_settings", "(", "sections", ",", "arg_value", ",", "origin", ",", "line_parser", ")", "else", ":", "if", "isinstance", "(", "arg_value", ",", "list", ")", ":", "arg_value", "=", "','", ".", "join", "(", "[", "str", "(", "val", ")", "for", "val", "in", "arg_value", "]", ")", "append_to_sections", "(", "sections", ",", "arg_key", ",", "arg_value", ",", "origin", ",", "from_cli", "=", "True", ")", "return", "sections" ]
parses the cli arguments and creates sections out of it .
train
false
10,898
def get_sensor_data(**kwargs):
    import ast
    with _IpmiCommand(**kwargs) as s:
        data = {}
        for reading in s.get_sensor_data():
            if reading:
                r = ast.literal_eval(repr(reading))
                data[r.pop('name')] = r
    return data
[ "def", "get_sensor_data", "(", "**", "kwargs", ")", ":", "import", "ast", "with", "_IpmiCommand", "(", "**", "kwargs", ")", "as", "s", ":", "data", "=", "{", "}", "for", "reading", "in", "s", ".", "get_sensor_data", "(", ")", ":", "if", "reading", ":", "r", "=", "ast", ".", "literal_eval", "(", "repr", "(", "reading", ")", ")", "data", "[", "r", ".", "pop", "(", "'name'", ")", "]", "=", "r", "return", "data" ]
get sensor readings iterates sensor reading objects .
train
true
10,900
def p_pointer_1(t):
    pass
[ "def", "p_pointer_1", "(", "t", ")", ":", "pass" ]
pointer : times type_qualifier_list .
train
false
10,901
def read_content_types(archive):
    xml_source = archive.read(ARC_CONTENT_TYPES)
    root = fromstring(xml_source)
    contents_root = root.findall(('{%s}Override' % CONTYPES_NS))
    for type in contents_root:
        (yield (type.get('ContentType'), type.get('PartName')))
[ "def", "read_content_types", "(", "archive", ")", ":", "xml_source", "=", "archive", ".", "read", "(", "ARC_CONTENT_TYPES", ")", "root", "=", "fromstring", "(", "xml_source", ")", "contents_root", "=", "root", ".", "findall", "(", "(", "'{%s}Override'", "%", "CONTYPES_NS", ")", ")", "for", "type", "in", "contents_root", ":", "(", "yield", "(", "type", ".", "get", "(", "'ContentType'", ")", ",", "type", ".", "get", "(", "'PartName'", ")", ")", ")" ]
read content types .
train
true
10,902
def clean_custom_shortcuts():
    global CUSTOM_SHORTCUTS
    CUSTOM_SHORTCUTS = {}
[ "def", "clean_custom_shortcuts", "(", ")", ":", "global", "CUSTOM_SHORTCUTS", "CUSTOM_SHORTCUTS", "=", "{", "}" ]
cleans customs_shortcuts .
train
false
10,903
def getAroundsFromPoints(points, radius):
    arounds = []
    radius = abs(radius)
    centers = getCentersFromPoints(points, radius)
    for center in centers:
        inset = getSimplifiedInsetFromClockwiseLoop(center, radius)
        if isLargeSameDirection(inset, center, radius):
            arounds.append(inset)
    return arounds
[ "def", "getAroundsFromPoints", "(", "points", ",", "radius", ")", ":", "arounds", "=", "[", "]", "radius", "=", "abs", "(", "radius", ")", "centers", "=", "getCentersFromPoints", "(", "points", ",", "radius", ")", "for", "center", "in", "centers", ":", "inset", "=", "getSimplifiedInsetFromClockwiseLoop", "(", "center", ",", "radius", ")", "if", "isLargeSameDirection", "(", "inset", ",", "center", ",", "radius", ")", ":", "arounds", ".", "append", "(", "inset", ")", "return", "arounds" ]
get the arounds from the points .
train
false
10,904
def _get_proc_status(proc):
    try:
        return (proc.status() if PSUTIL2 else proc.status)
    except (psutil.NoSuchProcess, psutil.AccessDenied):
        return None
[ "def", "_get_proc_status", "(", "proc", ")", ":", "try", ":", "return", "(", "proc", ".", "status", "(", ")", "if", "PSUTIL2", "else", "proc", ".", "status", ")", "except", "(", "psutil", ".", "NoSuchProcess", ",", "psutil", ".", "AccessDenied", ")", ":", "return", "None" ]
returns the status of a process instance .
train
false
10,905
def delete_rax_keypair(args):
    print ("--- Cleaning Key Pairs matching '%s'" % args.match_re)
    for region in pyrax.identity.services.compute.regions:
        cs = pyrax.connect_to_cloudservers(region=region)
        for keypair in cs.keypairs.list():
            if re.search(args.match_re, keypair.name):
                prompt_and_delete(keypair, ('Delete matching %s? [y/n]: ' % keypair), args.assumeyes)
[ "def", "delete_rax_keypair", "(", "args", ")", ":", "print", "(", "\"--- Cleaning Key Pairs matching '%s'\"", "%", "args", ".", "match_re", ")", "for", "region", "in", "pyrax", ".", "identity", ".", "services", ".", "compute", ".", "regions", ":", "cs", "=", "pyrax", ".", "connect_to_cloudservers", "(", "region", "=", "region", ")", "for", "keypair", "in", "cs", ".", "keypairs", ".", "list", "(", ")", ":", "if", "re", ".", "search", "(", "args", ".", "match_re", ",", "keypair", ".", "name", ")", ":", "prompt_and_delete", "(", "keypair", ",", "(", "'Delete matching %s? [y/n]: '", "%", "keypair", ")", ",", "args", ".", "assumeyes", ")" ]
function for deleting rackspace key pairs .
train
false
10,906
def parseContentRange(header):
    (kind, other) = header.strip().split()
    if (kind.lower() != 'bytes'):
        raise ValueError('a range of type %r is not supported')
    (startend, realLength) = other.split('/')
    (start, end) = map(int, startend.split('-'))
    if (realLength == '*'):
        realLength = None
    else:
        realLength = int(realLength)
    return (start, end, realLength)
[ "def", "parseContentRange", "(", "header", ")", ":", "(", "kind", ",", "other", ")", "=", "header", ".", "strip", "(", ")", ".", "split", "(", ")", "if", "(", "kind", ".", "lower", "(", ")", "!=", "'bytes'", ")", ":", "raise", "ValueError", "(", "'a range of type %r is not supported'", ")", "(", "startend", ",", "realLength", ")", "=", "other", ".", "split", "(", "'/'", ")", "(", "start", ",", "end", ")", "=", "map", "(", "int", ",", "startend", ".", "split", "(", "'-'", ")", ")", "if", "(", "realLength", "==", "'*'", ")", ":", "realLength", "=", "None", "else", ":", "realLength", "=", "int", "(", "realLength", ")", "return", "(", "start", ",", "end", ",", "realLength", ")" ]
parse a content-range header into (start, end, realLength) .
train
false
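Two example headers, per the parsing logic above (a sketch assuming the function is in scope):

print(parseContentRange('bytes 0-499/1234'))  # (0, 499, 1234)
print(parseContentRange('bytes 500-999/*'))   # (500, 999, None) - '*' means unknown total length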
10,908
def fake_execute(*cmd_parts, **kwargs):
    global _fake_execute_repliers
    process_input = kwargs.get('process_input', None)
    check_exit_code = kwargs.get('check_exit_code', 0)
    delay_on_retry = kwargs.get('delay_on_retry', True)
    attempts = kwargs.get('attempts', 1)
    run_as_root = kwargs.get('run_as_root', False)
    cmd_str = ' '.join((str(part) for part in cmd_parts))
    LOG.debug(_('Faking execution of cmd (subprocess): %s'), cmd_str)
    _fake_execute_log.append(cmd_str)
    reply_handler = fake_execute_default_reply_handler
    for fake_replier in _fake_execute_repliers:
        if re.match(fake_replier[0], cmd_str):
            reply_handler = fake_replier[1]
            LOG.debug((_('Faked command matched %s') % fake_replier[0]))
            break
    if isinstance(reply_handler, basestring):
        reply = (reply_handler, '')
    else:
        try:
            reply = reply_handler(cmd_parts, process_input=process_input, delay_on_retry=delay_on_retry, attempts=attempts, run_as_root=run_as_root, check_exit_code=check_exit_code)
        except exception.ProcessExecutionError as e:
            LOG.debug(_('Faked command raised an exception %s'), e)
            raise
    stdout = reply[0]
    stderr = reply[1]
    LOG.debug((_("Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'") % locals()))
    greenthread.sleep(0)
    return reply
[ "def", "fake_execute", "(", "*", "cmd_parts", ",", "**", "kwargs", ")", ":", "global", "_fake_execute_repliers", "process_input", "=", "kwargs", ".", "get", "(", "'process_input'", ",", "None", ")", "check_exit_code", "=", "kwargs", ".", "get", "(", "'check_exit_code'", ",", "0", ")", "delay_on_retry", "=", "kwargs", ".", "get", "(", "'delay_on_retry'", ",", "True", ")", "attempts", "=", "kwargs", ".", "get", "(", "'attempts'", ",", "1", ")", "run_as_root", "=", "kwargs", ".", "get", "(", "'run_as_root'", ",", "False", ")", "cmd_str", "=", "' '", ".", "join", "(", "(", "str", "(", "part", ")", "for", "part", "in", "cmd_parts", ")", ")", "LOG", ".", "debug", "(", "_", "(", "'Faking execution of cmd (subprocess): %s'", ")", ",", "cmd_str", ")", "_fake_execute_log", ".", "append", "(", "cmd_str", ")", "reply_handler", "=", "fake_execute_default_reply_handler", "for", "fake_replier", "in", "_fake_execute_repliers", ":", "if", "re", ".", "match", "(", "fake_replier", "[", "0", "]", ",", "cmd_str", ")", ":", "reply_handler", "=", "fake_replier", "[", "1", "]", "LOG", ".", "debug", "(", "(", "_", "(", "'Faked command matched %s'", ")", "%", "fake_replier", "[", "0", "]", ")", ")", "break", "if", "isinstance", "(", "reply_handler", ",", "basestring", ")", ":", "reply", "=", "(", "reply_handler", ",", "''", ")", "else", ":", "try", ":", "reply", "=", "reply_handler", "(", "cmd_parts", ",", "process_input", "=", "process_input", ",", "delay_on_retry", "=", "delay_on_retry", ",", "attempts", "=", "attempts", ",", "run_as_root", "=", "run_as_root", ",", "check_exit_code", "=", "check_exit_code", ")", "except", "exception", ".", "ProcessExecutionError", "as", "e", ":", "LOG", ".", "debug", "(", "_", "(", "'Faked command raised an exception %s'", ")", ",", "e", ")", "raise", "stdout", "=", "reply", "[", "0", "]", "stderr", "=", "reply", "[", "1", "]", "LOG", ".", "debug", "(", "(", "_", "(", "\"Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'\"", ")", "%", "locals", "(", ")", ")", ")", "greenthread", ".", "sleep", "(", "0", ")", "return", "reply" ]
this function stubs out execute .
train
false
10,909
def FakerAttribute(provider, **kwargs):
    fake_gen = getattr(fake, provider)
    if (not fake_gen):
        raise ValueError('{0!r} is not a valid faker provider.'.format(provider))
    return LazyAttribute((lambda x: fake_gen(**kwargs)))
[ "def", "FakerAttribute", "(", "provider", ",", "**", "kwargs", ")", ":", "fake_gen", "=", "getattr", "(", "fake", ",", "provider", ")", "if", "(", "not", "fake_gen", ")", ":", "raise", "ValueError", "(", "'{0!r} is not a valid faker provider.'", ".", "format", "(", "provider", ")", ")", "return", "LazyAttribute", "(", "(", "lambda", "x", ":", "fake_gen", "(", "**", "kwargs", ")", ")", ")" ]
attribute that lazily generates a value using the faker library .
train
false
10,911
@login_required
@require_POST
def cancel_draft(request, media_type='image'):
    drafts = _get_drafts(request.user)
    if ((media_type == 'image') and drafts['image']):
        drafts['image'].delete()
        drafts['image'] = None
    else:
        msg = _(u'Unrecognized request or nothing to cancel.')
        content_type = None
        if request.is_ajax():
            msg = json.dumps({'status': 'error', 'message': msg})
            content_type = 'application/json'
        return HttpResponseBadRequest(msg, content_type=content_type)
    if request.is_ajax():
        return HttpResponse(json.dumps({'status': 'success'}), content_type='application/json')
    return HttpResponseRedirect(reverse('gallery.gallery', args=[media_type]))
[ "@", "login_required", "@", "require_POST", "def", "cancel_draft", "(", "request", ",", "media_type", "=", "'image'", ")", ":", "drafts", "=", "_get_drafts", "(", "request", ".", "user", ")", "if", "(", "(", "media_type", "==", "'image'", ")", "and", "drafts", "[", "'image'", "]", ")", ":", "drafts", "[", "'image'", "]", ".", "delete", "(", ")", "drafts", "[", "'image'", "]", "=", "None", "else", ":", "msg", "=", "_", "(", "u'Unrecognized request or nothing to cancel.'", ")", "content_type", "=", "None", "if", "request", ".", "is_ajax", "(", ")", ":", "msg", "=", "json", ".", "dumps", "(", "{", "'status'", ":", "'error'", ",", "'message'", ":", "msg", "}", ")", "content_type", "=", "'application/json'", "return", "HttpResponseBadRequest", "(", "msg", ",", "content_type", "=", "content_type", ")", "if", "request", ".", "is_ajax", "(", ")", ":", "return", "HttpResponse", "(", "json", ".", "dumps", "(", "{", "'status'", ":", "'success'", "}", ")", ",", "content_type", "=", "'application/json'", ")", "return", "HttpResponseRedirect", "(", "reverse", "(", "'gallery.gallery'", ",", "args", "=", "[", "media_type", "]", ")", ")" ]
delete an existing draft for the user .
train
false
10,912
def enable_microsites(log):
    if is_feature_enabled():
        BACKEND.enable_microsites(log)
[ "def", "enable_microsites", "(", "log", ")", ":", "if", "is_feature_enabled", "(", ")", ":", "BACKEND", ".", "enable_microsites", "(", "log", ")" ]
calls the enable_microsites function in the microsite backend .
train
false
10,914
def prevent_core_dump():
    core_resource = resource.RLIMIT_CORE
    try:
        core_limit_prev = resource.getrlimit(core_resource)
    except ValueError as exc:
        error = DaemonOSEnvironmentError(('System does not support RLIMIT_CORE resource limit (%(exc)s)' % vars()))
        raise error
    core_limit = (0, 0)
    resource.setrlimit(core_resource, core_limit)
[ "def", "prevent_core_dump", "(", ")", ":", "core_resource", "=", "resource", ".", "RLIMIT_CORE", "try", ":", "core_limit_prev", "=", "resource", ".", "getrlimit", "(", "core_resource", ")", "except", "ValueError", "as", "exc", ":", "error", "=", "DaemonOSEnvironmentError", "(", "(", "'System does not support RLIMIT_CORE resource limit (%(exc)s)'", "%", "vars", "(", ")", ")", ")", "raise", "error", "core_limit", "=", "(", "0", ",", "0", ")", "resource", ".", "setrlimit", "(", "core_resource", ",", "core_limit", ")" ]
prevent this process from generating a core dump .
train
false
10,916
def sql_flush(style, connection, only_django=False, reset_sequences=True, allow_cascade=False):
    if only_django:
        tables = connection.introspection.django_table_names(only_existing=True)
    else:
        tables = connection.introspection.table_names()
    seqs = (connection.introspection.sequence_list() if reset_sequences else ())
    statements = connection.ops.sql_flush(style, tables, seqs, allow_cascade)
    return statements
[ "def", "sql_flush", "(", "style", ",", "connection", ",", "only_django", "=", "False", ",", "reset_sequences", "=", "True", ",", "allow_cascade", "=", "False", ")", ":", "if", "only_django", ":", "tables", "=", "connection", ".", "introspection", ".", "django_table_names", "(", "only_existing", "=", "True", ")", "else", ":", "tables", "=", "connection", ".", "introspection", ".", "table_names", "(", ")", "seqs", "=", "(", "connection", ".", "introspection", ".", "sequence_list", "(", ")", "if", "reset_sequences", "else", "(", ")", ")", "statements", "=", "connection", ".", "ops", ".", "sql_flush", "(", "style", ",", "tables", ",", "seqs", ",", "allow_cascade", ")", "return", "statements" ]
returns a list of the sql statements used to flush the database .
train
false
10,917
def escape_filter_chars(assertion_value, escape_mode=0):
    if escape_mode:
        r = []
        if (escape_mode == 1):
            for c in assertion_value:
                if ((c < '0') or (c > 'z') or (c in '\\*()')):
                    c = ('\\%02x' % ord(c))
                r.append(c)
        elif (escape_mode == 2):
            for c in assertion_value:
                r.append(('\\%02x' % ord(c)))
        else:
            raise ValueError('escape_mode must be 0, 1 or 2.')
        s = ''.join(r)
    else:
        s = assertion_value.replace('\\', '\\5c')
        s = s.replace('*', '\\2a')
        s = s.replace('(', '\\28')
        s = s.replace(')', '\\29')
        s = s.replace('\x00', '\\00')
    return s
[ "def", "escape_filter_chars", "(", "assertion_value", ",", "escape_mode", "=", "0", ")", ":", "if", "escape_mode", ":", "r", "=", "[", "]", "if", "(", "escape_mode", "==", "1", ")", ":", "for", "c", "in", "assertion_value", ":", "if", "(", "(", "c", "<", "'0'", ")", "or", "(", "c", ">", "'z'", ")", "or", "(", "c", "in", "'\\\\*()'", ")", ")", ":", "c", "=", "(", "'\\\\%02x'", "%", "ord", "(", "c", ")", ")", "r", ".", "append", "(", "c", ")", "elif", "(", "escape_mode", "==", "2", ")", ":", "for", "c", "in", "assertion_value", ":", "r", ".", "append", "(", "(", "'\\\\%02x'", "%", "ord", "(", "c", ")", ")", ")", "else", ":", "raise", "ValueError", "(", "'escape_mode must be 0, 1 or 2.'", ")", "s", "=", "''", ".", "join", "(", "r", ")", "else", ":", "s", "=", "assertion_value", ".", "replace", "(", "'\\\\'", ",", "'\\\\5c'", ")", "s", "=", "s", ".", "replace", "(", "'*'", ",", "'\\\\2a'", ")", "s", "=", "s", ".", "replace", "(", "'('", ",", "'\\\\28'", ")", "s", "=", "s", ".", "replace", "(", "')'", ",", "'\\\\29'", ")", "s", "=", "s", ".", "replace", "(", "'\\x00'", ",", "'\\\\00'", ")", "return", "s" ]
replace all special characters found in assertion_value by quoted notation .
train
false
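How the escape modes behave on sample input (a sketch; mode 0 quotes only the LDAP filter metacharacters):

print(escape_filter_chars('a*(b)c'))             # a\2a\28b\29c
print(escape_filter_chars('ab', escape_mode=2))  # \61\62 - every character hex-escaped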
10,918
def get_return_data_type(func_name):
    if func_name.startswith('get_'):
        if func_name.endswith('_list'):
            return 'List'
        elif func_name.endswith('_count'):
            return 'Integer'
    return ''
[ "def", "get_return_data_type", "(", "func_name", ")", ":", "if", "func_name", ".", "startswith", "(", "'get_'", ")", ":", "if", "func_name", ".", "endswith", "(", "'_list'", ")", ":", "return", "'List'", "elif", "func_name", ".", "endswith", "(", "'_count'", ")", ":", "return", "'Integer'", "return", "''" ]
return a somewhat-helpful data type given a function name .
train
false
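The naming convention it infers from, shown directly:

print(get_return_data_type('get_user_list'))   # 'List'
print(get_return_data_type('get_user_count'))  # 'Integer'
print(get_return_data_type('fetch_users'))     # '' - no 'get_' prefix, so no hint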
10,920
def lang_stats(resources=None, languages=None):
    locale_dirs = _get_locale_dirs()
    for (name, dir_) in locale_dirs:
        if (resources and (not (name in resources))):
            continue
        print ("\nShowing translations stats for '%s':" % name)
        langs = sorted([d for d in os.listdir(dir_) if (not d.startswith('_'))])
        for lang in langs:
            if (languages and (not (lang in languages))):
                continue
            p = Popen(('msgfmt -vc -o /dev/null %(path)s/%(lang)s/LC_MESSAGES/django%(ext)s.po' % {'path': dir_, 'lang': lang, 'ext': ('js' if name.endswith('-js') else '')}), stdout=PIPE, stderr=PIPE, shell=True)
            (output, errors) = p.communicate()
            if (p.returncode == 0):
                print ('%s: %s' % (lang, errors.strip()))
            else:
                print ('Errors happened when checking %s translation for %s:\n%s' % (lang, name, errors))
[ "def", "lang_stats", "(", "resources", "=", "None", ",", "languages", "=", "None", ")", ":", "locale_dirs", "=", "_get_locale_dirs", "(", ")", "for", "(", "name", ",", "dir_", ")", "in", "locale_dirs", ":", "if", "(", "resources", "and", "(", "not", "(", "name", "in", "resources", ")", ")", ")", ":", "continue", "print", "(", "\"\\nShowing translations stats for '%s':\"", "%", "name", ")", "langs", "=", "sorted", "(", "[", "d", "for", "d", "in", "os", ".", "listdir", "(", "dir_", ")", "if", "(", "not", "d", ".", "startswith", "(", "'_'", ")", ")", "]", ")", "for", "lang", "in", "langs", ":", "if", "(", "languages", "and", "(", "not", "(", "lang", "in", "languages", ")", ")", ")", ":", "continue", "p", "=", "Popen", "(", "(", "'msgfmt -vc -o /dev/null %(path)s/%(lang)s/LC_MESSAGES/django%(ext)s.po'", "%", "{", "'path'", ":", "dir_", ",", "'lang'", ":", "lang", ",", "'ext'", ":", "(", "'js'", "if", "name", ".", "endswith", "(", "'-js'", ")", "else", "''", ")", "}", ")", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ",", "shell", "=", "True", ")", "(", "output", ",", "errors", ")", "=", "p", ".", "communicate", "(", ")", "if", "(", "p", ".", "returncode", "==", "0", ")", ":", "print", "(", "'%s: %s'", "%", "(", "lang", ",", "errors", ".", "strip", "(", ")", ")", ")", "else", ":", "print", "(", "'Errors happened when checking %s translation for %s:\\n%s'", "%", "(", "lang", ",", "name", ",", "errors", ")", ")" ]
output language statistics of committed translation files for each django catalog .
train
false
10,921
@nx.utils.not_implemented_for('undirected')
def is_branching(G):
    return (is_forest(G) and (max((d for (n, d) in G.in_degree())) <= 1))
[ "@", "nx", ".", "utils", ".", "not_implemented_for", "(", "'undirected'", ")", "def", "is_branching", "(", "G", ")", ":", "return", "(", "is_forest", "(", "G", ")", "and", "(", "max", "(", "(", "d", "for", "(", "n", ",", "d", ")", "in", "G", ".", "in_degree", "(", ")", ")", ")", "<=", "1", ")", ")" ]
returns true if g is a branching .
train
false
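A branching is a forest in which every node has in-degree at most one; a small sketch assuming the networkx version of the function above:

import networkx as nx

G = nx.DiGraph([(0, 1), (0, 2), (1, 3)])  # a rooted tree: forest, max in-degree 1
print(nx.is_branching(G))                 # True
G.add_edge(2, 3)                          # node 3 now has in-degree 2
print(nx.is_branching(G))                 # False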
10,924
def get_templatetags_modules():
    global templatetags_modules
    if (not templatetags_modules):
        _templatetags_modules = []
        for app_module in (['google.appengine._internal.django'] + list(settings.INSTALLED_APPS)):
            try:
                templatetag_module = ('%s.templatetags' % app_module)
                import_module(templatetag_module)
                _templatetags_modules.append(templatetag_module)
            except ImportError:
                continue
        templatetags_modules = _templatetags_modules
    return templatetags_modules
[ "def", "get_templatetags_modules", "(", ")", ":", "global", "templatetags_modules", "if", "(", "not", "templatetags_modules", ")", ":", "_templatetags_modules", "=", "[", "]", "for", "app_module", "in", "(", "[", "'google.appengine._internal.django'", "]", "+", "list", "(", "settings", ".", "INSTALLED_APPS", ")", ")", ":", "try", ":", "templatetag_module", "=", "(", "'%s.templatetags'", "%", "app_module", ")", "import_module", "(", "templatetag_module", ")", "_templatetags_modules", ".", "append", "(", "templatetag_module", ")", "except", "ImportError", ":", "continue", "templatetags_modules", "=", "_templatetags_modules", "return", "templatetags_modules" ]
return the list of all available template tag modules .
train
false
10,925
def download_libiconv(dest_dir, version=None):
    version_re = re.compile('^libiconv-([0-9.]+[0-9]).tar.gz$')
    filename = 'libiconv-%s.tar.gz'
    return download_library(dest_dir, LIBICONV_LOCATION, 'libiconv', version_re, filename, version=version)
[ "def", "download_libiconv", "(", "dest_dir", ",", "version", "=", "None", ")", ":", "version_re", "=", "re", ".", "compile", "(", "'^libiconv-([0-9.]+[0-9]).tar.gz$'", ")", "filename", "=", "'libiconv-%s.tar.gz'", "return", "download_library", "(", "dest_dir", ",", "LIBICONV_LOCATION", ",", "'libiconv'", ",", "version_re", ",", "filename", ",", "version", "=", "version", ")" ]
downloads libiconv .
train
false
10,926
@core_helper
def groups_available(am_member=False):
    context = {}
    data_dict = {'available_only': True, 'am_member': am_member}
    return logic.get_action('group_list_authz')(context, data_dict)
[ "@", "core_helper", "def", "groups_available", "(", "am_member", "=", "False", ")", ":", "context", "=", "{", "}", "data_dict", "=", "{", "'available_only'", ":", "True", ",", "'am_member'", ":", "am_member", "}", "return", "logic", ".", "get_action", "(", "'group_list_authz'", ")", "(", "context", ",", "data_dict", ")" ]
return a list of the groups that the user is authorized to edit .
train
false
10,927
def get_config_from_root(root):
    setup_cfg = os.path.join(root, 'setup.cfg')
    parser = configparser.SafeConfigParser()
    with open(setup_cfg, 'r') as f:
        parser.readfp(f)
    VCS = parser.get('versioneer', 'VCS')

    def get(parser, name):
        if parser.has_option('versioneer', name):
            return parser.get('versioneer', name)
        return None

    cfg = VersioneerConfig()
    cfg.VCS = VCS
    cfg.style = (get(parser, 'style') or '')
    cfg.versionfile_source = get(parser, 'versionfile_source')
    cfg.versionfile_build = get(parser, 'versionfile_build')
    cfg.tag_prefix = get(parser, 'tag_prefix')
    if (cfg.tag_prefix in ("''", '""')):
        cfg.tag_prefix = ''
    cfg.parentdir_prefix = get(parser, 'parentdir_prefix')
    cfg.verbose = get(parser, 'verbose')
    return cfg
[ "def", "get_config_from_root", "(", "root", ")", ":", "setup_cfg", "=", "os", ".", "path", ".", "join", "(", "root", ",", "'setup.cfg'", ")", "parser", "=", "configparser", ".", "SafeConfigParser", "(", ")", "with", "open", "(", "setup_cfg", ",", "'r'", ")", "as", "f", ":", "parser", ".", "readfp", "(", "f", ")", "VCS", "=", "parser", ".", "get", "(", "'versioneer'", ",", "'VCS'", ")", "def", "get", "(", "parser", ",", "name", ")", ":", "if", "parser", ".", "has_option", "(", "'versioneer'", ",", "name", ")", ":", "return", "parser", ".", "get", "(", "'versioneer'", ",", "name", ")", "return", "None", "cfg", "=", "VersioneerConfig", "(", ")", "cfg", ".", "VCS", "=", "VCS", "cfg", ".", "style", "=", "(", "get", "(", "parser", ",", "'style'", ")", "or", "''", ")", "cfg", ".", "versionfile_source", "=", "get", "(", "parser", ",", "'versionfile_source'", ")", "cfg", ".", "versionfile_build", "=", "get", "(", "parser", ",", "'versionfile_build'", ")", "cfg", ".", "tag_prefix", "=", "get", "(", "parser", ",", "'tag_prefix'", ")", "if", "(", "cfg", ".", "tag_prefix", "in", "(", "\"''\"", ",", "'\"\"'", ")", ")", ":", "cfg", ".", "tag_prefix", "=", "''", "cfg", ".", "parentdir_prefix", "=", "get", "(", "parser", ",", "'parentdir_prefix'", ")", "cfg", ".", "verbose", "=", "get", "(", "parser", ",", "'verbose'", ")", "return", "cfg" ]
read the project setup.cfg file to determine versioneer config .
train
true
10,928
def has_links(html):
    class LinkFound(Exception, ):
        pass

    def raise_on_link(attrs, new):
        raise LinkFound

    try:
        bleach.linkify(html, callbacks=[raise_on_link])
    except LinkFound:
        return True
    return False
[ "def", "has_links", "(", "html", ")", ":", "class", "LinkFound", "(", "Exception", ",", ")", ":", "pass", "def", "raise_on_link", "(", "attrs", ",", "new", ")", ":", "raise", "LinkFound", "try", ":", "bleach", ".", "linkify", "(", "html", ",", "callbacks", "=", "[", "raise_on_link", "]", ")", "except", "LinkFound", ":", "return", "True", "return", "False" ]
return true if links are found in the given html .
train
false
10,929
def test_cache_insert_data(config_stub, tmpdir):
    config_stub.data = {'storage': {'cache-size': 1024}, 'general': {'private-browsing': False}}
    url = 'http://qutebrowser.org'
    content = 'foobar'
    disk_cache = cache.DiskCache(str(tmpdir))
    assert (disk_cache.cacheSize() == 0)
    preload_cache(disk_cache, url, content)
    assert (disk_cache.cacheSize() != 0)
    assert (disk_cache.data(QUrl(url)).readAll() == content)
[ "def", "test_cache_insert_data", "(", "config_stub", ",", "tmpdir", ")", ":", "config_stub", ".", "data", "=", "{", "'storage'", ":", "{", "'cache-size'", ":", "1024", "}", ",", "'general'", ":", "{", "'private-browsing'", ":", "False", "}", "}", "url", "=", "'http://qutebrowser.org'", "content", "=", "'foobar'", "disk_cache", "=", "cache", ".", "DiskCache", "(", "str", "(", "tmpdir", ")", ")", "assert", "(", "disk_cache", ".", "cacheSize", "(", ")", "==", "0", ")", "preload_cache", "(", "disk_cache", ",", "url", ",", "content", ")", "assert", "(", "disk_cache", ".", "cacheSize", "(", ")", "!=", "0", ")", "assert", "(", "disk_cache", ".", "data", "(", "QUrl", "(", "url", ")", ")", ".", "readAll", "(", ")", "==", "content", ")" ]
test if entries inserted into the cache are actually there .
train
false
10,930
def boundary_expansion(G, S):
    return (len(nx.node_boundary(G, S)) / len(S))
[ "def", "boundary_expansion", "(", "G", ",", "S", ")", ":", "return", "(", "len", "(", "nx", ".", "node_boundary", "(", "G", ",", "S", ")", ")", "/", "len", "(", "S", ")", ")" ]
returns the boundary expansion of the set s .
train
false
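Boundary expansion is |node_boundary(G, S)| / |S|; a quick check on a 6-cycle, assuming the function above is in scope (it matches networkx.algorithms.cuts.boundary_expansion):

import networkx as nx

G = nx.cycle_graph(6)
S = {0, 1}                       # node boundary is {5, 2}
print(boundary_expansion(G, S))  # 1.0 - 2 boundary nodes / 2 set nodes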
10,932
def load_zen(request, project, subproject, lang):
    translation = get_translation(request, project, subproject, lang)
    (search_result, unitdata) = get_zen_unitdata(translation, request)
    if isinstance(search_result, HttpResponse):
        return search_result
    return render(request, u'zen-units.html', {u'object': translation, u'unitdata': unitdata, u'search_query': search_result[u'query'], u'search_id': search_result[u'search_id'], u'last_section': search_result[u'last_section']})
[ "def", "load_zen", "(", "request", ",", "project", ",", "subproject", ",", "lang", ")", ":", "translation", "=", "get_translation", "(", "request", ",", "project", ",", "subproject", ",", "lang", ")", "(", "search_result", ",", "unitdata", ")", "=", "get_zen_unitdata", "(", "translation", ",", "request", ")", "if", "isinstance", "(", "search_result", ",", "HttpResponse", ")", ":", "return", "search_result", "return", "render", "(", "request", ",", "u'zen-units.html'", ",", "{", "u'object'", ":", "translation", ",", "u'unitdata'", ":", "unitdata", ",", "u'search_query'", ":", "search_result", "[", "u'query'", "]", ",", "u'search_id'", ":", "search_result", "[", "u'search_id'", "]", ",", "u'last_section'", ":", "search_result", "[", "u'last_section'", "]", "}", ")" ]
loads additional units for zen editor .
train
false
10,933
def parse_citation(elem, directory, citation_manager):
    citation_type = elem.attrib.get('type', None)
    citation_class = CITATION_CLASSES.get(citation_type, None)
    if (not citation_class):
        log.warning(('Unknown or unspecified citation type: %s' % citation_type))
        return None
    return citation_class(elem, directory, citation_manager)
[ "def", "parse_citation", "(", "elem", ",", "directory", ",", "citation_manager", ")", ":", "citation_type", "=", "elem", ".", "attrib", ".", "get", "(", "'type'", ",", "None", ")", "citation_class", "=", "CITATION_CLASSES", ".", "get", "(", "citation_type", ",", "None", ")", "if", "(", "not", "citation_class", ")", ":", "log", ".", "warning", "(", "(", "'Unknown or unspecified citation type: %s'", "%", "citation_type", ")", ")", "return", "None", "return", "citation_class", "(", "elem", ",", "directory", ",", "citation_manager", ")" ]
parse an abstract citation entry from the specified xml element .
train
false
10,934
def is_running(proxyname):
    return {'result': _is_proxy_running(proxyname)}
[ "def", "is_running", "(", "proxyname", ")", ":", "return", "{", "'result'", ":", "_is_proxy_running", "(", "proxyname", ")", "}" ]
check if a service is running .
train
false
10,935
def fullSizeCoverURL(obj):
    if isinstance(obj, Movie):
        coverUrl = obj.get('cover url')
    elif isinstance(obj, (Person, Character)):
        coverUrl = obj.get('headshot')
    else:
        coverUrl = obj
    if (not coverUrl):
        return None
    return _Container._re_fullsizeURL.sub('', coverUrl)
[ "def", "fullSizeCoverURL", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "Movie", ")", ":", "coverUrl", "=", "obj", ".", "get", "(", "'cover url'", ")", "elif", "isinstance", "(", "obj", ",", "(", "Person", ",", "Character", ")", ")", ":", "coverUrl", "=", "obj", ".", "get", "(", "'headshot'", ")", "else", ":", "coverUrl", "=", "obj", "if", "(", "not", "coverUrl", ")", ":", "return", "None", "return", "_Container", ".", "_re_fullsizeURL", ".", "sub", "(", "''", ",", "coverUrl", ")" ]
given an url string or a movie .
train
false
10,936
def varsim(coefs, intercept, sig_u, steps=100, initvalues=None, seed=None):
    rs = np.random.RandomState(seed=seed)
    rmvnorm = rs.multivariate_normal
    (p, k, k) = coefs.shape
    ugen = rmvnorm(np.zeros(len(sig_u)), sig_u, steps)
    result = np.zeros((steps, k))
    result[p:] = (intercept + ugen[p:])
    for t in range(p, steps):
        ygen = result[t]
        for j in range(p):
            ygen += np.dot(coefs[j], result[((t - j) - 1)])
    return result
[ "def", "varsim", "(", "coefs", ",", "intercept", ",", "sig_u", ",", "steps", "=", "100", ",", "initvalues", "=", "None", ",", "seed", "=", "None", ")", ":", "rs", "=", "np", ".", "random", ".", "RandomState", "(", "seed", "=", "seed", ")", "rmvnorm", "=", "rs", ".", "multivariate_normal", "(", "p", ",", "k", ",", "k", ")", "=", "coefs", ".", "shape", "ugen", "=", "rmvnorm", "(", "np", ".", "zeros", "(", "len", "(", "sig_u", ")", ")", ",", "sig_u", ",", "steps", ")", "result", "=", "np", ".", "zeros", "(", "(", "steps", ",", "k", ")", ")", "result", "[", "p", ":", "]", "=", "(", "intercept", "+", "ugen", "[", "p", ":", "]", ")", "for", "t", "in", "range", "(", "p", ",", "steps", ")", ":", "ygen", "=", "result", "[", "t", "]", "for", "j", "in", "range", "(", "p", ")", ":", "ygen", "+=", "np", ".", "dot", "(", "coefs", "[", "j", "]", ",", "result", "[", "(", "(", "t", "-", "j", ")", "-", "1", ")", "]", ")", "return", "result" ]
simulate simple var(p) process with known coefficients .
train
false
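A hedged usage sketch for a two-variable VAR(1); the parameter values are illustrative only:

import numpy as np

coefs = np.array([[[0.5, 0.1],
                   [0.0, 0.3]]])  # shape (p=1, k=2, k=2)
intercept = np.array([1.0, 2.0])
sig_u = 0.1 * np.eye(2)           # innovation covariance
sim = varsim(coefs, intercept, sig_u, steps=200, seed=42)
print(sim.shape)                  # (200, 2)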
10,937
def set_logger_level(logger_name, log_level='error'):
    logging.getLogger(logger_name).setLevel(LOG_LEVELS.get(log_level.lower(), logging.ERROR))
[ "def", "set_logger_level", "(", "logger_name", ",", "log_level", "=", "'error'", ")", ":", "logging", ".", "getLogger", "(", "logger_name", ")", ".", "setLevel", "(", "LOG_LEVELS", ".", "get", "(", "log_level", ".", "lower", "(", ")", ",", "logging", ".", "ERROR", ")", ")" ]
tweak a specific loggers logging level .
train
true
10,940
def _require_project(code, name, source_language, **kwargs):
    from pootle_project.models import Project
    criteria = {'code': code, 'fullname': name, 'source_language': source_language, 'checkstyle': 'standard', 'treestyle': 'auto'}
    criteria.update(kwargs)
    new_project = Project.objects.get_or_create(**criteria)[0]
    return new_project
[ "def", "_require_project", "(", "code", ",", "name", ",", "source_language", ",", "**", "kwargs", ")", ":", "from", "pootle_project", ".", "models", "import", "Project", "criteria", "=", "{", "'code'", ":", "code", ",", "'fullname'", ":", "name", ",", "'source_language'", ":", "source_language", ",", "'checkstyle'", ":", "'standard'", ",", "'treestyle'", ":", "'auto'", "}", "criteria", ".", "update", "(", "kwargs", ")", "new_project", "=", "Project", ".", "objects", ".", "get_or_create", "(", "**", "criteria", ")", "[", "0", "]", "return", "new_project" ]
helper to get/create a new project .
train
false
10,941
@np.deprecate(message='stats.betai is deprecated in scipy 0.17.0; use special.betainc instead')
def betai(a, b, x):
    return _betai(a, b, x)
[ "@", "np", ".", "deprecate", "(", "message", "=", "'stats.betai is deprecated in scipy 0.17.0; use special.betainc instead'", ")", "def", "betai", "(", "a", ",", "b", ",", "x", ")", ":", "return", "_betai", "(", "a", ",", "b", ",", "x", ")" ]
betai() is deprecated in scipy 0.17.0; use special.betainc instead .
train
false
10,942
def save_object(filename, obj):
    logging.info('saving {}...'.format(filename))
    try:
        with gzip.GzipFile(filename, 'wb') as f:
            f.write(pickle.dumps(obj, 1))
    except Exception as e:
        logging.error('save failure: {}'.format(e))
        raise
[ "def", "save_object", "(", "filename", ",", "obj", ")", ":", "logging", ".", "info", "(", "'saving {}...'", ".", "format", "(", "filename", ")", ")", "try", ":", "with", "gzip", ".", "GzipFile", "(", "filename", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "pickle", ".", "dumps", "(", "obj", ",", "1", ")", ")", "except", "Exception", "as", "e", ":", "logging", ".", "error", "(", "'save failure: {}'", ".", "format", "(", "e", ")", ")", "raise" ]
compresses and pickles given object to the given filename .
train
true
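The source does not show the matching loader, so this load_object is a sketch of the plausible counterpart that reverses the gzip + pickle steps.

import gzip
import pickle

def load_object(filename):
    # decompress and unpickle an object written by save_object
    with gzip.GzipFile(filename, 'rb') as f:
        return pickle.loads(f.read())

save_object('cache.pkl.gz', {'answer': 42})
assert load_object('cache.pkl.gz') == {'answer': 42}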
10,943
def get_voters(obj): obj_type = apps.get_model('contenttypes', 'ContentType').objects.get_for_model(obj) return get_user_model().objects.filter(votes__content_type=obj_type, votes__object_id=obj.id)
[ "def", "get_voters", "(", "obj", ")", ":", "obj_type", "=", "apps", ".", "get_model", "(", "'contenttypes'", ",", "'ContentType'", ")", ".", "objects", ".", "get_for_model", "(", "obj", ")", "return", "get_user_model", "(", ")", ".", "objects", ".", "filter", "(", "votes__content_type", "=", "obj_type", ",", "votes__object_id", "=", "obj", ".", "id", ")" ]
get the voters of an object .
train
false
10,944
def emailerrors(to_address, olderror, from_address=None): from_address = (from_address or to_address) def emailerrors_internal(): error = olderror() tb = sys.exc_info() error_name = tb[0] error_value = tb[1] tb_txt = ''.join(traceback.format_exception(*tb)) path = web.ctx.path request = (((web.ctx.method + ' ') + web.ctx.home) + web.ctx.fullpath) message = ('\n%s\n\n%s\n\n' % (request, tb_txt)) sendmail(('your buggy site <%s>' % from_address), ('the bugfixer <%s>' % to_address), ('bug: %(error_name)s: %(error_value)s (%(path)s)' % locals()), message, attachments=[dict(filename='bug.html', content=safestr(djangoerror()))]) return error return emailerrors_internal
[ "def", "emailerrors", "(", "to_address", ",", "olderror", ",", "from_address", "=", "None", ")", ":", "from_address", "=", "(", "from_address", "or", "to_address", ")", "def", "emailerrors_internal", "(", ")", ":", "error", "=", "olderror", "(", ")", "tb", "=", "sys", ".", "exc_info", "(", ")", "error_name", "=", "tb", "[", "0", "]", "error_value", "=", "tb", "[", "1", "]", "tb_txt", "=", "''", ".", "join", "(", "traceback", ".", "format_exception", "(", "*", "tb", ")", ")", "path", "=", "web", ".", "ctx", ".", "path", "request", "=", "(", "(", "(", "web", ".", "ctx", ".", "method", "+", "' '", ")", "+", "web", ".", "ctx", ".", "home", ")", "+", "web", ".", "ctx", ".", "fullpath", ")", "message", "=", "(", "'\\n%s\\n\\n%s\\n\\n'", "%", "(", "request", ",", "tb_txt", ")", ")", "sendmail", "(", "(", "'your buggy site <%s>'", "%", "from_address", ")", ",", "(", "'the bugfixer <%s>'", "%", "to_address", ")", ",", "(", "'bug: %(error_name)s: %(error_value)s (%(path)s)'", "%", "locals", "(", ")", ")", ",", "message", ",", "attachments", "=", "[", "dict", "(", "filename", "=", "'bug.html'", ",", "content", "=", "safestr", "(", "djangoerror", "(", ")", ")", ")", "]", ")", "return", "error", "return", "emailerrors_internal" ]
wraps the old internalerror handler to additionally email all errors to to_address .
train
false
10,945
def getPreferenceFloat(name): try: setting = getPreference(name).replace(',', '.') return float(eval(setting, {}, {})) except: return 0.0
[ "def", "getPreferenceFloat", "(", "name", ")", ":", "try", ":", "setting", "=", "getPreference", "(", "name", ")", ".", "replace", "(", "','", ",", "'.'", ")", "return", "float", "(", "eval", "(", "setting", ",", "{", "}", ",", "{", "}", ")", ")", "except", ":", "return", "0.0" ]
get the float value of a preference .
train
false
10,946
def find_threshold_graph(G, create_using=None): return threshold_graph(find_creation_sequence(G), create_using)
[ "def", "find_threshold_graph", "(", "G", ",", "create_using", "=", "None", ")", ":", "return", "threshold_graph", "(", "find_creation_sequence", "(", "G", ")", ",", "create_using", ")" ]
return a threshold subgraph that is close to largest in g .
train
false
10,948
def ss(s, encoding=None, errors=u'strict'): encoding = (encoding or getEncoding()) try: if isinstance(s, six.binary_type): if (encoding == u'utf-8'): return s else: return s.decode(u'utf-8', errors).encode(encoding, errors) if (not isinstance(s, six.string_types)): try: if six.PY3: return six.text_type(s).encode(encoding) else: return six.binary_type(s) except UnicodeEncodeError: return six.text_type(s).encode(encoding, errors) else: return s.encode(encoding, errors) except UnicodeEncodeError as e: print e return s
[ "def", "ss", "(", "s", ",", "encoding", "=", "None", ",", "errors", "=", "u'strict'", ")", ":", "encoding", "=", "(", "encoding", "or", "getEncoding", "(", ")", ")", "try", ":", "if", "isinstance", "(", "s", ",", "six", ".", "binary_type", ")", ":", "if", "(", "encoding", "==", "u'utf-8'", ")", ":", "return", "s", "else", ":", "return", "s", ".", "decode", "(", "u'utf-8'", ",", "errors", ")", ".", "encode", "(", "encoding", ",", "errors", ")", "if", "(", "not", "isinstance", "(", "s", ",", "six", ".", "string_types", ")", ")", ":", "try", ":", "if", "six", ".", "PY3", ":", "return", "six", ".", "text_type", "(", "s", ")", ".", "encode", "(", "encoding", ")", "else", ":", "return", "six", ".", "binary_type", "(", "s", ")", "except", "UnicodeEncodeError", ":", "return", "six", ".", "text_type", "(", "s", ")", ".", "encode", "(", "encoding", ",", "errors", ")", "else", ":", "return", "s", ".", "encode", "(", "encoding", ",", "errors", ")", "except", "UnicodeEncodeError", "as", "e", ":", "print", "e", "return", "s" ]
converts basestring to sys_encoding .
train
false
10,949
def _to_m8(key): if (not isinstance(key, Timedelta)): key = Timedelta(key) return np.int64(key.value).view(_TD_DTYPE)
[ "def", "_to_m8", "(", "key", ")", ":", "if", "(", "not", "isinstance", "(", "key", ",", "Timedelta", ")", ")", ":", "key", "=", "Timedelta", "(", "key", ")", "return", "np", ".", "int64", "(", "key", ".", "value", ")", ".", "view", "(", "_TD_DTYPE", ")" ]
timedelta-like => td64 .
train
false
10,950
def _seq_pprinter_factory(start, end, basetype): def inner(obj, p, cycle): typ = type(obj) if ((basetype is not None) and (typ is not basetype) and (typ.__repr__ != basetype.__repr__)): return p.text(typ.__repr__(obj)) if cycle: return p.text(((start + '...') + end)) step = len(start) p.begin_group(step, start) for (idx, x) in p._enumerate(obj): if idx: p.text(',') p.breakable() p.pretty(x) if ((len(obj) == 1) and (type(obj) is tuple)): p.text(',') p.end_group(step, end) return inner
[ "def", "_seq_pprinter_factory", "(", "start", ",", "end", ",", "basetype", ")", ":", "def", "inner", "(", "obj", ",", "p", ",", "cycle", ")", ":", "typ", "=", "type", "(", "obj", ")", "if", "(", "(", "basetype", "is", "not", "None", ")", "and", "(", "typ", "is", "not", "basetype", ")", "and", "(", "typ", ".", "__repr__", "!=", "basetype", ".", "__repr__", ")", ")", ":", "return", "p", ".", "text", "(", "typ", ".", "__repr__", "(", "obj", ")", ")", "if", "cycle", ":", "return", "p", ".", "text", "(", "(", "(", "start", "+", "'...'", ")", "+", "end", ")", ")", "step", "=", "len", "(", "start", ")", "p", ".", "begin_group", "(", "step", ",", "start", ")", "for", "(", "idx", ",", "x", ")", "in", "p", ".", "_enumerate", "(", "obj", ")", ":", "if", "idx", ":", "p", ".", "text", "(", "','", ")", "p", ".", "breakable", "(", ")", "p", ".", "pretty", "(", "x", ")", "if", "(", "(", "len", "(", "obj", ")", "==", "1", ")", "and", "(", "type", "(", "obj", ")", "is", "tuple", ")", ")", ":", "p", ".", "text", "(", "','", ")", "p", ".", "end_group", "(", "step", ",", "end", ")", "return", "inner" ]
factory that returns a pprint function useful for sequences .
train
true
10,955
def SplitMeta(meta): n = len(meta) if ((n % 2) == 1): raise ConfigurationError(('%r has an odd number of metacharacters' % meta)) return (meta[:(n / 2)], meta[(n / 2):])
[ "def", "SplitMeta", "(", "meta", ")", ":", "n", "=", "len", "(", "meta", ")", "if", "(", "(", "n", "%", "2", ")", "==", "1", ")", ":", "raise", "ConfigurationError", "(", "(", "'%r has an odd number of metacharacters'", "%", "meta", ")", ")", "return", "(", "meta", "[", ":", "(", "n", "/", "2", ")", "]", ",", "meta", "[", "(", "n", "/", "2", ")", ":", "]", ")" ]
split and validate metacharacters .
train
true
10,956
@frappe.whitelist() def get_perm_info(role): from frappe.permissions import get_all_perms return get_all_perms(role)
[ "@", "frappe", ".", "whitelist", "(", ")", "def", "get_perm_info", "(", "role", ")", ":", "from", "frappe", ".", "permissions", "import", "get_all_perms", "return", "get_all_perms", "(", "role", ")" ]
get permission info .
train
false
10,958
def parse_sparse_fields(type_=None): fields = dict(((key[7:(-1)], set(value.split(','))) for (key, value) in request.args.items() if (key.startswith('fields[') and key.endswith(']')))) return (fields.get(type_) if (type_ is not None) else fields)
[ "def", "parse_sparse_fields", "(", "type_", "=", "None", ")", ":", "fields", "=", "dict", "(", "(", "(", "key", "[", "7", ":", "(", "-", "1", ")", "]", ",", "set", "(", "value", ".", "split", "(", "','", ")", ")", ")", "for", "(", "key", ",", "value", ")", "in", "request", ".", "args", ".", "items", "(", ")", "if", "(", "key", ".", "startswith", "(", "'fields['", ")", "and", "key", ".", "endswith", "(", "']'", ")", ")", ")", ")", "return", "(", "fields", ".", "get", "(", "type_", ")", "if", "(", "type_", "is", "not", "None", ")", "else", "fields", ")" ]
get the sparse fields as requested by the client .
train
false
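To see what parse_sparse_fields extracts from a JSON API style query string, the same dict comprehension can be run outside a Flask request context with a plain dict standing in for request.args.

# query string: ?fields[person]=name,age&fields[article]=title
args = {'fields[person]': 'name,age', 'fields[article]': 'title'}

fields = {key[7:-1]: set(value.split(','))
          for key, value in args.items()
          if key.startswith('fields[') and key.endswith(']')}
print(fields)  # {'person': {'name', 'age'}, 'article': {'title'}}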
10,959
def _process_text(self, txt): if (not self.localcontext): return str2xml(txt) if (not txt): return '' result = '' sps = _regex.split(txt) while sps: to_translate = tools.ustr(sps.pop(0)) result += tools.ustr(self.localcontext.get('translate', (lambda x: x))(to_translate)) if sps: txt = None try: expr = sps.pop(0) txt = safe_eval(expr, self.localcontext) if (txt and isinstance(txt, basestring)): txt = tools.ustr(txt) except Exception: _logger.info('Failed to evaluate expression [[ %s ]] with context %r while rendering report, ignored.', expr, self.localcontext) if isinstance(txt, basestring): result += txt elif (txt and (txt is not None) and (txt is not False)): result += ustr(txt) return str2xml(result)
[ "def", "_process_text", "(", "self", ",", "txt", ")", ":", "if", "(", "not", "self", ".", "localcontext", ")", ":", "return", "str2xml", "(", "txt", ")", "if", "(", "not", "txt", ")", ":", "return", "''", "result", "=", "''", "sps", "=", "_regex", ".", "split", "(", "txt", ")", "while", "sps", ":", "to_translate", "=", "tools", ".", "ustr", "(", "sps", ".", "pop", "(", "0", ")", ")", "result", "+=", "tools", ".", "ustr", "(", "self", ".", "localcontext", ".", "get", "(", "'translate'", ",", "(", "lambda", "x", ":", "x", ")", ")", "(", "to_translate", ")", ")", "if", "sps", ":", "txt", "=", "None", "try", ":", "expr", "=", "sps", ".", "pop", "(", "0", ")", "txt", "=", "safe_eval", "(", "expr", ",", "self", ".", "localcontext", ")", "if", "(", "txt", "and", "isinstance", "(", "txt", ",", "basestring", ")", ")", ":", "txt", "=", "tools", ".", "ustr", "(", "txt", ")", "except", "Exception", ":", "_logger", ".", "info", "(", "'Failed to evaluate expression [[ %s ]] with context %r while rendering report, ignored.'", ",", "expr", ",", "self", ".", "localcontext", ")", "if", "isinstance", "(", "txt", ",", "basestring", ")", ":", "result", "+=", "txt", "elif", "(", "txt", "and", "(", "txt", "is", "not", "None", ")", "and", "(", "txt", "is", "not", "False", ")", ")", ":", "result", "+=", "ustr", "(", "txt", ")", "return", "str2xml", "(", "result", ")" ]
translate txt according to the language in the local context .
train
false
10,960
def rmatmul(self, rhs): if isinstance(rhs, variable.Variable): return MatMulVarVar()(rhs, self) _check_constant_type(rhs) return MatMulConstVar(rhs)(self)
[ "def", "rmatmul", "(", "self", ",", "rhs", ")", ":", "if", "isinstance", "(", "rhs", ",", "variable", ".", "Variable", ")", ":", "return", "MatMulVarVar", "(", ")", "(", "rhs", ",", "self", ")", "_check_constant_type", "(", "rhs", ")", "return", "MatMulConstVar", "(", "rhs", ")", "(", "self", ")" ]
reflected matrix multiplication: computes rhs @ self .
train
false
10,961
def load_from_path(): try: executable = find_executable_in_dir() if (executable is None): raise ValueError('Caffe executable not found in PATH') if (not is_pycaffe_in_dir()): raise ValueError('Pycaffe not found in PYTHONPATH') import_pycaffe() (version, flavor) = get_version_and_flavor(executable) except: print 'A valid Caffe installation was not found on your system.' print 'Use the envvar CAFFE_ROOT to indicate a valid installation.' raise return (executable, version, flavor)
[ "def", "load_from_path", "(", ")", ":", "try", ":", "executable", "=", "find_executable_in_dir", "(", ")", "if", "(", "executable", "is", "None", ")", ":", "raise", "ValueError", "(", "'Caffe executable not found in PATH'", ")", "if", "(", "not", "is_pycaffe_in_dir", "(", ")", ")", ":", "raise", "ValueError", "(", "'Pycaffe not found in PYTHONPATH'", ")", "import_pycaffe", "(", ")", "(", "version", ",", "flavor", ")", "=", "get_version_and_flavor", "(", "executable", ")", "except", ":", "print", "'A valid Caffe installation was not found on your system.'", "print", "'Use the envvar CAFFE_ROOT to indicate a valid installation.'", "raise", "return", "(", "executable", ",", "version", ",", "flavor", ")" ]
load information from an installation on standard paths .
train
false
10,963
@app.route('/status/<codes>', methods=['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'TRACE']) def view_status_code(codes): if (',' not in codes): code = int(codes) return status_code(code) choices = [] for choice in codes.split(','): if (':' not in choice): code = choice weight = 1 else: (code, weight) = choice.split(':') choices.append((int(code), float(weight))) code = weighted_choice(choices) return status_code(code)
[ "@", "app", ".", "route", "(", "'/status/<codes>'", ",", "methods", "=", "[", "'GET'", ",", "'POST'", ",", "'PUT'", ",", "'DELETE'", ",", "'PATCH'", ",", "'TRACE'", "]", ")", "def", "view_status_code", "(", "codes", ")", ":", "if", "(", "','", "not", "in", "codes", ")", ":", "code", "=", "int", "(", "codes", ")", "return", "status_code", "(", "code", ")", "choices", "=", "[", "]", "for", "choice", "in", "codes", ".", "split", "(", "','", ")", ":", "if", "(", "':'", "not", "in", "choice", ")", ":", "code", "=", "choice", "weight", "=", "1", "else", ":", "(", "code", ",", "weight", ")", "=", "choice", ".", "split", "(", "':'", ")", "choices", ".", "append", "(", "(", "int", "(", "code", ")", ",", "float", "(", "weight", ")", ")", ")", "code", "=", "weighted_choice", "(", "choices", ")", "return", "status_code", "(", "code", ")" ]
return status code or random status code if more than one are given .
train
false
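The weighted_choice helper is referenced but not shown in the snippet; a plausible implementation picks a code with probability proportional to its weight, so /status/200:3,500:1 would answer 200 roughly three times as often as 500.

import random

def weighted_choice(choices):
    # choices: list of (value, weight) pairs
    total = sum(weight for value, weight in choices)
    r = random.uniform(0, total)
    upto = 0.0
    for value, weight in choices:
        upto += weight
        if r <= upto:
            return value
    return choices[-1][0]  # guard against float rounding

print(weighted_choice([(200, 3.0), (500, 1.0)]))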
10,964
def _GenerateProject(project, options, version, generator_flags): default_config = _GetDefaultConfiguration(project.spec) if default_config.get('msvs_existing_vcproj'): return [] if version.UsesVcxproj(): return _GenerateMSBuildProject(project, options, version, generator_flags) else: return _GenerateMSVSProject(project, options, version, generator_flags)
[ "def", "_GenerateProject", "(", "project", ",", "options", ",", "version", ",", "generator_flags", ")", ":", "default_config", "=", "_GetDefaultConfiguration", "(", "project", ".", "spec", ")", "if", "default_config", ".", "get", "(", "'msvs_existing_vcproj'", ")", ":", "return", "[", "]", "if", "version", ".", "UsesVcxproj", "(", ")", ":", "return", "_GenerateMSBuildProject", "(", "project", ",", "options", ",", "version", ",", "generator_flags", ")", "else", ":", "return", "_GenerateMSVSProject", "(", "project", ",", "options", ",", "version", ",", "generator_flags", ")" ]
generates a vcproj file .
train
false
10,965
def UpdateManifestResourcesFromXMLFile(dstpath, srcpath, names=None, languages=None): logger.info('Updating manifest from %s in %s', srcpath, dstpath) if dstpath.lower().endswith('.exe'): name = 1 else: name = 2 winresource.UpdateResourcesFromDataFile(dstpath, srcpath, RT_MANIFEST, (names or [name]), (languages or [0, '*']))
[ "def", "UpdateManifestResourcesFromXMLFile", "(", "dstpath", ",", "srcpath", ",", "names", "=", "None", ",", "languages", "=", "None", ")", ":", "logger", ".", "info", "(", "'Updating manifest from %s in %s'", ",", "srcpath", ",", "dstpath", ")", "if", "dstpath", ".", "lower", "(", ")", ".", "endswith", "(", "'.exe'", ")", ":", "name", "=", "1", "else", ":", "name", "=", "2", "winresource", ".", "UpdateResourcesFromDataFile", "(", "dstpath", ",", "srcpath", ",", "RT_MANIFEST", ",", "(", "names", "or", "[", "name", "]", ")", ",", "(", "languages", "or", "[", "0", ",", "'*'", "]", ")", ")" ]
update or add manifest xml from srcpath as resource in dstpath .
train
true
10,966
def unpack_infer(stmt, context=None): if isinstance(stmt, (List, Tuple)): for elt in stmt.elts: for infered_elt in unpack_infer(elt, context): (yield infered_elt) return infered = next(stmt.infer(context)) if (infered is stmt): (yield infered) return for infered in stmt.infer(context): if (infered is YES): (yield infered) else: for inf_inf in unpack_infer(infered, context): (yield inf_inf)
[ "def", "unpack_infer", "(", "stmt", ",", "context", "=", "None", ")", ":", "if", "isinstance", "(", "stmt", ",", "(", "List", ",", "Tuple", ")", ")", ":", "for", "elt", "in", "stmt", ".", "elts", ":", "for", "infered_elt", "in", "unpack_infer", "(", "elt", ",", "context", ")", ":", "(", "yield", "infered_elt", ")", "return", "infered", "=", "next", "(", "stmt", ".", "infer", "(", "context", ")", ")", "if", "(", "infered", "is", "stmt", ")", ":", "(", "yield", "infered", ")", "return", "for", "infered", "in", "stmt", ".", "infer", "(", "context", ")", ":", "if", "(", "infered", "is", "YES", ")", ":", "(", "yield", "infered", ")", "else", ":", "for", "inf_inf", "in", "unpack_infer", "(", "infered", ",", "context", ")", ":", "(", "yield", "inf_inf", ")" ]
recursively generate nodes inferred by the given statement .
train
false
10,967
def hellinger(vec1, vec2): if scipy.sparse.issparse(vec1): vec1 = vec1.toarray() if scipy.sparse.issparse(vec2): vec2 = vec2.toarray() if (isbow(vec1) and isbow(vec2)): (vec1, vec2) = (dict(vec1), dict(vec2)) if (len(vec2) < len(vec1)): (vec1, vec2) = (vec2, vec1) sim = np.sqrt((0.5 * sum((((np.sqrt(value) - np.sqrt(vec2.get(index, 0.0))) ** 2) for (index, value) in iteritems(vec1))))) return sim else: sim = np.sqrt((0.5 * ((np.sqrt(vec1) - np.sqrt(vec2)) ** 2).sum())) return sim
[ "def", "hellinger", "(", "vec1", ",", "vec2", ")", ":", "if", "scipy", ".", "sparse", ".", "issparse", "(", "vec1", ")", ":", "vec1", "=", "vec1", ".", "toarray", "(", ")", "if", "scipy", ".", "sparse", ".", "issparse", "(", "vec2", ")", ":", "vec2", "=", "vec2", ".", "toarray", "(", ")", "if", "(", "isbow", "(", "vec1", ")", "and", "isbow", "(", "vec2", ")", ")", ":", "(", "vec1", ",", "vec2", ")", "=", "(", "dict", "(", "vec1", ")", ",", "dict", "(", "vec2", ")", ")", "if", "(", "len", "(", "vec2", ")", "<", "len", "(", "vec1", ")", ")", ":", "(", "vec1", ",", "vec2", ")", "=", "(", "vec2", ",", "vec1", ")", "sim", "=", "np", ".", "sqrt", "(", "(", "0.5", "*", "sum", "(", "(", "(", "(", "np", ".", "sqrt", "(", "value", ")", "-", "np", ".", "sqrt", "(", "vec2", ".", "get", "(", "index", ",", "0.0", ")", ")", ")", "**", "2", ")", "for", "(", "index", ",", "value", ")", "in", "iteritems", "(", "vec1", ")", ")", ")", ")", ")", "return", "sim", "else", ":", "sim", "=", "np", ".", "sqrt", "(", "(", "0.5", "*", "(", "(", "np", ".", "sqrt", "(", "vec1", ")", "-", "np", ".", "sqrt", "(", "vec2", ")", ")", "**", "2", ")", ".", "sum", "(", ")", ")", ")", "return", "sim" ]
hellinger distance is a distance metric to quantify the similarity between two probability distributions .
train
false
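For two dense distributions the function reduces to the textbook formula sqrt(0.5 * sum((sqrt(p) - sqrt(q)) ** 2)); a quick numpy check with illustrative values:

import numpy as np

p = np.array([0.1, 0.4, 0.5])
q = np.array([0.2, 0.4, 0.4])
dist = np.sqrt(0.5 * ((np.sqrt(p) - np.sqrt(q)) ** 2).sum())
print(dist)  # ~0.107; 0.0 means identical distributions, 1.0 maximally different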
10,968
def writeGAEObject(obj, encoder=None): if (not obj.is_saved()): encoder.writeObject(obj) return context = encoder.context kls = obj.__class__ s = obj.key() gae_objects = getGAEObjects(context) try: referenced_object = gae_objects.getClassKey(kls, s) except KeyError: referenced_object = obj gae_objects.addClassKey(kls, s, obj) encoder.writeObject(referenced_object)
[ "def", "writeGAEObject", "(", "obj", ",", "encoder", "=", "None", ")", ":", "if", "(", "not", "obj", ".", "is_saved", "(", ")", ")", ":", "encoder", ".", "writeObject", "(", "obj", ")", "return", "context", "=", "encoder", ".", "context", "kls", "=", "obj", ".", "__class__", "s", "=", "obj", ".", "key", "(", ")", "gae_objects", "=", "getGAEObjects", "(", "context", ")", "try", ":", "referenced_object", "=", "gae_objects", ".", "getClassKey", "(", "kls", ",", "s", ")", "except", "KeyError", ":", "referenced_object", "=", "obj", "gae_objects", ".", "addClassKey", "(", "kls", ",", "s", ",", "obj", ")", "encoder", ".", "writeObject", "(", "referenced_object", ")" ]
the gae datastore creates new instances of objects for each get request .
train
false
10,971
def group_by(items, key_fn): groups = defaultdict(list) for item in items: key = key_fn(item) groups[key].append(item) return groups
[ "def", "group_by", "(", "items", ",", "key_fn", ")", ":", "groups", "=", "defaultdict", "(", "list", ")", "for", "item", "in", "items", ":", "key", "=", "key_fn", "(", "item", ")", "groups", "[", "key", "]", ".", "append", "(", "item", ")", "return", "groups" ]
returns a defaultdict mapping each key_fn result to the list of items that produced it .
train
false
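A short usage example for group_by:

words = ['apple', 'avocado', 'banana', 'cherry', 'cranberry']
by_letter = group_by(words, key_fn=lambda w: w[0])
print(dict(by_letter))
# {'a': ['apple', 'avocado'], 'b': ['banana'], 'c': ['cherry', 'cranberry']}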
10,972
def delicious_datetime(str): return datetime.datetime(*time.strptime(str, ISO_8601_DATETIME)[0:6])
[ "def", "delicious_datetime", "(", "str", ")", ":", "return", "datetime", ".", "datetime", "(", "*", "time", ".", "strptime", "(", "str", ",", "ISO_8601_DATETIME", ")", "[", "0", ":", "6", "]", ")" ]
parse an iso 8601 formatted string to a python datetime .
train
false
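ISO_8601_DATETIME is defined elsewhere in the source; assuming it is the usual '%Y-%m-%dT%H:%M:%SZ' pattern, the call is equivalent to:

import datetime
import time

ISO_8601_DATETIME = '%Y-%m-%dT%H:%M:%SZ'  # assumed pattern, not shown in the snippet

ts = time.strptime('2008-05-12T10:15:00Z', ISO_8601_DATETIME)
print(datetime.datetime(*ts[0:6]))  # 2008-05-12 10:15:00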
10,973
def get_messages(request): return getattr(request, '_messages', [])
[ "def", "get_messages", "(", "request", ")", ":", "return", "getattr", "(", "request", ",", "'_messages'", ",", "[", "]", ")" ]
a helper method to get the messages attached to the request .
train
false
10,974
@cacheit def _extract_delta(expr, index): if (not _has_simple_delta(expr, index)): return (None, expr) if isinstance(expr, KroneckerDelta): return (expr, S(1)) if (not expr.is_Mul): raise ValueError('Incorrect expr') delta = None terms = [] for arg in expr.args: if ((delta is None) and _is_simple_delta(arg, index)): delta = arg else: terms.append(arg) return (delta, expr.func(*terms))
[ "@", "cacheit", "def", "_extract_delta", "(", "expr", ",", "index", ")", ":", "if", "(", "not", "_has_simple_delta", "(", "expr", ",", "index", ")", ")", ":", "return", "(", "None", ",", "expr", ")", "if", "isinstance", "(", "expr", ",", "KroneckerDelta", ")", ":", "return", "(", "expr", ",", "S", "(", "1", ")", ")", "if", "(", "not", "expr", ".", "is_Mul", ")", ":", "raise", "ValueError", "(", "'Incorrect expr'", ")", "delta", "=", "None", "terms", "=", "[", "]", "for", "arg", "in", "expr", ".", "args", ":", "if", "(", "(", "delta", "is", "None", ")", "and", "_is_simple_delta", "(", "arg", ",", "index", ")", ")", ":", "delta", "=", "arg", "else", ":", "terms", ".", "append", "(", "arg", ")", "return", "(", "delta", ",", "expr", ".", "func", "(", "*", "terms", ")", ")" ]
extract a simple kroneckerdelta from the expression .
train
false
10,975
def maxmackie(crypt_me): abc = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890' crypt = 'mplnkoijbqazdsxcwerfgvhutyQASXZWDVFCERGTBHNYKMJLUPIO4567382901' print crypt_me.translate(string.maketrans(abc, crypt))
[ "def", "maxmackie", "(", "crypt_me", ")", ":", "abc", "=", "'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'", "crypt", "=", "'mplnkoijbqazdsxcwerfgvhutyQASXZWDVFCERGTBHNYKMJLUPIO4567382901'", "print", "crypt_me", ".", "translate", "(", "string", ".", "maketrans", "(", "abc", ",", "crypt", ")", ")" ]
just try and crack this cipher .
train
false
10,976
def getBitsFromNum(num, bitsPerComponent=8): if (not isinstance(num, int)): return ((-1), 'num must be an integer') if (not isinstance(bitsPerComponent, int)): return ((-1), 'bitsPerComponent must be an integer') try: bitsRepresentation = bin(num) bitsRepresentation = bitsRepresentation.replace('0b', '') mod = (len(bitsRepresentation) % 8) if (mod != 0): bitsRepresentation = (('0' * (8 - mod)) + bitsRepresentation) bitsRepresentation = bitsRepresentation[((-1) * bitsPerComponent):] except: return ((-1), 'Error in conversion from number to bits') return (0, bitsRepresentation)
[ "def", "getBitsFromNum", "(", "num", ",", "bitsPerComponent", "=", "8", ")", ":", "if", "(", "not", "isinstance", "(", "num", ",", "int", ")", ")", ":", "return", "(", "(", "-", "1", ")", ",", "'num must be an integer'", ")", "if", "(", "not", "isinstance", "(", "bitsPerComponent", ",", "int", ")", ")", ":", "return", "(", "(", "-", "1", ")", ",", "'bitsPerComponent must be an integer'", ")", "try", ":", "bitsRepresentation", "=", "bin", "(", "num", ")", "bitsRepresentation", "=", "bitsRepresentation", ".", "replace", "(", "'0b'", ",", "''", ")", "mod", "=", "(", "len", "(", "bitsRepresentation", ")", "%", "8", ")", "if", "(", "mod", "!=", "0", ")", ":", "bitsRepresentation", "=", "(", "(", "'0'", "*", "(", "8", "-", "mod", ")", ")", "+", "bitsRepresentation", ")", "bitsRepresentation", "=", "bitsRepresentation", "[", "(", "(", "-", "1", ")", "*", "bitsPerComponent", ")", ":", "]", "except", ":", "return", "(", "(", "-", "1", ")", ",", "'Error in conversion from number to bits'", ")", "return", "(", "0", ",", "bitsRepresentation", ")" ]
converts a number to its bit-string representation .
train
false
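A worked example of the conversion: 65 is 0b1000001, which is zero-padded to a full byte and then truncated to the last bitsPerComponent bits.

status, bits = getBitsFromNum(65)
print(status, bits)  # 0 01000001

status, bits = getBitsFromNum(65, bitsPerComponent=4)
print(status, bits)  # 0 0001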
10,980
def set_brain(brain): global _BRAIN _BRAIN = brain
[ "def", "set_brain", "(", "brain", ")", ":", "global", "_BRAIN", "_BRAIN", "=", "brain" ]
set the brain used by enforce() .
train
false
10,981
def swap_word_order(source): assert ((len(source) % 4) == 0) words = ('I' * (len(source) // 4)) return struct.pack(words, *reversed(struct.unpack(words, source)))
[ "def", "swap_word_order", "(", "source", ")", ":", "assert", "(", "(", "len", "(", "source", ")", "%", "4", ")", "==", "0", ")", "words", "=", "(", "'I'", "*", "(", "len", "(", "source", ")", "//", "4", ")", ")", "return", "struct", ".", "pack", "(", "words", ",", "*", "reversed", "(", "struct", ".", "unpack", "(", "words", ",", "source", ")", ")", ")" ]
swap the order of the words in source bitstring .
train
true
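A quick demonstration on eight bytes (two 32-bit words): the words swap position while the bytes inside each word keep their order.

source = b'\x01\x02\x03\x04\x05\x06\x07\x08'
print(swap_word_order(source))  # b'\x05\x06\x07\x08\x01\x02\x03\x04'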
10,982
def fmeasure(y_true, y_pred): return fbeta_score(y_true, y_pred, beta=1)
[ "def", "fmeasure", "(", "y_true", ",", "y_pred", ")", ":", "return", "fbeta_score", "(", "y_true", ",", "y_pred", ",", "beta", "=", "1", ")" ]
computes the f-measure .
train
false
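fbeta_score weights precision and recall as F_beta = (1 + beta^2) * P * R / (beta^2 * P + R); with beta=1 this is the familiar harmonic mean, e.g.:

precision, recall = 0.5, 1.0
f1 = 2 * precision * recall / (precision + recall)
print(f1)  # 0.666...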
10,983
def _url_collapse_path_split(path): path_parts = [] for part in path.split('/'): if (part == '.'): path_parts.append('') else: path_parts.append(part) path_parts = ([part for part in path_parts[:(-1)] if part] + path_parts[(-1):]) if path_parts: tail_part = path_parts.pop() else: tail_part = '' head_parts = [] for part in path_parts: if (part == '..'): head_parts.pop() else: head_parts.append(part) if (tail_part and (tail_part == '..')): head_parts.pop() tail_part = '' return (('/' + '/'.join(head_parts)), tail_part)
[ "def", "_url_collapse_path_split", "(", "path", ")", ":", "path_parts", "=", "[", "]", "for", "part", "in", "path", ".", "split", "(", "'/'", ")", ":", "if", "(", "part", "==", "'.'", ")", ":", "path_parts", ".", "append", "(", "''", ")", "else", ":", "path_parts", ".", "append", "(", "part", ")", "path_parts", "=", "(", "[", "part", "for", "part", "in", "path_parts", "[", ":", "(", "-", "1", ")", "]", "if", "part", "]", "+", "path_parts", "[", "(", "-", "1", ")", ":", "]", ")", "if", "path_parts", ":", "tail_part", "=", "path_parts", ".", "pop", "(", ")", "else", ":", "tail_part", "=", "''", "head_parts", "=", "[", "]", "for", "part", "in", "path_parts", ":", "if", "(", "part", "==", "'..'", ")", ":", "head_parts", ".", "pop", "(", ")", "else", ":", "head_parts", ".", "append", "(", "part", ")", "if", "(", "tail_part", "and", "(", "tail_part", "==", "'..'", ")", ")", ":", "head_parts", ".", "pop", "(", ")", "tail_part", "=", "''", "return", "(", "(", "'/'", "+", "'/'", ".", "join", "(", "head_parts", ")", ")", ",", "tail_part", ")" ]
given a url path, collapse '.' and '..' elements and split it into a head path and tail component .
train
false
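Tracing the helper on representative paths shows both the collapsing and the head/tail split:

print(_url_collapse_path_split('/a/b/../c/./d'))  # ('/a/c', 'd')
print(_url_collapse_path_split('/a/b/..'))        # ('/a', '')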
10,984
def get_nav_open(fund_type='all'): if (ct._check_nav_oft_input(fund_type) is True): ct._write_head() nums = _get_fund_num((ct.SINA_NAV_COUNT_URL % (ct.P_TYPE['http'], ct.DOMAINS['vsf'], ct.NAV_OPEN_KEY[fund_type], ct.NAV_OPEN_API[fund_type], ct.NAV_OPEN_T2[fund_type], ct.NAV_OPEN_T3))) fund_df = _parse_fund_data((ct.SINA_NAV_DATA_URL % (ct.P_TYPE['http'], ct.DOMAINS['vsf'], ct.NAV_OPEN_KEY[fund_type], ct.NAV_OPEN_API[fund_type], nums, ct.NAV_OPEN_T2[fund_type], ct.NAV_OPEN_T3))) return fund_df
[ "def", "get_nav_open", "(", "fund_type", "=", "'all'", ")", ":", "if", "(", "ct", ".", "_check_nav_oft_input", "(", "fund_type", ")", "is", "True", ")", ":", "ct", ".", "_write_head", "(", ")", "nums", "=", "_get_fund_num", "(", "(", "ct", ".", "SINA_NAV_COUNT_URL", "%", "(", "ct", ".", "P_TYPE", "[", "'http'", "]", ",", "ct", ".", "DOMAINS", "[", "'vsf'", "]", ",", "ct", ".", "NAV_OPEN_KEY", "[", "fund_type", "]", ",", "ct", ".", "NAV_OPEN_API", "[", "fund_type", "]", ",", "ct", ".", "NAV_OPEN_T2", "[", "fund_type", "]", ",", "ct", ".", "NAV_OPEN_T3", ")", ")", ")", "fund_df", "=", "_parse_fund_data", "(", "(", "ct", ".", "SINA_NAV_DATA_URL", "%", "(", "ct", ".", "P_TYPE", "[", "'http'", "]", ",", "ct", ".", "DOMAINS", "[", "'vsf'", "]", ",", "ct", ".", "NAV_OPEN_KEY", "[", "fund_type", "]", ",", "ct", ".", "NAV_OPEN_API", "[", "fund_type", "]", ",", "nums", ",", "ct", ".", "NAV_OPEN_T2", "[", "fund_type", "]", ",", "ct", ".", "NAV_OPEN_T3", ")", ")", ")", "return", "fund_df" ]
get open-end fund net asset value (nav) data for the given fund type .
train
false
10,985
def extract_urls_from_markdown(md): html = snudown.markdown(_force_utf8(md)) links = SoupStrainer('a') for link in BeautifulSoup(html, parseOnlyThese=links): url = link.get('href') if url: (yield url)
[ "def", "extract_urls_from_markdown", "(", "md", ")", ":", "html", "=", "snudown", ".", "markdown", "(", "_force_utf8", "(", "md", ")", ")", "links", "=", "SoupStrainer", "(", "'a'", ")", "for", "link", "in", "BeautifulSoup", "(", "html", ",", "parseOnlyThese", "=", "links", ")", ":", "url", "=", "link", ".", "get", "(", "'href'", ")", "if", "url", ":", "(", "yield", "url", ")" ]
extract urls that will be hot links from a piece of raw markdown .
train
false
10,987
def var_propagate1_post36(a, b): c = ((a if (a > b) else b) + 5) return c
[ "def", "var_propagate1_post36", "(", "a", ",", "b", ")", ":", "c", "=", "(", "(", "a", "if", "(", "a", ">", "b", ")", "else", "b", ")", "+", "5", ")", "return", "c" ]
returns the larger of a and b plus 5; exercises variable propagation across a conditional .
train
false
10,988
def _sas(l1, d, l2): p1 = Point(0, 0) p2 = Point(l2, 0) p3 = Point((cos(rad(d)) * l1), (sin(rad(d)) * l1)) return Triangle(p1, p2, p3)
[ "def", "_sas", "(", "l1", ",", "d", ",", "l2", ")", ":", "p1", "=", "Point", "(", "0", ",", "0", ")", "p2", "=", "Point", "(", "l2", ",", "0", ")", "p3", "=", "Point", "(", "(", "cos", "(", "rad", "(", "d", ")", ")", "*", "l1", ")", ",", "(", "sin", "(", "rad", "(", "d", ")", ")", "*", "l1", ")", ")", "return", "Triangle", "(", "p1", ",", "p2", ",", "p3", ")" ]
return triangle having side with length l2 on the x-axis .
train
false
10,989
def describe_usage_plans(name=None, plan_id=None, region=None, key=None, keyid=None, profile=None): try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) plans = _multi_call(conn.get_usage_plans, 'items') if name: plans = _filter_plans('name', name, plans) if plan_id: plans = _filter_plans('id', plan_id, plans) return {'plans': [_convert_datetime_str(plan) for plan in plans]} except ClientError as e: return {'error': salt.utils.boto3.get_error(e)}
[ "def", "describe_usage_plans", "(", "name", "=", "None", ",", "plan_id", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "plans", "=", "_multi_call", "(", "conn", ".", "get_usage_plans", ",", "'items'", ")", "if", "name", ":", "plans", "=", "_filter_plans", "(", "'name'", ",", "name", ",", "plans", ")", "if", "plan_id", ":", "plans", "=", "_filter_plans", "(", "'id'", ",", "plan_id", ",", "plans", ")", "return", "{", "'plans'", ":", "[", "_convert_datetime_str", "(", "plan", ")", "for", "plan", "in", "plans", "]", "}", "except", "ClientError", "as", "e", ":", "return", "{", "'error'", ":", "salt", ".", "utils", ".", "boto3", ".", "get_error", "(", "e", ")", "}" ]
returns a list of existing usage plans .
train
false
10,990
def send_next_event(email, event_name, link, up_coming_events): message_settings = MessageSettings.query.filter_by(action=NEXT_EVENT).first() if ((not message_settings) or (message_settings.mail_status == 1)): upcoming_event_html = '<ul>' for event in up_coming_events: upcoming_event_html += ("<a href='%s'><li> %s </li></a>" % (url_for('events.details_view', event_id=event.id, _external=True), event.name)) upcoming_event_html += '</ul><br/>' send_email(to=email, action=NEXT_EVENT, subject=MAILS[NEXT_EVENT]['subject'].format(event_name=event_name), html=MAILS[NEXT_EVENT]['message'].format(email=str(email), event_name=str(event_name), link=link, up_coming_events=upcoming_event_html))
[ "def", "send_next_event", "(", "email", ",", "event_name", ",", "link", ",", "up_coming_events", ")", ":", "message_settings", "=", "MessageSettings", ".", "query", ".", "filter_by", "(", "action", "=", "NEXT_EVENT", ")", ".", "first", "(", ")", "if", "(", "(", "not", "message_settings", ")", "or", "(", "message_settings", ".", "mail_status", "==", "1", ")", ")", ":", "upcoming_event_html", "=", "'<ul>'", "for", "event", "in", "up_coming_events", ":", "upcoming_event_html", "+=", "(", "\"<a href='%s'><li> %s </li></a>\"", "%", "(", "url_for", "(", "'events.details_view'", ",", "event_id", "=", "event", ".", "id", ",", "_external", "=", "True", ")", ",", "event", ".", "name", ")", ")", "upcoming_event_html", "+=", "'</ul><br/>'", "send_email", "(", "to", "=", "email", ",", "action", "=", "NEXT_EVENT", ",", "subject", "=", "MAILS", "[", "NEXT_EVENT", "]", "[", "'subject'", "]", ".", "format", "(", "event_name", "=", "event_name", ")", ",", "html", "=", "MAILS", "[", "NEXT_EVENT", "]", "[", "'message'", "]", ".", "format", "(", "email", "=", "str", "(", "email", ")", ",", "event_name", "=", "str", "(", "event_name", ")", ",", "link", "=", "link", ",", "up_coming_events", "=", "upcoming_event_html", ")", ")" ]
send the next event notification email, including a list of upcoming events .
train
false
10,992
@pytest.mark.usefixtures('back_up_rc') def test_get_user_config_nonexistent(): assert (config.get_user_config() == config.DEFAULT_CONFIG)
[ "@", "pytest", ".", "mark", ".", "usefixtures", "(", "'back_up_rc'", ")", "def", "test_get_user_config_nonexistent", "(", ")", ":", "assert", "(", "config", ".", "get_user_config", "(", ")", "==", "config", ".", "DEFAULT_CONFIG", ")" ]
get config from a nonexistent rc file under ~/ .
train
false
10,996
def write_pem(text, path, overwrite=True, pem_type=None): old_umask = os.umask(63) text = get_pem_entry(text, pem_type=pem_type) _dhparams = '' _private_key = '' if (pem_type and (pem_type == 'CERTIFICATE') and os.path.isfile(path) and (not overwrite)): _filecontents = _text_or_file(path) try: _dhparams = get_pem_entry(_filecontents, 'DH PARAMETERS') except salt.exceptions.SaltInvocationError: pass try: _private_key = get_pem_entry(_filecontents, 'RSA PRIVATE KEY') except salt.exceptions.SaltInvocationError: pass with salt.utils.fopen(path, 'w') as _fp: if (pem_type and (pem_type == 'CERTIFICATE') and _private_key): _fp.write(_private_key) _fp.write(text) if (pem_type and (pem_type == 'CERTIFICATE') and _dhparams): _fp.write(_dhparams) os.umask(old_umask) return 'PEM written to {0}'.format(path)
[ "def", "write_pem", "(", "text", ",", "path", ",", "overwrite", "=", "True", ",", "pem_type", "=", "None", ")", ":", "old_umask", "=", "os", ".", "umask", "(", "63", ")", "text", "=", "get_pem_entry", "(", "text", ",", "pem_type", "=", "pem_type", ")", "_dhparams", "=", "''", "_private_key", "=", "''", "if", "(", "pem_type", "and", "(", "pem_type", "==", "'CERTIFICATE'", ")", "and", "os", ".", "path", ".", "isfile", "(", "path", ")", "and", "(", "not", "overwrite", ")", ")", ":", "_filecontents", "=", "_text_or_file", "(", "path", ")", "try", ":", "_dhparams", "=", "get_pem_entry", "(", "_filecontents", ",", "'DH PARAMETERS'", ")", "except", "salt", ".", "exceptions", ".", "SaltInvocationError", ":", "pass", "try", ":", "_private_key", "=", "get_pem_entry", "(", "_filecontents", ",", "'RSA PRIVATE KEY'", ")", "except", "salt", ".", "exceptions", ".", "SaltInvocationError", ":", "pass", "with", "salt", ".", "utils", ".", "fopen", "(", "path", ",", "'w'", ")", "as", "_fp", ":", "if", "(", "pem_type", "and", "(", "pem_type", "==", "'CERTIFICATE'", ")", "and", "_private_key", ")", ":", "_fp", ".", "write", "(", "_private_key", ")", "_fp", ".", "write", "(", "text", ")", "if", "(", "pem_type", "and", "(", "pem_type", "==", "'CERTIFICATE'", ")", "and", "_dhparams", ")", ":", "_fp", ".", "write", "(", "_dhparams", ")", "os", ".", "umask", "(", "old_umask", ")", "return", "'PEM written to {0}'", ".", "format", "(", "path", ")" ]
writes out a pem string fixing any formatting or whitespace issues before writing .
train
false
10,997
def unintegrate(x, levels): levels = list(levels)[:] if (len(levels) > 1): x0 = levels.pop((-1)) return unintegrate(np.cumsum(np.r_[(x0, x)]), levels) x0 = levels[0] return np.cumsum(np.r_[(x0, x)])
[ "def", "unintegrate", "(", "x", ",", "levels", ")", ":", "levels", "=", "list", "(", "levels", ")", "[", ":", "]", "if", "(", "len", "(", "levels", ")", ">", "1", ")", ":", "x0", "=", "levels", ".", "pop", "(", "(", "-", "1", ")", ")", "return", "unintegrate", "(", "np", ".", "cumsum", "(", "np", ".", "r_", "[", "(", "x0", ",", "x", ")", "]", ")", ",", "levels", ")", "x0", "=", "levels", "[", "0", "]", "return", "np", ".", "cumsum", "(", "np", ".", "r_", "[", "(", "x0", ",", "x", ")", "]", ")" ]
after taking n-differences of a series, reconstruct the original series from the given initial levels .
train
false
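A round-trip check clarifies the levels argument: it holds the initial values lost at each differencing step, first the series start, then the start of each successive difference.

import numpy as np

x = np.array([1.0, 3.0, 6.0, 10.0])
d1 = np.diff(x)   # first differences: [2, 3, 4]
d2 = np.diff(d1)  # second differences: [1, 1]

print(unintegrate(d1, [x[0]]))         # [ 1.  3.  6. 10.]
print(unintegrate(d2, [x[0], d1[0]]))  # [ 1.  3.  6. 10.]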
10,998
def dup_half_gcdex(f, g, K): if (not K.has_Field): raise DomainError(("can't compute half extended GCD over %s" % K)) (a, b) = ([K.one], []) while g: (q, r) = dup_div(f, g, K) (f, g) = (g, r) (a, b) = (b, dup_sub_mul(a, q, b, K)) a = dup_quo_ground(a, dup_LC(f, K), K) f = dup_monic(f, K) return (a, f)
[ "def", "dup_half_gcdex", "(", "f", ",", "g", ",", "K", ")", ":", "if", "(", "not", "K", ".", "has_Field", ")", ":", "raise", "DomainError", "(", "(", "\"can't compute half extended GCD over %s\"", "%", "K", ")", ")", "(", "a", ",", "b", ")", "=", "(", "[", "K", ".", "one", "]", ",", "[", "]", ")", "while", "g", ":", "(", "q", ",", "r", ")", "=", "dup_div", "(", "f", ",", "g", ",", "K", ")", "(", "f", ",", "g", ")", "=", "(", "g", ",", "r", ")", "(", "a", ",", "b", ")", "=", "(", "b", ",", "dup_sub_mul", "(", "a", ",", "q", ",", "b", ",", "K", ")", ")", "a", "=", "dup_quo_ground", "(", "a", ",", "dup_LC", "(", "f", ",", "K", ")", ",", "K", ")", "f", "=", "dup_monic", "(", "f", ",", "K", ")", "return", "(", "a", ",", "f", ")" ]
half extended euclidean algorithm in f[x] .
train
false
10,999
def MakeStatResponse(st, pathspec): response = client.StatEntry(pathspec=pathspec) if (st is None): pass else: for attr in ['st_mode', 'st_ino', 'st_dev', 'st_nlink', 'st_uid', 'st_gid', 'st_size', 'st_atime', 'st_mtime', 'st_ctime', 'st_blocks', 'st_blksize', 'st_rdev']: try: value = getattr(st, attr) if (value is None): continue value = long(value) if (value < 0): value &= 4294967295 setattr(response, attr, value) except AttributeError: pass return response
[ "def", "MakeStatResponse", "(", "st", ",", "pathspec", ")", ":", "response", "=", "client", ".", "StatEntry", "(", "pathspec", "=", "pathspec", ")", "if", "(", "st", "is", "None", ")", ":", "pass", "else", ":", "for", "attr", "in", "[", "'st_mode'", ",", "'st_ino'", ",", "'st_dev'", ",", "'st_nlink'", ",", "'st_uid'", ",", "'st_gid'", ",", "'st_size'", ",", "'st_atime'", ",", "'st_mtime'", ",", "'st_ctime'", ",", "'st_blocks'", ",", "'st_blksize'", ",", "'st_rdev'", "]", ":", "try", ":", "value", "=", "getattr", "(", "st", ",", "attr", ")", "if", "(", "value", "is", "None", ")", ":", "continue", "value", "=", "long", "(", "value", ")", "if", "(", "value", "<", "0", ")", ":", "value", "&=", "4294967295", "setattr", "(", "response", ",", "attr", ",", "value", ")", "except", "AttributeError", ":", "pass", "return", "response" ]
creates a statentry populated from the given stat result and pathspec .
train
false