Dataset columns:
  id_within_dataset      int64    values 1 to 55.5k
  snippet                string   lengths 19 to 14.2k
  tokens                 list     lengths 6 to 1.63k
  nl                     string   lengths 6 to 352
  split_within_dataset   string   1 class
  is_duplicated          bool     2 classes
4,450
def logExceptions(logger=None):
    logger = (logger if (logger is not None) else logging.getLogger(__name__))

    def exceptionLoggingDecorator(func):

        @functools.wraps(func)
        def exceptionLoggingWrap(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except:
                logger.exception('Unhandled exception %r from %r. Caller stack:\n%s', sys.exc_info()[1], func, ''.join(traceback.format_stack()))
                raise
        return exceptionLoggingWrap
    return exceptionLoggingDecorator
[ "def", "logExceptions", "(", "logger", "=", "None", ")", ":", "logger", "=", "(", "logger", "if", "(", "logger", "is", "not", "None", ")", "else", "logging", ".", "getLogger", "(", "__name__", ")", ")", "def", "exceptionLoggingDecorator", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "exceptionLoggingWrap", "(", "*", "args", ",", "**", "kwargs", ")", ":", "try", ":", "return", "func", "(", "*", "args", ",", "**", "kwargs", ")", "except", ":", "logger", ".", "exception", "(", "'Unhandled exception %r from %r. Caller stack:\\n%s'", ",", "sys", ".", "exc_info", "(", ")", "[", "1", "]", ",", "func", ",", "''", ".", "join", "(", "traceback", ".", "format_stack", "(", ")", ")", ")", "raise", "return", "exceptionLoggingWrap", "return", "exceptionLoggingDecorator" ]
returns a closure suitable for use as function/method decorator for logging exceptions that leave the scope of the decorated function .
train
true
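A minimal usage sketch for the decorator above (not part of the dataset row; the divide function and logging setup are illustrative, and the snippet's own module is assumed to import logging, functools, sys, and traceback):

    import logging
    logging.basicConfig(level=logging.ERROR)

    @logExceptions()
    def divide(a, b):
        return a / b

    divide(1, 0)   # logs the ZeroDivisionError with the caller stack, then re-raises it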
4,452
def ensemble(nets):
    (test_x, test_y) = test_data
    for net in nets:
        i = T.lscalar()
        net.test_mb_predictions = theano.function([i], net.layers[(-1)].y_out, givens={net.x: test_x[(i * net.mini_batch_size):((i + 1) * net.mini_batch_size)]})
        net.test_predictions = list(np.concatenate([net.test_mb_predictions(i) for i in xrange(1000)]))
    all_test_predictions = zip(*[net.test_predictions for net in nets])

    def plurality(p):
        return Counter(p).most_common(1)[0][0]
    plurality_test_predictions = [plurality(p) for p in all_test_predictions]
    test_y_eval = test_y.eval()
    error_locations = [j for j in xrange(10000) if (plurality_test_predictions[j] != test_y_eval[j])]
    erroneous_predictions = [plurality(all_test_predictions[j]) for j in error_locations]
    print 'Accuracy is {:.2%}'.format((1 - (len(error_locations) / 10000.0)))
    return (error_locations, erroneous_predictions)
[ "def", "ensemble", "(", "nets", ")", ":", "(", "test_x", ",", "test_y", ")", "=", "test_data", "for", "net", "in", "nets", ":", "i", "=", "T", ".", "lscalar", "(", ")", "net", ".", "test_mb_predictions", "=", "theano", ".", "function", "(", "[", "i", "]", ",", "net", ".", "layers", "[", "(", "-", "1", ")", "]", ".", "y_out", ",", "givens", "=", "{", "net", ".", "x", ":", "test_x", "[", "(", "i", "*", "net", ".", "mini_batch_size", ")", ":", "(", "(", "i", "+", "1", ")", "*", "net", ".", "mini_batch_size", ")", "]", "}", ")", "net", ".", "test_predictions", "=", "list", "(", "np", ".", "concatenate", "(", "[", "net", ".", "test_mb_predictions", "(", "i", ")", "for", "i", "in", "xrange", "(", "1000", ")", "]", ")", ")", "all_test_predictions", "=", "zip", "(", "*", "[", "net", ".", "test_predictions", "for", "net", "in", "nets", "]", ")", "def", "plurality", "(", "p", ")", ":", "return", "Counter", "(", "p", ")", ".", "most_common", "(", "1", ")", "[", "0", "]", "[", "0", "]", "plurality_test_predictions", "=", "[", "plurality", "(", "p", ")", "for", "p", "in", "all_test_predictions", "]", "test_y_eval", "=", "test_y", ".", "eval", "(", ")", "error_locations", "=", "[", "j", "for", "j", "in", "xrange", "(", "10000", ")", "if", "(", "plurality_test_predictions", "[", "j", "]", "!=", "test_y_eval", "[", "j", "]", ")", "]", "erroneous_predictions", "=", "[", "plurality", "(", "all_test_predictions", "[", "j", "]", ")", "for", "j", "in", "error_locations", "]", "print", "'Accuracy is {:.2%}'", ".", "format", "(", "(", "1", "-", "(", "len", "(", "error_locations", ")", "/", "10000.0", ")", ")", ")", "return", "(", "error_locations", ",", "erroneous_predictions", ")" ]
ensemble of problems .
train
false
4,453
def handle404(request): return render(request, 'handlers/404.html', status=404)
[ "def", "handle404", "(", "request", ")", ":", "return", "render", "(", "request", ",", "'handlers/404.html'", ",", "status", "=", "404", ")" ]
a handler for 404s .
train
false
4,454
def parse_boundary_stream(stream, max_header_size):
    chunk = stream.read(max_header_size)
    header_end = chunk.find('\r\n\r\n')

    def _parse_header(line):
        (main_value_pair, params) = parse_header(line)
        try:
            (name, value) = main_value_pair.split(':', 1)
        except:
            raise ValueError(('Invalid header: %r' % line))
        return (name, (value, params))
    if (header_end == (-1)):
        stream.unget(chunk)
        return (RAW, {}, stream)
    header = chunk[:header_end]
    stream.unget(chunk[(header_end + 4):])
    TYPE = RAW
    outdict = {}
    for line in header.split('\r\n'):
        try:
            (name, (value, params)) = _parse_header(line)
        except:
            continue
        if (name == 'content-disposition'):
            TYPE = FIELD
            if params.get('filename'):
                TYPE = FILE
        outdict[name] = (value, params)
    if (TYPE == RAW):
        stream.unget(chunk)
    return (TYPE, outdict, stream)
[ "def", "parse_boundary_stream", "(", "stream", ",", "max_header_size", ")", ":", "chunk", "=", "stream", ".", "read", "(", "max_header_size", ")", "header_end", "=", "chunk", ".", "find", "(", "'\\r\\n\\r\\n'", ")", "def", "_parse_header", "(", "line", ")", ":", "(", "main_value_pair", ",", "params", ")", "=", "parse_header", "(", "line", ")", "try", ":", "(", "name", ",", "value", ")", "=", "main_value_pair", ".", "split", "(", "':'", ",", "1", ")", "except", ":", "raise", "ValueError", "(", "(", "'Invalid header: %r'", "%", "line", ")", ")", "return", "(", "name", ",", "(", "value", ",", "params", ")", ")", "if", "(", "header_end", "==", "(", "-", "1", ")", ")", ":", "stream", ".", "unget", "(", "chunk", ")", "return", "(", "RAW", ",", "{", "}", ",", "stream", ")", "header", "=", "chunk", "[", ":", "header_end", "]", "stream", ".", "unget", "(", "chunk", "[", "(", "header_end", "+", "4", ")", ":", "]", ")", "TYPE", "=", "RAW", "outdict", "=", "{", "}", "for", "line", "in", "header", ".", "split", "(", "'\\r\\n'", ")", ":", "try", ":", "(", "name", ",", "(", "value", ",", "params", ")", ")", "=", "_parse_header", "(", "line", ")", "except", ":", "continue", "if", "(", "name", "==", "'content-disposition'", ")", ":", "TYPE", "=", "FIELD", "if", "params", ".", "get", "(", "'filename'", ")", ":", "TYPE", "=", "FILE", "outdict", "[", "name", "]", "=", "(", "value", ",", "params", ")", "if", "(", "TYPE", "==", "RAW", ")", ":", "stream", ".", "unget", "(", "chunk", ")", "return", "(", "TYPE", ",", "outdict", ",", "stream", ")" ]
parses one and exactly one stream that encapsulates a boundary .
train
false
4,455
def _assert_required_roles(cls, roles, methods):
    if (('appender' not in roles) or (not hasattr(cls, roles['appender']))):
        raise sa_exc.ArgumentError(('Type %s must elect an appender method to be a collection class' % cls.__name__))
    elif ((roles['appender'] not in methods) and (not hasattr(getattr(cls, roles['appender']), '_sa_instrumented'))):
        methods[roles['appender']] = ('fire_append_event', 1, None)
    if (('remover' not in roles) or (not hasattr(cls, roles['remover']))):
        raise sa_exc.ArgumentError(('Type %s must elect a remover method to be a collection class' % cls.__name__))
    elif ((roles['remover'] not in methods) and (not hasattr(getattr(cls, roles['remover']), '_sa_instrumented'))):
        methods[roles['remover']] = ('fire_remove_event', 1, None)
    if (('iterator' not in roles) or (not hasattr(cls, roles['iterator']))):
        raise sa_exc.ArgumentError(('Type %s must elect an iterator method to be a collection class' % cls.__name__))
[ "def", "_assert_required_roles", "(", "cls", ",", "roles", ",", "methods", ")", ":", "if", "(", "(", "'appender'", "not", "in", "roles", ")", "or", "(", "not", "hasattr", "(", "cls", ",", "roles", "[", "'appender'", "]", ")", ")", ")", ":", "raise", "sa_exc", ".", "ArgumentError", "(", "(", "'Type %s must elect an appender method to be a collection class'", "%", "cls", ".", "__name__", ")", ")", "elif", "(", "(", "roles", "[", "'appender'", "]", "not", "in", "methods", ")", "and", "(", "not", "hasattr", "(", "getattr", "(", "cls", ",", "roles", "[", "'appender'", "]", ")", ",", "'_sa_instrumented'", ")", ")", ")", ":", "methods", "[", "roles", "[", "'appender'", "]", "]", "=", "(", "'fire_append_event'", ",", "1", ",", "None", ")", "if", "(", "(", "'remover'", "not", "in", "roles", ")", "or", "(", "not", "hasattr", "(", "cls", ",", "roles", "[", "'remover'", "]", ")", ")", ")", ":", "raise", "sa_exc", ".", "ArgumentError", "(", "(", "'Type %s must elect a remover method to be a collection class'", "%", "cls", ".", "__name__", ")", ")", "elif", "(", "(", "roles", "[", "'remover'", "]", "not", "in", "methods", ")", "and", "(", "not", "hasattr", "(", "getattr", "(", "cls", ",", "roles", "[", "'remover'", "]", ")", ",", "'_sa_instrumented'", ")", ")", ")", ":", "methods", "[", "roles", "[", "'remover'", "]", "]", "=", "(", "'fire_remove_event'", ",", "1", ",", "None", ")", "if", "(", "(", "'iterator'", "not", "in", "roles", ")", "or", "(", "not", "hasattr", "(", "cls", ",", "roles", "[", "'iterator'", "]", ")", ")", ")", ":", "raise", "sa_exc", ".", "ArgumentError", "(", "(", "'Type %s must elect an iterator method to be a collection class'", "%", "cls", ".", "__name__", ")", ")" ]
ensure all roles are present .
train
false
4,456
def dotedges(expr, atom=(lambda x: (not isinstance(x, Basic))), pos=(), repeat=True):
    if atom(expr):
        return []
    else:
        expr_str = purestr(expr)
        arg_strs = [purestr(arg) for arg in expr.args]
        if repeat:
            expr_str += ('_%s' % str(pos))
            arg_strs = [(arg_str + ('_%s' % str((pos + (i,))))) for (i, arg_str) in enumerate(arg_strs)]
        return [('"%s" -> "%s";' % (expr_str, arg_str)) for arg_str in arg_strs]
[ "def", "dotedges", "(", "expr", ",", "atom", "=", "(", "lambda", "x", ":", "(", "not", "isinstance", "(", "x", ",", "Basic", ")", ")", ")", ",", "pos", "=", "(", ")", ",", "repeat", "=", "True", ")", ":", "if", "atom", "(", "expr", ")", ":", "return", "[", "]", "else", ":", "expr_str", "=", "purestr", "(", "expr", ")", "arg_strs", "=", "[", "purestr", "(", "arg", ")", "for", "arg", "in", "expr", ".", "args", "]", "if", "repeat", ":", "expr_str", "+=", "(", "'_%s'", "%", "str", "(", "pos", ")", ")", "arg_strs", "=", "[", "(", "arg_str", "+", "(", "'_%s'", "%", "str", "(", "(", "pos", "+", "(", "i", ",", ")", ")", ")", ")", ")", "for", "(", "i", ",", "arg_str", ")", "in", "enumerate", "(", "arg_strs", ")", "]", "return", "[", "(", "'\"%s\" -> \"%s\";'", "%", "(", "expr_str", ",", "arg_str", ")", ")", "for", "arg_str", "in", "arg_strs", "]" ]
list of strings for all expr->expr .
train
false
4,457
def resolve_order(reg, deps):
    x = []
    for dep in deps[reg]:
        x.extend(resolve_order(dep, deps))
    x.append(reg)
    return x
[ "def", "resolve_order", "(", "reg", ",", "deps", ")", ":", "x", "=", "[", "]", "for", "dep", "in", "deps", "[", "reg", "]", ":", "x", ".", "extend", "(", "resolve_order", "(", "dep", ",", "deps", ")", ")", "x", ".", "append", "(", "reg", ")", "return", "x" ]
resolve the order of all dependencies starting at a given register .
train
false
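A small worked example for resolve_order (the register names and deps mapping are made up for illustration):

    deps = {'a': ['b', 'c'], 'b': [], 'c': ['b']}
    print(resolve_order('a', deps))
    # ['b', 'b', 'c', 'a']; dependencies come before dependents, and shared
    # dependencies appear once per path, since the walk does no deduplication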
4,458
def ext(external, pillar=None):
    if isinstance(external, six.string_types):
        external = yaml.safe_load(external)
    pillar_obj = salt.pillar.get_pillar(__opts__, __grains__, __opts__['id'], __opts__['environment'], ext=external, pillar=pillar)
    ret = pillar_obj.compile_pillar()
    return ret
[ "def", "ext", "(", "external", ",", "pillar", "=", "None", ")", ":", "if", "isinstance", "(", "external", ",", "six", ".", "string_types", ")", ":", "external", "=", "yaml", ".", "safe_load", "(", "external", ")", "pillar_obj", "=", "salt", ".", "pillar", ".", "get_pillar", "(", "__opts__", ",", "__grains__", ",", "__opts__", "[", "'id'", "]", ",", "__opts__", "[", "'environment'", "]", ",", "ext", "=", "external", ",", "pillar", "=", "pillar", ")", "ret", "=", "pillar_obj", ".", "compile_pillar", "(", ")", "return", "ret" ]
generate the pillar and apply an explicit external pillar cli example: pillar : none if specified .
train
true
4,459
def create_help_entry(key, entrytext, category='General', locks=None, aliases=None):
    global _HelpEntry
    if (not _HelpEntry):
        from evennia.help.models import HelpEntry as _HelpEntry
    try:
        new_help = _HelpEntry()
        new_help.key = key
        new_help.entrytext = entrytext
        new_help.help_category = category
        if locks:
            new_help.locks.add(locks)
        if aliases:
            new_help.aliases.add(aliases)
        new_help.save()
        return new_help
    except IntegrityError:
        string = ("Could not add help entry: key '%s' already exists." % key)
        logger.log_err(string)
        return None
    except Exception:
        logger.log_trace()
        return None
[ "def", "create_help_entry", "(", "key", ",", "entrytext", ",", "category", "=", "'General'", ",", "locks", "=", "None", ",", "aliases", "=", "None", ")", ":", "global", "_HelpEntry", "if", "(", "not", "_HelpEntry", ")", ":", "from", "evennia", ".", "help", ".", "models", "import", "HelpEntry", "as", "_HelpEntry", "try", ":", "new_help", "=", "_HelpEntry", "(", ")", "new_help", ".", "key", "=", "key", "new_help", ".", "entrytext", "=", "entrytext", "new_help", ".", "help_category", "=", "category", "if", "locks", ":", "new_help", ".", "locks", ".", "add", "(", "locks", ")", "if", "aliases", ":", "new_help", ".", "aliases", ".", "add", "(", "aliases", ")", "new_help", ".", "save", "(", ")", "return", "new_help", "except", "IntegrityError", ":", "string", "=", "(", "\"Could not add help entry: key '%s' already exists.\"", "%", "key", ")", "logger", ".", "log_err", "(", "string", ")", "return", "None", "except", "Exception", ":", "logger", ".", "log_trace", "(", ")", "return", "None" ]
create a static help entry in the help database .
train
false
4,460
def on_loop(notifier, counter):
    if (counter.count > 4):
        sys.stdout.write('Exit\n')
        notifier.stop()
        sys.exit(0)
    else:
        sys.stdout.write(('Loop %d\n' % counter.count))
        counter.plusone()
[ "def", "on_loop", "(", "notifier", ",", "counter", ")", ":", "if", "(", "counter", ".", "count", ">", "4", ")", ":", "sys", ".", "stdout", ".", "write", "(", "'Exit\\n'", ")", "notifier", ".", "stop", "(", ")", "sys", ".", "exit", "(", "0", ")", "else", ":", "sys", ".", "stdout", ".", "write", "(", "(", "'Loop %d\\n'", "%", "counter", ".", "count", ")", ")", "counter", ".", "plusone", "(", ")" ]
dummy function called after each event loop .
train
true
4,462
def _coerce(T, S):
    assert (T is not bool), 'initial type T is bool'
    if (T is S):
        return T
    if ((S is int) or (S is bool)):
        return T
    if (T is int):
        return S
    if issubclass(S, T):
        return S
    if issubclass(T, S):
        return T
    if issubclass(T, int):
        return S
    if issubclass(S, int):
        return T
    if (issubclass(T, Fraction) and issubclass(S, float)):
        return S
    if (issubclass(T, float) and issubclass(S, Fraction)):
        return T
    msg = "don't know how to coerce %s and %s"
    raise TypeError((msg % (T.__name__, S.__name__)))
[ "def", "_coerce", "(", "T", ",", "S", ")", ":", "assert", "(", "T", "is", "not", "bool", ")", ",", "'initial type T is bool'", "if", "(", "T", "is", "S", ")", ":", "return", "T", "if", "(", "(", "S", "is", "int", ")", "or", "(", "S", "is", "bool", ")", ")", ":", "return", "T", "if", "(", "T", "is", "int", ")", ":", "return", "S", "if", "issubclass", "(", "S", ",", "T", ")", ":", "return", "S", "if", "issubclass", "(", "T", ",", "S", ")", ":", "return", "T", "if", "issubclass", "(", "T", ",", "int", ")", ":", "return", "S", "if", "issubclass", "(", "S", ",", "int", ")", ":", "return", "T", "if", "(", "issubclass", "(", "T", ",", "Fraction", ")", "and", "issubclass", "(", "S", ",", "float", ")", ")", ":", "return", "S", "if", "(", "issubclass", "(", "T", ",", "float", ")", "and", "issubclass", "(", "S", ",", "Fraction", ")", ")", ":", "return", "T", "msg", "=", "\"don't know how to coerce %s and %s\"", "raise", "TypeError", "(", "(", "msg", "%", "(", "T", ".", "__name__", ",", "S", ".", "__name__", ")", ")", ")" ]
coerce types t and s to a common type .
train
false
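A few illustrative calls tracing the branch logic above (Fraction is from the standard fractions module; these examples are editorial, not dataset content):

    from fractions import Fraction

    _coerce(int, float)        # -> float  (T is int, so S wins)
    _coerce(float, int)        # -> float  (S is int, so T wins)
    _coerce(Fraction, float)   # -> float  (Fraction/float pair coerces to float)
    _coerce(float, Fraction)   # -> float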
4,463
def _cmp_by_local_pref(path1, path2):
    lp1 = path1.get_pattr(BGP_ATTR_TYPE_LOCAL_PREF)
    lp2 = path2.get_pattr(BGP_ATTR_TYPE_LOCAL_PREF)
    if (not (lp1 and lp2)):
        return None
    lp1 = lp1.value
    lp2 = lp2.value
    if (lp1 > lp2):
        return path1
    elif (lp2 > lp1):
        return path2
    else:
        return None
[ "def", "_cmp_by_local_pref", "(", "path1", ",", "path2", ")", ":", "lp1", "=", "path1", ".", "get_pattr", "(", "BGP_ATTR_TYPE_LOCAL_PREF", ")", "lp2", "=", "path2", ".", "get_pattr", "(", "BGP_ATTR_TYPE_LOCAL_PREF", ")", "if", "(", "not", "(", "lp1", "and", "lp2", ")", ")", ":", "return", "None", "lp1", "=", "lp1", ".", "value", "lp2", "=", "lp2", ".", "value", "if", "(", "lp1", ">", "lp2", ")", ":", "return", "path1", "elif", "(", "lp2", ">", "lp1", ")", ":", "return", "path2", "else", ":", "return", "None" ]
selects a path with highest local-preference .
train
true
4,464
def filter_release_name(name, filter_words):
    if filter_words:
        for test_word in filter_words.split(','):
            test_word = test_word.strip()
            if test_word:
                if re.search((('(^|[\\W_])' + test_word) + '($|[\\W_])'), name, re.I):
                    logger.log((((u'' + name) + ' contains word: ') + test_word), logger.DEBUG)
                    return True
    return False
[ "def", "filter_release_name", "(", "name", ",", "filter_words", ")", ":", "if", "filter_words", ":", "for", "test_word", "in", "filter_words", ".", "split", "(", "','", ")", ":", "test_word", "=", "test_word", ".", "strip", "(", ")", "if", "test_word", ":", "if", "re", ".", "search", "(", "(", "(", "'(^|[\\\\W_])'", "+", "test_word", ")", "+", "'($|[\\\\W_])'", ")", ",", "name", ",", "re", ".", "I", ")", ":", "logger", ".", "log", "(", "(", "(", "(", "u''", "+", "name", ")", "+", "' contains word: '", ")", "+", "test_word", ")", ",", "logger", ".", "DEBUG", ")", "return", "True", "return", "False" ]
filters out results based on filter_words name: name to check filter_words : words to filter on .
train
false
4,465
def _model_oper(oper, **kwargs): return (lambda left, right: _CompoundModelMeta._from_operator(oper, left, right, **kwargs))
[ "def", "_model_oper", "(", "oper", ",", "**", "kwargs", ")", ":", "return", "(", "lambda", "left", ",", "right", ":", "_CompoundModelMeta", ".", "_from_operator", "(", "oper", ",", "left", ",", "right", ",", "**", "kwargs", ")", ")" ]
returns a function that evaluates a given python arithmetic operator between two models .
train
false
4,467
@pytest.fixture()
def filepath():

    def make_filepath(filename):
        return os.path.join(FILES_DIR, filename)
    return make_filepath
[ "@", "pytest", ".", "fixture", "(", ")", "def", "filepath", "(", ")", ":", "def", "make_filepath", "(", "filename", ")", ":", "return", "os", ".", "path", ".", "join", "(", "FILES_DIR", ",", "filename", ")", "return", "make_filepath" ]
returns full file path for test files .
train
false
4,468
@contextmanager
def atomic_write(filepath, binary=False, fsync=False):
    tmppath = (filepath + '~')
    while os.path.isfile(tmppath):
        tmppath += '~'
    try:
        with open(tmppath, ('wb' if binary else 'w')) as file:
            (yield file)
            if fsync:
                file.flush()
                os.fsync(file.fileno())
        replace(tmppath, filepath)
    finally:
        try:
            os.remove(tmppath)
        except (IOError, OSError):
            pass
[ "@", "contextmanager", "def", "atomic_write", "(", "filepath", ",", "binary", "=", "False", ",", "fsync", "=", "False", ")", ":", "tmppath", "=", "(", "filepath", "+", "'~'", ")", "while", "os", ".", "path", ".", "isfile", "(", "tmppath", ")", ":", "tmppath", "+=", "'~'", "try", ":", "with", "open", "(", "tmppath", ",", "(", "'wb'", "if", "binary", "else", "'w'", ")", ")", "as", "file", ":", "(", "yield", "file", ")", "if", "fsync", ":", "file", ".", "flush", "(", ")", "os", ".", "fsync", "(", "file", ".", "fileno", "(", ")", ")", "replace", "(", "tmppath", ",", "filepath", ")", "finally", ":", "try", ":", "os", ".", "remove", "(", "tmppath", ")", "except", "(", "IOError", ",", "OSError", ")", ":", "pass" ]
writeable file object that atomically updates a file .
train
false
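A usage sketch for atomic_write (the target path is illustrative, and replace() is assumed to be an os.rename-style helper imported by the original module):

    with atomic_write('/tmp/config.json') as f:
        f.write('{"key": "value"}')
    # data is written to 'config.json~' first and only renamed over
    # 'config.json' once the block exits cleanly, so readers never see a partial file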
4,469
def dup_from_raw_dict(f, K):
    if (not f):
        return []
    (n, h) = (max(f.keys()), [])
    for k in range(n, (-1), (-1)):
        h.append(f.get(k, K.zero))
    return dup_strip(h)
[ "def", "dup_from_raw_dict", "(", "f", ",", "K", ")", ":", "if", "(", "not", "f", ")", ":", "return", "[", "]", "(", "n", ",", "h", ")", "=", "(", "max", "(", "f", ".", "keys", "(", ")", ")", ",", "[", "]", ")", "for", "k", "in", "range", "(", "n", ",", "(", "-", "1", ")", ",", "(", "-", "1", ")", ")", ":", "h", ".", "append", "(", "f", ".", "get", "(", "k", ",", "K", ".", "zero", ")", ")", "return", "dup_strip", "(", "h", ")" ]
create a k[x] polynomial from a raw dict .
train
false
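A worked example, assuming SymPy's dense polynomial helpers and its ZZ integer domain (import paths are an assumption, not from the dataset row):

    from sympy.polys.domains import ZZ

    dup_from_raw_dict({2: ZZ(5), 0: ZZ(3)}, ZZ)
    # -> [5, 0, 3], the dense descending coefficient list for 5*x**2 + 3
    # (keys are exponents; missing exponents are filled with K.zero)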
4,470
def profile(request): return render(None, request, _profile_dict(request.user))
[ "def", "profile", "(", "request", ")", ":", "return", "render", "(", "None", ",", "request", ",", "_profile_dict", "(", "request", ".", "user", ")", ")" ]
decorator that will run the function and print a line-by-line profile .
train
false
4,471
def disk_partitions(all=False):
    retlist = []
    partitions = cext.disk_partitions()
    for partition in partitions:
        (device, mountpoint, fstype, opts) = partition
        ntuple = _common.sdiskpart(device, mountpoint, fstype, opts)
        retlist.append(ntuple)
    return retlist
[ "def", "disk_partitions", "(", "all", "=", "False", ")", ":", "retlist", "=", "[", "]", "partitions", "=", "cext", ".", "disk_partitions", "(", ")", "for", "partition", "in", "partitions", ":", "(", "device", ",", "mountpoint", ",", "fstype", ",", "opts", ")", "=", "partition", "ntuple", "=", "_common", ".", "sdiskpart", "(", "device", ",", "mountpoint", ",", "fstype", ",", "opts", ")", "retlist", ".", "append", "(", "ntuple", ")", "return", "retlist" ]
return mounted partitions as a list of namedtuple .
train
false
4,472
def stackdepth(frame):
    depth = 0
    while frame:
        frame = frame.older()
        depth += 1
    return depth
[ "def", "stackdepth", "(", "frame", ")", ":", "depth", "=", "0", "while", "frame", ":", "frame", "=", "frame", ".", "older", "(", ")", "depth", "+=", "1", "return", "depth" ]
tells the stackdepth of a gdb frame .
train
false
4,473
def has_dups(seq):
    from sympy.core.containers import Dict
    from sympy.sets.sets import Set
    if isinstance(seq, (dict, set, Dict, Set)):
        return False
    uniq = set()
    return any((True for s in seq if ((s in uniq) or uniq.add(s))))
[ "def", "has_dups", "(", "seq", ")", ":", "from", "sympy", ".", "core", ".", "containers", "import", "Dict", "from", "sympy", ".", "sets", ".", "sets", "import", "Set", "if", "isinstance", "(", "seq", ",", "(", "dict", ",", "set", ",", "Dict", ",", "Set", ")", ")", ":", "return", "False", "uniq", "=", "set", "(", ")", "return", "any", "(", "(", "True", "for", "s", "in", "seq", "if", "(", "(", "s", "in", "uniq", ")", "or", "uniq", ".", "add", "(", "s", ")", ")", ")", ")" ]
return true if there are any duplicate elements in seq .
train
false
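Two illustrative calls; the generator relies on set.add returning None, so (s in uniq) or uniq.add(s) both tests and records each element in one expression:

    has_dups([1, 2, 1])   # -> True
    has_dups([1, 2, 3])   # -> False
    has_dups({1, 2})      # -> False (set-like inputs can't hold duplicates, so they short-circuit)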
4,474
def populationStability(vectors, numSamples=None):
    numVectors = len(vectors)
    if (numSamples is None):
        numSamples = (numVectors - 1)
        countOn = range((numVectors - 1))
    else:
        countOn = numpy.random.randint(0, (numVectors - 1), numSamples)
    sigmap = 0.0
    for i in countOn:
        match = checkMatch(vectors[i], vectors[(i + 1)], sparse=False)
        if (match[1] != 0):
            sigmap += (float(match[0]) / match[1])
    return (sigmap / numSamples)
[ "def", "populationStability", "(", "vectors", ",", "numSamples", "=", "None", ")", ":", "numVectors", "=", "len", "(", "vectors", ")", "if", "(", "numSamples", "is", "None", ")", ":", "numSamples", "=", "(", "numVectors", "-", "1", ")", "countOn", "=", "range", "(", "(", "numVectors", "-", "1", ")", ")", "else", ":", "countOn", "=", "numpy", ".", "random", ".", "randint", "(", "0", ",", "(", "numVectors", "-", "1", ")", ",", "numSamples", ")", "sigmap", "=", "0.0", "for", "i", "in", "countOn", ":", "match", "=", "checkMatch", "(", "vectors", "[", "i", "]", ",", "vectors", "[", "(", "i", "+", "1", ")", "]", ",", "sparse", "=", "False", ")", "if", "(", "match", "[", "1", "]", "!=", "0", ")", ":", "sigmap", "+=", "(", "float", "(", "match", "[", "0", "]", ")", "/", "match", "[", "1", "]", ")", "return", "(", "sigmap", "/", "numSamples", ")" ]
returns the stability for the population averaged over multiple time steps parameters: vectors: the vectors for which the stability is calculated numsamples the number of time steps where stability is counted at each time step .
train
true
4,475
def comments():
    try:
        task_id = request.args[0]
    except:
        raise HTTP(400)
    table = s3db.project_comment
    field = table.task_id
    field.default = task_id
    field.writable = field.readable = False
    r = s3_request(prefix='project', name='comment', args=[], vars=None, extension='html')
    r.customise_resource()
    form = s3base.S3SQLCustomForm('parent', 'task_id', 'body')(r)
    comments = db((field == task_id)).select(table.id, table.parent, table.body, table.created_by, table.created_on)
    output = UL(_id='comments')
    for comment in comments:
        if (not comment.parent):
            thread = comment_parse(comment, comments, task_id=task_id)
            output.append(thread)
    script = ''.join("$('#comments').collapsible({xoffset:'-5',yoffset:'50',imagehide:img_path+'arrow-down.png',imageshow:img_path+'arrow-right.png',defaulthide:false})\n$('#project_comment_parent__row1').hide()\n$('#project_comment_parent__row').hide()\n$('#project_comment_body').ckeditor(ck_config)\n$('#submit_record__row input').click(function(){\n $('#comment-form').hide()\n $('#project_comment_body').ckeditorGet().destroy()\n return true\n})")
    output = DIV(output, DIV(H4(T('New Post'), _id='comment-title'), form, _id='comment-form', _class='clear'), SCRIPT(script))
    return XML(output)
[ "def", "comments", "(", ")", ":", "try", ":", "task_id", "=", "request", ".", "args", "[", "0", "]", "except", ":", "raise", "HTTP", "(", "400", ")", "table", "=", "s3db", ".", "project_comment", "field", "=", "table", ".", "task_id", "field", ".", "default", "=", "task_id", "field", ".", "writable", "=", "field", ".", "readable", "=", "False", "r", "=", "s3_request", "(", "prefix", "=", "'project'", ",", "name", "=", "'comment'", ",", "args", "=", "[", "]", ",", "vars", "=", "None", ",", "extension", "=", "'html'", ")", "r", ".", "customise_resource", "(", ")", "form", "=", "s3base", ".", "S3SQLCustomForm", "(", "'parent'", ",", "'task_id'", ",", "'body'", ")", "(", "r", ")", "comments", "=", "db", "(", "(", "field", "==", "task_id", ")", ")", ".", "select", "(", "table", ".", "id", ",", "table", ".", "parent", ",", "table", ".", "body", ",", "table", ".", "created_by", ",", "table", ".", "created_on", ")", "output", "=", "UL", "(", "_id", "=", "'comments'", ")", "for", "comment", "in", "comments", ":", "if", "(", "not", "comment", ".", "parent", ")", ":", "thread", "=", "comment_parse", "(", "comment", ",", "comments", ",", "task_id", "=", "task_id", ")", "output", ".", "append", "(", "thread", ")", "script", "=", "''", ".", "join", "(", "\"$('#comments').collapsible({xoffset:'-5',yoffset:'50',imagehide:img_path+'arrow-down.png',imageshow:img_path+'arrow-right.png',defaulthide:false})\\n$('#project_comment_parent__row1').hide()\\n$('#project_comment_parent__row').hide()\\n$('#project_comment_body').ckeditor(ck_config)\\n$('#submit_record__row input').click(function(){\\n $('#comment-form').hide()\\n $('#project_comment_body').ckeditorGet().destroy()\\n return true\\n})\"", ")", "output", "=", "DIV", "(", "output", ",", "DIV", "(", "H4", "(", "T", "(", "'New Post'", ")", ",", "_id", "=", "'comment-title'", ")", ",", "form", ",", "_id", "=", "'comment-form'", ",", "_class", "=", "'clear'", ")", ",", "SCRIPT", "(", "script", ")", ")", "return", "XML", "(", "output", ")" ]
receive use request to view comments .
train
false
4,476
def hessian_matrix_det(image, sigma=1):
    image = img_as_float(image)
    image = integral_image(image)
    return np.array(_hessian_matrix_det(image, sigma))
[ "def", "hessian_matrix_det", "(", "image", ",", "sigma", "=", "1", ")", ":", "image", "=", "img_as_float", "(", "image", ")", "image", "=", "integral_image", "(", "image", ")", "return", "np", ".", "array", "(", "_hessian_matrix_det", "(", "image", ",", "sigma", ")", ")" ]
computes the approximate hessian determinant over an image .
train
false
4,477
def bin_to_text(ip):
    if (len(ip) == 4):
        return ipv4_to_str(ip)
    elif (len(ip) == 16):
        return ipv6_to_str(ip)
    else:
        raise struct.error(('Invalid ip address length: %s' % len(ip)))
[ "def", "bin_to_text", "(", "ip", ")", ":", "if", "(", "len", "(", "ip", ")", "==", "4", ")", ":", "return", "ipv4_to_str", "(", "ip", ")", "elif", "(", "len", "(", "ip", ")", "==", "16", ")", ":", "return", "ipv6_to_str", "(", "ip", ")", "else", ":", "raise", "struct", ".", "error", "(", "(", "'Invalid ip address length: %s'", "%", "len", "(", "ip", ")", ")", ")" ]
converts binary representation to human readable ipv4 or ipv6 string .
train
true
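For example (assuming the ipv4_to_str/ipv6_to_str addrconv-style helpers the function calls behave like the usual inet_ntop formatting):

    bin_to_text(b'\x7f\x00\x00\x01')   # -> '127.0.0.1'
    bin_to_text(b'\x00' * 16)          # -> '::'
    bin_to_text(b'\x00' * 5)           # raises struct.error: Invalid ip address length: 5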
4,478
def _StartBabysitter(servers):
    import daemon
    from viewfinder.backend.prod import babysitter
    os.mkdir('logs')
    context = daemon.DaemonContext(working_directory=os.getcwd(), stdout=open(os.path.join(os.getcwd(), 'logs', 'STDOUT'), 'w+'), stderr=open(os.path.join(os.getcwd(), 'logs', 'STDERR'), 'w+'), umask=2)
    context.signal_map = {signal.SIGTERM: 'terminate', signal.SIGHUP: 'terminate'}
    with context:
        babysitter.Start(servers)
[ "def", "_StartBabysitter", "(", "servers", ")", ":", "import", "daemon", "from", "viewfinder", ".", "backend", ".", "prod", "import", "babysitter", "os", ".", "mkdir", "(", "'logs'", ")", "context", "=", "daemon", ".", "DaemonContext", "(", "working_directory", "=", "os", ".", "getcwd", "(", ")", ",", "stdout", "=", "open", "(", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "'logs'", ",", "'STDOUT'", ")", ",", "'w+'", ")", ",", "stderr", "=", "open", "(", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "'logs'", ",", "'STDERR'", ")", ",", "'w+'", ")", ",", "umask", "=", "2", ")", "context", ".", "signal_map", "=", "{", "signal", ".", "SIGTERM", ":", "'terminate'", ",", "signal", ".", "SIGHUP", ":", "'terminate'", "}", "with", "context", ":", "babysitter", ".", "Start", "(", "servers", ")" ]
runs the babysitter as a daemon process .
train
false
4,479
def test_page_title_markup():
    request = Mock()
    request.APP = amo.FIREFOX
    res = render('{{ page_title("{0}"|fe("It\'s all text")) }}', {'request': request})
    assert (res == 'It&#39;s all text :: Add-ons for Firefox')
[ "def", "test_page_title_markup", "(", ")", ":", "request", "=", "Mock", "(", ")", "request", ".", "APP", "=", "amo", ".", "FIREFOX", "res", "=", "render", "(", "'{{ page_title(\"{0}\"|fe(\"It\\'s all text\")) }}'", ",", "{", "'request'", ":", "request", "}", ")", "assert", "(", "res", "==", "'It's all text :: Add-ons for Firefox'", ")" ]
if the title passed to page_title is a jinja2 markup object .
train
false
4,480
def ccEstablishmentConfirmed(RepeatIndicator_presence=0, BearerCapability_presence=0, BearerCapability_presence1=0, Cause_presence=0):
    a = TpPd(pd=3)
    b = MessageType(mesType=6)
    packet = (a / b)
    if (RepeatIndicator_presence is 1):
        c = RepeatIndicatorHdr(ieiRI=13, eightBitRI=0)
        packet = (packet / c)
    if (BearerCapability_presence is 1):
        d = BearerCapabilityHdr(ieiBC=4, eightBitBC=0)
        packet = (packet / d)
    if (BearerCapability_presence1 is 1):
        e = BearerCapabilityHdr(ieiBC=4, eightBitBC=0)
        packet = (packet / e)
    if (Cause_presence is 1):
        f = CauseHdr(ieiC=8, eightBitC=0)
        packet = (packet / f)
    return packet
[ "def", "ccEstablishmentConfirmed", "(", "RepeatIndicator_presence", "=", "0", ",", "BearerCapability_presence", "=", "0", ",", "BearerCapability_presence1", "=", "0", ",", "Cause_presence", "=", "0", ")", ":", "a", "=", "TpPd", "(", "pd", "=", "3", ")", "b", "=", "MessageType", "(", "mesType", "=", "6", ")", "packet", "=", "(", "a", "/", "b", ")", "if", "(", "RepeatIndicator_presence", "is", "1", ")", ":", "c", "=", "RepeatIndicatorHdr", "(", "ieiRI", "=", "13", ",", "eightBitRI", "=", "0", ")", "packet", "=", "(", "packet", "/", "c", ")", "if", "(", "BearerCapability_presence", "is", "1", ")", ":", "d", "=", "BearerCapabilityHdr", "(", "ieiBC", "=", "4", ",", "eightBitBC", "=", "0", ")", "packet", "=", "(", "packet", "/", "d", ")", "if", "(", "BearerCapability_presence1", "is", "1", ")", ":", "e", "=", "BearerCapabilityHdr", "(", "ieiBC", "=", "4", ",", "eightBitBC", "=", "0", ")", "packet", "=", "(", "packet", "/", "e", ")", "if", "(", "Cause_presence", "is", "1", ")", ":", "f", "=", "CauseHdr", "(", "ieiC", "=", "8", ",", "eightBitC", "=", "0", ")", "packet", "=", "(", "packet", "/", "f", ")", "return", "packet" ]
cc-establishment confirmed section 9 .
train
true
4,481
def update_collection(committer_id, collection_id, change_list, commit_message):
    is_public = rights_manager.is_collection_public(collection_id)
    if (is_public and (not commit_message)):
        raise ValueError('Collection is public so expected a commit message but received none.')
    collection = apply_change_list(collection_id, change_list)
    _save_collection(committer_id, collection, commit_message, change_list)
    update_collection_summary(collection.id, committer_id)
    if (not rights_manager.is_collection_private(collection.id)):
        user_services.update_first_contribution_msec_if_not_set(committer_id, utils.get_current_time_in_millisecs())
[ "def", "update_collection", "(", "committer_id", ",", "collection_id", ",", "change_list", ",", "commit_message", ")", ":", "is_public", "=", "rights_manager", ".", "is_collection_public", "(", "collection_id", ")", "if", "(", "is_public", "and", "(", "not", "commit_message", ")", ")", ":", "raise", "ValueError", "(", "'Collection is public so expected a commit message but received none.'", ")", "collection", "=", "apply_change_list", "(", "collection_id", ",", "change_list", ")", "_save_collection", "(", "committer_id", ",", "collection", ",", "commit_message", ",", "change_list", ")", "update_collection_summary", "(", "collection", ".", "id", ",", "committer_id", ")", "if", "(", "not", "rights_manager", ".", "is_collection_private", "(", "collection", ".", "id", ")", ")", ":", "user_services", ".", "update_first_contribution_msec_if_not_set", "(", "committer_id", ",", "utils", ".", "get_current_time_in_millisecs", "(", ")", ")" ]
updates a collection .
train
false
4,483
def jinja_messages_to_python(src, origin=None):
    output = StringIO(u'')
    output_lineno = 1
    for (lineno, message, comments, context) in extract_jinja(src, origin):
        for comment in comments:
            output.write((u'# %s %s\n' % (COMMENT_TAG, comment)))
            output_lineno += 1
        lines_to_add = (lineno - output_lineno)
        if (lines_to_add > 0):
            output.write((lines_to_add * u'\n'))
            output_lineno += lines_to_add
        output.write((u'gettext(%r),' % (message,)))
    return output.getvalue()
[ "def", "jinja_messages_to_python", "(", "src", ",", "origin", "=", "None", ")", ":", "output", "=", "StringIO", "(", "u''", ")", "output_lineno", "=", "1", "for", "(", "lineno", ",", "message", ",", "comments", ",", "context", ")", "in", "extract_jinja", "(", "src", ",", "origin", ")", ":", "for", "comment", "in", "comments", ":", "output", ".", "write", "(", "(", "u'# %s %s\\n'", "%", "(", "COMMENT_TAG", ",", "comment", ")", ")", ")", "output_lineno", "+=", "1", "lines_to_add", "=", "(", "lineno", "-", "output_lineno", ")", "if", "(", "lines_to_add", ">", "0", ")", ":", "output", ".", "write", "(", "(", "lines_to_add", "*", "u'\\n'", ")", ")", "output_lineno", "+=", "lines_to_add", "output", ".", "write", "(", "(", "u'gettext(%r),'", "%", "(", "message", ",", ")", ")", ")", "return", "output", ".", "getvalue", "(", ")" ]
convert jinja2 file to python preserving only messages .
train
false
4,484
def getcycle(d, keys): return _toposort(d, keys=keys, returncycle=True)
[ "def", "getcycle", "(", "d", ",", "keys", ")", ":", "return", "_toposort", "(", "d", ",", "keys", "=", "keys", ",", "returncycle", "=", "True", ")" ]
return a list of nodes that form a cycle if dask is not a dag .
train
false
4,485
def new(rsa_key): return PKCS115_SigScheme(rsa_key)
[ "def", "new", "(", "rsa_key", ")", ":", "return", "PKCS115_SigScheme", "(", "rsa_key", ")" ]
return a signature scheme object dss_sigscheme that can be used to perform dss signature or verification .
train
false
4,490
def tracks_to_mpd_format(tracks, start=0, end=None):
    if (end is None):
        end = len(tracks)
    tracks = tracks[start:end]
    positions = range(start, end)
    assert (len(tracks) == len(positions))
    result = []
    for (track, position) in zip(tracks, positions):
        formatted_track = track_to_mpd_format(track, position)
        if formatted_track:
            result.append(formatted_track)
    return result
[ "def", "tracks_to_mpd_format", "(", "tracks", ",", "start", "=", "0", ",", "end", "=", "None", ")", ":", "if", "(", "end", "is", "None", ")", ":", "end", "=", "len", "(", "tracks", ")", "tracks", "=", "tracks", "[", "start", ":", "end", "]", "positions", "=", "range", "(", "start", ",", "end", ")", "assert", "(", "len", "(", "tracks", ")", "==", "len", "(", "positions", ")", ")", "result", "=", "[", "]", "for", "(", "track", ",", "position", ")", "in", "zip", "(", "tracks", ",", "positions", ")", ":", "formatted_track", "=", "track_to_mpd_format", "(", "track", ",", "position", ")", "if", "formatted_track", ":", "result", ".", "append", "(", "formatted_track", ")", "return", "result" ]
format list of tracks for output to mpd client .
train
false
4,492
def get_info_filename(base_path):
    base_file = os.path.basename(base_path)
    return (CONF.image_info_filename_pattern % {'image': base_file})
[ "def", "get_info_filename", "(", "base_path", ")", ":", "base_file", "=", "os", ".", "path", ".", "basename", "(", "base_path", ")", "return", "(", "CONF", ".", "image_info_filename_pattern", "%", "{", "'image'", ":", "base_file", "}", ")" ]
construct a filename for storing additional information about a base image .
train
false
4,493
def _generate_broadcasting_indices(out_shape, *shapes):
    all_shapes = ((out_shape,) + shapes)
    ret_indices = [[()] for shape in all_shapes]
    for dim in xrange(len(out_shape)):
        _ret_indices = [[] for shape in all_shapes]
        out_range = list(range(out_shape[dim]))
        ranges = [out_range]
        for shape in shapes:
            if (shape[dim] == out_shape[dim]):
                ranges.append(out_range)
            elif (shape[dim] == 1):
                ranges.append(([0] * out_shape[dim]))
            else:
                raise ValueError(('shape[%i] (%i) should be equal to out_shape[%i] (%i) or to 1' % (dim, shape[dim], dim, out_shape[dim])), shape, out_shape, shapes)
        for prev_index in zip(*ret_indices):
            for dim_index in zip(*ranges):
                for i in xrange(len(all_shapes)):
                    _ret_indices[i].append((prev_index[i] + (dim_index[i],)))
        ret_indices = _ret_indices
    return ret_indices
[ "def", "_generate_broadcasting_indices", "(", "out_shape", ",", "*", "shapes", ")", ":", "all_shapes", "=", "(", "(", "out_shape", ",", ")", "+", "shapes", ")", "ret_indices", "=", "[", "[", "(", ")", "]", "for", "shape", "in", "all_shapes", "]", "for", "dim", "in", "xrange", "(", "len", "(", "out_shape", ")", ")", ":", "_ret_indices", "=", "[", "[", "]", "for", "shape", "in", "all_shapes", "]", "out_range", "=", "list", "(", "range", "(", "out_shape", "[", "dim", "]", ")", ")", "ranges", "=", "[", "out_range", "]", "for", "shape", "in", "shapes", ":", "if", "(", "shape", "[", "dim", "]", "==", "out_shape", "[", "dim", "]", ")", ":", "ranges", ".", "append", "(", "out_range", ")", "elif", "(", "shape", "[", "dim", "]", "==", "1", ")", ":", "ranges", ".", "append", "(", "(", "[", "0", "]", "*", "out_shape", "[", "dim", "]", ")", ")", "else", ":", "raise", "ValueError", "(", "(", "'shape[%i] (%i) should be equal to out_shape[%i] (%i) or to 1'", "%", "(", "dim", ",", "shape", "[", "dim", "]", ",", "dim", ",", "out_shape", "[", "dim", "]", ")", ")", ",", "shape", ",", "out_shape", ",", "shapes", ")", "for", "prev_index", "in", "zip", "(", "*", "ret_indices", ")", ":", "for", "dim_index", "in", "zip", "(", "*", "ranges", ")", ":", "for", "i", "in", "xrange", "(", "len", "(", "all_shapes", ")", ")", ":", "_ret_indices", "[", "i", "]", ".", "append", "(", "(", "prev_index", "[", "i", "]", "+", "(", "dim_index", "[", "i", "]", ",", ")", ")", ")", "ret_indices", "=", "_ret_indices", "return", "ret_indices" ]
return indices over each shape that broadcast them to match out_shape .
train
false
4,494
def processSVGElementcircle(elementNode, svgReader):
    attributes = elementNode.attributes
    center = euclidean.getComplexDefaultByDictionaryKeys(complex(), attributes, 'cx', 'cy')
    radius = euclidean.getFloatDefaultByDictionary(0.0, attributes, 'r')
    if (radius == 0.0):
        print 'Warning, in processSVGElementcircle in svgReader radius is zero in:'
        print attributes
        return
    global globalNumberOfCirclePoints
    global globalSideAngle
    loop = []
    loopLayer = svgReader.getLoopLayer()
    for side in xrange(globalNumberOfCirclePoints):
        unitPolar = euclidean.getWiddershinsUnitPolar((float(side) * globalSideAngle))
        loop.append((center + (radius * unitPolar)))
    loopLayer.loops += getTransformedFillOutline(elementNode, loop, svgReader.yAxisPointingUpward)
[ "def", "processSVGElementcircle", "(", "elementNode", ",", "svgReader", ")", ":", "attributes", "=", "elementNode", ".", "attributes", "center", "=", "euclidean", ".", "getComplexDefaultByDictionaryKeys", "(", "complex", "(", ")", ",", "attributes", ",", "'cx'", ",", "'cy'", ")", "radius", "=", "euclidean", ".", "getFloatDefaultByDictionary", "(", "0.0", ",", "attributes", ",", "'r'", ")", "if", "(", "radius", "==", "0.0", ")", ":", "print", "'Warning, in processSVGElementcircle in svgReader radius is zero in:'", "print", "attributes", "return", "global", "globalNumberOfCirclePoints", "global", "globalSideAngle", "loop", "=", "[", "]", "loopLayer", "=", "svgReader", ".", "getLoopLayer", "(", ")", "for", "side", "in", "xrange", "(", "globalNumberOfCirclePoints", ")", ":", "unitPolar", "=", "euclidean", ".", "getWiddershinsUnitPolar", "(", "(", "float", "(", "side", ")", "*", "globalSideAngle", ")", ")", "loop", ".", "append", "(", "(", "center", "+", "(", "radius", "*", "unitPolar", ")", ")", ")", "loopLayer", ".", "loops", "+=", "getTransformedFillOutline", "(", "elementNode", ",", "loop", ",", "svgReader", ".", "yAxisPointingUpward", ")" ]
process xmlelement by svgreader .
train
false
4,495
def libvlc_video_get_chapter_description(p_mi, i_title):
    f = (_Cfunctions.get('libvlc_video_get_chapter_description', None) or _Cfunction('libvlc_video_get_chapter_description', ((1,), (1,)), None, ctypes.POINTER(TrackDescription), MediaPlayer, ctypes.c_int))
    return f(p_mi, i_title)
[ "def", "libvlc_video_get_chapter_description", "(", "p_mi", ",", "i_title", ")", ":", "f", "=", "(", "_Cfunctions", ".", "get", "(", "'libvlc_video_get_chapter_description'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_video_get_chapter_description'", ",", "(", "(", "1", ",", ")", ",", "(", "1", ",", ")", ")", ",", "None", ",", "ctypes", ".", "POINTER", "(", "TrackDescription", ")", ",", "MediaPlayer", ",", "ctypes", ".", "c_int", ")", ")", "return", "f", "(", "p_mi", ",", "i_title", ")" ]
get the description of available chapters for specific title .
train
true
4,497
def remove_rare_taxa(taxdata, nkeep=(-1)):
    if ((nkeep <= 0) or (nkeep > len(taxdata['prevalence']))):
        nkeep = len(taxdata['prevalence'])
    ixs = argsort(taxdata['prevalence'])
    ixs = ixs[::(-1)][:nkeep]
    taxdata['coord'] = taxdata['coord'][ixs, :]
    taxdata['counts'] = taxdata['counts'][ixs, :]
    tmp = [taxdata['lineages'][idx] for idx in ixs]
    taxdata['lineages'] = tmp
    taxdata['prevalence'] = taxdata['prevalence'][ixs]
[ "def", "remove_rare_taxa", "(", "taxdata", ",", "nkeep", "=", "(", "-", "1", ")", ")", ":", "if", "(", "(", "nkeep", "<=", "0", ")", "or", "(", "nkeep", ">", "len", "(", "taxdata", "[", "'prevalence'", "]", ")", ")", ")", ":", "nkeep", "=", "len", "(", "taxdata", "[", "'prevalence'", "]", ")", "ixs", "=", "argsort", "(", "taxdata", "[", "'prevalence'", "]", ")", "ixs", "=", "ixs", "[", ":", ":", "(", "-", "1", ")", "]", "[", ":", "nkeep", "]", "taxdata", "[", "'coord'", "]", "=", "taxdata", "[", "'coord'", "]", "[", "ixs", ",", ":", "]", "taxdata", "[", "'counts'", "]", "=", "taxdata", "[", "'counts'", "]", "[", "ixs", ",", ":", "]", "tmp", "=", "[", "taxdata", "[", "'lineages'", "]", "[", "idx", "]", "for", "idx", "in", "ixs", "]", "taxdata", "[", "'lineages'", "]", "=", "tmp", "taxdata", "[", "'prevalence'", "]", "=", "taxdata", "[", "'prevalence'", "]", "[", "ixs", "]" ]
keeps only requested number of taxa .
train
false
4,498
def _yield_all_instance_groups(emr_conn, cluster_id, *args, **kwargs):
    for resp in _repeat(emr_conn.list_instance_groups, cluster_id, *args, **kwargs):
        for group in getattr(resp, 'instancegroups', []):
            (yield group)
[ "def", "_yield_all_instance_groups", "(", "emr_conn", ",", "cluster_id", ",", "*", "args", ",", "**", "kwargs", ")", ":", "for", "resp", "in", "_repeat", "(", "emr_conn", ".", "list_instance_groups", ",", "cluster_id", ",", "*", "args", ",", "**", "kwargs", ")", ":", "for", "group", "in", "getattr", "(", "resp", ",", "'instancegroups'", ",", "[", "]", ")", ":", "(", "yield", "group", ")" ]
get all instance groups for the given cluster .
train
false
4,500
def add_message(request, level, message, extra_tags='', fail_silently=False):
    if hasattr(request, '_messages'):
        return request._messages.add(level, message, extra_tags)
    if (hasattr(request, 'user') and request.user.is_authenticated()):
        return request.user.message_set.create(message=message)
    if (not fail_silently):
        raise MessageFailure('Without the django.contrib.messages middleware, messages can only be added to authenticated users.')
[ "def", "add_message", "(", "request", ",", "level", ",", "message", ",", "extra_tags", "=", "''", ",", "fail_silently", "=", "False", ")", ":", "if", "hasattr", "(", "request", ",", "'_messages'", ")", ":", "return", "request", ".", "_messages", ".", "add", "(", "level", ",", "message", ",", "extra_tags", ")", "if", "(", "hasattr", "(", "request", ",", "'user'", ")", "and", "request", ".", "user", ".", "is_authenticated", "(", ")", ")", ":", "return", "request", ".", "user", ".", "message_set", ".", "create", "(", "message", "=", "message", ")", "if", "(", "not", "fail_silently", ")", ":", "raise", "MessageFailure", "(", "'Without the django.contrib.messages middleware, messages can only be added to authenticated users.'", ")" ]
records a message .
train
false
4,501
def random_threshold_sequence(n, p, seed=None):
    if (not (seed is None)):
        random.seed(seed)
    if (not (0 <= p <= 1)):
        raise ValueError('p must be in [0,1]')
    cs = ['d']
    for i in range(1, n):
        if (random.random() < p):
            cs.append('d')
        else:
            cs.append('i')
    return cs
[ "def", "random_threshold_sequence", "(", "n", ",", "p", ",", "seed", "=", "None", ")", ":", "if", "(", "not", "(", "seed", "is", "None", ")", ")", ":", "random", ".", "seed", "(", "seed", ")", "if", "(", "not", "(", "0", "<=", "p", "<=", "1", ")", ")", ":", "raise", "ValueError", "(", "'p must be in [0,1]'", ")", "cs", "=", "[", "'d'", "]", "for", "i", "in", "range", "(", "1", ",", "n", ")", ":", "if", "(", "random", ".", "random", "(", ")", "<", "p", ")", ":", "cs", ".", "append", "(", "'d'", ")", "else", ":", "cs", ".", "append", "(", "'i'", ")", "return", "cs" ]
create a random threshold sequence of size n .
train
false
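A quick call sketch; with a fixed seed the sequence is reproducible, always starts with 'd', and each later entry is 'd' with probability p (the printed output shown is only indicative):

    cs = random_threshold_sequence(6, 0.5, seed=42)
    print(cs)    # e.g. ['d', 'd', 'i', 'd', 'i', 'i']; exact output depends on the RNG
    assert cs[0] == 'd' and len(cs) == 6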
4,502
def rationalize(tokens, local_dict, global_dict):
    result = []
    passed_float = False
    for (toknum, tokval) in tokens:
        if (toknum == NAME):
            if (tokval == 'Float'):
                passed_float = True
                tokval = 'Rational'
            result.append((toknum, tokval))
        elif ((passed_float == True) and (toknum == NUMBER)):
            passed_float = False
            result.append((STRING, tokval))
        else:
            result.append((toknum, tokval))
    return result
[ "def", "rationalize", "(", "tokens", ",", "local_dict", ",", "global_dict", ")", ":", "result", "=", "[", "]", "passed_float", "=", "False", "for", "(", "toknum", ",", "tokval", ")", "in", "tokens", ":", "if", "(", "toknum", "==", "NAME", ")", ":", "if", "(", "tokval", "==", "'Float'", ")", ":", "passed_float", "=", "True", "tokval", "=", "'Rational'", "result", ".", "append", "(", "(", "toknum", ",", "tokval", ")", ")", "elif", "(", "(", "passed_float", "==", "True", ")", "and", "(", "toknum", "==", "NUMBER", ")", ")", ":", "passed_float", "=", "False", "result", ".", "append", "(", "(", "STRING", ",", "tokval", ")", ")", "else", ":", "result", ".", "append", "(", "(", "toknum", ",", "tokval", ")", ")", "return", "result" ]
helps identifying a rational number from a float value by using a continued fraction .
train
false
4,503
def buildNestedNetwork():
    N = FeedForwardNetwork('outer')
    a = LinearLayer(1, name='a')
    b = LinearLayer(2, name='b')
    c = buildNetwork(2, 3, 1)
    c.name = 'inner'
    N.addInputModule(a)
    N.addModule(c)
    N.addOutputModule(b)
    N.addConnection(FullConnection(a, b))
    N.addConnection(FullConnection(b, c))
    N.sortModules()
    return N
[ "def", "buildNestedNetwork", "(", ")", ":", "N", "=", "FeedForwardNetwork", "(", "'outer'", ")", "a", "=", "LinearLayer", "(", "1", ",", "name", "=", "'a'", ")", "b", "=", "LinearLayer", "(", "2", ",", "name", "=", "'b'", ")", "c", "=", "buildNetwork", "(", "2", ",", "3", ",", "1", ")", "c", ".", "name", "=", "'inner'", "N", ".", "addInputModule", "(", "a", ")", "N", ".", "addModule", "(", "c", ")", "N", ".", "addOutputModule", "(", "b", ")", "N", ".", "addConnection", "(", "FullConnection", "(", "a", ",", "b", ")", ")", "N", ".", "addConnection", "(", "FullConnection", "(", "b", ",", "c", ")", ")", "N", ".", "sortModules", "(", ")", "return", "N" ]
build a nested network .
train
false
4,505
def get_detect_model():
    (x, conv_layer, conv_vars) = convolutional_layers()
    W_fc1 = weight_variable([((8 * 32) * 128), 2048])
    W_conv1 = tf.reshape(W_fc1, [8, 32, 128, 2048])
    b_fc1 = bias_variable([2048])
    h_conv1 = tf.nn.relu((conv2d(conv_layer, W_conv1, stride=(1, 1), padding='VALID') + b_fc1))
    W_fc2 = weight_variable([2048, (1 + (7 * len(common.CHARS)))])
    W_conv2 = tf.reshape(W_fc2, [1, 1, 2048, (1 + (7 * len(common.CHARS)))])
    b_fc2 = bias_variable([(1 + (7 * len(common.CHARS)))])
    h_conv2 = (conv2d(h_conv1, W_conv2) + b_fc2)
    return (x, h_conv2, (conv_vars + [W_fc1, b_fc1, W_fc2, b_fc2]))
[ "def", "get_detect_model", "(", ")", ":", "(", "x", ",", "conv_layer", ",", "conv_vars", ")", "=", "convolutional_layers", "(", ")", "W_fc1", "=", "weight_variable", "(", "[", "(", "(", "8", "*", "32", ")", "*", "128", ")", ",", "2048", "]", ")", "W_conv1", "=", "tf", ".", "reshape", "(", "W_fc1", ",", "[", "8", ",", "32", ",", "128", ",", "2048", "]", ")", "b_fc1", "=", "bias_variable", "(", "[", "2048", "]", ")", "h_conv1", "=", "tf", ".", "nn", ".", "relu", "(", "(", "conv2d", "(", "conv_layer", ",", "W_conv1", ",", "stride", "=", "(", "1", ",", "1", ")", ",", "padding", "=", "'VALID'", ")", "+", "b_fc1", ")", ")", "W_fc2", "=", "weight_variable", "(", "[", "2048", ",", "(", "1", "+", "(", "7", "*", "len", "(", "common", ".", "CHARS", ")", ")", ")", "]", ")", "W_conv2", "=", "tf", ".", "reshape", "(", "W_fc2", ",", "[", "1", ",", "1", ",", "2048", ",", "(", "1", "+", "(", "7", "*", "len", "(", "common", ".", "CHARS", ")", ")", ")", "]", ")", "b_fc2", "=", "bias_variable", "(", "[", "(", "1", "+", "(", "7", "*", "len", "(", "common", ".", "CHARS", ")", ")", ")", "]", ")", "h_conv2", "=", "(", "conv2d", "(", "h_conv1", ",", "W_conv2", ")", "+", "b_fc2", ")", "return", "(", "x", ",", "h_conv2", ",", "(", "conv_vars", "+", "[", "W_fc1", ",", "b_fc1", ",", "W_fc2", ",", "b_fc2", "]", ")", ")" ]
the same as the training model .
train
false
4,507
def layout_title(layout):
    for child in layout.children:
        if isinstance(child, Title):
            return u' '.join([node.data for node in get_nodes(child, Text)])
[ "def", "layout_title", "(", "layout", ")", ":", "for", "child", "in", "layout", ".", "children", ":", "if", "isinstance", "(", "child", ",", "Title", ")", ":", "return", "u' '", ".", "join", "(", "[", "node", ".", "data", "for", "node", "in", "get_nodes", "(", "child", ",", "Text", ")", "]", ")" ]
try to return the layouts title as string .
train
false
4,508
def is_signed(file_path):
    try:
        with zipfile.ZipFile(file_path, mode='r') as zf:
            filenames = set(zf.namelist())
    except (zipfile.BadZipfile, IOError):
        filenames = set()
    return set([u'META-INF/mozilla.rsa', u'META-INF/mozilla.sf', u'META-INF/manifest.mf']).issubset(filenames)
[ "def", "is_signed", "(", "file_path", ")", ":", "try", ":", "with", "zipfile", ".", "ZipFile", "(", "file_path", ",", "mode", "=", "'r'", ")", "as", "zf", ":", "filenames", "=", "set", "(", "zf", ".", "namelist", "(", ")", ")", "except", "(", "zipfile", ".", "BadZipfile", ",", "IOError", ")", ":", "filenames", "=", "set", "(", ")", "return", "set", "(", "[", "u'META-INF/mozilla.rsa'", ",", "u'META-INF/mozilla.sf'", ",", "u'META-INF/manifest.mf'", "]", ")", ".", "issubset", "(", "filenames", ")" ]
return true if the file has been signed .
train
false
4,509
def _GetOrCreateTargetByName(targets, target_name):
    if (target_name in targets):
        return (False, targets[target_name])
    target = Target(target_name)
    targets[target_name] = target
    return (True, target)
[ "def", "_GetOrCreateTargetByName", "(", "targets", ",", "target_name", ")", ":", "if", "(", "target_name", "in", "targets", ")", ":", "return", "(", "False", ",", "targets", "[", "target_name", "]", ")", "target", "=", "Target", "(", "target_name", ")", "targets", "[", "target_name", "]", "=", "target", "return", "(", "True", ",", "target", ")" ]
creates or returns the target at targets[target_name] .
train
false
4,510
def haddr_to_str(addr):
    if (addr is None):
        return 'None'
    try:
        return addrconv.mac.bin_to_text(addr)
    except:
        raise AssertionError
[ "def", "haddr_to_str", "(", "addr", ")", ":", "if", "(", "addr", "is", "None", ")", ":", "return", "'None'", "try", ":", "return", "addrconv", ".", "mac", ".", "bin_to_text", "(", "addr", ")", "except", ":", "raise", "AssertionError" ]
format mac address in internal representation into human readable form .
train
false
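For instance (assuming the addrconv.mac helper the function wraps formats colon-separated lowercase hex, as in Ryu):

    haddr_to_str(b'\x01\x23\x45\x67\x89\xab')   # -> '01:23:45:67:89:ab'
    haddr_to_str(None)                          # -> 'None'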
4,511
def jdepend(registry, xml_parent, data):
    jdepend = XML.SubElement(xml_parent, 'hudson.plugins.jdepend.JDependRecorder')
    mapping = [('file', 'configuredJDependFile', None)]
    helpers.convert_mapping_to_xml(jdepend, data, mapping, fail_required=True)
[ "def", "jdepend", "(", "registry", ",", "xml_parent", ",", "data", ")", ":", "jdepend", "=", "XML", ".", "SubElement", "(", "xml_parent", ",", "'hudson.plugins.jdepend.JDependRecorder'", ")", "mapping", "=", "[", "(", "'file'", ",", "'configuredJDependFile'", ",", "None", ")", "]", "helpers", ".", "convert_mapping_to_xml", "(", "jdepend", ",", "data", ",", "mapping", ",", "fail_required", "=", "True", ")" ]
yaml: jdepend publish jdepend report requires the :jenkins-wiki:jdepend plugin <jdepend+plugin> .
train
false
4,513
def fahrenheit2celsius(temp): return ((5.0 / 9.0) * (temp - 32))
[ "def", "fahrenheit2celsius", "(", "temp", ")", ":", "return", "(", "(", "5.0", "/", "9.0", ")", "*", "(", "temp", "-", "32", ")", ")" ]
returns temperature in celsius .
train
false
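A one-line sanity check of the conversion formula above:

    fahrenheit2celsius(212)   # -> (5.0/9.0) * 180 == 100.0 (boiling point)
    fahrenheit2celsius(32)    # -> 0.0 (freezing point)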
4,515
def oo_split(string, separator=','):
    if isinstance(string, list):
        return string
    return string.split(separator)
[ "def", "oo_split", "(", "string", ",", "separator", "=", "','", ")", ":", "if", "isinstance", "(", "string", ",", "list", ")", ":", "return", "string", "return", "string", ".", "split", "(", "separator", ")" ]
this splits the input string into a list .
train
false
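Illustrative calls:

    oo_split('a,b,c')       # -> ['a', 'b', 'c']
    oo_split('a b', ' ')    # -> ['a', 'b']
    oo_split(['x', 'y'])    # -> ['x', 'y'] (lists pass through unchanged)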
4,517
def getDictionaryWithoutList(dictionary, withoutList):
    dictionaryWithoutList = {}
    for key in dictionary:
        if (key not in withoutList):
            dictionaryWithoutList[key] = dictionary[key]
    return dictionaryWithoutList
[ "def", "getDictionaryWithoutList", "(", "dictionary", ",", "withoutList", ")", ":", "dictionaryWithoutList", "=", "{", "}", "for", "key", "in", "dictionary", ":", "if", "(", "key", "not", "in", "withoutList", ")", ":", "dictionaryWithoutList", "[", "key", "]", "=", "dictionary", "[", "key", "]", "return", "dictionaryWithoutList" ]
get the dictionary without the keys in the list .
train
false
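For example:

    getDictionaryWithoutList({'a': 1, 'b': 2, 'c': 3}, ['b'])
    # -> {'a': 1, 'c': 3}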
4,518
def replace_all(text, terms):
    for (_from, _to) in terms.items():
        text = text.replace(_from, _to)
    return text
[ "def", "replace_all", "(", "text", ",", "terms", ")", ":", "for", "(", "_from", ",", "_to", ")", "in", "terms", ".", "items", "(", ")", ":", "text", "=", "text", ".", "replace", "(", "_from", ",", "_to", ")", "return", "text" ]
replaces all terms contained in a dict .
train
false
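For example (replacements are applied in dict iteration order, so overlapping terms can interact):

    replace_all('the cat sat', {'cat': 'dog', 'sat': 'stood'})
    # -> 'the dog stood'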
4,519
def add_backtrack_keys(products):
    for (p_k, p_v) in products.iteritems():
        p_v['key'] = p_k
        for (c_k, c_v) in p_v['categories'].iteritems():
            c_v['key'] = c_k
[ "def", "add_backtrack_keys", "(", "products", ")", ":", "for", "(", "p_k", ",", "p_v", ")", "in", "products", ".", "iteritems", "(", ")", ":", "p_v", "[", "'key'", "]", "=", "p_k", "for", "(", "c_k", ",", "c_v", ")", "in", "p_v", "[", "'categories'", "]", ".", "iteritems", "(", ")", ":", "c_v", "[", "'key'", "]", "=", "c_k" ]
insert key keys so we can go from product or category back to key .
train
false
4,520
def test_preprocessor_simple(): obj = macroexpand(tokenize('(test "one" "two")')[0], HyASTCompiler(__name__)) assert (obj == HyList(['one', 'two'])) assert (type(obj) == HyList)
[ "def", "test_preprocessor_simple", "(", ")", ":", "obj", "=", "macroexpand", "(", "tokenize", "(", "'(test \"one\" \"two\")'", ")", "[", "0", "]", ",", "HyASTCompiler", "(", "__name__", ")", ")", "assert", "(", "obj", "==", "HyList", "(", "[", "'one'", ",", "'two'", "]", ")", ")", "assert", "(", "type", "(", "obj", ")", "==", "HyList", ")" ]
test basic macro expansion .
train
false
4,521
def test_bc_sk_estimator(): check_estimator(BalanceCascade)
[ "def", "test_bc_sk_estimator", "(", ")", ":", "check_estimator", "(", "BalanceCascade", ")" ]
test the sklearn estimator compatibility .
train
false
4,522
def to_progress_instance(progress): if callable(progress): return CallableRemoteProgress(progress) elif (progress is None): return RemoteProgress() else: return progress
[ "def", "to_progress_instance", "(", "progress", ")", ":", "if", "callable", "(", "progress", ")", ":", "return", "CallableRemoteProgress", "(", "progress", ")", "elif", "(", "progress", "is", "None", ")", ":", "return", "RemoteProgress", "(", ")", "else", ":", "return", "progress" ]
given a progress callable or instance, return a suitable object derived from remoteprogress() .
train
false
4,524
@utils.arg('flavor', metavar='<flavor>', help=_('Name or ID of flavor.')) @utils.arg('action', metavar='<action>', choices=['set', 'unset'], help=_("Actions: 'set' or 'unset'.")) @utils.arg('metadata', metavar='<key=value>', nargs='+', action='append', default=[], help=_('Extra_specs to set/unset (only key is necessary on unset).')) def do_flavor_key(cs, args): flavor = _find_flavor(cs, args.flavor) keypair = _extract_metadata(args) if (args.action == 'set'): flavor.set_keys(keypair) elif (args.action == 'unset'): flavor.unset_keys(keypair.keys())
[ "@", "utils", ".", "arg", "(", "'flavor'", ",", "metavar", "=", "'<flavor>'", ",", "help", "=", "_", "(", "'Name or ID of flavor.'", ")", ")", "@", "utils", ".", "arg", "(", "'action'", ",", "metavar", "=", "'<action>'", ",", "choices", "=", "[", "'set'", ",", "'unset'", "]", ",", "help", "=", "_", "(", "\"Actions: 'set' or 'unset'.\"", ")", ")", "@", "utils", ".", "arg", "(", "'metadata'", ",", "metavar", "=", "'<key=value>'", ",", "nargs", "=", "'+'", ",", "action", "=", "'append'", ",", "default", "=", "[", "]", ",", "help", "=", "_", "(", "'Extra_specs to set/unset (only key is necessary on unset).'", ")", ")", "def", "do_flavor_key", "(", "cs", ",", "args", ")", ":", "flavor", "=", "_find_flavor", "(", "cs", ",", "args", ".", "flavor", ")", "keypair", "=", "_extract_metadata", "(", "args", ")", "if", "(", "args", ".", "action", "==", "'set'", ")", ":", "flavor", ".", "set_keys", "(", "keypair", ")", "elif", "(", "args", ".", "action", "==", "'unset'", ")", ":", "flavor", ".", "unset_keys", "(", "keypair", ".", "keys", "(", ")", ")" ]
set or unset extra_spec for a flavor .
train
false
4,525
def setUp(): Directory.CreateDirectory(DLLS_DIR) cleanUp() File.Copy((IP_DIR + '\\IronPython.dll'), (DLLS_DIR + '\\IronPython.dll')) Directory.SetCurrentDirectory(DLLS_DIR) okAssemblies(50) dupAssemblies(5) overrideNative() corruptDLL() unmanagedDLL() dllVsExe() exeOnly() textFiles() uniqueDLLNames() File.Delete((DLLS_DIR + '\\IronPython.dll'))
[ "def", "setUp", "(", ")", ":", "Directory", ".", "CreateDirectory", "(", "DLLS_DIR", ")", "cleanUp", "(", ")", "File", ".", "Copy", "(", "(", "IP_DIR", "+", "'\\\\IronPython.dll'", ")", ",", "(", "DLLS_DIR", "+", "'\\\\IronPython.dll'", ")", ")", "Directory", ".", "SetCurrentDirectory", "(", "DLLS_DIR", ")", "okAssemblies", "(", "50", ")", "dupAssemblies", "(", "5", ")", "overrideNative", "(", ")", "corruptDLL", "(", ")", "unmanagedDLL", "(", ")", "dllVsExe", "(", ")", "exeOnly", "(", ")", "textFiles", "(", ")", "uniqueDLLNames", "(", ")", "File", ".", "Delete", "(", "(", "DLLS_DIR", "+", "'\\\\IronPython.dll'", ")", ")" ]
set up the DLLs test directory by copying IronPython.dll into it and generating the assorted good and bad test assemblies .
train
false
4,526
def make_gax_subscriber_api(credentials=None, host=None): if (credentials is None): channel = insecure_channel(host) else: channel = make_secure_channel(credentials, DEFAULT_USER_AGENT, SubscriberClient.SERVICE_ADDRESS) return SubscriberClient(channel=channel)
[ "def", "make_gax_subscriber_api", "(", "credentials", "=", "None", ",", "host", "=", "None", ")", ":", "if", "(", "credentials", "is", "None", ")", ":", "channel", "=", "insecure_channel", "(", "host", ")", "else", ":", "channel", "=", "make_secure_channel", "(", "credentials", ",", "DEFAULT_USER_AGENT", ",", "SubscriberClient", ".", "SERVICE_ADDRESS", ")", "return", "SubscriberClient", "(", "channel", "=", "channel", ")" ]
create an instance of the gax subscriber api .
train
false
4,527
def points_for_interval(interval): range = time_range_by_interval[interval] interval = timedelta_by_name(interval) return (range.total_seconds() / interval.total_seconds())
[ "def", "points_for_interval", "(", "interval", ")", ":", "range", "=", "time_range_by_interval", "[", "interval", "]", "interval", "=", "timedelta_by_name", "(", "interval", ")", "return", "(", "range", ".", "total_seconds", "(", ")", "/", "interval", ".", "total_seconds", "(", ")", ")" ]
calculate the number of data points to render for a given interval .
train
false
4,530
def filter_section(context, section): return False
[ "def", "filter_section", "(", "context", ",", "section", ")", ":", "return", "False" ]
test filter section .
train
false
4,532
def verify_cuda_ndarray_interface(obj): require_cuda_ndarray(obj) def requires_attr(attr, typ): if (not hasattr(obj, attr)): raise AttributeError(attr) if (not isinstance(getattr(obj, attr), typ)): raise AttributeError(('%s must be of type %s' % (attr, typ))) requires_attr('shape', tuple) requires_attr('strides', tuple) requires_attr('dtype', np.dtype) requires_attr('size', (int, long))
[ "def", "verify_cuda_ndarray_interface", "(", "obj", ")", ":", "require_cuda_ndarray", "(", "obj", ")", "def", "requires_attr", "(", "attr", ",", "typ", ")", ":", "if", "(", "not", "hasattr", "(", "obj", ",", "attr", ")", ")", ":", "raise", "AttributeError", "(", "attr", ")", "if", "(", "not", "isinstance", "(", "getattr", "(", "obj", ",", "attr", ")", ",", "typ", ")", ")", ":", "raise", "AttributeError", "(", "(", "'%s must be of type %s'", "%", "(", "attr", ",", "typ", ")", ")", ")", "requires_attr", "(", "'shape'", ",", "tuple", ")", "requires_attr", "(", "'strides'", ",", "tuple", ")", "requires_attr", "(", "'dtype'", ",", "np", ".", "dtype", ")", "requires_attr", "(", "'size'", ",", "(", "int", ",", "long", ")", ")" ]
verify the cuda ndarray interface for an obj .
train
false
4,533
def create_missing_metrics_perm(view_menu_set): logging.info(u'Creating missing metrics permissions') metrics = [] for model in [models.SqlMetric, models.DruidMetric]: metrics += list(db.session.query(model).all()) for metric in metrics: if (metric.is_restricted and metric.perm and (metric.perm not in view_menu_set)): merge_perm(sm, u'metric_access', metric.perm)
[ "def", "create_missing_metrics_perm", "(", "view_menu_set", ")", ":", "logging", ".", "info", "(", "u'Creating missing metrics permissions'", ")", "metrics", "=", "[", "]", "for", "model", "in", "[", "models", ".", "SqlMetric", ",", "models", ".", "DruidMetric", "]", ":", "metrics", "+=", "list", "(", "db", ".", "session", ".", "query", "(", "model", ")", ".", "all", "(", ")", ")", "for", "metric", "in", "metrics", ":", "if", "(", "metric", ".", "is_restricted", "and", "metric", ".", "perm", "and", "(", "metric", ".", "perm", "not", "in", "view_menu_set", ")", ")", ":", "merge_perm", "(", "sm", ",", "u'metric_access'", ",", "metric", ".", "perm", ")" ]
create permissions for restricted metrics .
train
false
4,534
def isCommaSeparatedEmailList(field_data, all_data): for supposed_email in field_data.split(','): try: isValidEmail(supposed_email.strip(), '') except ValidationError: raise ValidationError, gettext('Enter valid e-mail addresses separated by commas.')
[ "def", "isCommaSeparatedEmailList", "(", "field_data", ",", "all_data", ")", ":", "for", "supposed_email", "in", "field_data", ".", "split", "(", "','", ")", ":", "try", ":", "isValidEmail", "(", "supposed_email", ".", "strip", "(", ")", ",", "''", ")", "except", "ValidationError", ":", "raise", "ValidationError", ",", "gettext", "(", "'Enter valid e-mail addresses separated by commas.'", ")" ]
checks that field_data is a string of e-mail addresses separated by commas .
train
false
4,535
def m_len(target): from evennia.utils.ansi import ANSI_PARSER if (inherits_from(target, basestring) and ('|lt' in target)): return len(ANSI_PARSER.strip_mxp(target)) return len(target)
[ "def", "m_len", "(", "target", ")", ":", "from", "evennia", ".", "utils", ".", "ansi", "import", "ANSI_PARSER", "if", "(", "inherits_from", "(", "target", ",", "basestring", ")", "and", "(", "'|lt'", "in", "target", ")", ")", ":", "return", "len", "(", "ANSI_PARSER", ".", "strip_mxp", "(", "target", ")", ")", "return", "len", "(", "target", ")" ]
provides length checking for strings with mxp patterns .
train
false
4,539
@verbose def lcmv(evoked, forward, noise_cov, data_cov, reg=0.01, label=None, pick_ori=None, picks=None, rank=None, verbose=None): _check_reference(evoked) info = evoked.info data = evoked.data tmin = evoked.times[0] picks = _setup_picks(picks, info, forward, noise_cov) data = data[picks] stc = _apply_lcmv(data=data, info=info, tmin=tmin, forward=forward, noise_cov=noise_cov, data_cov=data_cov, reg=reg, label=label, picks=picks, rank=rank, pick_ori=pick_ori) return six.advance_iterator(stc)
[ "@", "verbose", "def", "lcmv", "(", "evoked", ",", "forward", ",", "noise_cov", ",", "data_cov", ",", "reg", "=", "0.01", ",", "label", "=", "None", ",", "pick_ori", "=", "None", ",", "picks", "=", "None", ",", "rank", "=", "None", ",", "verbose", "=", "None", ")", ":", "_check_reference", "(", "evoked", ")", "info", "=", "evoked", ".", "info", "data", "=", "evoked", ".", "data", "tmin", "=", "evoked", ".", "times", "[", "0", "]", "picks", "=", "_setup_picks", "(", "picks", ",", "info", ",", "forward", ",", "noise_cov", ")", "data", "=", "data", "[", "picks", "]", "stc", "=", "_apply_lcmv", "(", "data", "=", "data", ",", "info", "=", "info", ",", "tmin", "=", "tmin", ",", "forward", "=", "forward", ",", "noise_cov", "=", "noise_cov", ",", "data_cov", "=", "data_cov", ",", "reg", "=", "reg", ",", "label", "=", "label", ",", "picks", "=", "picks", ",", "rank", "=", "rank", ",", "pick_ori", "=", "pick_ori", ")", "return", "six", ".", "advance_iterator", "(", "stc", ")" ]
linearly constrained minimum variance beamformer .
train
false
4,540
def catalog_service(consul_url=None, service=None, **kwargs): ret = {} query_params = {} if (not consul_url): consul_url = _get_config() if (not consul_url): log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if (not service): raise SaltInvocationError('Required argument "service" is missing.') if ('dc' in kwargs): query_params['dc'] = kwargs['dc'] if ('tag' in kwargs): query_params['tag'] = kwargs['tag'] function = 'catalog/service/{0}'.format(service) ret = _query(consul_url=consul_url, function=function, query_params=query_params) return ret
[ "def", "catalog_service", "(", "consul_url", "=", "None", ",", "service", "=", "None", ",", "**", "kwargs", ")", ":", "ret", "=", "{", "}", "query_params", "=", "{", "}", "if", "(", "not", "consul_url", ")", ":", "consul_url", "=", "_get_config", "(", ")", "if", "(", "not", "consul_url", ")", ":", "log", ".", "error", "(", "'No Consul URL found.'", ")", "ret", "[", "'message'", "]", "=", "'No Consul URL found.'", "ret", "[", "'res'", "]", "=", "False", "return", "ret", "if", "(", "not", "service", ")", ":", "raise", "SaltInvocationError", "(", "'Required argument \"service\" is missing.'", ")", "if", "(", "'dc'", "in", "kwargs", ")", ":", "query_params", "[", "'dc'", "]", "=", "kwargs", "[", "'dc'", "]", "if", "(", "'tag'", "in", "kwargs", ")", ":", "query_params", "[", "'tag'", "]", "=", "kwargs", "[", "'tag'", "]", "function", "=", "'catalog/service/{0}'", ".", "format", "(", "service", ")", "ret", "=", "_query", "(", "consul_url", "=", "consul_url", ",", "function", "=", "function", ",", "query_params", "=", "query_params", ")", "return", "ret" ]
information about the registered service .
train
true
4,541
def RSI(ds, count, timeperiod=(- (2 ** 31))): return call_talib_with_ds(ds, count, talib.RSI, timeperiod)
[ "def", "RSI", "(", "ds", ",", "count", ",", "timeperiod", "=", "(", "-", "(", "2", "**", "31", ")", ")", ")", ":", "return", "call_talib_with_ds", "(", "ds", ",", "count", ",", "talib", ".", "RSI", ",", "timeperiod", ")" ]
relative strength index .
train
false
4,542
def combine_opts(global_config, prefix, local_config={}): prefixlen = len(prefix) subconf = {} for (key, value) in global_config.items(): if key.startswith(prefix): key = key[prefixlen:] subconf[key] = value subconf.update(local_config) return subconf
[ "def", "combine_opts", "(", "global_config", ",", "prefix", ",", "local_config", "=", "{", "}", ")", ":", "prefixlen", "=", "len", "(", "prefix", ")", "subconf", "=", "{", "}", "for", "(", "key", ",", "value", ")", "in", "global_config", ".", "items", "(", ")", ":", "if", "key", ".", "startswith", "(", "prefix", ")", ":", "key", "=", "key", "[", "prefixlen", ":", "]", "subconf", "[", "key", "]", "=", "value", "subconf", ".", "update", "(", "local_config", ")", "return", "subconf" ]
the master combiner: collect global config entries that match a prefix, strip the prefix, and merge in local overrides .
train
false
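A usage sketch, assuming combine_opts from the snippet above is in scope; prefixed global options are stripped of the prefix, then local options win on conflict:
global_config = {'db.host': 'localhost', 'db.port': '5432', 'cache.ttl': '60'}
merged = combine_opts(global_config, 'db.', {'host': 'db.internal'})
assert merged == {'host': 'db.internal', 'port': '5432'}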
4,543
def id_to_ec2_inst_id(instance_id): if (instance_id is None): return None elif uuidutils.is_uuid_like(instance_id): ctxt = context.get_admin_context() int_id = get_int_id_from_instance_uuid(ctxt, instance_id) return id_to_ec2_id(int_id) else: return id_to_ec2_id(instance_id)
[ "def", "id_to_ec2_inst_id", "(", "instance_id", ")", ":", "if", "(", "instance_id", "is", "None", ")", ":", "return", "None", "elif", "uuidutils", ".", "is_uuid_like", "(", "instance_id", ")", ":", "ctxt", "=", "context", ".", "get_admin_context", "(", ")", "int_id", "=", "get_int_id_from_instance_uuid", "(", "ctxt", ",", "instance_id", ")", "return", "id_to_ec2_id", "(", "int_id", ")", "else", ":", "return", "id_to_ec2_id", "(", "instance_id", ")" ]
get or create an ec2 instance id from uuid .
train
false
4,544
def _unpickle(fobj, filename='', mmap_mode=None): unpickler = NumpyUnpickler(filename, fobj, mmap_mode=mmap_mode) obj = None try: obj = unpickler.load() if unpickler.compat_mode: warnings.warn(("The file '%s' has been generated with a joblib version less than 0.10. Please regenerate this pickle file." % filename), DeprecationWarning, stacklevel=3) except UnicodeDecodeError as exc: if PY3_OR_LATER: new_exc = ValueError('You may be trying to read with python 3 a joblib pickle generated with python 2. This feature is not supported by joblib.') new_exc.__cause__ = exc raise new_exc raise return obj
[ "def", "_unpickle", "(", "fobj", ",", "filename", "=", "''", ",", "mmap_mode", "=", "None", ")", ":", "unpickler", "=", "NumpyUnpickler", "(", "filename", ",", "fobj", ",", "mmap_mode", "=", "mmap_mode", ")", "obj", "=", "None", "try", ":", "obj", "=", "unpickler", ".", "load", "(", ")", "if", "unpickler", ".", "compat_mode", ":", "warnings", ".", "warn", "(", "(", "\"The file '%s' has been generated with a joblib version less than 0.10. Please regenerate this pickle file.\"", "%", "filename", ")", ",", "DeprecationWarning", ",", "stacklevel", "=", "3", ")", "except", "UnicodeDecodeError", "as", "exc", ":", "if", "PY3_OR_LATER", ":", "new_exc", "=", "ValueError", "(", "'You may be trying to read with python 3 a joblib pickle generated with python 2. This feature is not supported by joblib.'", ")", "new_exc", ".", "__cause__", "=", "exc", "raise", "new_exc", "raise", "return", "obj" ]
internal unpickling function .
train
false
4,545
def _detect_unboundedness(R): s = generate_unique_node() G = nx.DiGraph() G.add_nodes_from(R) inf = R.graph['inf'] f_inf = float('inf') for u in R: for (v, e) in R[u].items(): w = f_inf for (k, e) in e.items(): if (e['capacity'] == inf): w = min(w, e['weight']) if (w != f_inf): G.add_edge(u, v, weight=w) if nx.negative_edge_cycle(G): raise nx.NetworkXUnbounded('Negative cost cycle of infinite capacity found. Min cost flow may be unbounded below.')
[ "def", "_detect_unboundedness", "(", "R", ")", ":", "s", "=", "generate_unique_node", "(", ")", "G", "=", "nx", ".", "DiGraph", "(", ")", "G", ".", "add_nodes_from", "(", "R", ")", "inf", "=", "R", ".", "graph", "[", "'inf'", "]", "f_inf", "=", "float", "(", "'inf'", ")", "for", "u", "in", "R", ":", "for", "(", "v", ",", "e", ")", "in", "R", "[", "u", "]", ".", "items", "(", ")", ":", "w", "=", "f_inf", "for", "(", "k", ",", "e", ")", "in", "e", ".", "items", "(", ")", ":", "if", "(", "e", "[", "'capacity'", "]", "==", "inf", ")", ":", "w", "=", "min", "(", "w", ",", "e", "[", "'weight'", "]", ")", "if", "(", "w", "!=", "f_inf", ")", ":", "G", ".", "add_edge", "(", "u", ",", "v", ",", "weight", "=", "w", ")", "if", "nx", ".", "negative_edge_cycle", "(", "G", ")", ":", "raise", "nx", ".", "NetworkXUnbounded", "(", "'Negative cost cycle of infinite capacity found. Min cost flow may be unbounded below.'", ")" ]
detect infinite-capacity negative cycles .
train
false
4,546
def putProfileSetting(name, value): global settingsDictionary if ((name in settingsDictionary) and settingsDictionary[name].isProfile()): settingsDictionary[name].setValue(value)
[ "def", "putProfileSetting", "(", "name", ",", "value", ")", ":", "global", "settingsDictionary", "if", "(", "(", "name", "in", "settingsDictionary", ")", "and", "settingsDictionary", "[", "name", "]", ".", "isProfile", "(", ")", ")", ":", "settingsDictionary", "[", "name", "]", ".", "setValue", "(", "value", ")" ]
store a certain value in a profile setting .
train
false
4,547
def backup_config(a_device): DEBUG = True perform_diff = False if DEBUG: print 'Retrieve device configuration via SSH: {}\n'.format(a_device.device_name) ssh_connect = SSHConnection(a_device) ssh_connect.enable_mode() output = ssh_connect.send_command('show run\n') file_name = (a_device.device_name + '.txt') full_path = (global_params.CFGS_DIR + file_name) bup_file = ((global_params.CFGS_DIR + a_device.device_name) + '.old') if os.path.isfile(full_path): cmd_status = subprocess.call(['/bin/mv', full_path, bup_file]) perform_diff = True if DEBUG: print 'Writing configuration file to file system\n' with open(full_path, 'w') as f: f.write(output) a_device.cfg_file = file_name a_device.cfg_archive_time = timezone.make_aware(datetime.now(), timezone.get_current_timezone()) a_device.cfg_last_changed = int(snmp_wrapper(a_device, oid=global_params.OID_RUNNING_LAST_CHANGED)) a_device.save() if perform_diff: return find_diff(full_path, bup_file) else: return None
[ "def", "backup_config", "(", "a_device", ")", ":", "DEBUG", "=", "True", "perform_diff", "=", "False", "if", "DEBUG", ":", "print", "'Retrieve device configuration via SSH: {}\\n'", ".", "format", "(", "a_device", ".", "device_name", ")", "ssh_connect", "=", "SSHConnection", "(", "a_device", ")", "ssh_connect", ".", "enable_mode", "(", ")", "output", "=", "ssh_connect", ".", "send_command", "(", "'show run\\n'", ")", "file_name", "=", "(", "a_device", ".", "device_name", "+", "'.txt'", ")", "full_path", "=", "(", "global_params", ".", "CFGS_DIR", "+", "file_name", ")", "bup_file", "=", "(", "(", "global_params", ".", "CFGS_DIR", "+", "a_device", ".", "device_name", ")", "+", "'.old'", ")", "if", "os", ".", "path", ".", "isfile", "(", "full_path", ")", ":", "cmd_status", "=", "subprocess", ".", "call", "(", "[", "'/bin/mv'", ",", "full_path", ",", "bup_file", "]", ")", "perform_diff", "=", "True", "if", "DEBUG", ":", "print", "'Writing configuration file to file system\\n'", "with", "open", "(", "full_path", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "output", ")", "a_device", ".", "cfg_file", "=", "file_name", "a_device", ".", "cfg_archive_time", "=", "timezone", ".", "make_aware", "(", "datetime", ".", "now", "(", ")", ",", "timezone", ".", "get_current_timezone", "(", ")", ")", "a_device", ".", "cfg_last_changed", "=", "int", "(", "snmp_wrapper", "(", "a_device", ",", "oid", "=", "global_params", ".", "OID_RUNNING_LAST_CHANGED", ")", ")", "a_device", ".", "save", "(", ")", "if", "perform_diff", ":", "return", "find_diff", "(", "full_path", ",", "bup_file", ")", "else", ":", "return", "None" ]
retrieve configuration from network device .
train
false
4,548
@utils.arg('secgroup', metavar='<secgroup>', help=_('ID or name of security group.')) @deprecated_network def do_secgroup_list_rules(cs, args): secgroup = _get_secgroup(cs, args.secgroup) _print_secgroup_rules(secgroup.rules)
[ "@", "utils", ".", "arg", "(", "'secgroup'", ",", "metavar", "=", "'<secgroup>'", ",", "help", "=", "_", "(", "'ID or name of security group.'", ")", ")", "@", "deprecated_network", "def", "do_secgroup_list_rules", "(", "cs", ",", "args", ")", ":", "secgroup", "=", "_get_secgroup", "(", "cs", ",", "args", ".", "secgroup", ")", "_print_secgroup_rules", "(", "secgroup", ".", "rules", ")" ]
list rules for a security group .
train
false
4,550
def getpcmd(pid): if (os.name == 'nt'): cmd = ('wmic path win32_process where ProcessID=%s get Commandline' % (pid,)) with os.popen(cmd, 'r') as p: lines = [line for line in p.readlines() if (line.strip('\r\n ') != '')] if lines: (_, val) = lines return val else: try: with open('/proc/{0}/cmdline'.format(pid), 'r') as fh: return fh.read().replace('\x00', ' ').rstrip() except IOError: pass return '[PROCESS_WITH_PID={}]'.format(pid)
[ "def", "getpcmd", "(", "pid", ")", ":", "if", "(", "os", ".", "name", "==", "'nt'", ")", ":", "cmd", "=", "(", "'wmic path win32_process where ProcessID=%s get Commandline'", "%", "(", "pid", ",", ")", ")", "with", "os", ".", "popen", "(", "cmd", ",", "'r'", ")", "as", "p", ":", "lines", "=", "[", "line", "for", "line", "in", "p", ".", "readlines", "(", ")", "if", "(", "line", ".", "strip", "(", "'\\r\\n '", ")", "!=", "''", ")", "]", "if", "lines", ":", "(", "_", ",", "val", ")", "=", "lines", "return", "val", "else", ":", "try", ":", "with", "open", "(", "'/proc/{0}/cmdline'", ".", "format", "(", "pid", ")", ",", "'r'", ")", "as", "fh", ":", "return", "fh", ".", "read", "(", ")", ".", "replace", "(", "'\\x00'", ",", "' '", ")", ".", "rstrip", "(", ")", "except", "IOError", ":", "pass", "return", "'[PROCESS_WITH_PID={}]'", ".", "format", "(", "pid", ")" ]
returns the command line of the process with the given pid .
train
false
4,551
@receiver(badge_was_awarded) def notify_award_recipient(sender, award, **kwargs): if (not settings.STAGE): send_award_notification.delay(award)
[ "@", "receiver", "(", "badge_was_awarded", ")", "def", "notify_award_recipient", "(", "sender", ",", "award", ",", "**", "kwargs", ")", ":", "if", "(", "not", "settings", ".", "STAGE", ")", ":", "send_award_notification", ".", "delay", "(", "award", ")" ]
notifies the award recipient that they have received an award .
train
false
4,552
def apply_regressor(clf, embed_map, use_norm=False): wordvecs = OrderedDict() for (i, w) in enumerate(embed_map.vocab.keys()): if ('_' not in w): wordvecs[w] = clf.predict(embed_map[w]).astype('float32') if use_norm: wordvecs[w] /= norm(wordvecs[w]) return wordvecs
[ "def", "apply_regressor", "(", "clf", ",", "embed_map", ",", "use_norm", "=", "False", ")", ":", "wordvecs", "=", "OrderedDict", "(", ")", "for", "(", "i", ",", "w", ")", "in", "enumerate", "(", "embed_map", ".", "vocab", ".", "keys", "(", ")", ")", ":", "if", "(", "'_'", "not", "in", "w", ")", ":", "wordvecs", "[", "w", "]", "=", "clf", ".", "predict", "(", "embed_map", "[", "w", "]", ")", ".", "astype", "(", "'float32'", ")", "if", "use_norm", ":", "wordvecs", "[", "w", "]", "/=", "norm", "(", "wordvecs", "[", "w", "]", ")", "return", "wordvecs" ]
map words from word2vec into the rnn word space . function modified from: URL .
train
false
4,554
def get_config_id(kwargs=None, call=None): if (call == 'action'): raise SaltCloudException('The get_config_id function must be called with -f or --function.') if (kwargs is None): kwargs = {} name = kwargs.get('name', None) linode_id = kwargs.get('linode_id', None) if ((name is None) and (linode_id is None)): raise SaltCloudSystemExit("The get_config_id function requires either a 'name' or a 'linode_id' to be provided.") if (linode_id is None): linode_id = get_linode_id_from_name(name) response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA'] config_id = {'config_id': response[0]['ConfigID']} return config_id
[ "def", "get_config_id", "(", "kwargs", "=", "None", ",", "call", "=", "None", ")", ":", "if", "(", "call", "==", "'action'", ")", ":", "raise", "SaltCloudException", "(", "'The get_config_id function must be called with -f or --function.'", ")", "if", "(", "kwargs", "is", "None", ")", ":", "kwargs", "=", "{", "}", "name", "=", "kwargs", ".", "get", "(", "'name'", ",", "None", ")", "linode_id", "=", "kwargs", ".", "get", "(", "'linode_id'", ",", "None", ")", "if", "(", "(", "name", "is", "None", ")", "and", "(", "linode_id", "is", "None", ")", ")", ":", "raise", "SaltCloudSystemExit", "(", "\"The get_config_id function requires either a 'name' or a 'linode_id' to be provided.\"", ")", "if", "(", "linode_id", "is", "None", ")", ":", "linode_id", "=", "get_linode_id_from_name", "(", "name", ")", "response", "=", "_query", "(", "'linode'", ",", "'config.list'", ",", "args", "=", "{", "'LinodeID'", ":", "linode_id", "}", ")", "[", "'DATA'", "]", "config_id", "=", "{", "'config_id'", ":", "response", "[", "0", "]", "[", "'ConfigID'", "]", "}", "return", "config_id" ]
returns a config_id for a given linode .
train
true
4,555
def test_protocol_relative(): html = 'bad <a href="//ex.mp">link</a>' expect = 'bad link' eq_(expect, bleach.delinkify(html)) eq_(expect, bleach.delinkify(html, allow_relative=True)) eq_(html, bleach.delinkify(html, allow_domains='ex.mp'))
[ "def", "test_protocol_relative", "(", ")", ":", "html", "=", "'bad <a href=\"//ex.mp\">link</a>'", "expect", "=", "'bad link'", "eq_", "(", "expect", ",", "bleach", ".", "delinkify", "(", "html", ")", ")", "eq_", "(", "expect", ",", "bleach", ".", "delinkify", "(", "html", ",", "allow_relative", "=", "True", ")", ")", "eq_", "(", "html", ",", "bleach", ".", "delinkify", "(", "html", ",", "allow_domains", "=", "'ex.mp'", ")", ")" ]
protocol-relative links aren't relative .
train
false
4,556
def babi_handler(data_dir, task_number): task = task_list[(task_number - 1)] return BABI(path=data_dir, task=task, subset=subset)
[ "def", "babi_handler", "(", "data_dir", ",", "task_number", ")", ":", "task", "=", "task_list", "[", "(", "task_number", "-", "1", ")", "]", "return", "BABI", "(", "path", "=", "data_dir", ",", "task", "=", "task", ",", "subset", "=", "subset", ")" ]
a handle for the babi task .
train
false
4,557
def test_fnpickling_many(tmpdir): from ....tests.helper import pytest fn = str(tmpdir.join('test3.pickle')) obj3 = 328.3432 obj4 = 'blahblahfoo' fnpickle(obj3, fn) fnpickle(obj4, fn, append=True) res = fnunpickle(fn, number=(-1)) assert (len(res) == 2) assert (res[0] == obj3) assert (res[1] == obj4) fnpickle(obj4, fn, append=True) res = fnunpickle(fn, number=2) assert (len(res) == 2) with pytest.raises(EOFError): fnunpickle(fn, number=5)
[ "def", "test_fnpickling_many", "(", "tmpdir", ")", ":", "from", "...", ".", "tests", ".", "helper", "import", "pytest", "fn", "=", "str", "(", "tmpdir", ".", "join", "(", "'test3.pickle'", ")", ")", "obj3", "=", "328.3432", "obj4", "=", "'blahblahfoo'", "fnpickle", "(", "obj3", ",", "fn", ")", "fnpickle", "(", "obj4", ",", "fn", ",", "append", "=", "True", ")", "res", "=", "fnunpickle", "(", "fn", ",", "number", "=", "(", "-", "1", ")", ")", "assert", "(", "len", "(", "res", ")", "==", "2", ")", "assert", "(", "res", "[", "0", "]", "==", "obj3", ")", "assert", "(", "res", "[", "1", "]", "==", "obj4", ")", "fnpickle", "(", "obj4", ",", "fn", ",", "append", "=", "True", ")", "res", "=", "fnunpickle", "(", "fn", ",", "number", "=", "2", ")", "assert", "(", "len", "(", "res", ")", "==", "2", ")", "with", "pytest", ".", "raises", "(", "EOFError", ")", ":", "fnunpickle", "(", "fn", ",", "number", "=", "5", ")" ]
tests the fnpickle and fnunpickle functions' ability to pickle and unpickle multiple objects from a single file .
train
false
4,558
def _EndGroup(buffer, pos, end): return (-1)
[ "def", "_EndGroup", "(", "buffer", ",", "pos", ",", "end", ")", ":", "return", "(", "-", "1", ")" ]
skipping an end_group tag returns -1 to tell the parent loop to break .
train
false
4,561
def defer_or_apply(func): def closure(future, adapt): if isinstance(future, Deferred): d = Deferred() future.addCallback((lambda r: d.callback(adapt(r)))) return d return adapt(future) return closure
[ "def", "defer_or_apply", "(", "func", ")", ":", "def", "closure", "(", "future", ",", "adapt", ")", ":", "if", "isinstance", "(", "future", ",", "Deferred", ")", ":", "d", "=", "Deferred", "(", ")", "future", ".", "addCallback", "(", "(", "lambda", "r", ":", "d", ".", "callback", "(", "adapt", "(", "r", ")", ")", ")", ")", "return", "d", "return", "adapt", "(", "future", ")", "return", "closure" ]
decorator to apply an adapter method to a result regardless of whether it is a deferred or a concrete response .
train
false
4,563
def make_projector_info(info, include_active=True): (proj, nproj, _) = make_projector(info['projs'], info['ch_names'], info['bads'], include_active) return (proj, nproj)
[ "def", "make_projector_info", "(", "info", ",", "include_active", "=", "True", ")", ":", "(", "proj", ",", "nproj", ",", "_", ")", "=", "make_projector", "(", "info", "[", "'projs'", "]", ",", "info", "[", "'ch_names'", "]", ",", "info", "[", "'bads'", "]", ",", "include_active", ")", "return", "(", "proj", ",", "nproj", ")" ]
make an ssp operator using the measurement info .
train
false
4,564
def change_password(username, password, uid=None): if (uid is None): user = list_users() uid = user[username]['index'] if uid: return __execute_cmd('config -g cfgUserAdmin -o cfgUserAdminPassword -i {0} {1}'.format(uid, password)) else: log.warning("'{0}' does not exist".format(username)) return False return True
[ "def", "change_password", "(", "username", ",", "password", ",", "uid", "=", "None", ")", ":", "if", "(", "uid", "is", "None", ")", ":", "user", "=", "list_users", "(", ")", "uid", "=", "user", "[", "username", "]", "[", "'index'", "]", "if", "uid", ":", "return", "__execute_cmd", "(", "'config -g cfgUserAdmin -o cfgUserAdminPassword -i {0} {1}'", ".", "format", "(", "uid", ",", "password", ")", ")", "else", ":", "log", ".", "warning", "(", "\"'{0}' does not exist\"", ".", "format", "(", "username", ")", ")", "return", "False", "return", "True" ]
change the password of an existing user, looking up the uid from the username when it is not supplied .
train
true
4,565
@app.route('/sms/receive', methods=['POST']) def receive_sms(): sender = request.values.get('From') body = request.values.get('Body') message = 'Hello, {}, you said: {}'.format(sender, body) response = twiml.Response() response.message(message) return (str(response), 200, {'Content-Type': 'application/xml'})
[ "@", "app", ".", "route", "(", "'/sms/receive'", ",", "methods", "=", "[", "'POST'", "]", ")", "def", "receive_sms", "(", ")", ":", "sender", "=", "request", ".", "values", ".", "get", "(", "'From'", ")", "body", "=", "request", ".", "values", ".", "get", "(", "'Body'", ")", "message", "=", "'Hello, {}, you said: {}'", ".", "format", "(", "sender", ",", "body", ")", "response", "=", "twiml", ".", "Response", "(", ")", "response", ".", "message", "(", "message", ")", "return", "(", "str", "(", "response", ")", ",", "200", ",", "{", "'Content-Type'", ":", "'application/xml'", "}", ")" ]
receives an sms message and replies with a simple greeting .
train
false
4,567
def _get_raw_path(src, dst): if (len(path_map) == 0): _calc_paths() if (src is dst): return [] if (path_map[src][dst][0] is None): return None intermediate = path_map[src][dst][1] if (intermediate is None): return [] return ((_get_raw_path(src, intermediate) + [intermediate]) + _get_raw_path(intermediate, dst))
[ "def", "_get_raw_path", "(", "src", ",", "dst", ")", ":", "if", "(", "len", "(", "path_map", ")", "==", "0", ")", ":", "_calc_paths", "(", ")", "if", "(", "src", "is", "dst", ")", ":", "return", "[", "]", "if", "(", "path_map", "[", "src", "]", "[", "dst", "]", "[", "0", "]", "is", "None", ")", ":", "return", "None", "intermediate", "=", "path_map", "[", "src", "]", "[", "dst", "]", "[", "1", "]", "if", "(", "intermediate", "is", "None", ")", ":", "return", "[", "]", "return", "(", "(", "_get_raw_path", "(", "src", ",", "intermediate", ")", "+", "[", "intermediate", "]", ")", "+", "_get_raw_path", "(", "intermediate", ",", "dst", ")", ")" ]
recursively compute a raw path between src and dst from the precomputed path map .
train
false
4,569
def get_dtype_kinds(l): typs = set() for arr in l: dtype = arr.dtype if is_categorical_dtype(dtype): typ = 'category' elif is_sparse(arr): typ = 'sparse' elif is_datetimetz(arr): typ = str(arr.dtype) elif is_datetime64_dtype(dtype): typ = 'datetime' elif is_timedelta64_dtype(dtype): typ = 'timedelta' elif is_object_dtype(dtype): typ = 'object' elif is_bool_dtype(dtype): typ = 'bool' elif is_period_dtype(dtype): typ = str(arr.dtype) else: typ = dtype.kind typs.add(typ) return typs
[ "def", "get_dtype_kinds", "(", "l", ")", ":", "typs", "=", "set", "(", ")", "for", "arr", "in", "l", ":", "dtype", "=", "arr", ".", "dtype", "if", "is_categorical_dtype", "(", "dtype", ")", ":", "typ", "=", "'category'", "elif", "is_sparse", "(", "arr", ")", ":", "typ", "=", "'sparse'", "elif", "is_datetimetz", "(", "arr", ")", ":", "typ", "=", "str", "(", "arr", ".", "dtype", ")", "elif", "is_datetime64_dtype", "(", "dtype", ")", ":", "typ", "=", "'datetime'", "elif", "is_timedelta64_dtype", "(", "dtype", ")", ":", "typ", "=", "'timedelta'", "elif", "is_object_dtype", "(", "dtype", ")", ":", "typ", "=", "'object'", "elif", "is_bool_dtype", "(", "dtype", ")", ":", "typ", "=", "'bool'", "elif", "is_period_dtype", "(", "dtype", ")", ":", "typ", "=", "str", "(", "arr", ".", "dtype", ")", "else", ":", "typ", "=", "dtype", ".", "kind", "typs", ".", "add", "(", "typ", ")", "return", "typs" ]
parameters: l : list of arrays . returns: a set of the dtype kinds that exist in this list of arrays .
train
false
4,570
def diop_quadratic(eq, param=symbols('t', integer=True)): (var, coeff, diop_type) = classify_diop(eq, _dict=False) if (diop_type == 'binary_quadratic'): return _diop_quadratic(var, coeff, param)
[ "def", "diop_quadratic", "(", "eq", ",", "param", "=", "symbols", "(", "'t'", ",", "integer", "=", "True", ")", ")", ":", "(", "var", ",", "coeff", ",", "diop_type", ")", "=", "classify_diop", "(", "eq", ",", "_dict", "=", "False", ")", "if", "(", "diop_type", "==", "'binary_quadratic'", ")", ":", "return", "_diop_quadratic", "(", "var", ",", "coeff", ",", "param", ")" ]
solves quadratic diophantine equations .
train
false
4,572
def cartesian(arrays, out=None): arrays = [np.asarray(x) for x in arrays] shape = (len(x) for x in arrays) dtype = arrays[0].dtype ix = np.indices(shape) ix = ix.reshape(len(arrays), (-1)).T if (out is None): out = np.empty_like(ix, dtype=dtype) for (n, arr) in enumerate(arrays): out[:, n] = arrays[n][ix[:, n]] return out
[ "def", "cartesian", "(", "arrays", ",", "out", "=", "None", ")", ":", "arrays", "=", "[", "np", ".", "asarray", "(", "x", ")", "for", "x", "in", "arrays", "]", "shape", "=", "(", "len", "(", "x", ")", "for", "x", "in", "arrays", ")", "dtype", "=", "arrays", "[", "0", "]", ".", "dtype", "ix", "=", "np", ".", "indices", "(", "shape", ")", "ix", "=", "ix", ".", "reshape", "(", "len", "(", "arrays", ")", ",", "(", "-", "1", ")", ")", ".", "T", "if", "(", "out", "is", "None", ")", ":", "out", "=", "np", ".", "empty_like", "(", "ix", ",", "dtype", "=", "dtype", ")", "for", "(", "n", ",", "arr", ")", "in", "enumerate", "(", "arrays", ")", ":", "out", "[", ":", ",", "n", "]", "=", "arrays", "[", "n", "]", "[", "ix", "[", ":", ",", "n", "]", "]", "return", "out" ]
cartesian product of a list of arrays . returns out, where each line corresponds to one point of the product space .
train
true
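A worked example, assuming cartesian from the snippet above is in scope; each row of the result is one point of the product space:
import numpy as np
result = cartesian([np.array([1, 2]), np.array([4, 5])])
assert result.tolist() == [[1, 4], [1, 5], [2, 4], [2, 5]]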
4,573
@contextmanager def temporary_file(suffix=''): tempfile_stream = NamedTemporaryFile(suffix=suffix, delete=False) tempfile = tempfile_stream.name tempfile_stream.close() (yield tempfile) os.remove(tempfile)
[ "@", "contextmanager", "def", "temporary_file", "(", "suffix", "=", "''", ")", ":", "tempfile_stream", "=", "NamedTemporaryFile", "(", "suffix", "=", "suffix", ",", "delete", "=", "False", ")", "tempfile", "=", "tempfile_stream", ".", "name", "tempfile_stream", ".", "close", "(", ")", "(", "yield", "tempfile", ")", "os", ".", "remove", "(", "tempfile", ")" ]
this is a cross-platform temporary file creation context manager .
train
false
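A usage sketch, assuming temporary_file from the snippet above is in scope; the context manager yields a file name rather than a file object, and deletes the file on exit:
with temporary_file(suffix='.txt') as path:
    with open(path, 'w') as f:
        f.write('scratch data')
# the file at path no longer exists here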
4,574
def delete_replication(Bucket, region=None, key=None, keyid=None, profile=None): try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.delete_bucket_replication(Bucket=Bucket) return {'deleted': True, 'name': Bucket} except ClientError as e: return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
[ "def", "delete_replication", "(", "Bucket", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "conn", ".", "delete_bucket_replication", "(", "Bucket", "=", "Bucket", ")", "return", "{", "'deleted'", ":", "True", ",", "'name'", ":", "Bucket", "}", "except", "ClientError", "as", "e", ":", "return", "{", "'deleted'", ":", "False", ",", "'error'", ":", "__utils__", "[", "'boto3.get_error'", "]", "(", "e", ")", "}" ]
delete the replication config from the given bucket . returns {deleted: true} if the replication configuration was deleted and {deleted: false} if it was not .
train
true
4,575
def _get_requirements_to_disable(old_requirements, new_requirements): requirements_to_disable = [] for old_req in old_requirements: found_flag = False for req in new_requirements: if ((req['namespace'] == old_req.namespace) and (req['name'] == old_req.name)): found_flag = True break if (not found_flag): requirements_to_disable.append(old_req.id) return requirements_to_disable
[ "def", "_get_requirements_to_disable", "(", "old_requirements", ",", "new_requirements", ")", ":", "requirements_to_disable", "=", "[", "]", "for", "old_req", "in", "old_requirements", ":", "found_flag", "=", "False", "for", "req", "in", "new_requirements", ":", "if", "(", "(", "req", "[", "'namespace'", "]", "==", "old_req", ".", "namespace", ")", "and", "(", "req", "[", "'name'", "]", "==", "old_req", ".", "name", ")", ")", ":", "found_flag", "=", "True", "break", "if", "(", "not", "found_flag", ")", ":", "requirements_to_disable", ".", "append", "(", "old_req", ".", "id", ")", "return", "requirements_to_disable" ]
get the ids of creditrequirement entries that were deleted from the courseware and should be disabled .
train
false
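A small illustration with stand-in requirement objects (the namedtuple here is hypothetical, mimicking only the attributes the function reads); assumes _get_requirements_to_disable from the snippet above is in scope:
from collections import namedtuple
Req = namedtuple('Req', ['id', 'namespace', 'name'])  # hypothetical stand-in
old = [Req(1, 'grade', 'minimum_grade'), Req(2, 'proctoring', 'final_exam')]
new = [{'namespace': 'grade', 'name': 'minimum_grade'}]
assert _get_requirements_to_disable(old, new) == [2]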
4,576
def liveReceivers(receivers): for receiver in receivers: if isinstance(receiver, WEAKREF_TYPES): receiver = receiver() if (receiver is not None): (yield receiver) else: (yield receiver)
[ "def", "liveReceivers", "(", "receivers", ")", ":", "for", "receiver", "in", "receivers", ":", "if", "isinstance", "(", "receiver", ",", "WEAKREF_TYPES", ")", ":", "receiver", "=", "receiver", "(", ")", "if", "(", "receiver", "is", "not", "None", ")", ":", "(", "yield", "receiver", ")", "else", ":", "(", "yield", "receiver", ")" ]
filter a sequence of receivers to get resolved, live receivers .
train
true
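A sketch of the weak-reference resolution, assuming liveReceivers from the snippet above is in scope; the WEAKREF_TYPES definition below is an assumption, since the snippet does not show it:
import weakref
WEAKREF_TYPES = (weakref.ReferenceType,)  # assumed definition
def handler():
    pass
receivers = [weakref.ref(handler), handler]
# the weak reference is resolved; plain receivers pass through unchanged
assert list(liveReceivers(receivers)) == [handler, handler]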
4,577
def _render_login_template(login_url, continue_url, email, admin): if email: login_message = 'Logged in' else: login_message = 'Not logged in' email = 'test@example.com' admin_checked = ('checked' if admin else '') template_dict = {'email': cgi.escape(email, quote=True), 'admin_checked': admin_checked, 'login_message': login_message, 'login_url': cgi.escape(login_url, quote=True), 'continue_url': cgi.escape(continue_url, quote=True)} return (_LOGIN_TEMPLATE % template_dict)
[ "def", "_render_login_template", "(", "login_url", ",", "continue_url", ",", "email", ",", "admin", ")", ":", "if", "email", ":", "login_message", "=", "'Logged in'", "else", ":", "login_message", "=", "'Not logged in'", "email", "=", "'test@example.com'", "admin_checked", "=", "(", "'checked'", "if", "admin", "else", "''", ")", "template_dict", "=", "{", "'email'", ":", "cgi", ".", "escape", "(", "email", ",", "quote", "=", "True", ")", ",", "'admin_checked'", ":", "admin_checked", ",", "'login_message'", ":", "login_message", ",", "'login_url'", ":", "cgi", ".", "escape", "(", "login_url", ",", "quote", "=", "True", ")", ",", "'continue_url'", ":", "cgi", ".", "escape", "(", "continue_url", ",", "quote", "=", "True", ")", "}", "return", "(", "_LOGIN_TEMPLATE", "%", "template_dict", ")" ]
renders the login page .
train
false
4,578
def lenet_arg_scope(weight_decay=0.0): with slim.arg_scope([slim.conv2d, slim.fully_connected], weights_regularizer=slim.l2_regularizer(weight_decay), weights_initializer=tf.truncated_normal_initializer(stddev=0.1), activation_fn=tf.nn.relu) as sc: return sc
[ "def", "lenet_arg_scope", "(", "weight_decay", "=", "0.0", ")", ":", "with", "slim", ".", "arg_scope", "(", "[", "slim", ".", "conv2d", ",", "slim", ".", "fully_connected", "]", ",", "weights_regularizer", "=", "slim", ".", "l2_regularizer", "(", "weight_decay", ")", ",", "weights_initializer", "=", "tf", ".", "truncated_normal_initializer", "(", "stddev", "=", "0.1", ")", ",", "activation_fn", "=", "tf", ".", "nn", ".", "relu", ")", "as", "sc", ":", "return", "sc" ]
defines the default lenet argument scope .
train
false
4,579
def test_matrices_with_C_F_orders(): P_C = np.array([[0.5, 0.5], [0, 1]], order='C') P_F = np.array([[0.5, 0.5], [0, 1]], order='F') stationary_dist = [0.0, 1.0] computed_C_and_F = gth_solve(np.array([[1]])) assert_array_equal(computed_C_and_F, [1]) computed_C = gth_solve(P_C) computed_F = gth_solve(P_F) assert_array_equal(computed_C, stationary_dist) assert_array_equal(computed_F, stationary_dist)
[ "def", "test_matrices_with_C_F_orders", "(", ")", ":", "P_C", "=", "np", ".", "array", "(", "[", "[", "0.5", ",", "0.5", "]", ",", "[", "0", ",", "1", "]", "]", ",", "order", "=", "'C'", ")", "P_F", "=", "np", ".", "array", "(", "[", "[", "0.5", ",", "0.5", "]", ",", "[", "0", ",", "1", "]", "]", ",", "order", "=", "'F'", ")", "stationary_dist", "=", "[", "0.0", ",", "1.0", "]", "computed_C_and_F", "=", "gth_solve", "(", "np", ".", "array", "(", "[", "[", "1", "]", "]", ")", ")", "assert_array_equal", "(", "computed_C_and_F", ",", "[", "1", "]", ")", "computed_C", "=", "gth_solve", "(", "P_C", ")", "computed_F", "=", "gth_solve", "(", "P_F", ")", "assert_array_equal", "(", "computed_C", ",", "stationary_dist", ")", "assert_array_equal", "(", "computed_F", ",", "stationary_dist", ")" ]
test matrices with c- and f-contiguous orders . see the issue and fix on numba: github .
train
false
4,580
def process_config(config, schema=None, set_defaults=True): if (schema is None): schema = get_schema() resolver = RefResolver.from_schema(schema) validator = SchemaValidator(schema, resolver=resolver, format_checker=format_checker) if set_defaults: validator.VALIDATORS[u'properties'] = validate_properties_w_defaults try: errors = list(validator.iter_errors(config)) finally: validator.VALIDATORS[u'properties'] = jsonschema.Draft4Validator.VALIDATORS[u'properties'] for e in errors: set_error_message(e) e.json_pointer = (u'/' + u'/'.join(map(str, e.path))) return errors
[ "def", "process_config", "(", "config", ",", "schema", "=", "None", ",", "set_defaults", "=", "True", ")", ":", "if", "(", "schema", "is", "None", ")", ":", "schema", "=", "get_schema", "(", ")", "resolver", "=", "RefResolver", ".", "from_schema", "(", "schema", ")", "validator", "=", "SchemaValidator", "(", "schema", ",", "resolver", "=", "resolver", ",", "format_checker", "=", "format_checker", ")", "if", "set_defaults", ":", "validator", ".", "VALIDATORS", "[", "u'properties'", "]", "=", "validate_properties_w_defaults", "try", ":", "errors", "=", "list", "(", "validator", ".", "iter_errors", "(", "config", ")", ")", "finally", ":", "validator", ".", "VALIDATORS", "[", "u'properties'", "]", "=", "jsonschema", ".", "Draft4Validator", ".", "VALIDATORS", "[", "u'properties'", "]", "for", "e", "in", "errors", ":", "set_error_message", "(", "e", ")", "e", ".", "json_pointer", "=", "(", "u'/'", "+", "u'/'", ".", "join", "(", "map", "(", "str", ",", "e", ".", "path", ")", ")", ")", "return", "errors" ]
validates the config .
train
false