column                type           min      max
id_within_dataset     int64          1        55.5k
snippet               stringlengths  19       14.2k
tokens                listlengths    6        1.63k
nl                    stringlengths  6        352
split_within_dataset  stringclasses  1 value
is_duplicated         bool           2 classes
53,750
def __get_hosts_filename(): return __salt__['config.option']('hosts.file')
[ "def", "__get_hosts_filename", "(", ")", ":", "return", "__salt__", "[", "'config.option'", "]", "(", "'hosts.file'", ")" ]
return the path to the appropriate hosts file .
train
false
53,751
def collect_bears(bear_dirs, bear_globs, kinds, log_printer, warn_if_unused_glob=True):
    bears_found = tuple(([] for i in range(len(kinds))))
    bear_globs_with_bears = set()
    for (bear, glob) in icollect_bears(bear_dirs, bear_globs, kinds, log_printer):
        index = kinds.index(_get_kind(bear))
        bears_found[index].append(bear)
        bear_globs_with_bears.add(glob)
    if warn_if_unused_glob:
        _warn_if_unused_glob(log_printer, bear_globs, bear_globs_with_bears, "No bears matching '{}' were found. Make sure you have coala-bears installed or you have typed the name correctly.")
    return bears_found
[ "def", "collect_bears", "(", "bear_dirs", ",", "bear_globs", ",", "kinds", ",", "log_printer", ",", "warn_if_unused_glob", "=", "True", ")", ":", "bears_found", "=", "tuple", "(", "(", "[", "]", "for", "i", "in", "range", "(", "len", "(", "kinds", ")", ")", ")", ")", "bear_globs_with_bears", "=", "set", "(", ")", "for", "(", "bear", ",", "glob", ")", "in", "icollect_bears", "(", "bear_dirs", ",", "bear_globs", ",", "kinds", ",", "log_printer", ")", ":", "index", "=", "kinds", ".", "index", "(", "_get_kind", "(", "bear", ")", ")", "bears_found", "[", "index", "]", ".", "append", "(", "bear", ")", "bear_globs_with_bears", ".", "add", "(", "glob", ")", "if", "warn_if_unused_glob", ":", "_warn_if_unused_glob", "(", "log_printer", ",", "bear_globs", ",", "bear_globs_with_bears", ",", "\"No bears matching '{}' were found. Make sure you have coala-bears installed or you have typed the name correctly.\"", ")", "return", "bears_found" ]
collect all bears from bear directories that have a matching kind matching the given globs .
train
false
53,752
def _prep_stats_dict(values):
    stats = []
    d = values.get('stats', {})
    for (k, v) in d.iteritems():
        stat = models.ComputeNodeStat()
        stat['key'] = k
        stat['value'] = v
        stats.append(stat)
    values['stats'] = stats
[ "def", "_prep_stats_dict", "(", "values", ")", ":", "stats", "=", "[", "]", "d", "=", "values", ".", "get", "(", "'stats'", ",", "{", "}", ")", "for", "(", "k", ",", "v", ")", "in", "d", ".", "iteritems", "(", ")", ":", "stat", "=", "models", ".", "ComputeNodeStat", "(", ")", "stat", "[", "'key'", "]", "=", "k", "stat", "[", "'value'", "]", "=", "v", "stats", ".", "append", "(", "stat", ")", "values", "[", "'stats'", "]", "=", "stats" ]
make list of computenodestats .
train
false
53,753
def arcball_nearest_axis(point, axes):
    point = numpy.array(point, dtype=numpy.float64, copy=False)
    nearest = None
    mx = (-1.0)
    for axis in axes:
        t = numpy.dot(arcball_constrain_to_axis(point, axis), point)
        if (t > mx):
            nearest = axis
            mx = t
    return nearest
[ "def", "arcball_nearest_axis", "(", "point", ",", "axes", ")", ":", "point", "=", "numpy", ".", "array", "(", "point", ",", "dtype", "=", "numpy", ".", "float64", ",", "copy", "=", "False", ")", "nearest", "=", "None", "mx", "=", "(", "-", "1.0", ")", "for", "axis", "in", "axes", ":", "t", "=", "numpy", ".", "dot", "(", "arcball_constrain_to_axis", "(", "point", ",", "axis", ")", ",", "point", ")", "if", "(", "t", ">", "mx", ")", ":", "nearest", "=", "axis", "mx", "=", "t", "return", "nearest" ]
return axis , which arc is nearest to point .
train
true
53,755
def contains_nan(arr, node=None, var=None):
    if isinstance(arr, theano.gof.type._cdata_type):
        return False
    elif isinstance(arr, np.random.mtrand.RandomState):
        return False
    elif (var and getattr(var.tag, 'is_rng', False)):
        return False
    elif isinstance(arr, slice):
        return False
    elif (arr.size == 0):
        return False
    elif (cuda.cuda_available and isinstance(arr, cuda.CudaNdarray)):
        if (node and hasattr(theano.sandbox, 'rng_mrg') and isinstance(node.op, theano.sandbox.rng_mrg.GPU_mrg_uniform)):
            return False
        else:
            compile_gpu_func(True, False, False)
            return np.isnan(f_gpumin(arr.reshape(arr.size)))
    elif (pygpu_available and isinstance(arr, GpuArray)):
        return np.isnan(f_gpua_min(arr.reshape(arr.size)))
    return np.isnan(np.min(arr))
[ "def", "contains_nan", "(", "arr", ",", "node", "=", "None", ",", "var", "=", "None", ")", ":", "if", "isinstance", "(", "arr", ",", "theano", ".", "gof", ".", "type", ".", "_cdata_type", ")", ":", "return", "False", "elif", "isinstance", "(", "arr", ",", "np", ".", "random", ".", "mtrand", ".", "RandomState", ")", ":", "return", "False", "elif", "(", "var", "and", "getattr", "(", "var", ".", "tag", ",", "'is_rng'", ",", "False", ")", ")", ":", "return", "False", "elif", "isinstance", "(", "arr", ",", "slice", ")", ":", "return", "False", "elif", "(", "arr", ".", "size", "==", "0", ")", ":", "return", "False", "elif", "(", "cuda", ".", "cuda_available", "and", "isinstance", "(", "arr", ",", "cuda", ".", "CudaNdarray", ")", ")", ":", "if", "(", "node", "and", "hasattr", "(", "theano", ".", "sandbox", ",", "'rng_mrg'", ")", "and", "isinstance", "(", "node", ".", "op", ",", "theano", ".", "sandbox", ".", "rng_mrg", ".", "GPU_mrg_uniform", ")", ")", ":", "return", "False", "else", ":", "compile_gpu_func", "(", "True", ",", "False", ",", "False", ")", "return", "np", ".", "isnan", "(", "f_gpumin", "(", "arr", ".", "reshape", "(", "arr", ".", "size", ")", ")", ")", "elif", "(", "pygpu_available", "and", "isinstance", "(", "arr", ",", "GpuArray", ")", ")", ":", "return", "np", ".", "isnan", "(", "f_gpua_min", "(", "arr", ".", "reshape", "(", "arr", ".", "size", ")", ")", ")", "return", "np", ".", "isnan", "(", "np", ".", "min", "(", "arr", ")", ")" ]
test whether a numpy.ndarray contains any np.isnan value .
train
false
53,757
def wait_for_free_port(host, port, timeout=None):
    if (not host):
        raise ValueError("Host values of '' or None are not allowed.")
    if (timeout is None):
        timeout = free_port_timeout
    for trial in range(50):
        try:
            check_port(host, port, timeout=timeout)
        except IOError:
            time.sleep(timeout)
        else:
            return
    raise IOError(('Port %r not free on %r' % (port, host)))
[ "def", "wait_for_free_port", "(", "host", ",", "port", ",", "timeout", "=", "None", ")", ":", "if", "(", "not", "host", ")", ":", "raise", "ValueError", "(", "\"Host values of '' or None are not allowed.\"", ")", "if", "(", "timeout", "is", "None", ")", ":", "timeout", "=", "free_port_timeout", "for", "trial", "in", "range", "(", "50", ")", ":", "try", ":", "check_port", "(", "host", ",", "port", ",", "timeout", "=", "timeout", ")", "except", "IOError", ":", "time", ".", "sleep", "(", "timeout", ")", "else", ":", "return", "raise", "IOError", "(", "(", "'Port %r not free on %r'", "%", "(", "port", ",", "host", ")", ")", ")" ]
wait for the specified port to become free .
train
false
53,758
def offset_spines(offset=10, fig=None, ax=None):
    warn_msg = '`offset_spines` is deprecated and will be removed in v0.5'
    warnings.warn(warn_msg, UserWarning)
    if ((fig is None) and (ax is None)):
        axes = plt.gcf().axes
    elif (fig is not None):
        axes = fig.axes
    elif (ax is not None):
        axes = [ax]
    for ax_i in axes:
        for spine in ax_i.spines.values():
            _set_spine_position(spine, ('outward', offset))
[ "def", "offset_spines", "(", "offset", "=", "10", ",", "fig", "=", "None", ",", "ax", "=", "None", ")", ":", "warn_msg", "=", "'`offset_spines` is deprecated and will be removed in v0.5'", "warnings", ".", "warn", "(", "warn_msg", ",", "UserWarning", ")", "if", "(", "(", "fig", "is", "None", ")", "and", "(", "ax", "is", "None", ")", ")", ":", "axes", "=", "plt", ".", "gcf", "(", ")", ".", "axes", "elif", "(", "fig", "is", "not", "None", ")", ":", "axes", "=", "fig", ".", "axes", "elif", "(", "ax", "is", "not", "None", ")", ":", "axes", "=", "[", "ax", "]", "for", "ax_i", "in", "axes", ":", "for", "spine", "in", "ax_i", ".", "spines", ".", "values", "(", ")", ":", "_set_spine_position", "(", "spine", ",", "(", "'outward'", ",", "offset", ")", ")" ]
simple function to offset spines away from axes .
train
false
53,759
@verbose
def spatio_temporal_dist_connectivity(src, n_times, dist, verbose=None):
    if (src[0]['dist'] is None):
        raise RuntimeError('src must have distances included, consider using\nmne_add_patch_info with --dist argument')
    edges = sparse_block_diag([s['dist'][s['vertno'], :][:, s['vertno']] for s in src])
    edges.data[:] = np.less_equal(edges.data, dist)
    edges = edges.tocsr()
    edges.eliminate_zeros()
    edges = edges.tocoo()
    return _get_connectivity_from_edges(edges, n_times)
[ "@", "verbose", "def", "spatio_temporal_dist_connectivity", "(", "src", ",", "n_times", ",", "dist", ",", "verbose", "=", "None", ")", ":", "if", "(", "src", "[", "0", "]", "[", "'dist'", "]", "is", "None", ")", ":", "raise", "RuntimeError", "(", "'src must have distances included, consider using\\nmne_add_patch_info with --dist argument'", ")", "edges", "=", "sparse_block_diag", "(", "[", "s", "[", "'dist'", "]", "[", "s", "[", "'vertno'", "]", ",", ":", "]", "[", ":", ",", "s", "[", "'vertno'", "]", "]", "for", "s", "in", "src", "]", ")", "edges", ".", "data", "[", ":", "]", "=", "np", ".", "less_equal", "(", "edges", ".", "data", ",", "dist", ")", "edges", "=", "edges", ".", "tocsr", "(", ")", "edges", ".", "eliminate_zeros", "(", ")", "edges", "=", "edges", ".", "tocoo", "(", ")", "return", "_get_connectivity_from_edges", "(", "edges", ",", "n_times", ")" ]
compute connectivity from distances in a source space and time instants .
train
false
53,760
def security_group_rule_get_by_instance(context, instance_uuid): return IMPL.security_group_rule_get_by_instance(context, instance_uuid)
[ "def", "security_group_rule_get_by_instance", "(", "context", ",", "instance_uuid", ")", ":", "return", "IMPL", ".", "security_group_rule_get_by_instance", "(", "context", ",", "instance_uuid", ")" ]
get all rules for a given instance .
train
false
53,761
def get_font(section='main', option='font', font_size_delta=0):
    font = FONT_CACHE.get((section, option))
    if (font is None):
        families = CONF.get(section, (option + '/family'), None)
        if (families is None):
            return QFont()
        family = get_family(families)
        weight = QFont.Normal
        italic = CONF.get(section, (option + '/italic'), False)
        if CONF.get(section, (option + '/bold'), False):
            weight = QFont.Bold
        size = (CONF.get(section, (option + '/size'), 9) + font_size_delta)
        font = QFont(family, size, weight)
        font.setItalic(italic)
        FONT_CACHE[(section, option)] = font
    size = (CONF.get(section, (option + '/size'), 9) + font_size_delta)
    font.setPointSize(size)
    return font
[ "def", "get_font", "(", "section", "=", "'main'", ",", "option", "=", "'font'", ",", "font_size_delta", "=", "0", ")", ":", "font", "=", "FONT_CACHE", ".", "get", "(", "(", "section", ",", "option", ")", ")", "if", "(", "font", "is", "None", ")", ":", "families", "=", "CONF", ".", "get", "(", "section", ",", "(", "option", "+", "'/family'", ")", ",", "None", ")", "if", "(", "families", "is", "None", ")", ":", "return", "QFont", "(", ")", "family", "=", "get_family", "(", "families", ")", "weight", "=", "QFont", ".", "Normal", "italic", "=", "CONF", ".", "get", "(", "section", ",", "(", "option", "+", "'/italic'", ")", ",", "False", ")", "if", "CONF", ".", "get", "(", "section", ",", "(", "option", "+", "'/bold'", ")", ",", "False", ")", ":", "weight", "=", "QFont", ".", "Bold", "size", "=", "(", "CONF", ".", "get", "(", "section", ",", "(", "option", "+", "'/size'", ")", ",", "9", ")", "+", "font_size_delta", ")", "font", "=", "QFont", "(", "family", ",", "size", ",", "weight", ")", "font", ".", "setItalic", "(", "italic", ")", "FONT_CACHE", "[", "(", "section", ",", "option", ")", "]", "=", "font", "size", "=", "(", "CONF", ".", "get", "(", "section", ",", "(", "option", "+", "'/size'", ")", ",", "9", ")", "+", "font_size_delta", ")", "font", ".", "setPointSize", "(", "size", ")", "return", "font" ]
get console font properties depending on os and user options .
train
true
53,762
def flexible_boolean(boolean):
    if (boolean in ('1', 'true', 'True')):
        return True
    else:
        return False
[ "def", "flexible_boolean", "(", "boolean", ")", ":", "if", "(", "boolean", "in", "(", "'1'", ",", "'true'", ",", "'True'", ")", ")", ":", "return", "True", "else", ":", "return", "False" ]
returns true for any of "1" , "true" or "True" .
train
false
53,763
def get_all_remote_methods(resolver=None, ns_prefix=u''):
    if (not resolver):
        resolver = get_resolver(get_urlconf())
    result = {}
    for name in resolver.reverse_dict.keys():
        if (not isinstance(name, six.string_types)):
            continue
        try:
            url = reverse((ns_prefix + name))
            resmgr = resolve(url)
            ViewClass = import_string(u'{0}.{1}'.format(resmgr.func.__module__, resmgr.func.__name__))
            if (isclass(ViewClass) and issubclass(ViewClass, JSONResponseMixin)):
                result[name] = _get_remote_methods_for(ViewClass, url)
        except (NoReverseMatch, ImproperlyConfigured):
            pass
    for (namespace, ns_pattern) in resolver.namespace_dict.items():
        sub_res = get_all_remote_methods(ns_pattern[1], ((ns_prefix + namespace) + u':'))
        if sub_res:
            result[namespace] = sub_res
    return result
[ "def", "get_all_remote_methods", "(", "resolver", "=", "None", ",", "ns_prefix", "=", "u''", ")", ":", "if", "(", "not", "resolver", ")", ":", "resolver", "=", "get_resolver", "(", "get_urlconf", "(", ")", ")", "result", "=", "{", "}", "for", "name", "in", "resolver", ".", "reverse_dict", ".", "keys", "(", ")", ":", "if", "(", "not", "isinstance", "(", "name", ",", "six", ".", "string_types", ")", ")", ":", "continue", "try", ":", "url", "=", "reverse", "(", "(", "ns_prefix", "+", "name", ")", ")", "resmgr", "=", "resolve", "(", "url", ")", "ViewClass", "=", "import_string", "(", "u'{0}.{1}'", ".", "format", "(", "resmgr", ".", "func", ".", "__module__", ",", "resmgr", ".", "func", ".", "__name__", ")", ")", "if", "(", "isclass", "(", "ViewClass", ")", "and", "issubclass", "(", "ViewClass", ",", "JSONResponseMixin", ")", ")", ":", "result", "[", "name", "]", "=", "_get_remote_methods_for", "(", "ViewClass", ",", "url", ")", "except", "(", "NoReverseMatch", ",", "ImproperlyConfigured", ")", ":", "pass", "for", "(", "namespace", ",", "ns_pattern", ")", "in", "resolver", ".", "namespace_dict", ".", "items", "(", ")", ":", "sub_res", "=", "get_all_remote_methods", "(", "ns_pattern", "[", "1", "]", ",", "(", "(", "ns_prefix", "+", "namespace", ")", "+", "u':'", ")", ")", "if", "sub_res", ":", "result", "[", "namespace", "]", "=", "sub_res", "return", "result" ]
returns a dictionary to be used for calling djangocall .
train
true
53,765
def print_and_modify(obj, mods, dels):
    obj.update(mods)
    for field in dels:
        try:
            del obj[field]
        except KeyError:
            pass
    return ui.show_model_changes(obj)
[ "def", "print_and_modify", "(", "obj", ",", "mods", ",", "dels", ")", ":", "obj", ".", "update", "(", "mods", ")", "for", "field", "in", "dels", ":", "try", ":", "del", "obj", "[", "field", "]", "except", "KeyError", ":", "pass", "return", "ui", ".", "show_model_changes", "(", "obj", ")" ]
print the modifications to an item and return a bool indicating whether any changes were made .
train
false
53,767
def backup_destroy(context, backup_id): return IMPL.backup_destroy(context, backup_id)
[ "def", "backup_destroy", "(", "context", ",", "backup_id", ")", ":", "return", "IMPL", ".", "backup_destroy", "(", "context", ",", "backup_id", ")" ]
destroy the backup or raise if it does not exist .
train
false
53,768
def __determine_resource_obj(service, resource):
    path = resource.split('.')
    node = service
    for elem in path:
        try:
            node = getattr(node, elem)()
        except AttributeError:
            raise AttributeError('"{0}" has no attribute "{1}"'.format('.'.join(path[0:path.index(elem)]), elem))
    return node
[ "def", "__determine_resource_obj", "(", "service", ",", "resource", ")", ":", "path", "=", "resource", ".", "split", "(", "'.'", ")", "node", "=", "service", "for", "elem", "in", "path", ":", "try", ":", "node", "=", "getattr", "(", "node", ",", "elem", ")", "(", ")", "except", "AttributeError", ":", "raise", "AttributeError", "(", "'\"{0}\" has no attribute \"{1}\"'", ".", "format", "(", "'.'", ".", "join", "(", "path", "[", "0", ":", "path", ".", "index", "(", "elem", ")", "]", ")", ",", "elem", ")", ")", "return", "node" ]
find the desired resource object method container from the service .
train
false
53,769
def _get_hold(line, pattern=__HOLD_PATTERN, full=True):
    if full:
        if (_yum() == 'dnf'):
            lock_re = '({0}-\\S+)'.format(pattern)
        else:
            lock_re = '(\\d+:{0}-\\S+)'.format(pattern)
    elif (_yum() == 'dnf'):
        lock_re = '({0}-\\S+)'.format(pattern)
    else:
        lock_re = '\\d+:({0}-\\S+)'.format(pattern)
    match = re.search(lock_re, line)
    if match:
        if (not full):
            woarch = match.group(1).rsplit('.', 1)[0]
            worel = woarch.rsplit('-', 1)[0]
            return worel.rsplit('-', 1)[0]
        else:
            return match.group(1)
    return None
[ "def", "_get_hold", "(", "line", ",", "pattern", "=", "__HOLD_PATTERN", ",", "full", "=", "True", ")", ":", "if", "full", ":", "if", "(", "_yum", "(", ")", "==", "'dnf'", ")", ":", "lock_re", "=", "'({0}-\\\\S+)'", ".", "format", "(", "pattern", ")", "else", ":", "lock_re", "=", "'(\\\\d+:{0}-\\\\S+)'", ".", "format", "(", "pattern", ")", "elif", "(", "_yum", "(", ")", "==", "'dnf'", ")", ":", "lock_re", "=", "'({0}-\\\\S+)'", ".", "format", "(", "pattern", ")", "else", ":", "lock_re", "=", "'\\\\d+:({0}-\\\\S+)'", ".", "format", "(", "pattern", ")", "match", "=", "re", ".", "search", "(", "lock_re", ",", "line", ")", "if", "match", ":", "if", "(", "not", "full", ")", ":", "woarch", "=", "match", ".", "group", "(", "1", ")", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "0", "]", "worel", "=", "woarch", ".", "rsplit", "(", "'-'", ",", "1", ")", "[", "0", "]", "return", "worel", ".", "rsplit", "(", "'-'", ",", "1", ")", "[", "0", "]", "else", ":", "return", "match", ".", "group", "(", "1", ")", "return", "None" ]
resolve a package name from a line containing the hold expression .
train
true
53,770
@login_required
def delete_favorite(req, id):
    try:
        favorite = models.Favorite.objects.get(user=req.user, pk=id)
        favorite.delete()
    except ObjectDoesNotExist:
        pass
    response = {'has_favorite': 'false'}
    return HttpResponse(json.dumps(response), content_type='application/json', status=200)
[ "@", "login_required", "def", "delete_favorite", "(", "req", ",", "id", ")", ":", "try", ":", "favorite", "=", "models", ".", "Favorite", ".", "objects", ".", "get", "(", "user", "=", "req", ".", "user", ",", "pk", "=", "id", ")", "favorite", ".", "delete", "(", ")", "except", "ObjectDoesNotExist", ":", "pass", "response", "=", "{", "'has_favorite'", ":", "'false'", "}", "return", "HttpResponse", "(", "json", ".", "dumps", "(", "response", ")", ",", "content_type", "=", "'application/json'", ",", "status", "=", "200", ")" ]
delete favorite and put favorite_info object in response .
train
false
53,771
def set_task_user(f):
    @functools.wraps(f)
    def wrapper(*args, **kw):
        old_user = get_user()
        set_user(get_task_user())
        try:
            result = f(*args, **kw)
        finally:
            set_user(old_user)
        return result
    return wrapper
[ "def", "set_task_user", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "**", "kw", ")", ":", "old_user", "=", "get_user", "(", ")", "set_user", "(", "get_task_user", "(", ")", ")", "try", ":", "result", "=", "f", "(", "*", "args", ",", "**", "kw", ")", "finally", ":", "set_user", "(", "old_user", ")", "return", "result", "return", "wrapper" ]
sets the user to be the task user .
train
false
53,772
def parseString(string, namespaces=True):
    if namespaces:
        builder = ExpatBuilderNS()
    else:
        builder = ExpatBuilder()
    return builder.parseString(string)
[ "def", "parseString", "(", "string", ",", "namespaces", "=", "True", ")", ":", "if", "namespaces", ":", "builder", "=", "ExpatBuilderNS", "(", ")", "else", ":", "builder", "=", "ExpatBuilder", "(", ")", "return", "builder", ".", "parseString", "(", "string", ")" ]
parse a document from a string .
train
false
53,773
def getblock(lines):
    blockfinder = BlockFinder()
    try:
        tokenize.tokenize(iter(lines).next, blockfinder.tokeneater)
    except (EndOfBlock, IndentationError):
        pass
    return lines[:blockfinder.last]
[ "def", "getblock", "(", "lines", ")", ":", "blockfinder", "=", "BlockFinder", "(", ")", "try", ":", "tokenize", ".", "tokenize", "(", "iter", "(", "lines", ")", ".", "next", ",", "blockfinder", ".", "tokeneater", ")", "except", "(", "EndOfBlock", ",", "IndentationError", ")", ":", "pass", "return", "lines", "[", ":", "blockfinder", ".", "last", "]" ]
extract the block of code at the top of the given list of lines .
train
false
53,774
def tolist(val):
    if (val is None):
        return None
    try:
        val.extend([])
        return val
    except AttributeError:
        pass
    try:
        return re.split('\\s*,\\s*', val)
    except TypeError:
        return list(val)
[ "def", "tolist", "(", "val", ")", ":", "if", "(", "val", "is", "None", ")", ":", "return", "None", "try", ":", "val", ".", "extend", "(", "[", "]", ")", "return", "val", "except", "AttributeError", ":", "pass", "try", ":", "return", "re", ".", "split", "(", "'\\\\s*,\\\\s*'", ",", "val", ")", "except", "TypeError", ":", "return", "list", "(", "val", ")" ]
convert a value that may be a list or a string into a list .
train
true
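A quick sketch of how the tolist snippet above behaves, with made-up inputs (assuming the re import from its original module):

tolist(None)        # -> None
tolist(['a', 'b'])  # -> ['a', 'b']       (lists pass through: extend([]) succeeds)
tolist('a, b, c')   # -> ['a', 'b', 'c']  (strings lack extend, so they are split on commas)
tolist(('a', 'b'))  # -> ['a', 'b']       (other iterables make re.split raise TypeError and fall through to list())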
53,775
def worker_destroy(context, **filters):
    query = _worker_query(context, **filters)
    return query.delete()
[ "def", "worker_destroy", "(", "context", ",", "**", "filters", ")", ":", "query", "=", "_worker_query", "(", "context", ",", "**", "filters", ")", "return", "query", ".", "delete", "(", ")" ]
delete a worker .
train
false
53,776
def create_chunks(sequence, size): return (sequence[p:(p + size)] for p in range(0, len(sequence), size))
[ "def", "create_chunks", "(", "sequence", ",", "size", ")", ":", "return", "(", "sequence", "[", "p", ":", "(", "p", "+", "size", ")", "]", "for", "p", "in", "range", "(", "0", ",", "len", "(", "sequence", ")", ",", "size", ")", ")" ]
generate chunks from a sequence .
train
false
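For reference, a hypothetical call to the create_chunks generator above:

list(create_chunks([1, 2, 3, 4, 5], 2))  # -> [[1, 2], [3, 4], [5]]  (last chunk may be short)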
53,778
def dont_import_local_tempest_into_lib(logical_line, filename):
    if ('tempest/lib/' not in filename):
        return
    if (not (('from tempest' in logical_line) or ('import tempest' in logical_line))):
        return
    if (('from tempest.lib' in logical_line) or ('import tempest.lib' in logical_line)):
        return
    msg = 'T112: tempest.lib should not import local tempest code to avoid circular dependency'
    (yield (0, msg))
[ "def", "dont_import_local_tempest_into_lib", "(", "logical_line", ",", "filename", ")", ":", "if", "(", "'tempest/lib/'", "not", "in", "filename", ")", ":", "return", "if", "(", "not", "(", "(", "'from tempest'", "in", "logical_line", ")", "or", "(", "'import tempest'", "in", "logical_line", ")", ")", ")", ":", "return", "if", "(", "(", "'from tempest.lib'", "in", "logical_line", ")", "or", "(", "'import tempest.lib'", "in", "logical_line", ")", ")", ":", "return", "msg", "=", "'T112: tempest.lib should not import local tempest code to avoid circular dependency'", "(", "yield", "(", "0", ",", "msg", ")", ")" ]
check that tempest.lib does not import local tempest code .
train
false
53,779
def task_upgrade_kernel(distribution):
    if is_centos_or_rhel(distribution):
        return sequence([yum_install(['kernel-devel', 'kernel']), run_from_args(['sync'])])
    elif is_ubuntu(distribution):
        return sequence([])
    else:
        raise DistributionNotSupported(distribution=distribution)
[ "def", "task_upgrade_kernel", "(", "distribution", ")", ":", "if", "is_centos_or_rhel", "(", "distribution", ")", ":", "return", "sequence", "(", "[", "yum_install", "(", "[", "'kernel-devel'", ",", "'kernel'", "]", ")", ",", "run_from_args", "(", "[", "'sync'", "]", ")", "]", ")", "elif", "is_ubuntu", "(", "distribution", ")", ":", "return", "sequence", "(", "[", "]", ")", "else", ":", "raise", "DistributionNotSupported", "(", "distribution", "=", "distribution", ")" ]
upgrade kernel .
train
false
53,780
@app.route('/delay/<delay>')
def delay_response(delay):
    delay = min(float(delay), 10)
    time.sleep(delay)
    return jsonify(get_dict('url', 'args', 'form', 'data', 'origin', 'headers', 'files'))
[ "@", "app", ".", "route", "(", "'/delay/<delay>'", ")", "def", "delay_response", "(", "delay", ")", ":", "delay", "=", "min", "(", "float", "(", "delay", ")", ",", "10", ")", "time", ".", "sleep", "(", "delay", ")", "return", "jsonify", "(", "get_dict", "(", "'url'", ",", "'args'", ",", "'form'", ",", "'data'", ",", "'origin'", ",", "'headers'", ",", "'files'", ")", ")" ]
returns a delayed response .
train
true
53,781
def is_larger(unit_1, unit_2):
    unit_1 = functions.value_for_key(INFORMATION_UNITS, unit_1)
    unit_2 = functions.value_for_key(INFORMATION_UNITS, unit_2)
    return (ureg.parse_expression(unit_1) > ureg.parse_expression(unit_2))
[ "def", "is_larger", "(", "unit_1", ",", "unit_2", ")", ":", "unit_1", "=", "functions", ".", "value_for_key", "(", "INFORMATION_UNITS", ",", "unit_1", ")", "unit_2", "=", "functions", ".", "value_for_key", "(", "INFORMATION_UNITS", ",", "unit_2", ")", "return", "(", "ureg", ".", "parse_expression", "(", "unit_1", ")", ">", "ureg", ".", "parse_expression", "(", "unit_2", ")", ")" ]
returns a boolean indicating whether unit_1 is larger than unit_2 .
train
true
53,782
def requirement_available(requirement):
    try:
        util.activate(requirement)
    except errors.DependencyError:
        return False
    return True
[ "def", "requirement_available", "(", "requirement", ")", ":", "try", ":", "util", ".", "activate", "(", "requirement", ")", "except", "errors", ".", "DependencyError", ":", "return", "False", "return", "True" ]
checks if requirement can be imported .
train
false
53,784
def detachAcceptMsOriginating():
    a = TpPd(pd=3)
    b = MessageType(mesType=6)
    c = ForceToStandbyAndSpareHalfOctets()
    packet = ((a / b) / c)
    return packet
[ "def", "detachAcceptMsOriginating", "(", ")", ":", "a", "=", "TpPd", "(", "pd", "=", "3", ")", "b", "=", "MessageType", "(", "mesType", "=", "6", ")", "c", "=", "ForceToStandbyAndSpareHalfOctets", "(", ")", "packet", "=", "(", "(", "a", "/", "b", ")", "/", "c", ")", "return", "packet" ]
detach accept section 9 .
train
true
53,785
def parse_http_load(full_load, http_methods):
    try:
        (headers, body) = full_load.split('\r\n\r\n', 1)
    except ValueError:
        headers = full_load
        body = ''
    header_lines = headers.split('\r\n')
    http_line = get_http_line(header_lines, http_methods)
    if (not http_line):
        headers = ''
        body = full_load
    header_lines = [line for line in header_lines if (line != http_line)]
    return (http_line, header_lines, body)
[ "def", "parse_http_load", "(", "full_load", ",", "http_methods", ")", ":", "try", ":", "(", "headers", ",", "body", ")", "=", "full_load", ".", "split", "(", "'\\r\\n\\r\\n'", ",", "1", ")", "except", "ValueError", ":", "headers", "=", "full_load", "body", "=", "''", "header_lines", "=", "headers", ".", "split", "(", "'\\r\\n'", ")", "http_line", "=", "get_http_line", "(", "header_lines", ",", "http_methods", ")", "if", "(", "not", "http_line", ")", ":", "headers", "=", "''", "body", "=", "full_load", "header_lines", "=", "[", "line", "for", "line", "in", "header_lines", "if", "(", "line", "!=", "http_line", ")", "]", "return", "(", "http_line", ",", "header_lines", ",", "body", ")" ]
split the raw load into list of headers and body string .
train
false
53,786
def long_to_bson_ts(val):
    seconds = (val >> 32)
    increment = (val & 4294967295)
    return Timestamp(seconds, increment)
[ "def", "long_to_bson_ts", "(", "val", ")", ":", "seconds", "=", "(", "val", ">>", "32", ")", "increment", "=", "(", "val", "&", "4294967295", ")", "return", "Timestamp", "(", "seconds", ",", "increment", ")" ]
convert integer into bson timestamp .
train
false
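The snippet above splits a 64-bit integer into the two 32-bit halves of a BSON timestamp: the high bits are the seconds and the low bits (masked with 4294967295, i.e. 0xFFFFFFFF) are the increment. An illustrative value, assuming Timestamp is bson.timestamp.Timestamp as in the original module:

val = (1500000000 << 32) | 7  # made-up packed timestamp
long_to_bson_ts(val)          # -> Timestamp(1500000000, 7)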
53,787
@core_helper
def resource_preview(resource, package):
    if (not resource['url']):
        return False
    datapreview.res_format(resource)
    directly = False
    data_dict = {'resource': resource, 'package': package}
    if datapreview.get_preview_plugin(data_dict, return_first=True):
        url = url_for(controller='package', action='resource_datapreview', resource_id=resource['id'], id=package['id'], qualified=True)
    else:
        return False
    return snippet('dataviewer/snippets/data_preview.html', embed=directly, resource_url=url, raw_resource_url=resource.get('url'))
[ "@", "core_helper", "def", "resource_preview", "(", "resource", ",", "package", ")", ":", "if", "(", "not", "resource", "[", "'url'", "]", ")", ":", "return", "False", "datapreview", ".", "res_format", "(", "resource", ")", "directly", "=", "False", "data_dict", "=", "{", "'resource'", ":", "resource", ",", "'package'", ":", "package", "}", "if", "datapreview", ".", "get_preview_plugin", "(", "data_dict", ",", "return_first", "=", "True", ")", ":", "url", "=", "url_for", "(", "controller", "=", "'package'", ",", "action", "=", "'resource_datapreview'", ",", "resource_id", "=", "resource", "[", "'id'", "]", ",", "id", "=", "package", "[", "'id'", "]", ",", "qualified", "=", "True", ")", "else", ":", "return", "False", "return", "snippet", "(", "'dataviewer/snippets/data_preview.html'", ",", "embed", "=", "directly", ",", "resource_url", "=", "url", ",", "raw_resource_url", "=", "resource", ".", "get", "(", "'url'", ")", ")" ]
returns a rendered snippet for an embedded resource preview .
train
false
53,788
def keybinding(attr):
    ks = getattr(QKeySequence, attr)
    return from_qvariant(QKeySequence.keyBindings(ks)[0], str)
[ "def", "keybinding", "(", "attr", ")", ":", "ks", "=", "getattr", "(", "QKeySequence", ",", "attr", ")", "return", "from_qvariant", "(", "QKeySequence", ".", "keyBindings", "(", "ks", ")", "[", "0", "]", ",", "str", ")" ]
return keybinding .
train
true
53,789
def get_current_timezone(): return getattr(_active, 'value', get_default_timezone())
[ "def", "get_current_timezone", "(", ")", ":", "return", "getattr", "(", "_active", ",", "'value'", ",", "get_default_timezone", "(", ")", ")" ]
returns the currently active time zone as a tzinfo instance .
train
false
53,790
def _get_lights(): return _query(None, None)
[ "def", "_get_lights", "(", ")", ":", "return", "_query", "(", "None", ",", "None", ")" ]
get all available lighting devices .
train
false
53,792
def _ssh_slave_addresses(ssh_bin, master_address, ec2_key_pair_file):
    if ((not ec2_key_pair_file) or (not os.path.exists(ec2_key_pair_file))):
        return []
    cmd = "hadoop dfsadmin -report | grep ^Name | cut -f2 -d: | cut -f2 -d' '"
    args = [('bash -c "%s"' % cmd)]
    ips = to_string(_check_output(*_ssh_run(ssh_bin, master_address, ec2_key_pair_file, args)))
    return [ip for ip in ips.split('\n') if ip]
[ "def", "_ssh_slave_addresses", "(", "ssh_bin", ",", "master_address", ",", "ec2_key_pair_file", ")", ":", "if", "(", "(", "not", "ec2_key_pair_file", ")", "or", "(", "not", "os", ".", "path", ".", "exists", "(", "ec2_key_pair_file", ")", ")", ")", ":", "return", "[", "]", "cmd", "=", "\"hadoop dfsadmin -report | grep ^Name | cut -f2 -d: | cut -f2 -d' '\"", "args", "=", "[", "(", "'bash -c \"%s\"'", "%", "cmd", ")", "]", "ips", "=", "to_string", "(", "_check_output", "(", "*", "_ssh_run", "(", "ssh_bin", ",", "master_address", ",", "ec2_key_pair_file", ",", "args", ")", ")", ")", "return", "[", "ip", "for", "ip", "in", "ips", ".", "split", "(", "'\\n'", ")", "if", "ip", "]" ]
get the ip addresses of the slave nodes .
train
false
53,793
def get_user_api_key(): return os.environ.get('GALAXY_TEST_USER_API_KEY', DEFAULT_GALAXY_USER_API_KEY)
[ "def", "get_user_api_key", "(", ")", ":", "return", "os", ".", "environ", ".", "get", "(", "'GALAXY_TEST_USER_API_KEY'", ",", "DEFAULT_GALAXY_USER_API_KEY", ")" ]
test user api key to use for functional tests .
train
false
53,794
def overwrite_from_dates(asof, dense_dates, sparse_dates, asset_idx, value):
    if (asof is pd.NaT):
        return
    first_row = dense_dates.searchsorted(asof)
    next_idx = sparse_dates.searchsorted(asof.asm8, 'right')
    if (next_idx == len(sparse_dates)):
        last_row = (len(dense_dates) - 1)
    else:
        last_row = (dense_dates.searchsorted(sparse_dates[next_idx]) - 1)
    if (first_row > last_row):
        return
    (first, last) = asset_idx
    (yield Float64Overwrite(first_row, last_row, first, last, value))
[ "def", "overwrite_from_dates", "(", "asof", ",", "dense_dates", ",", "sparse_dates", ",", "asset_idx", ",", "value", ")", ":", "if", "(", "asof", "is", "pd", ".", "NaT", ")", ":", "return", "first_row", "=", "dense_dates", ".", "searchsorted", "(", "asof", ")", "next_idx", "=", "sparse_dates", ".", "searchsorted", "(", "asof", ".", "asm8", ",", "'right'", ")", "if", "(", "next_idx", "==", "len", "(", "sparse_dates", ")", ")", ":", "last_row", "=", "(", "len", "(", "dense_dates", ")", "-", "1", ")", "else", ":", "last_row", "=", "(", "dense_dates", ".", "searchsorted", "(", "sparse_dates", "[", "next_idx", "]", ")", "-", "1", ")", "if", "(", "first_row", ">", "last_row", ")", ":", "return", "(", "first", ",", "last", ")", "=", "asset_idx", "(", "yield", "Float64Overwrite", "(", "first_row", ",", "last_row", ",", "first", ",", "last", ",", "value", ")", ")" ]
construct a float64overwrite with the correct start and end date based on the asof date of the delta .
train
false
53,795
def item_create(item, item_id, item_type, create='create', extra_args=None, cibfile=None):
    cmd = ['pcs']
    if isinstance(cibfile, six.string_types):
        cmd += ['-f', cibfile]
    if isinstance(item, six.string_types):
        cmd += [item]
    elif isinstance(item, (list, tuple)):
        cmd += item
    if (item in ['constraint']):
        if isinstance(item_type, six.string_types):
            cmd += [item_type]
    if isinstance(create, six.string_types):
        cmd += [create]
    elif isinstance(create, (list, tuple)):
        cmd += create
    if (item not in ['constraint']):
        cmd += [item_id]
        if isinstance(item_type, six.string_types):
            cmd += [item_type]
    if isinstance(extra_args, (list, tuple)):
        if (item in ['constraint']):
            extra_args = (extra_args + ['id={0}'.format(item_id)])
        cmd += extra_args
    return __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False)
[ "def", "item_create", "(", "item", ",", "item_id", ",", "item_type", ",", "create", "=", "'create'", ",", "extra_args", "=", "None", ",", "cibfile", "=", "None", ")", ":", "cmd", "=", "[", "'pcs'", "]", "if", "isinstance", "(", "cibfile", ",", "six", ".", "string_types", ")", ":", "cmd", "+=", "[", "'-f'", ",", "cibfile", "]", "if", "isinstance", "(", "item", ",", "six", ".", "string_types", ")", ":", "cmd", "+=", "[", "item", "]", "elif", "isinstance", "(", "item", ",", "(", "list", ",", "tuple", ")", ")", ":", "cmd", "+=", "item", "if", "(", "item", "in", "[", "'constraint'", "]", ")", ":", "if", "isinstance", "(", "item_type", ",", "six", ".", "string_types", ")", ":", "cmd", "+=", "[", "item_type", "]", "if", "isinstance", "(", "create", ",", "six", ".", "string_types", ")", ":", "cmd", "+=", "[", "create", "]", "elif", "isinstance", "(", "create", ",", "(", "list", ",", "tuple", ")", ")", ":", "cmd", "+=", "create", "if", "(", "item", "not", "in", "[", "'constraint'", "]", ")", ":", "cmd", "+=", "[", "item_id", "]", "if", "isinstance", "(", "item_type", ",", "six", ".", "string_types", ")", ":", "cmd", "+=", "[", "item_type", "]", "if", "isinstance", "(", "extra_args", ",", "(", "list", ",", "tuple", ")", ")", ":", "if", "(", "item", "in", "[", "'constraint'", "]", ")", ":", "extra_args", "=", "(", "extra_args", "+", "[", "'id={0}'", ".", "format", "(", "item_id", ")", "]", ")", "cmd", "+=", "extra_args", "return", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "output_loglevel", "=", "'trace'", ",", "python_shell", "=", "False", ")" ]
create an item via pcs command item config .
train
true
53,796
def _connect_user(request, facebook, overwrite=True):
    if (not request.user.is_authenticated()):
        raise ValueError('Connect user can only be used on authenticated users')
    if (not facebook.is_authenticated()):
        raise ValueError('Facebook needs to be authenticated for connect flows')
    data = facebook.facebook_profile_data()
    facebook_id = data['id']
    old_connections = _get_old_connections(facebook_id, request.user.id)[:20]
    if (old_connections and (not (request.POST.get('confirm_connect') or request.GET.get('confirm_connect')))):
        raise facebook_exceptions.AlreadyConnectedError(list(old_connections))
    user = _update_user(request.user, facebook, overwrite=overwrite)
    return user
[ "def", "_connect_user", "(", "request", ",", "facebook", ",", "overwrite", "=", "True", ")", ":", "if", "(", "not", "request", ".", "user", ".", "is_authenticated", "(", ")", ")", ":", "raise", "ValueError", "(", "'Connect user can only be used on authenticated users'", ")", "if", "(", "not", "facebook", ".", "is_authenticated", "(", ")", ")", ":", "raise", "ValueError", "(", "'Facebook needs to be authenticated for connect flows'", ")", "data", "=", "facebook", ".", "facebook_profile_data", "(", ")", "facebook_id", "=", "data", "[", "'id'", "]", "old_connections", "=", "_get_old_connections", "(", "facebook_id", ",", "request", ".", "user", ".", "id", ")", "[", ":", "20", "]", "if", "(", "old_connections", "and", "(", "not", "(", "request", ".", "POST", ".", "get", "(", "'confirm_connect'", ")", "or", "request", ".", "GET", ".", "get", "(", "'confirm_connect'", ")", ")", ")", ")", ":", "raise", "facebook_exceptions", ".", "AlreadyConnectedError", "(", "list", "(", "old_connections", ")", ")", "user", "=", "_update_user", "(", "request", ".", "user", ",", "facebook", ",", "overwrite", "=", "overwrite", ")", "return", "user" ]
update the fields on the user model and connects it to the facebook account .
train
false
53,797
def path_separator(): return PATH_SEPARATOR[_os.name]
[ "def", "path_separator", "(", ")", ":", "return", "PATH_SEPARATOR", "[", "_os", ".", "name", "]" ]
get the path separator for the current operating system .
train
false
53,798
def _string_from_json(value, _): return value
[ "def", "_string_from_json", "(", "value", ",", "_", ")", ":", "return", "value" ]
noop string -> string coercion .
train
false
53,800
def is_coroutine(function): return ((function.__code__.co_flags & 128) or getattr(function, '_is_coroutine', False))
[ "def", "is_coroutine", "(", "function", ")", ":", "return", "(", "(", "function", ".", "__code__", ".", "co_flags", "&", "128", ")", "or", "getattr", "(", "function", ",", "'_is_coroutine'", ",", "False", ")", ")" ]
returns true if the passed in function is a coroutine .
train
false
53,801
def nextLine():
    caller = currentframe(1)
    return (getsourcefile(sys.modules[caller.f_globals['__name__']]), (caller.f_lineno + 1))
[ "def", "nextLine", "(", ")", ":", "caller", "=", "currentframe", "(", "1", ")", "return", "(", "getsourcefile", "(", "sys", ".", "modules", "[", "caller", ".", "f_globals", "[", "'__name__'", "]", "]", ")", ",", "(", "caller", ".", "f_lineno", "+", "1", ")", ")" ]
retrieve the file name and line number immediately after where this function is called .
train
false
53,802
def _ConvertToCygpath(path):
    if (sys.platform == 'cygwin'):
        p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
        path = p.communicate()[0].strip()
    return path
[ "def", "_ConvertToCygpath", "(", "path", ")", ":", "if", "(", "sys", ".", "platform", "==", "'cygwin'", ")", ":", "p", "=", "subprocess", ".", "Popen", "(", "[", "'cygpath'", ",", "path", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "path", "=", "p", ".", "communicate", "(", ")", "[", "0", "]", ".", "strip", "(", ")", "return", "path" ]
convert to cygwin path if we are using cygwin .
train
false
53,803
def load_werkzeug(path):
    sys.path[0] = path
    wz.__dict__.clear()
    for key in sys.modules.keys():
        if (key.startswith('werkzeug.') or (key == 'werkzeug')):
            sys.modules.pop(key, None)
    import werkzeug
    for key in werkzeug.__all__:
        setattr(wz, key, getattr(werkzeug, key))
    hg_tag = find_hg_tag(path)
    try:
        f = open(os.path.join(path, 'setup.py'))
    except IOError:
        pass
    else:
        try:
            for line in f:
                line = line.strip()
                if line.startswith('version='):
                    return (line[8:].strip(' \t,')[1:(-1)], hg_tag)
        finally:
            f.close()
    print >>sys.stderr, 'Unknown werkzeug version loaded'
    sys.exit(2)
[ "def", "load_werkzeug", "(", "path", ")", ":", "sys", ".", "path", "[", "0", "]", "=", "path", "wz", ".", "__dict__", ".", "clear", "(", ")", "for", "key", "in", "sys", ".", "modules", ".", "keys", "(", ")", ":", "if", "(", "key", ".", "startswith", "(", "'werkzeug.'", ")", "or", "(", "key", "==", "'werkzeug'", ")", ")", ":", "sys", ".", "modules", ".", "pop", "(", "key", ",", "None", ")", "import", "werkzeug", "for", "key", "in", "werkzeug", ".", "__all__", ":", "setattr", "(", "wz", ",", "key", ",", "getattr", "(", "werkzeug", ",", "key", ")", ")", "hg_tag", "=", "find_hg_tag", "(", "path", ")", "try", ":", "f", "=", "open", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'setup.py'", ")", ")", "except", "IOError", ":", "pass", "else", ":", "try", ":", "for", "line", "in", "f", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", ".", "startswith", "(", "'version='", ")", ":", "return", "(", "line", "[", "8", ":", "]", ".", "strip", "(", "' DCTB ,'", ")", "[", "1", ":", "(", "-", "1", ")", "]", ",", "hg_tag", ")", "finally", ":", "f", ".", "close", "(", ")", "print", ">>", "sys", ".", "stderr", ",", "'Unknown werkzeug version loaded'", "sys", ".", "exit", "(", "2", ")" ]
load werkzeug .
train
true
53,804
def get_ip_version(network):
    if (netaddr.IPNetwork(network).version == 6):
        return 'IPv6'
    elif (netaddr.IPNetwork(network).version == 4):
        return 'IPv4'
[ "def", "get_ip_version", "(", "network", ")", ":", "if", "(", "netaddr", ".", "IPNetwork", "(", "network", ")", ".", "version", "==", "6", ")", ":", "return", "'IPv6'", "elif", "(", "netaddr", ".", "IPNetwork", "(", "network", ")", ".", "version", "==", "4", ")", ":", "return", "'IPv4'" ]
returns the ip version of a network .
train
false
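Illustrative calls for the snippet above (example networks made up):

get_ip_version('10.0.0.0/24')    # -> 'IPv4'
get_ip_version('2001:db8::/64')  # -> 'IPv6'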
53,805
def make_secret_key(project_directory):
    local_setting = os.path.join(project_directory, 'config/settings/local.py')
    set_secret_key(local_setting)
    env_file = os.path.join(project_directory, 'env.example')
    set_secret_key(env_file)
[ "def", "make_secret_key", "(", "project_directory", ")", ":", "local_setting", "=", "os", ".", "path", ".", "join", "(", "project_directory", ",", "'config/settings/local.py'", ")", "set_secret_key", "(", "local_setting", ")", "env_file", "=", "os", ".", "path", ".", "join", "(", "project_directory", ",", "'env.example'", ")", "set_secret_key", "(", "env_file", ")" ]
generates and saves random secret key .
train
false
53,806
def samplesize_confint_proportion(proportion, half_length, alpha=0.05, method='normal'):
    q_ = proportion
    if (method == 'normal'):
        n = ((q_ * (1 - q_)) / ((half_length / stats.norm.isf((alpha / 2.0))) ** 2))
    else:
        raise NotImplementedError('only "normal" is available')
    return n
[ "def", "samplesize_confint_proportion", "(", "proportion", ",", "half_length", ",", "alpha", "=", "0.05", ",", "method", "=", "'normal'", ")", ":", "q_", "=", "proportion", "if", "(", "method", "==", "'normal'", ")", ":", "n", "=", "(", "(", "q_", "*", "(", "1", "-", "q_", ")", ")", "/", "(", "(", "half_length", "/", "stats", ".", "norm", ".", "isf", "(", "(", "alpha", "/", "2.0", ")", ")", ")", "**", "2", ")", ")", "else", ":", "raise", "NotImplementedError", "(", "'only \"normal\" is available'", ")", "return", "n" ]
find sample size to get desired confidence interval length . parameters : proportion : float in (0, 1) , proportion or quantile ; half_length : float in (0, 1) , desired half length of the confidence interval ; alpha : float in (0, 1) , significance level .
train
false
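A worked instance of the formula in the snippet, n = q(1-q) / (half_length / z_(alpha/2))^2, with illustrative numbers: for proportion 0.5, half_length 0.05 and the default alpha of 0.05, z = stats.norm.isf(0.025) is about 1.96, so n is roughly 0.25 / (0.05 / 1.96)^2, i.e. about 384.1, or 385 observations after rounding up:

samplesize_confint_proportion(0.5, 0.05)  # -> approximately 384.15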
53,808
def get_pointer(ctypes_func): return ctypes.cast(ctypes_func, ctypes.c_void_p).value
[ "def", "get_pointer", "(", "ctypes_func", ")", ":", "return", "ctypes", ".", "cast", "(", "ctypes_func", ",", "ctypes", ".", "c_void_p", ")", ".", "value" ]
get a pointer to the underlying function for a ctypes function as an integer .
train
false
53,809
def _generate_meta():
    d = {'root_url': request.url_root}
    return d
[ "def", "_generate_meta", "(", ")", ":", "d", "=", "{", "'root_url'", ":", "request", ".", "url_root", "}", "return", "d" ]
generate meta information for export .
train
false
53,810
def test_system_numerics_complex(): print 'TODO'
[ "def", "test_system_numerics_complex", "(", ")", ":", "print", "'TODO'" ]
URL this should be tested minimally here .
train
false
53,812
def next_redirect(data, default, default_view, **get_kwargs):
    next = data.get('next', default)
    if (next is None):
        next = urlresolvers.reverse(default_view)
    if get_kwargs:
        if ('#' in next):
            tmp = next.rsplit('#', 1)
            next = tmp[0]
            anchor = ('#' + tmp[1])
        else:
            anchor = ''
        joiner = ((('?' in next) and '&') or '?')
        next += ((joiner + urllib.urlencode(get_kwargs)) + anchor)
    return HttpResponseRedirect(next)
[ "def", "next_redirect", "(", "data", ",", "default", ",", "default_view", ",", "**", "get_kwargs", ")", ":", "next", "=", "data", ".", "get", "(", "'next'", ",", "default", ")", "if", "(", "next", "is", "None", ")", ":", "next", "=", "urlresolvers", ".", "reverse", "(", "default_view", ")", "if", "get_kwargs", ":", "if", "(", "'#'", "in", "next", ")", ":", "tmp", "=", "next", ".", "rsplit", "(", "'#'", ",", "1", ")", "next", "=", "tmp", "[", "0", "]", "anchor", "=", "(", "'#'", "+", "tmp", "[", "1", "]", ")", "else", ":", "anchor", "=", "''", "joiner", "=", "(", "(", "(", "'?'", "in", "next", ")", "and", "'&'", ")", "or", "'?'", ")", "next", "+=", "(", "(", "joiner", "+", "urllib", ".", "urlencode", "(", "get_kwargs", ")", ")", "+", "anchor", ")", "return", "HttpResponseRedirect", "(", "next", ")" ]
handle the "where should i go next?" part of comment views .
train
false
53,813
def quote_unix(value):
    value = six.moves.shlex_quote(value)
    return value
[ "def", "quote_unix", "(", "value", ")", ":", "value", "=", "six", ".", "moves", ".", "shlex_quote", "(", "value", ")", "return", "value" ]
return a quoted version of the value which can be used as one token in a shell command line .
train
false
53,814
@LocalContext
def unpack_many(data, word_size=None):
    word_size = (word_size or context.word_size)
    endianness = context.endianness
    sign = context.sign
    if (word_size == 'all'):
        return [unpack(data, word_size)]
    if ((word_size % 8) != 0):
        raise ValueError('unpack_many(): word_size must be a multiple of 8')
    out = []
    n = (word_size // 8)
    for i in range(0, len(data), n):
        out.append(unpack(data[i:(i + n)], word_size))
    return list(map(int, out))
[ "@", "LocalContext", "def", "unpack_many", "(", "data", ",", "word_size", "=", "None", ")", ":", "word_size", "=", "(", "word_size", "or", "context", ".", "word_size", ")", "endianness", "=", "context", ".", "endianness", "sign", "=", "context", ".", "sign", "if", "(", "word_size", "==", "'all'", ")", ":", "return", "[", "unpack", "(", "data", ",", "word_size", ")", "]", "if", "(", "(", "word_size", "%", "8", ")", "!=", "0", ")", ":", "raise", "ValueError", "(", "'unpack_many(): word_size must be a multiple of 8'", ")", "out", "=", "[", "]", "n", "=", "(", "word_size", "//", "8", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "data", ")", ",", "n", ")", ":", "out", ".", "append", "(", "unpack", "(", "data", "[", "i", ":", "(", "i", "+", "n", ")", "]", ",", "word_size", ")", ")", "return", "list", "(", "map", "(", "int", ",", "out", ")", ")" ]
unpack -> int list splits data into groups of word_size//8 bytes and calls :func:unpack on each group .
train
false
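A hypothetical call to the snippet above, assuming the library's usual little-endian, unsigned default context (unpack is the corresponding single-word routine):

unpack_many(b'\x01\x00\x02\x00', 16)  # -> [1, 2] under a little-endian context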
53,815
@_FFI.callback(u'Value(ExternContext*, uint8_t*, uint64_t)')
def extern_create_exception(context_handle, msg_ptr, msg_len):
    c = _FFI.from_handle(context_handle)
    msg = bytes(_FFI.buffer(msg_ptr, msg_len)).decode(u'utf-8')
    return c.to_value(Exception(msg))
[ "@", "_FFI", ".", "callback", "(", "u'Value(ExternContext*, uint8_t*, uint64_t)'", ")", "def", "extern_create_exception", "(", "context_handle", ",", "msg_ptr", ",", "msg_len", ")", ":", "c", "=", "_FFI", ".", "from_handle", "(", "context_handle", ")", "msg", "=", "bytes", "(", "_FFI", ".", "buffer", "(", "msg_ptr", ",", "msg_len", ")", ")", ".", "decode", "(", "u'utf-8'", ")", "return", "c", ".", "to_value", "(", "Exception", "(", "msg", ")", ")" ]
given a utf8 message string .
train
false
53,816
def stub_out(test, funcs):
    for (module, func) in funcs.items():
        test.stub_out(module, func)
[ "def", "stub_out", "(", "test", ",", "funcs", ")", ":", "for", "(", "module", ",", "func", ")", "in", "funcs", ".", "items", "(", ")", ":", "test", ".", "stub_out", "(", "module", ",", "func", ")" ]
set the stubs in mapping in the db api .
train
false
53,817
def decode_barcode_8(nt_barcode):
    if (len(nt_barcode) != 8):
        raise ValueError('barcode must be 8 nt long.')
    if set(list(nt_barcode)).difference(CUR_ENC_FO.keys()):
        raise ValueError('Only A,T,C,G valid chars.')
    decoded = nt_to_cw(CUR_ENC_FO, nt_barcode)
    (num_errors, sym) = calc_syndrome(decoded, 16)
    if (num_errors == 1):
        nt_barcode = unpack_bitstr(CUR_REV_ENC_SI, ''.join(map(str, decoded)))
    elif (num_errors > 1):
        nt_barcode = None
    return (nt_barcode, (num_errors / 2.0))
[ "def", "decode_barcode_8", "(", "nt_barcode", ")", ":", "if", "(", "len", "(", "nt_barcode", ")", "!=", "8", ")", ":", "raise", "ValueError", "(", "'barcode must be 8 nt long.'", ")", "if", "set", "(", "list", "(", "nt_barcode", ")", ")", ".", "difference", "(", "CUR_ENC_FO", ".", "keys", "(", ")", ")", ":", "raise", "ValueError", "(", "'Only A,T,C,G valid chars.'", ")", "decoded", "=", "nt_to_cw", "(", "CUR_ENC_FO", ",", "nt_barcode", ")", "(", "num_errors", ",", "sym", ")", "=", "calc_syndrome", "(", "decoded", ",", "16", ")", "if", "(", "num_errors", "==", "1", ")", ":", "nt_barcode", "=", "unpack_bitstr", "(", "CUR_REV_ENC_SI", ",", "''", ".", "join", "(", "map", "(", "str", ",", "decoded", ")", ")", ")", "elif", "(", "num_errors", ">", "1", ")", ":", "nt_barcode", "=", "None", "return", "(", "nt_barcode", ",", "(", "num_errors", "/", "2.0", ")", ")" ]
decode length 8 barcode .
train
false
53,820
def global_subsystem_instance(subsystem_type, options=None):
    init_subsystem(subsystem_type, options)
    return subsystem_type.global_instance()
[ "def", "global_subsystem_instance", "(", "subsystem_type", ",", "options", "=", "None", ")", ":", "init_subsystem", "(", "subsystem_type", ",", "options", ")", "return", "subsystem_type", ".", "global_instance", "(", ")" ]
returns the global instance of a subsystem .
train
false
53,821
def _check_surfaces(surfs):
    for surf in surfs:
        _assert_complete_surface(surf)
    for (surf_1, surf_2) in zip(surfs[:(-1)], surfs[1:]):
        logger.info(('Checking that %s surface is inside %s surface...' % (_surf_name[surf_2['id']], _surf_name[surf_1['id']])))
        _assert_inside(surf_2, surf_1)
[ "def", "_check_surfaces", "(", "surfs", ")", ":", "for", "surf", "in", "surfs", ":", "_assert_complete_surface", "(", "surf", ")", "for", "(", "surf_1", ",", "surf_2", ")", "in", "zip", "(", "surfs", "[", ":", "(", "-", "1", ")", "]", ",", "surfs", "[", "1", ":", "]", ")", ":", "logger", ".", "info", "(", "(", "'Checking that %s surface is inside %s surface...'", "%", "(", "_surf_name", "[", "surf_2", "[", "'id'", "]", "]", ",", "_surf_name", "[", "surf_1", "[", "'id'", "]", "]", ")", ")", ")", "_assert_inside", "(", "surf_2", ",", "surf_1", ")" ]
check that the surfaces are complete and non-intersecting .
train
false
53,822
def is_pidfile_stale(pidfile):
    result = False
    pidfile_pid = pidfile.read_pid()
    if (pidfile_pid is not None):
        try:
            os.kill(pidfile_pid, signal.SIG_DFL)
        except OSError as exc:
            if (exc.errno == errno.ESRCH):
                result = True
    return result
[ "def", "is_pidfile_stale", "(", "pidfile", ")", ":", "result", "=", "False", "pidfile_pid", "=", "pidfile", ".", "read_pid", "(", ")", "if", "(", "pidfile_pid", "is", "not", "None", ")", ":", "try", ":", "os", ".", "kill", "(", "pidfile_pid", ",", "signal", ".", "SIG_DFL", ")", "except", "OSError", "as", "exc", ":", "if", "(", "exc", ".", "errno", "==", "errno", ".", "ESRCH", ")", ":", "result", "=", "True", "return", "result" ]
determine whether a pid file is stale .
train
false
53,823
def trimHistory():
    failed_db_con = db.DBConnection('failed.db')
    failed_db_con.action(('DELETE FROM history WHERE date < ' + str((datetime.datetime.today() - datetime.timedelta(days=30)).strftime(History.date_format))))
[ "def", "trimHistory", "(", ")", ":", "failed_db_con", "=", "db", ".", "DBConnection", "(", "'failed.db'", ")", "failed_db_con", ".", "action", "(", "(", "'DELETE FROM history WHERE date < '", "+", "str", "(", "(", "datetime", ".", "datetime", ".", "today", "(", ")", "-", "datetime", ".", "timedelta", "(", "days", "=", "30", ")", ")", ".", "strftime", "(", "History", ".", "date_format", ")", ")", ")", ")" ]
trims history table to 1 month of history from today .
train
false
53,826
def buttap(N):
    if (abs(int(N)) != N):
        raise ValueError('Filter order must be a nonnegative integer')
    z = numpy.array([])
    m = numpy.arange(((- N) + 1), N, 2)
    p = (- numpy.exp((((1j * pi) * m) / (2 * N))))
    k = 1
    return (z, p, k)
[ "def", "buttap", "(", "N", ")", ":", "if", "(", "abs", "(", "int", "(", "N", ")", ")", "!=", "N", ")", ":", "raise", "ValueError", "(", "'Filter order must be a nonnegative integer'", ")", "z", "=", "numpy", ".", "array", "(", "[", "]", ")", "m", "=", "numpy", ".", "arange", "(", "(", "(", "-", "N", ")", "+", "1", ")", ",", "N", ",", "2", ")", "p", "=", "(", "-", "numpy", ".", "exp", "(", "(", "(", "(", "1j", "*", "pi", ")", "*", "m", ")", "/", "(", "2", "*", "N", ")", ")", ")", ")", "k", "=", "1", "return", "(", "z", ",", "p", ",", "k", ")" ]
return (z, p, k) for analog prototype of nth-order butterworth filter .
train
false
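As a sanity check on the formula above: for N = 2, m = [-1, 1], so the poles are p = -exp(+/- j*pi/4), roughly -0.7071 +/- 0.7071j, spaced on the left half of the unit circle, with no zeros and unit gain:

(z, p, k) = buttap(2)
# z = array([]), k = 1
# p is approximately [-0.7071+0.7071j, -0.7071-0.7071j]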
53,828
def create_version_h(svn_version):
    if ('-' in svn_version):
        while (svn_version.count('.') < 2):
            svn_version = svn_version.replace('-', '.0-')
    else:
        while (svn_version.count('.') < 2):
            svn_version += '.0'
    version_h = open(os.path.join(get_base_dir(), 'src', 'lxml', 'includes', 'lxml-version.h'), 'w')
    version_h.write(('#ifndef LXML_VERSION_STRING\n#define LXML_VERSION_STRING "%s"\n#endif\n' % svn_version))
    version_h.close()
[ "def", "create_version_h", "(", "svn_version", ")", ":", "if", "(", "'-'", "in", "svn_version", ")", ":", "while", "(", "svn_version", ".", "count", "(", "'.'", ")", "<", "2", ")", ":", "svn_version", "=", "svn_version", ".", "replace", "(", "'-'", ",", "'.0-'", ")", "else", ":", "while", "(", "svn_version", ".", "count", "(", "'.'", ")", "<", "2", ")", ":", "svn_version", "+=", "'.0'", "version_h", "=", "open", "(", "os", ".", "path", ".", "join", "(", "get_base_dir", "(", ")", ",", "'src'", ",", "'lxml'", ",", "'includes'", ",", "'lxml-version.h'", ")", ",", "'w'", ")", "version_h", ".", "write", "(", "(", "'#ifndef LXML_VERSION_STRING\\n#define LXML_VERSION_STRING \"%s\"\\n#endif\\n'", "%", "svn_version", ")", ")", "version_h", ".", "close", "(", ")" ]
create lxml-version.h .
train
false
53,829
@authenticated_json_view
@has_request_variables
def json_subscription_property(request, user_profile, subscription_data=REQ(validator=check_list(check_dict([('stream', check_string), ('property', check_string), ('value', check_variable_type([check_string, check_bool]))])))):
    if (request.method != 'POST'):
        return json_error(_('Invalid verb'))
    property_converters = {'color': check_string, 'in_home_view': check_bool, 'desktop_notifications': check_bool, 'audible_notifications': check_bool, 'pin_to_top': check_bool}
    response_data = []
    for change in subscription_data:
        stream_name = change['stream']
        property = change['property']
        value = change['value']
        if (property not in property_converters):
            return json_error((_('Unknown subscription property: %s') % (property,)))
        sub = get_subscription_or_die(stream_name, user_profile)[0]
        property_conversion = property_converters[property](property, value)
        if property_conversion:
            return json_error(property_conversion)
        do_change_subscription_property(user_profile, sub, stream_name, property, value)
        response_data.append({'stream': stream_name, 'property': property, 'value': value})
    return json_success({'subscription_data': response_data})
[ "@", "authenticated_json_view", "@", "has_request_variables", "def", "json_subscription_property", "(", "request", ",", "user_profile", ",", "subscription_data", "=", "REQ", "(", "validator", "=", "check_list", "(", "check_dict", "(", "[", "(", "'stream'", ",", "check_string", ")", ",", "(", "'property'", ",", "check_string", ")", ",", "(", "'value'", ",", "check_variable_type", "(", "[", "check_string", ",", "check_bool", "]", ")", ")", "]", ")", ")", ")", ")", ":", "if", "(", "request", ".", "method", "!=", "'POST'", ")", ":", "return", "json_error", "(", "_", "(", "'Invalid verb'", ")", ")", "property_converters", "=", "{", "'color'", ":", "check_string", ",", "'in_home_view'", ":", "check_bool", ",", "'desktop_notifications'", ":", "check_bool", ",", "'audible_notifications'", ":", "check_bool", ",", "'pin_to_top'", ":", "check_bool", "}", "response_data", "=", "[", "]", "for", "change", "in", "subscription_data", ":", "stream_name", "=", "change", "[", "'stream'", "]", "property", "=", "change", "[", "'property'", "]", "value", "=", "change", "[", "'value'", "]", "if", "(", "property", "not", "in", "property_converters", ")", ":", "return", "json_error", "(", "(", "_", "(", "'Unknown subscription property: %s'", ")", "%", "(", "property", ",", ")", ")", ")", "sub", "=", "get_subscription_or_die", "(", "stream_name", ",", "user_profile", ")", "[", "0", "]", "property_conversion", "=", "property_converters", "[", "property", "]", "(", "property", ",", "value", ")", "if", "property_conversion", ":", "return", "json_error", "(", "property_conversion", ")", "do_change_subscription_property", "(", "user_profile", ",", "sub", ",", "stream_name", ",", "property", ",", "value", ")", "response_data", ".", "append", "(", "{", "'stream'", ":", "stream_name", ",", "'property'", ":", "property", ",", "'value'", ":", "value", "}", ")", "return", "json_success", "(", "{", "'subscription_data'", ":", "response_data", "}", ")" ]
this is the entry point to changing subscription properties .
train
false
53,830
def project_time_week(row): try: thisdate = row['project_time.date'] except AttributeError: return current.messages['NONE'] if (not thisdate): return current.messages['NONE'] day = thisdate.date() monday = (day - datetime.timedelta(days=day.weekday())) return monday
[ "def", "project_time_week", "(", "row", ")", ":", "try", ":", "thisdate", "=", "row", "[", "'project_time.date'", "]", "except", "AttributeError", ":", "return", "current", ".", "messages", "[", "'NONE'", "]", "if", "(", "not", "thisdate", ")", ":", "return", "current", ".", "messages", "[", "'NONE'", "]", "day", "=", "thisdate", ".", "date", "(", ")", "monday", "=", "(", "day", "-", "datetime", ".", "timedelta", "(", "days", "=", "day", ".", "weekday", "(", ")", ")", ")", "return", "monday" ]
virtual field for project_time - returns the date of the monday of this entry .
train
false
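A minimal worked sketch of the Monday computation above, assuming only the standard library; the date is a hypothetical example.

import datetime

day = datetime.date(2023, 9, 14)  # a Thursday, so day.weekday() == 3
monday = day - datetime.timedelta(days=day.weekday())
assert monday == datetime.date(2023, 9, 11)  # the Monday of that week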
53,831
def start_time(pid): if ((not isinstance(pid, int)) or (pid < 0)): return None if stem.util.proc.is_available(): try: return float(stem.util.proc.stats(pid, stem.util.proc.Stat.START_TIME)[0]) except IOError: pass try: ps_results = call(('ps -p %s -o etime' % pid), []) if (len(ps_results) >= 2): etime = ps_results[1].strip() return (time.time() - stem.util.str_tools.parse_short_time_label(etime)) except: pass return None
[ "def", "start_time", "(", "pid", ")", ":", "if", "(", "(", "not", "isinstance", "(", "pid", ",", "int", ")", ")", "or", "(", "pid", "<", "0", ")", ")", ":", "return", "None", "if", "stem", ".", "util", ".", "proc", ".", "is_available", "(", ")", ":", "try", ":", "return", "float", "(", "stem", ".", "util", ".", "proc", ".", "stats", "(", "pid", ",", "stem", ".", "util", ".", "proc", ".", "Stat", ".", "START_TIME", ")", "[", "0", "]", ")", "except", "IOError", ":", "pass", "try", ":", "ps_results", "=", "call", "(", "(", "'ps -p %s -o etime'", "%", "pid", ")", ",", "[", "]", ")", "if", "(", "len", "(", "ps_results", ")", ">=", "2", ")", ":", "etime", "=", "ps_results", "[", "1", "]", ".", "strip", "(", ")", "return", "(", "time", ".", "time", "(", ")", "-", "stem", ".", "util", ".", "str_tools", ".", "parse_short_time_label", "(", "etime", ")", ")", "except", ":", "pass", "return", "None" ]
provides the unix timestamp when the given process started .
train
false
53,832
def init(mpstate): return SerialModule(mpstate)
[ "def", "init", "(", "mpstate", ")", ":", "return", "SerialModule", "(", "mpstate", ")" ]
initialise module .
train
false
53,834
def _get_search_rank(collection_id): rights = rights_manager.get_collection_rights(collection_id) rank = (_DEFAULT_RANK + (_STATUS_PUBLICIZED_BONUS if (rights.status == rights_manager.ACTIVITY_STATUS_PUBLICIZED) else 0)) return max(rank, 0)
[ "def", "_get_search_rank", "(", "collection_id", ")", ":", "rights", "=", "rights_manager", ".", "get_collection_rights", "(", "collection_id", ")", "rank", "=", "(", "_DEFAULT_RANK", "+", "(", "_STATUS_PUBLICIZED_BONUS", "if", "(", "rights", ".", "status", "==", "rights_manager", ".", "ACTIVITY_STATUS_PUBLICIZED", ")", "else", "0", ")", ")", "return", "max", "(", "rank", ",", "0", ")" ]
gets the search rank of a given collection .
train
false
53,836
def append_slash_redirect(environ, code=301): new_path = (environ['PATH_INFO'].strip('/') + '/') query_string = environ.get('QUERY_STRING') if query_string: new_path += ('?' + query_string) return redirect(new_path, code)
[ "def", "append_slash_redirect", "(", "environ", ",", "code", "=", "301", ")", ":", "new_path", "=", "(", "environ", "[", "'PATH_INFO'", "]", ".", "strip", "(", "'/'", ")", "+", "'/'", ")", "query_string", "=", "environ", ".", "get", "(", "'QUERY_STRING'", ")", "if", "query_string", ":", "new_path", "+=", "(", "'?'", "+", "query_string", ")", "return", "redirect", "(", "new_path", ",", "code", ")" ]
redirect to the same url but with a slash appended .
train
true
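A short sketch of how the redirect target is derived, using a hypothetical WSGI environ; only the path computation from the snippet is reproduced here, not the redirect response itself.

environ = {'PATH_INFO': '/users', 'QUERY_STRING': 'page=2'}  # hypothetical request
new_path = environ['PATH_INFO'].strip('/') + '/'
if environ.get('QUERY_STRING'):
    new_path += '?' + environ['QUERY_STRING']
assert new_path == 'users/?page=2'  # note: strip('/') removes the leading slash too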
53,838
def group_list_of_dict(array): result = defaultdict(list) for item in array: for (key, value) in item.items(): result[key] += (value if isinstance(value, list) else [value]) return result
[ "def", "group_list_of_dict", "(", "array", ")", ":", "result", "=", "defaultdict", "(", "list", ")", "for", "item", "in", "array", ":", "for", "(", "key", ",", "value", ")", "in", "item", ".", "items", "(", ")", ":", "result", "[", "key", "]", "+=", "(", "value", "if", "isinstance", "(", "value", ",", "list", ")", "else", "[", "value", "]", ")", "return", "result" ]
helper method to group a list of dicts into a single dict collecting all possible values .
train
false
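A worked usage example, assuming the helper above is in scope; scalar values are wrapped in single-element lists while list values are extended.

merged = group_list_of_dict([{'a': 1}, {'a': [2, 3], 'b': 4}])
assert dict(merged) == {'a': [1, 2, 3], 'b': [4]}  # merged is a defaultdict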
53,839
def bind_expression_to_resources(expr, resources): if (resources is None): resources = {} return expr._subs({k: bz.data(v, dshape=k.dshape) for (k, v) in iteritems(resources)})
[ "def", "bind_expression_to_resources", "(", "expr", ",", "resources", ")", ":", "if", "(", "resources", "is", "None", ")", ":", "resources", "=", "{", "}", "return", "expr", ".", "_subs", "(", "{", "k", ":", "bz", ".", "data", "(", "v", ",", "dshape", "=", "k", ".", "dshape", ")", "for", "(", "k", ",", "v", ")", "in", "iteritems", "(", "resources", ")", "}", ")" ]
bind a blaze expression to resources .
train
true
53,840
@release.command() def changelog(): print changelog_as_markdown()
[ "@", "release", ".", "command", "(", ")", "def", "changelog", "(", ")", ":", "print", "changelog_as_markdown", "(", ")" ]
get the most recent version's changelog as markdown .
train
false
53,842
def cscore(v1, v2): v1_b = v1.astype(bool) v2_b = v2.astype(bool) sij = (v1_b * v2_b).sum() return ((v1_b.sum() - sij) * (v2_b.sum() - sij))
[ "def", "cscore", "(", "v1", ",", "v2", ")", ":", "v1_b", "=", "v1", ".", "astype", "(", "bool", ")", "v2_b", "=", "v2", ".", "astype", "(", "bool", ")", "sij", "=", "(", "v1_b", "*", "v2_b", ")", ".", "sum", "(", ")", "return", "(", "(", "v1_b", ".", "sum", "(", ")", "-", "sij", ")", "*", "(", "v2_b", ".", "sum", "(", ")", "-", "sij", ")", ")" ]
calculate c-score between v1 and v2 according to stone and roberts 1990 .
train
false
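A worked numeric example, assuming numpy and the function above are available.

import numpy as np

v1 = np.array([1, 1, 0, 0])
v2 = np.array([1, 0, 1, 0])
# one shared site (sij = 1), two occupied sites each: (2 - 1) * (2 - 1) = 1
assert cscore(v1, v2) == 1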
53,843
def compute_grad(J, f): if isinstance(J, LinearOperator): return J.rmatvec(f) else: return J.T.dot(f)
[ "def", "compute_grad", "(", "J", ",", "f", ")", ":", "if", "isinstance", "(", "J", ",", "LinearOperator", ")", ":", "return", "J", ".", "rmatvec", "(", "f", ")", "else", ":", "return", "J", ".", "T", ".", "dot", "(", "f", ")" ]
compute gradient of the least-squares cost function .
train
false
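A quick sketch of the dense branch, assuming numpy and the function above; for a plain ndarray the gradient is simply J.T dotted with f.

import numpy as np

J = np.array([[1.0, 2.0],
              [3.0, 4.0]])
f = np.array([1.0, 1.0])
# J.T.dot(f) = [1 + 3, 2 + 4] = [4, 6]
assert np.allclose(compute_grad(J, f), [4.0, 6.0])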
53,844
def require_module(module): def check_require_module(f): try: __import__(module) return f except ImportError: def new_f(self, *args, **kwargs): self.skipTest(('%s can not be imported.' % module)) new_f.__name__ = f.__name__ return new_f return check_require_module
[ "def", "require_module", "(", "module", ")", ":", "def", "check_require_module", "(", "f", ")", ":", "try", ":", "__import__", "(", "module", ")", "return", "f", "except", "ImportError", ":", "def", "new_f", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", ":", "self", ".", "skipTest", "(", "(", "'%s can not be imported.'", "%", "module", ")", ")", "new_f", ".", "__name__", "=", "f", ".", "__name__", "return", "new_f", "return", "check_require_module" ]
check if the given module can be imported; skip the test with a message otherwise .
train
false
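A minimal usage sketch, assuming a unittest-style test class; the module name is a hypothetical example.

import unittest

class ExampleTest(unittest.TestCase):
    @require_module('numpy')  # the test is skipped with a message if numpy is absent
    def test_needs_numpy(self):
        import numpy
        self.assertTrue(hasattr(numpy, 'ndarray'))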
53,845
def bayesian_info_criterion_lsq(ssr, n_params, n_samples): return bayesian_info_criterion((((-0.5) * n_samples) * np.log((ssr / n_samples))), n_params, n_samples)
[ "def", "bayesian_info_criterion_lsq", "(", "ssr", ",", "n_params", ",", "n_samples", ")", ":", "return", "bayesian_info_criterion", "(", "(", "(", "(", "-", "0.5", ")", "*", "n_samples", ")", "*", "np", ".", "log", "(", "(", "ssr", "/", "n_samples", ")", ")", ")", ",", "n_params", ",", "n_samples", ")" ]
computes the bayesian information criterion assuming that the observations come from a gaussian distribution .
train
false
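For reference, the quantity computed here is the standard least-squares BIC; below is a standalone sketch assuming numpy, independent of the wrapped bayesian_info_criterion helper, and valid only up to the additive constants dropped by the Gaussian log-likelihood approximation.

import numpy as np

def bic_lsq_sketch(ssr, n_params, n_samples):
    # BIC = n * ln(SSR / n) + k * ln(n)
    return n_samples * np.log(ssr / n_samples) + n_params * np.log(n_samples)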
53,846
def with_inline_css(html_without_css): css_filepath = settings.NOTIFICATION_EMAIL_CSS if (not css_filepath.startswith('/')): css_filepath = file_path_finder(settings.NOTIFICATION_EMAIL_CSS) if css_filepath: with open(css_filepath, 'r') as _file: css_content = _file.read() html_with_inline_css = pynliner.fromString(((('<style>' + css_content) + '</style>') + html_without_css)) return html_with_inline_css return html_without_css
[ "def", "with_inline_css", "(", "html_without_css", ")", ":", "css_filepath", "=", "settings", ".", "NOTIFICATION_EMAIL_CSS", "if", "(", "not", "css_filepath", ".", "startswith", "(", "'/'", ")", ")", ":", "css_filepath", "=", "file_path_finder", "(", "settings", ".", "NOTIFICATION_EMAIL_CSS", ")", "if", "css_filepath", ":", "with", "open", "(", "css_filepath", ",", "'r'", ")", "as", "_file", ":", "css_content", "=", "_file", ".", "read", "(", ")", "html_with_inline_css", "=", "pynliner", ".", "fromString", "(", "(", "(", "(", "'<style>'", "+", "css_content", ")", "+", "'</style>'", ")", "+", "html_without_css", ")", ")", "return", "html_with_inline_css", "return", "html_without_css" ]
returns html with inline css if the css file path exists, else returns the html without the inline css .
train
false
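A rough sketch of the pynliner step in isolation; the exact output whitespace may vary across pynliner versions.

import pynliner

html = pynliner.fromString('<style>p { color: red; }</style><p>hi</p>')
# yields roughly: <p style="color: red">hi</p>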
53,848
@testing.requires_testing_data def test_preload_modify(): tempdir = _TempDir() rng = np.random.RandomState(0) for preload in [False, True, 'memmap.dat']: raw = read_raw_fif(fif_fname, preload=preload) nsamp = ((raw.last_samp - raw.first_samp) + 1) picks = pick_types(raw.info, meg='grad', exclude='bads') data = rng.randn(len(picks), (nsamp // 2)) try: raw[picks, :(nsamp // 2)] = data except RuntimeError as err: if (not preload): continue else: raise err tmp_fname = op.join(tempdir, 'raw.fif') raw.save(tmp_fname, overwrite=True) raw_new = read_raw_fif(tmp_fname) (data_new, _) = raw_new[picks, :(nsamp / 2)] assert_allclose(data, data_new)
[ "@", "testing", ".", "requires_testing_data", "def", "test_preload_modify", "(", ")", ":", "tempdir", "=", "_TempDir", "(", ")", "rng", "=", "np", ".", "random", ".", "RandomState", "(", "0", ")", "for", "preload", "in", "[", "False", ",", "True", ",", "'memmap.dat'", "]", ":", "raw", "=", "read_raw_fif", "(", "fif_fname", ",", "preload", "=", "preload", ")", "nsamp", "=", "(", "(", "raw", ".", "last_samp", "-", "raw", ".", "first_samp", ")", "+", "1", ")", "picks", "=", "pick_types", "(", "raw", ".", "info", ",", "meg", "=", "'grad'", ",", "exclude", "=", "'bads'", ")", "data", "=", "rng", ".", "randn", "(", "len", "(", "picks", ")", ",", "(", "nsamp", "//", "2", ")", ")", "try", ":", "raw", "[", "picks", ",", ":", "(", "nsamp", "//", "2", ")", "]", "=", "data", "except", "RuntimeError", "as", "err", ":", "if", "(", "not", "preload", ")", ":", "continue", "else", ":", "raise", "err", "tmp_fname", "=", "op", ".", "join", "(", "tempdir", ",", "'raw.fif'", ")", "raw", ".", "save", "(", "tmp_fname", ",", "overwrite", "=", "True", ")", "raw_new", "=", "read_raw_fif", "(", "tmp_fname", ")", "(", "data_new", ",", "_", ")", "=", "raw_new", "[", "picks", ",", ":", "(", "nsamp", "/", "2", ")", "]", "assert_allclose", "(", "data", ",", "data_new", ")" ]
test preloading and modifying data .
train
false
53,849
def sample_content(name): with open(('tests/components/media_player/yamaha_samples/%s' % name), encoding='utf-8') as content: return content.read()
[ "def", "sample_content", "(", "name", ")", ":", "with", "open", "(", "(", "'tests/components/media_player/yamaha_samples/%s'", "%", "name", ")", ",", "encoding", "=", "'utf-8'", ")", "as", "content", ":", "return", "content", ".", "read", "(", ")" ]
read content into a string from a file .
train
false
53,850
def write_trace(expt_dir, best_val, best_job, n_candidates, n_pending, n_complete): trace_fh = open(os.path.join(expt_dir, 'trace.csv'), 'a') trace_fh.write(('%d,%f,%d,%d,%d,%d\n' % (time.time(), best_val, best_job, n_candidates, n_pending, n_complete))) trace_fh.close()
[ "def", "write_trace", "(", "expt_dir", ",", "best_val", ",", "best_job", ",", "n_candidates", ",", "n_pending", ",", "n_complete", ")", ":", "trace_fh", "=", "open", "(", "os", ".", "path", ".", "join", "(", "expt_dir", ",", "'trace.csv'", ")", ",", "'a'", ")", "trace_fh", ".", "write", "(", "(", "'%d,%f,%d,%d,%d,%d\\n'", "%", "(", "time", ".", "time", "(", ")", ",", "best_val", ",", "best_job", ",", "n_candidates", ",", "n_pending", ",", "n_complete", ")", ")", ")", "trace_fh", ".", "close", "(", ")" ]
append current experiment state to trace file .
train
false
53,854
def squared_loss(y_true, y_pred): return (((y_true - y_pred) ** 2).mean() / 2)
[ "def", "squared_loss", "(", "y_true", ",", "y_pred", ")", ":", "return", "(", "(", "(", "y_true", "-", "y_pred", ")", "**", "2", ")", ".", "mean", "(", ")", "/", "2", ")" ]
compute the squared loss for regression .
train
false
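A worked example, assuming numpy arrays and the function above.

import numpy as np

y_true = np.array([1.0, 2.0])
y_pred = np.array([1.0, 4.0])
# mean squared error is (0 + 4) / 2 = 2, halved to 1
assert squared_loss(y_true, y_pred) == 1.0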
53,856
def code_name(code, number=0): hash_digest = hashlib.md5(code.encode('utf-8')).hexdigest() return '<ipython-input-{0}-{1}>'.format(number, hash_digest[:12])
[ "def", "code_name", "(", "code", ",", "number", "=", "0", ")", ":", "hash_digest", "=", "hashlib", ".", "md5", "(", "code", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")", "return", "'<ipython-input-{0}-{1}>'", ".", "format", "(", "number", ",", "hash_digest", "[", ":", "12", "]", ")" ]
compute a unique name for a piece of code, for caching .
train
true
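A small usage sketch; the digest suffix is derived from the source string, so the name is deterministic across calls.

name = code_name("print('hi')", number=3)
assert name.startswith('<ipython-input-3-') and name.endswith('>')
assert code_name("print('hi')", number=3) == name  # same input, same name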
53,857
def pack_bitstring(bits): ret = '' i = packed = 0 for bit in bits: if bit: packed += 128 i += 1 if (i == 8): ret += chr(packed) i = packed = 0 else: packed >>= 1 if ((i > 0) and (i < 8)): packed >>= (7 - i) ret += chr(packed) return ret
[ "def", "pack_bitstring", "(", "bits", ")", ":", "ret", "=", "''", "i", "=", "packed", "=", "0", "for", "bit", "in", "bits", ":", "if", "bit", ":", "packed", "+=", "128", "i", "+=", "1", "if", "(", "i", "==", "8", ")", ":", "ret", "+=", "chr", "(", "packed", ")", "i", "=", "packed", "=", "0", "else", ":", "packed", ">>=", "1", "if", "(", "(", "i", ">", "0", ")", "and", "(", "i", "<", "8", ")", ")", ":", "packed", ">>=", "(", "7", "-", "i", ")", "ret", "+=", "chr", "(", "packed", ")", "return", "ret" ]
creates a string out of an array of bits .
train
false
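A worked example, assuming the function above; note that the first bit in the list ends up as the least significant bit of the packed byte.

packed = pack_bitstring([1, 0, 1, 1, 0, 0, 0, 0])
# bits [1, 0, 1, 1] read LSB-first give 0b00001101 == 13
assert packed == chr(13)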
53,859
def generate_fused_type(codes): cytypes = map((lambda x: CY_TYPES[x]), codes) name = (codes + '_number_t') declaration = [(('ctypedef fused ' + name) + ':')] for cytype in cytypes: declaration.append((' ' + cytype)) declaration = '\n'.join(declaration) return (name, declaration)
[ "def", "generate_fused_type", "(", "codes", ")", ":", "cytypes", "=", "map", "(", "(", "lambda", "x", ":", "CY_TYPES", "[", "x", "]", ")", ",", "codes", ")", "name", "=", "(", "codes", "+", "'_number_t'", ")", "declaration", "=", "[", "(", "(", "'ctypedef fused '", "+", "name", ")", "+", "':'", ")", "]", "for", "cytype", "in", "cytypes", ":", "declaration", ".", "append", "(", "(", "' '", "+", "cytype", ")", ")", "declaration", "=", "'\\n'", ".", "join", "(", "declaration", ")", "return", "(", "name", ",", "declaration", ")" ]
generate the name of, and the cython code for, a fused type .
train
false
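A self-contained sketch with a hypothetical CY_TYPES mapping; the real table lives at module level in the generator script, so the function is re-sketched here rather than called directly.

CY_TYPES = {'d': 'double', 'l': 'long'}  # hypothetical stand-in for the real table

def generate_fused_type_sketch(codes):
    name = codes + '_number_t'
    lines = ['ctypedef fused ' + name + ':']
    lines += ['    ' + CY_TYPES[c] for c in codes]
    return name, '\n'.join(lines)

name, decl = generate_fused_type_sketch('dl')
assert name == 'dl_number_t'
# decl reads: ctypedef fused dl_number_t: / double / long (newline-separated)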
53,860
@timefunc(1) def conesearch_timer(*args, **kwargs): return conesearch(*args, **kwargs)
[ "@", "timefunc", "(", "1", ")", "def", "conesearch_timer", "(", "*", "args", ",", "**", "kwargs", ")", ":", "return", "conesearch", "(", "*", "args", ",", "**", "kwargs", ")" ]
time a single cone search using astropy .
train
false
53,861
def is_valid_connection_id(entry): return is_valid_circuit_id(entry)
[ "def", "is_valid_connection_id", "(", "entry", ")", ":", "return", "is_valid_circuit_id", "(", "entry", ")" ]
checks if a string is in a valid format for a connection identifier .
train
false
53,862
def join_list(delimeter): def join_string_lambda(value): return delimeter.join(value) return join_string_lambda
[ "def", "join_list", "(", "delimeter", ")", ":", "def", "join_string_lambda", "(", "value", ")", ":", "return", "delimeter", ".", "join", "(", "value", ")", "return", "join_string_lambda" ]
join a list into a string using the given delimiter .
train
false
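A quick usage example of the returned closure.

join_csv = join_list(', ')
assert join_csv(['a', 'b', 'c']) == 'a, b, c'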
53,863
def get_elliptic_curves(): return _EllipticCurve._get_elliptic_curves(_lib)
[ "def", "get_elliptic_curves", "(", ")", ":", "return", "_EllipticCurve", ".", "_get_elliptic_curves", "(", "_lib", ")" ]
return a set of objects representing the elliptic curves supported in the openssl build in use .
train
false
53,865
def reset(): _runtime.reset()
[ "def", "reset", "(", ")", ":", "_runtime", ".", "reset", "(", ")" ]
reset the cuda subsystem for the current thread .
train
false
53,866
def escape4xml(value): if isinstance(value, int): value = str(value) value = _re_amp.sub('&amp;', value) value = value.replace('"', '&quot;').replace("'", '&apos;') value = value.replace('<', '&lt;').replace('>', '&gt;') if isinstance(value, unicode): value = value.encode('ascii', 'xmlcharrefreplace') return value
[ "def", "escape4xml", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "int", ")", ":", "value", "=", "str", "(", "value", ")", "value", "=", "_re_amp", ".", "sub", "(", "'&amp;'", ",", "value", ")", "value", "=", "value", ".", "replace", "(", "'\"'", ",", "'&quot;'", ")", ".", "replace", "(", "\"'\"", ",", "'&apos;'", ")", "value", "=", "value", ".", "replace", "(", "'<'", ",", "'&lt;'", ")", ".", "replace", "(", "'>'", ",", "'&gt;'", ")", "if", "isinstance", "(", "value", ",", "unicode", ")", ":", "value", "=", "value", ".", "encode", "(", "'ascii'", ",", "'xmlcharrefreplace'", ")", "return", "value" ]
escape some chars that can't be present in an xml value .
train
false
53,867
def owner(*paths): return __salt__['lowpkg.owner'](*paths)
[ "def", "owner", "(", "*", "paths", ")", ":", "return", "__salt__", "[", "'lowpkg.owner'", "]", "(", "*", "paths", ")" ]
return the name of the package that owns the file .
train
false
53,868
def set_default_etree(etree): from pyamf import xml return xml.set_default_interface(etree)
[ "def", "set_default_etree", "(", "etree", ")", ":", "from", "pyamf", "import", "xml", "return", "xml", ".", "set_default_interface", "(", "etree", ")" ]
sets the default interface that will be called upon to both de/serialise xml entities .
train
false
53,869
def find_it(): with RopeContext() as ctx: (_, offset) = env.get_offset_params() try: occurrences = findit.find_occurrences(ctx.project, ctx.resource, offset) except exceptions.BadIdentifierError: occurrences = [] lst = [] for oc in occurrences: lst.append(dict(filename=oc.resource.path, text=(env.lines[(oc.lineno - 1)] if (oc.resource.real_path == env.curbuf.name) else ''), lnum=oc.lineno)) env.let('loclist._loclist', lst)
[ "def", "find_it", "(", ")", ":", "with", "RopeContext", "(", ")", "as", "ctx", ":", "(", "_", ",", "offset", ")", "=", "env", ".", "get_offset_params", "(", ")", "try", ":", "occurrences", "=", "findit", ".", "find_occurrences", "(", "ctx", ".", "project", ",", "ctx", ".", "resource", ",", "offset", ")", "except", "exceptions", ".", "BadIdentifierError", ":", "occurrences", "=", "[", "]", "lst", "=", "[", "]", "for", "oc", "in", "occurrences", ":", "lst", ".", "append", "(", "dict", "(", "filename", "=", "oc", ".", "resource", ".", "path", ",", "text", "=", "(", "env", ".", "lines", "[", "(", "oc", ".", "lineno", "-", "1", ")", "]", "if", "(", "oc", ".", "resource", ".", "real_path", "==", "env", ".", "curbuf", ".", "name", ")", "else", "''", ")", ",", "lnum", "=", "oc", ".", "lineno", ")", ")", "env", ".", "let", "(", "'loclist._loclist'", ",", "lst", ")" ]
find occurrences .
train
false
53,870
def inroot_notwritable(prefix): return (abspath(prefix).startswith(context.root_dir) and (not context.root_writable))
[ "def", "inroot_notwritable", "(", "prefix", ")", ":", "return", "(", "abspath", "(", "prefix", ")", ".", "startswith", "(", "context", ".", "root_dir", ")", "and", "(", "not", "context", ".", "root_writable", ")", ")" ]
return true if the prefix is under root and root is not writeable .
train
false
53,871
def find_tables(clause, check_columns=False, include_aliases=False, include_joins=False, include_selects=False, include_crud=False): tables = [] _visitors = {} if include_selects: _visitors['select'] = _visitors['compound_select'] = tables.append if include_joins: _visitors['join'] = tables.append if include_aliases: _visitors['alias'] = tables.append if include_crud: _visitors['insert'] = _visitors['update'] = _visitors['delete'] = (lambda ent: tables.append(ent.table)) if check_columns: def visit_column(column): tables.append(column.table) _visitors['column'] = visit_column _visitors['table'] = tables.append visitors.traverse(clause, {'column_collections': False}, _visitors) return tables
[ "def", "find_tables", "(", "clause", ",", "check_columns", "=", "False", ",", "include_aliases", "=", "False", ",", "include_joins", "=", "False", ",", "include_selects", "=", "False", ",", "include_crud", "=", "False", ")", ":", "tables", "=", "[", "]", "_visitors", "=", "{", "}", "if", "include_selects", ":", "_visitors", "[", "'select'", "]", "=", "_visitors", "[", "'compound_select'", "]", "=", "tables", ".", "append", "if", "include_joins", ":", "_visitors", "[", "'join'", "]", "=", "tables", ".", "append", "if", "include_aliases", ":", "_visitors", "[", "'alias'", "]", "=", "tables", ".", "append", "if", "include_crud", ":", "_visitors", "[", "'insert'", "]", "=", "_visitors", "[", "'update'", "]", "=", "_visitors", "[", "'delete'", "]", "=", "(", "lambda", "ent", ":", "tables", ".", "append", "(", "ent", ".", "table", ")", ")", "if", "check_columns", ":", "def", "visit_column", "(", "column", ")", ":", "tables", ".", "append", "(", "column", ".", "table", ")", "_visitors", "[", "'column'", "]", "=", "visit_column", "_visitors", "[", "'table'", "]", "=", "tables", ".", "append", "visitors", ".", "traverse", "(", "clause", ",", "{", "'column_collections'", ":", "False", "}", ",", "_visitors", ")", "return", "tables" ]
locate table objects within the given expression .
train
false
53,873
def get_benchmark_returns(symbol, start_date, end_date): return pd.read_csv(format_yahoo_index_url(symbol, start_date, end_date), parse_dates=['Date'], index_col='Date', usecols=['Adj Close', 'Date'], squeeze=True).sort_index().tz_localize('UTC').pct_change(1).iloc[1:]
[ "def", "get_benchmark_returns", "(", "symbol", ",", "start_date", ",", "end_date", ")", ":", "return", "pd", ".", "read_csv", "(", "format_yahoo_index_url", "(", "symbol", ",", "start_date", ",", "end_date", ")", ",", "parse_dates", "=", "[", "'Date'", "]", ",", "index_col", "=", "'Date'", ",", "usecols", "=", "[", "'Adj Close'", ",", "'Date'", "]", ",", "squeeze", "=", "True", ")", ".", "sort_index", "(", ")", ".", "tz_localize", "(", "'UTC'", ")", ".", "pct_change", "(", "1", ")", ".", "iloc", "[", "1", ":", "]" ]
get a series of benchmark returns from yahoo .
train
false
53,876
def set_date(name, date): cmd = 'chage -d {0} {1}'.format(date, name) return (not __salt__['cmd.run'](cmd, python_shell=False))
[ "def", "set_date", "(", "name", ",", "date", ")", ":", "cmd", "=", "'chage -d {0} {1}'", ".", "format", "(", "date", ",", "name", ")", "return", "(", "not", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", ")" ]
sets the date the password was last changed, expressed as days since the epoch .
train
false
53,877
def get_os_vendor(): logging.warn('utils.get_os_vendor() is deprecated, please use autotest.client.shared.distro.detect() instead') vendor = 'Unknown' if os.path.isfile('/etc/SuSE-release'): return 'SUSE' issue = '/etc/issue' if (not os.path.isfile(issue)): return vendor if file_contains_pattern(issue, 'Red Hat'): vendor = 'Red Hat' if file_contains_pattern(issue, 'CentOS'): vendor = 'Red Hat' elif file_contains_pattern(issue, 'Fedora'): vendor = 'Fedora' elif file_contains_pattern(issue, 'SUSE'): vendor = 'SUSE' elif file_contains_pattern(issue, 'Ubuntu'): vendor = 'Ubuntu' elif file_contains_pattern(issue, 'Debian'): vendor = 'Debian' logging.debug('Detected OS vendor: %s', vendor) return vendor
[ "def", "get_os_vendor", "(", ")", ":", "logging", ".", "warn", "(", "'utils.get_os_vendor() is deprecated, please use autotest.client.shared.distro.detect() instead'", ")", "vendor", "=", "'Unknown'", "if", "os", ".", "path", ".", "isfile", "(", "'/etc/SuSE-release'", ")", ":", "return", "'SUSE'", "issue", "=", "'/etc/issue'", "if", "(", "not", "os", ".", "path", ".", "isfile", "(", "issue", ")", ")", ":", "return", "vendor", "if", "file_contains_pattern", "(", "issue", ",", "'Red Hat'", ")", ":", "vendor", "=", "'Red Hat'", "if", "file_contains_pattern", "(", "issue", ",", "'CentOS'", ")", ":", "vendor", "=", "'Red Hat'", "elif", "file_contains_pattern", "(", "issue", ",", "'Fedora'", ")", ":", "vendor", "=", "'Fedora'", "elif", "file_contains_pattern", "(", "issue", ",", "'SUSE'", ")", ":", "vendor", "=", "'SUSE'", "elif", "file_contains_pattern", "(", "issue", ",", "'Ubuntu'", ")", ":", "vendor", "=", "'Ubuntu'", "elif", "file_contains_pattern", "(", "issue", ",", "'Debian'", ")", ":", "vendor", "=", "'Debian'", "logging", ".", "debug", "(", "'Detected OS vendor: %s'", ",", "vendor", ")", "return", "vendor" ]
try to guess what the os vendor is .
train
false
53,878
def path_to_local_track_uri(relpath): if isinstance(relpath, compat.text_type): relpath = relpath.encode(u'utf-8') return (u'local:track:%s' % urllib.quote(relpath))
[ "def", "path_to_local_track_uri", "(", "relpath", ")", ":", "if", "isinstance", "(", "relpath", ",", "compat", ".", "text_type", ")", ":", "relpath", "=", "relpath", ".", "encode", "(", "u'utf-8'", ")", "return", "(", "u'local:track:%s'", "%", "urllib", ".", "quote", "(", "relpath", ")", ")" ]
convert a path relative to the local/media_dir config value to a local track uri .
train
false
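A worked example; quote here is urllib.quote under Python 2 semantics, as in the snippet (urllib.parse.quote is the Python 3 equivalent).

assert path_to_local_track_uri(u'foo bar.mp3') == 'local:track:foo%20bar.mp3'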