id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
35,526
def cwd(pid): if stem.util.proc.is_available(): try: return stem.util.proc.cwd(pid) except IOError: pass logging_prefix = ('cwd(%s):' % pid) if is_available('pwdx'): results = call((GET_CWD_PWDX % pid), None) if (not results): log.debug(("%s pwdx didn't return any results" % logging_prefix)) elif results[0].endswith('No such process'): log.debug(('%s pwdx processes reported for this pid' % logging_prefix)) elif ((len(results) != 1) or (results[0].count(' ') != 1) or (not results[0].startswith(('%s: ' % pid)))): log.debug(('%s we got unexpected output from pwdx: %s' % (logging_prefix, results))) else: return results[0].split(' ', 1)[1].strip() if is_available('lsof'): results = call((GET_CWD_LSOF % pid), []) if ((len(results) == 2) and results[1].startswith('n/')): lsof_result = results[1][1:].strip() if (' ' not in lsof_result): return lsof_result else: log.debug(('%s we got unexpected output from lsof: %s' % (logging_prefix, results))) return None
[ "def", "cwd", "(", "pid", ")", ":", "if", "stem", ".", "util", ".", "proc", ".", "is_available", "(", ")", ":", "try", ":", "return", "stem", ".", "util", ".", "proc", ".", "cwd", "(", "pid", ")", "except", "IOError", ":", "pass", "logging_prefix", "=", "(", "'cwd(%s):'", "%", "pid", ")", "if", "is_available", "(", "'pwdx'", ")", ":", "results", "=", "call", "(", "(", "GET_CWD_PWDX", "%", "pid", ")", ",", "None", ")", "if", "(", "not", "results", ")", ":", "log", ".", "debug", "(", "(", "\"%s pwdx didn't return any results\"", "%", "logging_prefix", ")", ")", "elif", "results", "[", "0", "]", ".", "endswith", "(", "'No such process'", ")", ":", "log", ".", "debug", "(", "(", "'%s pwdx processes reported for this pid'", "%", "logging_prefix", ")", ")", "elif", "(", "(", "len", "(", "results", ")", "!=", "1", ")", "or", "(", "results", "[", "0", "]", ".", "count", "(", "' '", ")", "!=", "1", ")", "or", "(", "not", "results", "[", "0", "]", ".", "startswith", "(", "(", "'%s: '", "%", "pid", ")", ")", ")", ")", ":", "log", ".", "debug", "(", "(", "'%s we got unexpected output from pwdx: %s'", "%", "(", "logging_prefix", ",", "results", ")", ")", ")", "else", ":", "return", "results", "[", "0", "]", ".", "split", "(", "' '", ",", "1", ")", "[", "1", "]", ".", "strip", "(", ")", "if", "is_available", "(", "'lsof'", ")", ":", "results", "=", "call", "(", "(", "GET_CWD_LSOF", "%", "pid", ")", ",", "[", "]", ")", "if", "(", "(", "len", "(", "results", ")", "==", "2", ")", "and", "results", "[", "1", "]", ".", "startswith", "(", "'n/'", ")", ")", ":", "lsof_result", "=", "results", "[", "1", "]", "[", "1", ":", "]", ".", "strip", "(", ")", "if", "(", "' '", "not", "in", "lsof_result", ")", ":", "return", "lsof_result", "else", ":", "log", ".", "debug", "(", "(", "'%s we got unexpected output from lsof: %s'", "%", "(", "logging_prefix", ",", "results", ")", ")", ")", "return", "None" ]
provides the current working directory for the given process .
train
false
35,527
@verbose def _read_coil_defs(elekta_defs=False, verbose=None): coil_dir = op.join(op.split(__file__)[0], '..', 'data') coils = list() if elekta_defs: coils += _read_coil_def_file(op.join(coil_dir, 'coil_def_Elekta.dat')) coils += _read_coil_def_file(op.join(coil_dir, 'coil_def.dat')) return coils
[ "@", "verbose", "def", "_read_coil_defs", "(", "elekta_defs", "=", "False", ",", "verbose", "=", "None", ")", ":", "coil_dir", "=", "op", ".", "join", "(", "op", ".", "split", "(", "__file__", ")", "[", "0", "]", ",", "'..'", ",", "'data'", ")", "coils", "=", "list", "(", ")", "if", "elekta_defs", ":", "coils", "+=", "_read_coil_def_file", "(", "op", ".", "join", "(", "coil_dir", ",", "'coil_def_Elekta.dat'", ")", ")", "coils", "+=", "_read_coil_def_file", "(", "op", ".", "join", "(", "coil_dir", ",", "'coil_def.dat'", ")", ")", "return", "coils" ]
read a coil definition file .
train
false
35,529
def has_course_author_access(user, course_key): return has_studio_write_access(user, course_key)
[ "def", "has_course_author_access", "(", "user", ",", "course_key", ")", ":", "return", "has_studio_write_access", "(", "user", ",", "course_key", ")" ]
old name for has_studio_write_access .
train
false
35,530
def unique_rows(ar): if (ar.ndim != 2): raise ValueError(('unique_rows() only makes sense for 2D arrays, got %dd' % ar.ndim)) ar = np.ascontiguousarray(ar) ar_row_view = ar.view(('|S%d' % (ar.itemsize * ar.shape[1]))) (_, unique_row_indices) = np.unique(ar_row_view, return_index=True) ar_out = ar[unique_row_indices] return ar_out
[ "def", "unique_rows", "(", "ar", ")", ":", "if", "(", "ar", ".", "ndim", "!=", "2", ")", ":", "raise", "ValueError", "(", "(", "'unique_rows() only makes sense for 2D arrays, got %dd'", "%", "ar", ".", "ndim", ")", ")", "ar", "=", "np", ".", "ascontiguousarray", "(", "ar", ")", "ar_row_view", "=", "ar", ".", "view", "(", "(", "'|S%d'", "%", "(", "ar", ".", "itemsize", "*", "ar", ".", "shape", "[", "1", "]", ")", ")", ")", "(", "_", ",", "unique_row_indices", ")", "=", "np", ".", "unique", "(", "ar_row_view", ",", "return_index", "=", "True", ")", "ar_out", "=", "ar", "[", "unique_row_indices", "]", "return", "ar_out" ]
remove repeated rows from a 2d array .
train
false
35,531
def findCheckerFactories(): return getPlugins(ICheckerFactory)
[ "def", "findCheckerFactories", "(", ")", ":", "return", "getPlugins", "(", "ICheckerFactory", ")" ]
find all objects that implement l{icheckerfactory} .
train
false
35,532
def sina_download_by_vkey(vkey, title=None, output_dir='.', merge=True, info_only=False): url = ('http://video.sina.com/v/flvideo/%s_0.flv' % vkey) (type, ext, size) = url_info(url) print_info(site_info, title, 'flv', size) if (not info_only): download_urls([url], title, 'flv', size, output_dir=output_dir, merge=merge)
[ "def", "sina_download_by_vkey", "(", "vkey", ",", "title", "=", "None", ",", "output_dir", "=", "'.'", ",", "merge", "=", "True", ",", "info_only", "=", "False", ")", ":", "url", "=", "(", "'http://video.sina.com/v/flvideo/%s_0.flv'", "%", "vkey", ")", "(", "type", ",", "ext", ",", "size", ")", "=", "url_info", "(", "url", ")", "print_info", "(", "site_info", ",", "title", ",", "'flv'", ",", "size", ")", "if", "(", "not", "info_only", ")", ":", "download_urls", "(", "[", "url", "]", ",", "title", ",", "'flv'", ",", "size", ",", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ")" ]
downloads a sina video by its unique vkey .
train
true
35,533
def __get_metadata(vm_): md = config.get_cloud_config_value('metadata', vm_, __opts__, default='{}', search_global=False) try: metadata = literal_eval(md) except Exception: metadata = None if ((not metadata) or (not isinstance(metadata, dict))): metadata = {'items': [{'key': 'salt-cloud-profile', 'value': vm_['profile']}]} else: metadata['salt-cloud-profile'] = vm_['profile'] items = [] for (k, v) in six.iteritems(metadata): items.append({'key': k, 'value': v}) metadata = {'items': items} return metadata
[ "def", "__get_metadata", "(", "vm_", ")", ":", "md", "=", "config", ".", "get_cloud_config_value", "(", "'metadata'", ",", "vm_", ",", "__opts__", ",", "default", "=", "'{}'", ",", "search_global", "=", "False", ")", "try", ":", "metadata", "=", "literal_eval", "(", "md", ")", "except", "Exception", ":", "metadata", "=", "None", "if", "(", "(", "not", "metadata", ")", "or", "(", "not", "isinstance", "(", "metadata", ",", "dict", ")", ")", ")", ":", "metadata", "=", "{", "'items'", ":", "[", "{", "'key'", ":", "'salt-cloud-profile'", ",", "'value'", ":", "vm_", "[", "'profile'", "]", "}", "]", "}", "else", ":", "metadata", "[", "'salt-cloud-profile'", "]", "=", "vm_", "[", "'profile'", "]", "items", "=", "[", "]", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "metadata", ")", ":", "items", ".", "append", "(", "{", "'key'", ":", "k", ",", "'value'", ":", "v", "}", ")", "metadata", "=", "{", "'items'", ":", "items", "}", "return", "metadata" ]
get configured metadata and add salt-cloud-profile .
train
true
35,534
def _loop_exits_early(loop): loop_nodes = (astroid.For, astroid.While) for child in loop.body: if isinstance(child, loop_nodes): for orelse in (child.orelse or ()): for _ in orelse.nodes_of_class(astroid.Break, skip_klass=loop_nodes): return True continue for _ in child.nodes_of_class(astroid.Break, skip_klass=loop_nodes): return True return False
[ "def", "_loop_exits_early", "(", "loop", ")", ":", "loop_nodes", "=", "(", "astroid", ".", "For", ",", "astroid", ".", "While", ")", "for", "child", "in", "loop", ".", "body", ":", "if", "isinstance", "(", "child", ",", "loop_nodes", ")", ":", "for", "orelse", "in", "(", "child", ".", "orelse", "or", "(", ")", ")", ":", "for", "_", "in", "orelse", ".", "nodes_of_class", "(", "astroid", ".", "Break", ",", "skip_klass", "=", "loop_nodes", ")", ":", "return", "True", "continue", "for", "_", "in", "child", ".", "nodes_of_class", "(", "astroid", ".", "Break", ",", "skip_klass", "=", "loop_nodes", ")", ":", "return", "True", "return", "False" ]
returns true if a loop has a break statement in its body .
train
false
35,535
def device_to_host(dst, src, size, stream=0): varargs = [] if stream: assert isinstance(stream, Stream) fn = driver.cuMemcpyDtoHAsync varargs.append(stream.handle) else: fn = driver.cuMemcpyDtoH fn(host_pointer(dst), device_pointer(src), size, *varargs)
[ "def", "device_to_host", "(", "dst", ",", "src", ",", "size", ",", "stream", "=", "0", ")", ":", "varargs", "=", "[", "]", "if", "stream", ":", "assert", "isinstance", "(", "stream", ",", "Stream", ")", "fn", "=", "driver", ".", "cuMemcpyDtoHAsync", "varargs", ".", "append", "(", "stream", ".", "handle", ")", "else", ":", "fn", "=", "driver", ".", "cuMemcpyDtoH", "fn", "(", "host_pointer", "(", "dst", ")", ",", "device_pointer", "(", "src", ")", ",", "size", ",", "*", "varargs", ")" ]
note: the underlying data pointer from the host data buffer is used and it should not be changed until the operation which can be asynchronous completes .
train
false
35,536
def wait_for_snapshot_status(client, snapshot_id, status): body = client.show_snapshot(snapshot_id)['snapshot'] snapshot_status = body['status'] start = int(time.time()) while (snapshot_status != status): time.sleep(client.build_interval) body = client.show_snapshot(snapshot_id)['snapshot'] snapshot_status = body['status'] if (snapshot_status == 'error'): raise exceptions.SnapshotBuildErrorException(snapshot_id=snapshot_id) if ((int(time.time()) - start) >= client.build_timeout): message = ('Snapshot %s failed to reach %s status (current %s) within the required time (%s s).' % (snapshot_id, status, snapshot_status, client.build_timeout)) raise lib_exc.TimeoutException(message)
[ "def", "wait_for_snapshot_status", "(", "client", ",", "snapshot_id", ",", "status", ")", ":", "body", "=", "client", ".", "show_snapshot", "(", "snapshot_id", ")", "[", "'snapshot'", "]", "snapshot_status", "=", "body", "[", "'status'", "]", "start", "=", "int", "(", "time", ".", "time", "(", ")", ")", "while", "(", "snapshot_status", "!=", "status", ")", ":", "time", ".", "sleep", "(", "client", ".", "build_interval", ")", "body", "=", "client", ".", "show_snapshot", "(", "snapshot_id", ")", "[", "'snapshot'", "]", "snapshot_status", "=", "body", "[", "'status'", "]", "if", "(", "snapshot_status", "==", "'error'", ")", ":", "raise", "exceptions", ".", "SnapshotBuildErrorException", "(", "snapshot_id", "=", "snapshot_id", ")", "if", "(", "(", "int", "(", "time", ".", "time", "(", ")", ")", "-", "start", ")", ">=", "client", ".", "build_timeout", ")", ":", "message", "=", "(", "'Snapshot %s failed to reach %s status (current %s) within the required time (%s s).'", "%", "(", "snapshot_id", ",", "status", ",", "snapshot_status", ",", "client", ".", "build_timeout", ")", ")", "raise", "lib_exc", ".", "TimeoutException", "(", "message", ")" ]
waits for a snapshot to reach a given status .
train
false
35,537
def break_args_options(line): tokens = line.split(' ') args = [] options = tokens[:] for token in tokens: if (token.startswith('-') or token.startswith('--')): break else: args.append(token) options.pop(0) return (' '.join(args), ' '.join(options))
[ "def", "break_args_options", "(", "line", ")", ":", "tokens", "=", "line", ".", "split", "(", "' '", ")", "args", "=", "[", "]", "options", "=", "tokens", "[", ":", "]", "for", "token", "in", "tokens", ":", "if", "(", "token", ".", "startswith", "(", "'-'", ")", "or", "token", ".", "startswith", "(", "'--'", ")", ")", ":", "break", "else", ":", "args", ".", "append", "(", "token", ")", "options", ".", "pop", "(", "0", ")", "return", "(", "' '", ".", "join", "(", "args", ")", ",", "' '", ".", "join", "(", "options", ")", ")" ]
break up the line into an args and options string .
train
true
35,538
def require_partition(func): @wraps(func) def new_func(*args, **kw): if (not is_partition(*args[:2])): raise nx.NetworkXError('`partition` is not a valid partition of the nodes of G') return func(*args, **kw) return new_func
[ "def", "require_partition", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "new_func", "(", "*", "args", ",", "**", "kw", ")", ":", "if", "(", "not", "is_partition", "(", "*", "args", "[", ":", "2", "]", ")", ")", ":", "raise", "nx", ".", "NetworkXError", "(", "'`partition` is not a valid partition of the nodes of G'", ")", "return", "func", "(", "*", "args", ",", "**", "kw", ")", "return", "new_func" ]
decorator that raises an exception if a partition is not a valid partition of the nodes of a graph .
train
false
35,539
def make_string_set(info, name): return StringSet(info, name, case_flags=make_case_flags(info))
[ "def", "make_string_set", "(", "info", ",", "name", ")", ":", "return", "StringSet", "(", "info", ",", "name", ",", "case_flags", "=", "make_case_flags", "(", "info", ")", ")" ]
makes a string set .
train
false
35,540
def test_setv_builtins(): cant_compile(u'(setv None 42)') cant_compile(u'(defn get [&rest args] 42)') can_compile(u'(defclass A [] (defn get [self] 42))') can_compile(u'\n (defclass A []\n (defn get [self] 42)\n (defclass B []\n (defn get [self] 42))\n (defn if* [self] 0))\n ')
[ "def", "test_setv_builtins", "(", ")", ":", "cant_compile", "(", "u'(setv None 42)'", ")", "cant_compile", "(", "u'(defn get [&rest args] 42)'", ")", "can_compile", "(", "u'(defclass A [] (defn get [self] 42))'", ")", "can_compile", "(", "u'\\n (defclass A []\\n (defn get [self] 42)\\n (defclass B []\\n (defn get [self] 42))\\n (defn if* [self] 0))\\n '", ")" ]
ensure that assigning to a builtin fails .
train
false
35,541
def markup_serialize_tokens(tokens, markup_func): for token in tokens: for pre in token.pre_tags: (yield pre) html = token.html() html = markup_func(html, token.annotation) if token.trailing_whitespace: html += token.trailing_whitespace (yield html) for post in token.post_tags: (yield post)
[ "def", "markup_serialize_tokens", "(", "tokens", ",", "markup_func", ")", ":", "for", "token", "in", "tokens", ":", "for", "pre", "in", "token", ".", "pre_tags", ":", "(", "yield", "pre", ")", "html", "=", "token", ".", "html", "(", ")", "html", "=", "markup_func", "(", "html", ",", "token", ".", "annotation", ")", "if", "token", ".", "trailing_whitespace", ":", "html", "+=", "token", ".", "trailing_whitespace", "(", "yield", "html", ")", "for", "post", "in", "token", ".", "post_tags", ":", "(", "yield", "post", ")" ]
serialize the list of tokens into a list of text chunks .
train
true
35,542
@register.inclusion_tag(engine.get_template('inclusion.html'), takes_context=True) def inclusion_no_params_with_context_from_template(context): return {'result': ('inclusion_no_params_with_context_from_template - Expected result (context value: %s)' % context['value'])}
[ "@", "register", ".", "inclusion_tag", "(", "engine", ".", "get_template", "(", "'inclusion.html'", ")", ",", "takes_context", "=", "True", ")", "def", "inclusion_no_params_with_context_from_template", "(", "context", ")", ":", "return", "{", "'result'", ":", "(", "'inclusion_no_params_with_context_from_template - Expected result (context value: %s)'", "%", "context", "[", "'value'", "]", ")", "}" ]
expected inclusion_no_params_with_context_from_template __doc__ .
train
false
35,546
def update_topic_similarities(data): _validate_topic_similarities(data) data = data.splitlines() data = list(csv.reader(data)) topics_list = data[0] topic_similarities_values = data[1:] topic_similarities_dict = get_topic_similarities_dict() for (row_ind, topic_1) in enumerate(topics_list): for (col_ind, topic_2) in enumerate(topics_list): topic_similarities_dict[topic_1][topic_2] = float(topic_similarities_values[row_ind][col_ind]) save_topic_similarities(topic_similarities_dict)
[ "def", "update_topic_similarities", "(", "data", ")", ":", "_validate_topic_similarities", "(", "data", ")", "data", "=", "data", ".", "splitlines", "(", ")", "data", "=", "list", "(", "csv", ".", "reader", "(", "data", ")", ")", "topics_list", "=", "data", "[", "0", "]", "topic_similarities_values", "=", "data", "[", "1", ":", "]", "topic_similarities_dict", "=", "get_topic_similarities_dict", "(", ")", "for", "(", "row_ind", ",", "topic_1", ")", "in", "enumerate", "(", "topics_list", ")", ":", "for", "(", "col_ind", ",", "topic_2", ")", "in", "enumerate", "(", "topics_list", ")", ":", "topic_similarities_dict", "[", "topic_1", "]", "[", "topic_2", "]", "=", "float", "(", "topic_similarities_values", "[", "row_ind", "]", "[", "col_ind", "]", ")", "save_topic_similarities", "(", "topic_similarities_dict", ")" ]
updates all topic similarity pairs given by data .
train
false
35,547
def _execute_with_revision(q, rev_table, context): model = context['model'] session = model.Session revision_id = context.get('revision_id') revision_date = context.get('revision_date') if revision_id: revision = session.query(context['model'].Revision).filter_by(id=revision_id).first() if (not revision): raise logic.NotFound revision_date = revision.timestamp q = q.where((rev_table.c.revision_timestamp <= revision_date)) q = q.where((rev_table.c.expired_timestamp > revision_date)) return session.execute(q)
[ "def", "_execute_with_revision", "(", "q", ",", "rev_table", ",", "context", ")", ":", "model", "=", "context", "[", "'model'", "]", "session", "=", "model", ".", "Session", "revision_id", "=", "context", ".", "get", "(", "'revision_id'", ")", "revision_date", "=", "context", ".", "get", "(", "'revision_date'", ")", "if", "revision_id", ":", "revision", "=", "session", ".", "query", "(", "context", "[", "'model'", "]", ".", "Revision", ")", ".", "filter_by", "(", "id", "=", "revision_id", ")", ".", "first", "(", ")", "if", "(", "not", "revision", ")", ":", "raise", "logic", ".", "NotFound", "revision_date", "=", "revision", ".", "timestamp", "q", "=", "q", ".", "where", "(", "(", "rev_table", ".", "c", ".", "revision_timestamp", "<=", "revision_date", ")", ")", "q", "=", "q", ".", "where", "(", "(", "rev_table", ".", "c", ".", "expired_timestamp", ">", "revision_date", ")", ")", "return", "session", ".", "execute", "(", "q", ")" ]
takes an sqlalchemy query (q) that is a select on an object revision table .
train
false
35,548
def version_in(dirname, indexname=None): from whoosh.filedb.filestore import FileStorage storage = FileStorage(dirname) return version(storage, indexname=indexname)
[ "def", "version_in", "(", "dirname", ",", "indexname", "=", "None", ")", ":", "from", "whoosh", ".", "filedb", ".", "filestore", "import", "FileStorage", "storage", "=", "FileStorage", "(", "dirname", ")", "return", "version", "(", "storage", ",", "indexname", "=", "indexname", ")" ]
returns a tuple of .
train
false
35,550
def get_timezone_gmt(datetime=None, width='long', locale=LC_TIME): if (datetime is None): datetime = datetime_.utcnow() elif isinstance(datetime, (int, long)): datetime = datetime_.utcfromtimestamp(datetime).time() if (datetime.tzinfo is None): datetime = datetime.replace(tzinfo=UTC) locale = Locale.parse(locale) offset = datetime.tzinfo.utcoffset(datetime) seconds = ((((offset.days * 24) * 60) * 60) + offset.seconds) (hours, seconds) = divmod(seconds, 3600) if (width == 'short'): pattern = u'%+03d%02d' else: pattern = (locale.zone_formats['gmt'] % '%+03d:%02d') return (pattern % (hours, (seconds // 60)))
[ "def", "get_timezone_gmt", "(", "datetime", "=", "None", ",", "width", "=", "'long'", ",", "locale", "=", "LC_TIME", ")", ":", "if", "(", "datetime", "is", "None", ")", ":", "datetime", "=", "datetime_", ".", "utcnow", "(", ")", "elif", "isinstance", "(", "datetime", ",", "(", "int", ",", "long", ")", ")", ":", "datetime", "=", "datetime_", ".", "utcfromtimestamp", "(", "datetime", ")", ".", "time", "(", ")", "if", "(", "datetime", ".", "tzinfo", "is", "None", ")", ":", "datetime", "=", "datetime", ".", "replace", "(", "tzinfo", "=", "UTC", ")", "locale", "=", "Locale", ".", "parse", "(", "locale", ")", "offset", "=", "datetime", ".", "tzinfo", ".", "utcoffset", "(", "datetime", ")", "seconds", "=", "(", "(", "(", "(", "offset", ".", "days", "*", "24", ")", "*", "60", ")", "*", "60", ")", "+", "offset", ".", "seconds", ")", "(", "hours", ",", "seconds", ")", "=", "divmod", "(", "seconds", ",", "3600", ")", "if", "(", "width", "==", "'short'", ")", ":", "pattern", "=", "u'%+03d%02d'", "else", ":", "pattern", "=", "(", "locale", ".", "zone_formats", "[", "'gmt'", "]", "%", "'%+03d:%02d'", ")", "return", "(", "pattern", "%", "(", "hours", ",", "(", "seconds", "//", "60", ")", ")", ")" ]
return the timezone associated with the given datetime object formatted as string indicating the offset from gmt .
train
false
35,552
def is_nthpow_residue(a, n, m): (a, n, m) = [as_int(i) for i in (a, n, m)] if (m <= 0): raise ValueError('m must be > 0') if (n < 0): raise ValueError('n must be >= 0') if (a < 0): raise ValueError('a must be >= 0') if (n == 0): if (m == 1): return False return (a == 1) if (n == 1): return True if (n == 2): return is_quad_residue(a, m) return _is_nthpow_residue_bign(a, n, m)
[ "def", "is_nthpow_residue", "(", "a", ",", "n", ",", "m", ")", ":", "(", "a", ",", "n", ",", "m", ")", "=", "[", "as_int", "(", "i", ")", "for", "i", "in", "(", "a", ",", "n", ",", "m", ")", "]", "if", "(", "m", "<=", "0", ")", ":", "raise", "ValueError", "(", "'m must be > 0'", ")", "if", "(", "n", "<", "0", ")", ":", "raise", "ValueError", "(", "'n must be >= 0'", ")", "if", "(", "a", "<", "0", ")", ":", "raise", "ValueError", "(", "'a must be >= 0'", ")", "if", "(", "n", "==", "0", ")", ":", "if", "(", "m", "==", "1", ")", ":", "return", "False", "return", "(", "a", "==", "1", ")", "if", "(", "n", "==", "1", ")", ":", "return", "True", "if", "(", "n", "==", "2", ")", ":", "return", "is_quad_residue", "(", "a", ",", "m", ")", "return", "_is_nthpow_residue_bign", "(", "a", ",", "n", ",", "m", ")" ]
returns true if x**n == a has solutions .
train
false
35,553
def _get_adjusted_bar(x, bar_width, series_index, num_plots): adjust = (((((-0.5) * num_plots) - 1) + series_index) * bar_width) return [(x_val + adjust) for x_val in x]
[ "def", "_get_adjusted_bar", "(", "x", ",", "bar_width", ",", "series_index", ",", "num_plots", ")", ":", "adjust", "=", "(", "(", "(", "(", "(", "-", "0.5", ")", "*", "num_plots", ")", "-", "1", ")", "+", "series_index", ")", "*", "bar_width", ")", "return", "[", "(", "x_val", "+", "adjust", ")", "for", "x_val", "in", "x", "]" ]
adjust the list x to take the multiple series into account .
train
false
35,556
def group_update_member(group_id, users_id_list): group = get_object(UserGroup, id=group_id) if group: group.user_set.clear() for user_id in users_id_list: user = get_object(UserGroup, id=user_id) if isinstance(user, UserGroup): group.user_set.add(user)
[ "def", "group_update_member", "(", "group_id", ",", "users_id_list", ")", ":", "group", "=", "get_object", "(", "UserGroup", ",", "id", "=", "group_id", ")", "if", "group", ":", "group", ".", "user_set", ".", "clear", "(", ")", "for", "user_id", "in", "users_id_list", ":", "user", "=", "get_object", "(", "UserGroup", ",", "id", "=", "user_id", ")", "if", "isinstance", "(", "user", ",", "UserGroup", ")", ":", "group", ".", "user_set", ".", "add", "(", "user", ")" ]
user group update member .
train
false
35,558
def Simplify(parser_return): if parser_return.tree: node = SimplifyNode(parser_return.tree) ValidateNode(node) return node return parser_return
[ "def", "Simplify", "(", "parser_return", ")", ":", "if", "parser_return", ".", "tree", ":", "node", "=", "SimplifyNode", "(", "parser_return", ".", "tree", ")", "ValidateNode", "(", "node", ")", "return", "node", "return", "parser_return" ]
simplifies the output of the parser .
train
false
35,559
def index_alt(): s3_redirect_default(URL(f='person'))
[ "def", "index_alt", "(", ")", ":", "s3_redirect_default", "(", "URL", "(", "f", "=", "'person'", ")", ")" ]
module homepage for non-admin users when no cms content found .
train
false
35,560
def to_symbol(i): if (i == 0): return '' if (i == 11): return '+' if (i == 12): return '*' return str((i - 1))
[ "def", "to_symbol", "(", "i", ")", ":", "if", "(", "i", "==", "0", ")", ":", "return", "''", "if", "(", "i", "==", "11", ")", ":", "return", "'+'", "if", "(", "i", "==", "12", ")", ":", "return", "'*'", "return", "str", "(", "(", "i", "-", "1", ")", ")" ]
covert ids to text .
train
false
35,561
@cleanup def test_remove_from_figure_with_gridspec(): _test_remove_from_figure(True)
[ "@", "cleanup", "def", "test_remove_from_figure_with_gridspec", "(", ")", ":", "_test_remove_from_figure", "(", "True", ")" ]
make sure that remove_from_figure removes the colorbar and properly restores the gridspec .
train
false
35,562
def is_current_event_safe(): for (_, filename, _, _, _, _) in inspect.stack(): if filename.endswith(UNSAFE_FILES): return False return True
[ "def", "is_current_event_safe", "(", ")", ":", "for", "(", "_", ",", "filename", ",", "_", ",", "_", ",", "_", ",", "_", ")", "in", "inspect", ".", "stack", "(", ")", ":", "if", "filename", ".", "endswith", "(", "UNSAFE_FILES", ")", ":", "return", "False", "return", "True" ]
tests the current stack for unsafe locations that would likely cause recursion if an attempt to send to sentry was made .
train
false
35,563
@defer.inlineCallbacks def getMaster(case, reactor, config_dict): basedir = FilePath(case.mktemp()) basedir.createDirectory() config_dict['buildbotNetUsageData'] = None master = BuildMaster(basedir.path, reactor=reactor, config_loader=DictLoader(config_dict)) if ('db_url' not in config_dict): config_dict['db_url'] = 'sqlite://' master.config.db['db_url'] = config_dict['db_url'] (yield master.db.setup(check_version=False)) (yield master.db.model.upgrade()) master.db.setup = (lambda : None) (yield master.startService()) case.addCleanup(master.db.pool.shutdown) case.addCleanup(master.stopService) defer.returnValue(master)
[ "@", "defer", ".", "inlineCallbacks", "def", "getMaster", "(", "case", ",", "reactor", ",", "config_dict", ")", ":", "basedir", "=", "FilePath", "(", "case", ".", "mktemp", "(", ")", ")", "basedir", ".", "createDirectory", "(", ")", "config_dict", "[", "'buildbotNetUsageData'", "]", "=", "None", "master", "=", "BuildMaster", "(", "basedir", ".", "path", ",", "reactor", "=", "reactor", ",", "config_loader", "=", "DictLoader", "(", "config_dict", ")", ")", "if", "(", "'db_url'", "not", "in", "config_dict", ")", ":", "config_dict", "[", "'db_url'", "]", "=", "'sqlite://'", "master", ".", "config", ".", "db", "[", "'db_url'", "]", "=", "config_dict", "[", "'db_url'", "]", "(", "yield", "master", ".", "db", ".", "setup", "(", "check_version", "=", "False", ")", ")", "(", "yield", "master", ".", "db", ".", "model", ".", "upgrade", "(", ")", ")", "master", ".", "db", ".", "setup", "=", "(", "lambda", ":", "None", ")", "(", "yield", "master", ".", "startService", "(", ")", ")", "case", ".", "addCleanup", "(", "master", ".", "db", ".", "pool", ".", "shutdown", ")", "case", ".", "addCleanup", "(", "master", ".", "stopService", ")", "defer", ".", "returnValue", "(", "master", ")" ]
create a started buildmaster with the given configuration .
train
false
35,565
def libvlc_media_list_player_set_playback_mode(p_mlp, e_mode): f = (_Cfunctions.get('libvlc_media_list_player_set_playback_mode', None) or _Cfunction('libvlc_media_list_player_set_playback_mode', ((1,), (1,)), None, None, MediaListPlayer, PlaybackMode)) return f(p_mlp, e_mode)
[ "def", "libvlc_media_list_player_set_playback_mode", "(", "p_mlp", ",", "e_mode", ")", ":", "f", "=", "(", "_Cfunctions", ".", "get", "(", "'libvlc_media_list_player_set_playback_mode'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_media_list_player_set_playback_mode'", ",", "(", "(", "1", ",", ")", ",", "(", "1", ",", ")", ")", ",", "None", ",", "None", ",", "MediaListPlayer", ",", "PlaybackMode", ")", ")", "return", "f", "(", "p_mlp", ",", "e_mode", ")" ]
sets the playback mode for the playlist .
train
true
35,566
@pytest.mark.parametrize('which', ['next', 'prev', 'next-category', 'prev-category']) def test_completion_item_focus_no_model(which, completionview, qtbot): with qtbot.assertNotEmitted(completionview.selection_changed): completionview.completion_item_focus(which) model = base.BaseCompletionModel() filtermodel = sortfilter.CompletionFilterModel(model, parent=completionview) completionview.set_model(filtermodel) completionview.set_model(None) with qtbot.assertNotEmitted(completionview.selection_changed): completionview.completion_item_focus(which)
[ "@", "pytest", ".", "mark", ".", "parametrize", "(", "'which'", ",", "[", "'next'", ",", "'prev'", ",", "'next-category'", ",", "'prev-category'", "]", ")", "def", "test_completion_item_focus_no_model", "(", "which", ",", "completionview", ",", "qtbot", ")", ":", "with", "qtbot", ".", "assertNotEmitted", "(", "completionview", ".", "selection_changed", ")", ":", "completionview", ".", "completion_item_focus", "(", "which", ")", "model", "=", "base", ".", "BaseCompletionModel", "(", ")", "filtermodel", "=", "sortfilter", ".", "CompletionFilterModel", "(", "model", ",", "parent", "=", "completionview", ")", "completionview", ".", "set_model", "(", "filtermodel", ")", "completionview", ".", "set_model", "(", "None", ")", "with", "qtbot", ".", "assertNotEmitted", "(", "completionview", ".", "selection_changed", ")", ":", "completionview", ".", "completion_item_focus", "(", "which", ")" ]
test that selectionchanged is not fired when the model is none .
train
false
35,567
def test_lof_precomputed(random_state=42): rng = np.random.RandomState(random_state) X = rng.random_sample((10, 4)) Y = rng.random_sample((3, 4)) DXX = metrics.pairwise_distances(X, metric='euclidean') DYX = metrics.pairwise_distances(Y, X, metric='euclidean') lof_X = neighbors.LocalOutlierFactor(n_neighbors=3) lof_X.fit(X) pred_X_X = lof_X._predict() pred_X_Y = lof_X._predict(Y) lof_D = neighbors.LocalOutlierFactor(n_neighbors=3, algorithm='brute', metric='precomputed') lof_D.fit(DXX) pred_D_X = lof_D._predict() pred_D_Y = lof_D._predict(DYX) assert_array_almost_equal(pred_X_X, pred_D_X) assert_array_almost_equal(pred_X_Y, pred_D_Y)
[ "def", "test_lof_precomputed", "(", "random_state", "=", "42", ")", ":", "rng", "=", "np", ".", "random", ".", "RandomState", "(", "random_state", ")", "X", "=", "rng", ".", "random_sample", "(", "(", "10", ",", "4", ")", ")", "Y", "=", "rng", ".", "random_sample", "(", "(", "3", ",", "4", ")", ")", "DXX", "=", "metrics", ".", "pairwise_distances", "(", "X", ",", "metric", "=", "'euclidean'", ")", "DYX", "=", "metrics", ".", "pairwise_distances", "(", "Y", ",", "X", ",", "metric", "=", "'euclidean'", ")", "lof_X", "=", "neighbors", ".", "LocalOutlierFactor", "(", "n_neighbors", "=", "3", ")", "lof_X", ".", "fit", "(", "X", ")", "pred_X_X", "=", "lof_X", ".", "_predict", "(", ")", "pred_X_Y", "=", "lof_X", ".", "_predict", "(", "Y", ")", "lof_D", "=", "neighbors", ".", "LocalOutlierFactor", "(", "n_neighbors", "=", "3", ",", "algorithm", "=", "'brute'", ",", "metric", "=", "'precomputed'", ")", "lof_D", ".", "fit", "(", "DXX", ")", "pred_D_X", "=", "lof_D", ".", "_predict", "(", ")", "pred_D_Y", "=", "lof_D", ".", "_predict", "(", "DYX", ")", "assert_array_almost_equal", "(", "pred_X_X", ",", "pred_D_X", ")", "assert_array_almost_equal", "(", "pred_X_Y", ",", "pred_D_Y", ")" ]
tests lof with a distance matrix .
train
false
35,569
def safe_obtain(proxy): if (type(proxy) in [list, str, bytes, dict, set, type(None)]): return proxy conn = object.__getattribute__(proxy, '____conn__')() return json.loads(conn.root.json_dumps(proxy))
[ "def", "safe_obtain", "(", "proxy", ")", ":", "if", "(", "type", "(", "proxy", ")", "in", "[", "list", ",", "str", ",", "bytes", ",", "dict", ",", "set", ",", "type", "(", "None", ")", "]", ")", ":", "return", "proxy", "conn", "=", "object", ".", "__getattribute__", "(", "proxy", ",", "'____conn__'", ")", "(", ")", "return", "json", ".", "loads", "(", "conn", ".", "root", ".", "json_dumps", "(", "proxy", ")", ")" ]
safe version of rpycs rpyc .
train
false
35,570
def rewrite_video_url(cdn_base_url, original_video_url): if ((not cdn_base_url) or (not original_video_url)): return None parsed = urlparse(original_video_url) rewritten_url = ((cdn_base_url.rstrip('/') + '/') + parsed.path.lstrip('/')) validator = URLValidator() try: validator(rewritten_url) return rewritten_url except ValidationError: log.warn('Invalid CDN rewrite URL encountered, %s', rewritten_url) return None
[ "def", "rewrite_video_url", "(", "cdn_base_url", ",", "original_video_url", ")", ":", "if", "(", "(", "not", "cdn_base_url", ")", "or", "(", "not", "original_video_url", ")", ")", ":", "return", "None", "parsed", "=", "urlparse", "(", "original_video_url", ")", "rewritten_url", "=", "(", "(", "cdn_base_url", ".", "rstrip", "(", "'/'", ")", "+", "'/'", ")", "+", "parsed", ".", "path", ".", "lstrip", "(", "'/'", ")", ")", "validator", "=", "URLValidator", "(", ")", "try", ":", "validator", "(", "rewritten_url", ")", "return", "rewritten_url", "except", "ValidationError", ":", "log", ".", "warn", "(", "'Invalid CDN rewrite URL encountered, %s'", ",", "rewritten_url", ")", "return", "None" ]
returns a re-written video url for cases when an alternate source has been configured and is selected using factors like user location .
train
false
35,571
def covariance_game(nums_actions, rho, random_state=None): N = len(nums_actions) if (N <= 1): raise ValueError('length of nums_actions must be at least 2') if (not (((-1) / (N - 1)) <= rho <= 1)): lb = ('-1' if (N == 2) else '-1/{0}'.format((N - 1))) raise ValueError('rho must be in [{0}, 1]'.format(lb)) mean = np.zeros(N) cov = np.empty((N, N)) cov.fill(rho) cov[(range(N), range(N))] = 1 random_state = check_random_state(random_state) payoff_profile_array = random_state.multivariate_normal(mean, cov, nums_actions) g = NormalFormGame(payoff_profile_array) return g
[ "def", "covariance_game", "(", "nums_actions", ",", "rho", ",", "random_state", "=", "None", ")", ":", "N", "=", "len", "(", "nums_actions", ")", "if", "(", "N", "<=", "1", ")", ":", "raise", "ValueError", "(", "'length of nums_actions must be at least 2'", ")", "if", "(", "not", "(", "(", "(", "-", "1", ")", "/", "(", "N", "-", "1", ")", ")", "<=", "rho", "<=", "1", ")", ")", ":", "lb", "=", "(", "'-1'", "if", "(", "N", "==", "2", ")", "else", "'-1/{0}'", ".", "format", "(", "(", "N", "-", "1", ")", ")", ")", "raise", "ValueError", "(", "'rho must be in [{0}, 1]'", ".", "format", "(", "lb", ")", ")", "mean", "=", "np", ".", "zeros", "(", "N", ")", "cov", "=", "np", ".", "empty", "(", "(", "N", ",", "N", ")", ")", "cov", ".", "fill", "(", "rho", ")", "cov", "[", "(", "range", "(", "N", ")", ",", "range", "(", "N", ")", ")", "]", "=", "1", "random_state", "=", "check_random_state", "(", "random_state", ")", "payoff_profile_array", "=", "random_state", ".", "multivariate_normal", "(", "mean", ",", "cov", ",", "nums_actions", ")", "g", "=", "NormalFormGame", "(", "payoff_profile_array", ")", "return", "g" ]
return a random normalformgame instance where the payoff profiles are drawn independently from the standard multi-normal with the covariance of any pair of payoffs equal to rho .
train
true
35,572
def asymmetric_round_price_to_penny(price, prefer_round_down, diff=(0.0095 - 0.005)): epsilon = (float_info.epsilon * 10) diff = (diff - epsilon) rounded = round((price - (diff if prefer_round_down else (- diff))), 2) if zp_math.tolerant_equals(rounded, 0.0): return 0.0 return rounded
[ "def", "asymmetric_round_price_to_penny", "(", "price", ",", "prefer_round_down", ",", "diff", "=", "(", "0.0095", "-", "0.005", ")", ")", ":", "epsilon", "=", "(", "float_info", ".", "epsilon", "*", "10", ")", "diff", "=", "(", "diff", "-", "epsilon", ")", "rounded", "=", "round", "(", "(", "price", "-", "(", "diff", "if", "prefer_round_down", "else", "(", "-", "diff", ")", ")", ")", ",", "2", ")", "if", "zp_math", ".", "tolerant_equals", "(", "rounded", ",", "0.0", ")", ":", "return", "0.0", "return", "rounded" ]
asymmetric rounding function for adjusting prices to two places in a way that "improves" the price .
train
true
35,573
def split_tag(tag): version = tag.split('--', 1)[0] build = tag.split('--', 1)[1] return (version, build)
[ "def", "split_tag", "(", "tag", ")", ":", "version", "=", "tag", ".", "split", "(", "'--'", ",", "1", ")", "[", "0", "]", "build", "=", "tag", ".", "split", "(", "'--'", ",", "1", ")", "[", "1", "]", "return", "(", "version", ",", "build", ")" ]
split mulled image name into conda version and conda build .
train
false
35,574
def read_conll(file_): sents = [] for sent_str in file_.read().strip().split(u'\n\n'): ids = [] words = [] heads = [] labels = [] tags = [] for (i, line) in enumerate(sent_str.split(u'\n')): (word, pos_string, head_idx, label) = _parse_line(line) words.append(word) if (head_idx < 0): head_idx = i ids.append(i) heads.append(head_idx) labels.append(label) tags.append(pos_string) text = u' '.join(words) annot = (ids, words, tags, heads, labels, ([u'O'] * len(ids))) sents.append((None, [(annot, [])])) return sents
[ "def", "read_conll", "(", "file_", ")", ":", "sents", "=", "[", "]", "for", "sent_str", "in", "file_", ".", "read", "(", ")", ".", "strip", "(", ")", ".", "split", "(", "u'\\n\\n'", ")", ":", "ids", "=", "[", "]", "words", "=", "[", "]", "heads", "=", "[", "]", "labels", "=", "[", "]", "tags", "=", "[", "]", "for", "(", "i", ",", "line", ")", "in", "enumerate", "(", "sent_str", ".", "split", "(", "u'\\n'", ")", ")", ":", "(", "word", ",", "pos_string", ",", "head_idx", ",", "label", ")", "=", "_parse_line", "(", "line", ")", "words", ".", "append", "(", "word", ")", "if", "(", "head_idx", "<", "0", ")", ":", "head_idx", "=", "i", "ids", ".", "append", "(", "i", ")", "heads", ".", "append", "(", "head_idx", ")", "labels", ".", "append", "(", "label", ")", "tags", ".", "append", "(", "pos_string", ")", "text", "=", "u' '", ".", "join", "(", "words", ")", "annot", "=", "(", "ids", ",", "words", ",", "tags", ",", "heads", ",", "labels", ",", "(", "[", "u'O'", "]", "*", "len", "(", "ids", ")", ")", ")", "sents", ".", "append", "(", "(", "None", ",", "[", "(", "annot", ",", "[", "]", ")", "]", ")", ")", "return", "sents" ]
read a standard conll/malt-style format .
train
false
35,576
def user_followee_list(context, data_dict): _check_access('user_followee_list', context, data_dict) if (not context.get('skip_validation')): schema = (context.get('schema') or ckan.logic.schema.default_follow_user_schema()) (data_dict, errors) = _validate(data_dict, schema, context) if errors: raise ValidationError(errors) model = context['model'] user_id = _get_or_bust(data_dict, 'id') followees = model.UserFollowingUser.followee_list(user_id) users = [model.User.get(followee.object_id) for followee in followees] users = [user for user in users if (user is not None)] return model_dictize.user_list_dictize(users, context)
[ "def", "user_followee_list", "(", "context", ",", "data_dict", ")", ":", "_check_access", "(", "'user_followee_list'", ",", "context", ",", "data_dict", ")", "if", "(", "not", "context", ".", "get", "(", "'skip_validation'", ")", ")", ":", "schema", "=", "(", "context", ".", "get", "(", "'schema'", ")", "or", "ckan", ".", "logic", ".", "schema", ".", "default_follow_user_schema", "(", ")", ")", "(", "data_dict", ",", "errors", ")", "=", "_validate", "(", "data_dict", ",", "schema", ",", "context", ")", "if", "errors", ":", "raise", "ValidationError", "(", "errors", ")", "model", "=", "context", "[", "'model'", "]", "user_id", "=", "_get_or_bust", "(", "data_dict", ",", "'id'", ")", "followees", "=", "model", ".", "UserFollowingUser", ".", "followee_list", "(", "user_id", ")", "users", "=", "[", "model", ".", "User", ".", "get", "(", "followee", ".", "object_id", ")", "for", "followee", "in", "followees", "]", "users", "=", "[", "user", "for", "user", "in", "users", "if", "(", "user", "is", "not", "None", ")", "]", "return", "model_dictize", ".", "user_list_dictize", "(", "users", ",", "context", ")" ]
return the list of users that are followed by the given user .
train
false
35,578
def get_cached_value(identifier, item, context, **kwargs): allow_cache = True if ('allow_cache' in kwargs): allow_cache = kwargs.pop('allow_cache') key = _get_cache_key_for_context(identifier, item, context, **kwargs) if (not allow_cache): return (key, None) return (key, cache.get(key))
[ "def", "get_cached_value", "(", "identifier", ",", "item", ",", "context", ",", "**", "kwargs", ")", ":", "allow_cache", "=", "True", "if", "(", "'allow_cache'", "in", "kwargs", ")", ":", "allow_cache", "=", "kwargs", ".", "pop", "(", "'allow_cache'", ")", "key", "=", "_get_cache_key_for_context", "(", "identifier", ",", "item", ",", "context", ",", "**", "kwargs", ")", "if", "(", "not", "allow_cache", ")", ":", "return", "(", "key", ",", "None", ")", "return", "(", "key", ",", "cache", ".", "get", "(", "key", ")", ")" ]
get item from context cache by identifier accepts optional kwargs parameter allow_cache which will skip fetching the actual cached object .
train
false
35,579
def find_modules(scripts=(), includes=(), packages=(), excludes=(), path=None, debug=0): scripts = set(scripts) includes = set(includes) packages = set(packages) excludes = set(excludes) plat_prepare(includes, packages, excludes) mf = modulegraph.ModuleGraph(path=path, excludes=(excludes - includes), implies=get_implies(), debug=debug) find_needed_modules(mf, scripts, includes, packages) return mf
[ "def", "find_modules", "(", "scripts", "=", "(", ")", ",", "includes", "=", "(", ")", ",", "packages", "=", "(", ")", ",", "excludes", "=", "(", ")", ",", "path", "=", "None", ",", "debug", "=", "0", ")", ":", "scripts", "=", "set", "(", "scripts", ")", "includes", "=", "set", "(", "includes", ")", "packages", "=", "set", "(", "packages", ")", "excludes", "=", "set", "(", "excludes", ")", "plat_prepare", "(", "includes", ",", "packages", ",", "excludes", ")", "mf", "=", "modulegraph", ".", "ModuleGraph", "(", "path", "=", "path", ",", "excludes", "=", "(", "excludes", "-", "includes", ")", ",", "implies", "=", "get_implies", "(", ")", ",", "debug", "=", "debug", ")", "find_needed_modules", "(", "mf", ",", "scripts", ",", "includes", ",", "packages", ")", "return", "mf" ]
used to dynamically load custom classes .
train
false
35,580
def _collapse_addresses_recursive(addresses): while True: last_addr = None ret_array = [] optimized = False for cur_addr in addresses: if (not ret_array): last_addr = cur_addr ret_array.append(cur_addr) elif ((cur_addr.network_address >= last_addr.network_address) and (cur_addr.broadcast_address <= last_addr.broadcast_address)): optimized = True elif (cur_addr == list(last_addr.supernet().subnets())[1]): ret_array[(-1)] = last_addr = last_addr.supernet() optimized = True else: last_addr = cur_addr ret_array.append(cur_addr) addresses = ret_array if (not optimized): return addresses
[ "def", "_collapse_addresses_recursive", "(", "addresses", ")", ":", "while", "True", ":", "last_addr", "=", "None", "ret_array", "=", "[", "]", "optimized", "=", "False", "for", "cur_addr", "in", "addresses", ":", "if", "(", "not", "ret_array", ")", ":", "last_addr", "=", "cur_addr", "ret_array", ".", "append", "(", "cur_addr", ")", "elif", "(", "(", "cur_addr", ".", "network_address", ">=", "last_addr", ".", "network_address", ")", "and", "(", "cur_addr", ".", "broadcast_address", "<=", "last_addr", ".", "broadcast_address", ")", ")", ":", "optimized", "=", "True", "elif", "(", "cur_addr", "==", "list", "(", "last_addr", ".", "supernet", "(", ")", ".", "subnets", "(", ")", ")", "[", "1", "]", ")", ":", "ret_array", "[", "(", "-", "1", ")", "]", "=", "last_addr", "=", "last_addr", ".", "supernet", "(", ")", "optimized", "=", "True", "else", ":", "last_addr", "=", "cur_addr", "ret_array", ".", "append", "(", "cur_addr", ")", "addresses", "=", "ret_array", "if", "(", "not", "optimized", ")", ":", "return", "addresses" ]
loops through the addresses .
train
true
35,581
def machine_create(hostname, machine_group=None, owner=None): try: machine = Machine.objects.get(hostname__exact=hostname) except Machine.DoesNotExist: machine = Machine.objects.create(hostname=hostname) if (machine_group is not None): machine.machine_group = machine_group if (owner is not None): machine.owner = owner return machine
[ "def", "machine_create", "(", "hostname", ",", "machine_group", "=", "None", ",", "owner", "=", "None", ")", ":", "try", ":", "machine", "=", "Machine", ".", "objects", ".", "get", "(", "hostname__exact", "=", "hostname", ")", "except", "Machine", ".", "DoesNotExist", ":", "machine", "=", "Machine", ".", "objects", ".", "create", "(", "hostname", "=", "hostname", ")", "if", "(", "machine_group", "is", "not", "None", ")", ":", "machine", ".", "machine_group", "=", "machine_group", "if", "(", "owner", "is", "not", "None", ")", ":", "machine", ".", "owner", "=", "owner", "return", "machine" ]
creates a new machine being silent if it already exists .
train
false
35,582
def _cmp_by_local_origin(path1, path2): if (path1.source == path2.source): return None if (path1.source is None): return path1 if (path2.source is None): return path2 return None
[ "def", "_cmp_by_local_origin", "(", "path1", ",", "path2", ")", ":", "if", "(", "path1", ".", "source", "==", "path2", ".", "source", ")", ":", "return", "None", "if", "(", "path1", ".", "source", "is", "None", ")", ":", "return", "path1", "if", "(", "path2", ".", "source", "is", "None", ")", ":", "return", "path2", "return", "None" ]
select locally originating path as best path .
train
true
35,583
def try_staticfiles_lookup(path): try: url = staticfiles_storage.url(path) except Exception as err: log.warning("staticfiles_storage couldn't find path {0}: {1}".format(path, str(err))) url = path return url
[ "def", "try_staticfiles_lookup", "(", "path", ")", ":", "try", ":", "url", "=", "staticfiles_storage", ".", "url", "(", "path", ")", "except", "Exception", "as", "err", ":", "log", ".", "warning", "(", "\"staticfiles_storage couldn't find path {0}: {1}\"", ".", "format", "(", "path", ",", "str", "(", "err", ")", ")", ")", "url", "=", "path", "return", "url" ]
try to lookup a path in staticfiles_storage .
train
false
35,584
def _randomizedSelect(array, begin, end, i): if (begin == end): return array[begin] q = _randomizedPartition(array, begin, end) k = ((q - begin) + 1) if (i < k): return _randomizedSelect(array, begin, q, i) else: return _randomizedSelect(array, (q + 1), end, (i - k))
[ "def", "_randomizedSelect", "(", "array", ",", "begin", ",", "end", ",", "i", ")", ":", "if", "(", "begin", "==", "end", ")", ":", "return", "array", "[", "begin", "]", "q", "=", "_randomizedPartition", "(", "array", ",", "begin", ",", "end", ")", "k", "=", "(", "(", "q", "-", "begin", ")", "+", "1", ")", "if", "(", "i", "<", "k", ")", ":", "return", "_randomizedSelect", "(", "array", ",", "begin", ",", "q", ",", "i", ")", "else", ":", "return", "_randomizedSelect", "(", "array", ",", "(", "q", "+", "1", ")", ",", "end", ",", "(", "i", "-", "k", ")", ")" ]
allows to select the ith smallest element from array without sorting it .
train
false
35,585
def create_thread(exploration_id, state_name, original_author_id, subject, text): _create_models_for_thread_and_first_message(exploration_id, state_name, original_author_id, subject, text, False)
[ "def", "create_thread", "(", "exploration_id", ",", "state_name", ",", "original_author_id", ",", "subject", ",", "text", ")", ":", "_create_models_for_thread_and_first_message", "(", "exploration_id", ",", "state_name", ",", "original_author_id", ",", "subject", ",", "text", ",", "False", ")" ]
creates a thread and its first message .
train
false
35,586
def getcallargs(func, *positional, **named): (args, varargs, varkw, defaults) = getargspec(func) f_name = func.__name__ arg2value = {} assigned_tuple_params = [] def assign(arg, value): if isinstance(arg, str): arg2value[arg] = value else: assigned_tuple_params.append(arg) value = iter(value) for (i, subarg) in enumerate(arg): try: subvalue = next(value) except StopIteration: raise ValueError(('need more than %d %s to unpack' % (i, ('values' if (i > 1) else 'value')))) assign(subarg, subvalue) try: next(value) except StopIteration: pass else: raise ValueError('too many values to unpack') def is_assigned(arg): if isinstance(arg, str): return (arg in arg2value) return (arg in assigned_tuple_params) if (ismethod(func) and (func.im_self is not None)): positional = ((func.im_self,) + positional) num_pos = len(positional) num_total = (num_pos + len(named)) num_args = len(args) num_defaults = (len(defaults) if defaults else 0) for (arg, value) in zip(args, positional): assign(arg, value) if varargs: if (num_pos > num_args): assign(varargs, positional[(- (num_pos - num_args)):]) else: assign(varargs, ()) elif (0 < num_args < num_pos): raise TypeError(('%s() takes %s %d %s (%d given)' % (f_name, ('at most' if defaults else 'exactly'), num_args, ('arguments' if (num_args > 1) else 'argument'), num_total))) elif ((num_args == 0) and num_total): if varkw: if num_pos: raise TypeError(('%s() takes exactly 0 arguments (%d given)' % (f_name, num_total))) else: raise TypeError(('%s() takes no arguments (%d given)' % (f_name, num_total))) for arg in args: if (isinstance(arg, str) and (arg in named)): if is_assigned(arg): raise TypeError(("%s() got multiple values for keyword argument '%s'" % (f_name, arg))) else: assign(arg, named.pop(arg)) if defaults: for (arg, value) in zip(args[(- num_defaults):], defaults): if (not is_assigned(arg)): assign(arg, value) if varkw: assign(varkw, named) elif named: unexpected = next(iter(named)) try: unicode except NameError: pass else: if isinstance(unexpected, unicode): unexpected = unexpected.encode(sys.getdefaultencoding(), 'replace') raise TypeError(("%s() got an unexpected keyword argument '%s'" % (f_name, unexpected))) unassigned = (num_args - len([arg for arg in args if is_assigned(arg)])) if unassigned: num_required = (num_args - num_defaults) raise TypeError(('%s() takes %s %d %s (%d given)' % (f_name, ('at least' if defaults else 'exactly'), num_required, ('arguments' if (num_required > 1) else 'argument'), num_total))) return arg2value
[ "def", "getcallargs", "(", "func", ",", "*", "positional", ",", "**", "named", ")", ":", "(", "args", ",", "varargs", ",", "varkw", ",", "defaults", ")", "=", "getargspec", "(", "func", ")", "f_name", "=", "func", ".", "__name__", "arg2value", "=", "{", "}", "assigned_tuple_params", "=", "[", "]", "def", "assign", "(", "arg", ",", "value", ")", ":", "if", "isinstance", "(", "arg", ",", "str", ")", ":", "arg2value", "[", "arg", "]", "=", "value", "else", ":", "assigned_tuple_params", ".", "append", "(", "arg", ")", "value", "=", "iter", "(", "value", ")", "for", "(", "i", ",", "subarg", ")", "in", "enumerate", "(", "arg", ")", ":", "try", ":", "subvalue", "=", "next", "(", "value", ")", "except", "StopIteration", ":", "raise", "ValueError", "(", "(", "'need more than %d %s to unpack'", "%", "(", "i", ",", "(", "'values'", "if", "(", "i", ">", "1", ")", "else", "'value'", ")", ")", ")", ")", "assign", "(", "subarg", ",", "subvalue", ")", "try", ":", "next", "(", "value", ")", "except", "StopIteration", ":", "pass", "else", ":", "raise", "ValueError", "(", "'too many values to unpack'", ")", "def", "is_assigned", "(", "arg", ")", ":", "if", "isinstance", "(", "arg", ",", "str", ")", ":", "return", "(", "arg", "in", "arg2value", ")", "return", "(", "arg", "in", "assigned_tuple_params", ")", "if", "(", "ismethod", "(", "func", ")", "and", "(", "func", ".", "im_self", "is", "not", "None", ")", ")", ":", "positional", "=", "(", "(", "func", ".", "im_self", ",", ")", "+", "positional", ")", "num_pos", "=", "len", "(", "positional", ")", "num_total", "=", "(", "num_pos", "+", "len", "(", "named", ")", ")", "num_args", "=", "len", "(", "args", ")", "num_defaults", "=", "(", "len", "(", "defaults", ")", "if", "defaults", "else", "0", ")", "for", "(", "arg", ",", "value", ")", "in", "zip", "(", "args", ",", "positional", ")", ":", "assign", "(", "arg", ",", "value", ")", "if", "varargs", ":", "if", "(", "num_pos", ">", "num_args", ")", ":", "assign", "(", "varargs", ",", "positional", "[", "(", "-", "(", "num_pos", "-", "num_args", ")", ")", ":", "]", ")", "else", ":", "assign", "(", "varargs", ",", "(", ")", ")", "elif", "(", "0", "<", "num_args", "<", "num_pos", ")", ":", "raise", "TypeError", "(", "(", "'%s() takes %s %d %s (%d given)'", "%", "(", "f_name", ",", "(", "'at most'", "if", "defaults", "else", "'exactly'", ")", ",", "num_args", ",", "(", "'arguments'", "if", "(", "num_args", ">", "1", ")", "else", "'argument'", ")", ",", "num_total", ")", ")", ")", "elif", "(", "(", "num_args", "==", "0", ")", "and", "num_total", ")", ":", "if", "varkw", ":", "if", "num_pos", ":", "raise", "TypeError", "(", "(", "'%s() takes exactly 0 arguments (%d given)'", "%", "(", "f_name", ",", "num_total", ")", ")", ")", "else", ":", "raise", "TypeError", "(", "(", "'%s() takes no arguments (%d given)'", "%", "(", "f_name", ",", "num_total", ")", ")", ")", "for", "arg", "in", "args", ":", "if", "(", "isinstance", "(", "arg", ",", "str", ")", "and", "(", "arg", "in", "named", ")", ")", ":", "if", "is_assigned", "(", "arg", ")", ":", "raise", "TypeError", "(", "(", "\"%s() got multiple values for keyword argument '%s'\"", "%", "(", "f_name", ",", "arg", ")", ")", ")", "else", ":", "assign", "(", "arg", ",", "named", ".", "pop", "(", "arg", ")", ")", "if", "defaults", ":", "for", "(", "arg", ",", "value", ")", "in", "zip", "(", "args", "[", "(", "-", "num_defaults", ")", ":", "]", ",", "defaults", ")", ":", "if", "(", "not", "is_assigned", "(", "arg", ")", ")", ":", "assign", "(", "arg", ",", "value", ")", "if", "varkw", ":", "assign", "(", "varkw", ",", "named", ")", "elif", "named", ":", "unexpected", "=", "next", "(", "iter", "(", "named", ")", ")", "try", ":", "unicode", "except", "NameError", ":", "pass", "else", ":", "if", "isinstance", "(", "unexpected", ",", "unicode", ")", ":", "unexpected", "=", "unexpected", ".", "encode", "(", "sys", ".", "getdefaultencoding", "(", ")", ",", "'replace'", ")", "raise", "TypeError", "(", "(", "\"%s() got an unexpected keyword argument '%s'\"", "%", "(", "f_name", ",", "unexpected", ")", ")", ")", "unassigned", "=", "(", "num_args", "-", "len", "(", "[", "arg", "for", "arg", "in", "args", "if", "is_assigned", "(", "arg", ")", "]", ")", ")", "if", "unassigned", ":", "num_required", "=", "(", "num_args", "-", "num_defaults", ")", "raise", "TypeError", "(", "(", "'%s() takes %s %d %s (%d given)'", "%", "(", "f_name", ",", "(", "'at least'", "if", "defaults", "else", "'exactly'", ")", ",", "num_required", ",", "(", "'arguments'", "if", "(", "num_required", ">", "1", ")", "else", "'argument'", ")", ",", "num_total", ")", ")", ")", "return", "arg2value" ]
this is a simplified inspect .
train
true
35,587
def _ValidateNotIpV4Address(host): matcher = _URL_IP_V4_ADDR_RE.match(host) if (matcher and (sum((1 for x in matcher.groups() if (int(x) <= 255))) == 4)): raise validation.ValidationError(("Host may not match an ipv4 address '%s'" % host)) return matcher
[ "def", "_ValidateNotIpV4Address", "(", "host", ")", ":", "matcher", "=", "_URL_IP_V4_ADDR_RE", ".", "match", "(", "host", ")", "if", "(", "matcher", "and", "(", "sum", "(", "(", "1", "for", "x", "in", "matcher", ".", "groups", "(", ")", "if", "(", "int", "(", "x", ")", "<=", "255", ")", ")", ")", "==", "4", ")", ")", ":", "raise", "validation", ".", "ValidationError", "(", "(", "\"Host may not match an ipv4 address '%s'\"", "%", "host", ")", ")", "return", "matcher" ]
validate host is not an ipv4 address .
train
false
35,588
def kill(coro): return KillEvent(coro)
[ "def", "kill", "(", "coro", ")", ":", "return", "KillEvent", "(", "coro", ")" ]
kill greenlet asynchronously .
train
false
35,589
def _dijkstra(G, source, weight, pred=None, paths=None, cutoff=None, target=None): return _dijkstra_multisource(G, [source], weight, pred=pred, paths=paths, cutoff=cutoff, target=target)
[ "def", "_dijkstra", "(", "G", ",", "source", ",", "weight", ",", "pred", "=", "None", ",", "paths", "=", "None", ",", "cutoff", "=", "None", ",", "target", "=", "None", ")", ":", "return", "_dijkstra_multisource", "(", "G", ",", "[", "source", "]", ",", "weight", ",", "pred", "=", "pred", ",", "paths", "=", "paths", ",", "cutoff", "=", "cutoff", ",", "target", "=", "target", ")" ]
uses dijkstras algorithm to find shortest weighted paths from a single source .
train
false
35,590
def convexity_defect(contour, curvature): kinks = [] mean = np.mean(curvature) if (mean > 0): kink_index = [i for i in range(len(curvature)) if (curvature[i] < 0)] else: kink_index = [i for i in range(len(curvature)) if (curvature[i] > 0)] for s in kink_index: kinks.append(contour[(s + 1)]) return (kinks, kink_index)
[ "def", "convexity_defect", "(", "contour", ",", "curvature", ")", ":", "kinks", "=", "[", "]", "mean", "=", "np", ".", "mean", "(", "curvature", ")", "if", "(", "mean", ">", "0", ")", ":", "kink_index", "=", "[", "i", "for", "i", "in", "range", "(", "len", "(", "curvature", ")", ")", "if", "(", "curvature", "[", "i", "]", "<", "0", ")", "]", "else", ":", "kink_index", "=", "[", "i", "for", "i", "in", "range", "(", "len", "(", "curvature", ")", ")", "if", "(", "curvature", "[", "i", "]", ">", "0", ")", "]", "for", "s", "in", "kink_index", ":", "kinks", ".", "append", "(", "contour", "[", "(", "s", "+", "1", ")", "]", ")", "return", "(", "kinks", ",", "kink_index", ")" ]
contour is array shape=(number of points .
train
false
35,592
def uptodate(name, bin_env=None, user=None, cwd=None, use_vt=False): ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to update.'} try: packages = __salt__['pip.list_upgrades'](bin_env=bin_env, user=user, cwd=cwd) except Exception as e: ret['comment'] = str(e) return ret if (not packages): ret['comment'] = 'System is already up-to-date.' ret['result'] = True return ret elif __opts__['test']: ret['comment'] = 'System update will be performed' ret['result'] = None return ret updated = __salt__['pip.upgrade'](bin_env=bin_env, user=user, cwd=cwd, use_vt=use_vt) if (updated.get('result') is False): ret.update(updated) elif updated: ret['changes'] = updated ret['comment'] = 'Upgrade successful.' ret['result'] = True else: ret['comment'] = 'Upgrade failed.' return ret
[ "def", "uptodate", "(", "name", ",", "bin_env", "=", "None", ",", "user", "=", "None", ",", "cwd", "=", "None", ",", "use_vt", "=", "False", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "False", ",", "'comment'", ":", "'Failed to update.'", "}", "try", ":", "packages", "=", "__salt__", "[", "'pip.list_upgrades'", "]", "(", "bin_env", "=", "bin_env", ",", "user", "=", "user", ",", "cwd", "=", "cwd", ")", "except", "Exception", "as", "e", ":", "ret", "[", "'comment'", "]", "=", "str", "(", "e", ")", "return", "ret", "if", "(", "not", "packages", ")", ":", "ret", "[", "'comment'", "]", "=", "'System is already up-to-date.'", "ret", "[", "'result'", "]", "=", "True", "return", "ret", "elif", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'System update will be performed'", "ret", "[", "'result'", "]", "=", "None", "return", "ret", "updated", "=", "__salt__", "[", "'pip.upgrade'", "]", "(", "bin_env", "=", "bin_env", ",", "user", "=", "user", ",", "cwd", "=", "cwd", ",", "use_vt", "=", "use_vt", ")", "if", "(", "updated", ".", "get", "(", "'result'", ")", "is", "False", ")", ":", "ret", ".", "update", "(", "updated", ")", "elif", "updated", ":", "ret", "[", "'changes'", "]", "=", "updated", "ret", "[", "'comment'", "]", "=", "'Upgrade successful.'", "ret", "[", "'result'", "]", "=", "True", "else", ":", "ret", "[", "'comment'", "]", "=", "'Upgrade failed.'", "return", "ret" ]
call the rest endpoint to see if the packages on the "server" are up to date .
train
true
35,593
def prepend_www(url): parsed = urlparse(url) if (parsed.netloc.split('.')[0] != 'www'): return (((parsed.scheme + '://www.') + parsed.netloc) + parsed.path) else: return url
[ "def", "prepend_www", "(", "url", ")", ":", "parsed", "=", "urlparse", "(", "url", ")", "if", "(", "parsed", ".", "netloc", ".", "split", "(", "'.'", ")", "[", "0", "]", "!=", "'www'", ")", ":", "return", "(", "(", "(", "parsed", ".", "scheme", "+", "'://www.'", ")", "+", "parsed", ".", "netloc", ")", "+", "parsed", ".", "path", ")", "else", ":", "return", "url" ]
changes google .
train
true
35,594
def get_process_token(): GetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess GetCurrentProcess.restype = wintypes.HANDLE OpenProcessToken = ctypes.windll.advapi32.OpenProcessToken OpenProcessToken.argtypes = (wintypes.HANDLE, wintypes.DWORD, ctypes.POINTER(wintypes.HANDLE)) OpenProcessToken.restype = wintypes.BOOL token = wintypes.HANDLE() TOKEN_ALL_ACCESS = 983551 res = OpenProcessToken(GetCurrentProcess(), TOKEN_ALL_ACCESS, token) if (not (res > 0)): raise RuntimeError("Couldn't get process token") return token
[ "def", "get_process_token", "(", ")", ":", "GetCurrentProcess", "=", "ctypes", ".", "windll", ".", "kernel32", ".", "GetCurrentProcess", "GetCurrentProcess", ".", "restype", "=", "wintypes", ".", "HANDLE", "OpenProcessToken", "=", "ctypes", ".", "windll", ".", "advapi32", ".", "OpenProcessToken", "OpenProcessToken", ".", "argtypes", "=", "(", "wintypes", ".", "HANDLE", ",", "wintypes", ".", "DWORD", ",", "ctypes", ".", "POINTER", "(", "wintypes", ".", "HANDLE", ")", ")", "OpenProcessToken", ".", "restype", "=", "wintypes", ".", "BOOL", "token", "=", "wintypes", ".", "HANDLE", "(", ")", "TOKEN_ALL_ACCESS", "=", "983551", "res", "=", "OpenProcessToken", "(", "GetCurrentProcess", "(", ")", ",", "TOKEN_ALL_ACCESS", ",", "token", ")", "if", "(", "not", "(", "res", ">", "0", ")", ")", ":", "raise", "RuntimeError", "(", "\"Couldn't get process token\"", ")", "return", "token" ]
get the current process token .
train
false
35,595
def validate_lowercase(string): if (not string.islower()): raise ValidationError(_(u'This value must be all lowercase.'))
[ "def", "validate_lowercase", "(", "string", ")", ":", "if", "(", "not", "string", ".", "islower", "(", ")", ")", ":", "raise", "ValidationError", "(", "_", "(", "u'This value must be all lowercase.'", ")", ")" ]
validates that a string is lowercase .
train
false
35,596
def as_unittest(func): class Tester(unittest.TestCase, ): def test(self): func() Tester.__name__ = func.__name__ return Tester
[ "def", "as_unittest", "(", "func", ")", ":", "class", "Tester", "(", "unittest", ".", "TestCase", ",", ")", ":", "def", "test", "(", "self", ")", ":", "func", "(", ")", "Tester", ".", "__name__", "=", "func", ".", "__name__", "return", "Tester" ]
decorator to make a simple function into a normal test via unittest .
train
false
35,598
def file_upload_echo_content(request): def read_and_close(f): with f: return f.read().decode('utf-8') r = {k: read_and_close(f) for (k, f) in request.FILES.items()} return HttpResponse(json.dumps(r))
[ "def", "file_upload_echo_content", "(", "request", ")", ":", "def", "read_and_close", "(", "f", ")", ":", "with", "f", ":", "return", "f", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "r", "=", "{", "k", ":", "read_and_close", "(", "f", ")", "for", "(", "k", ",", "f", ")", "in", "request", ".", "FILES", ".", "items", "(", ")", "}", "return", "HttpResponse", "(", "json", ".", "dumps", "(", "r", ")", ")" ]
simple view to echo back the content of uploaded files for tests .
train
false
35,599
def next_week(t): return (this_week(t) + WEEK)
[ "def", "next_week", "(", "t", ")", ":", "return", "(", "this_week", "(", "t", ")", "+", "WEEK", ")" ]
return timestamp for start of next week .
train
false
35,600
def popmin(a, b): if (len(a) == 0): return (b[0], a, b[1:]) elif (len(b) == 0): return (a[0], a[1:], b) elif (a[0] > b[0]): return (b[0], a, b[1:]) else: return (a[0], a[1:], b)
[ "def", "popmin", "(", "a", ",", "b", ")", ":", "if", "(", "len", "(", "a", ")", "==", "0", ")", ":", "return", "(", "b", "[", "0", "]", ",", "a", ",", "b", "[", "1", ":", "]", ")", "elif", "(", "len", "(", "b", ")", "==", "0", ")", ":", "return", "(", "a", "[", "0", "]", ",", "a", "[", "1", ":", "]", ",", "b", ")", "elif", "(", "a", "[", "0", "]", ">", "b", "[", "0", "]", ")", ":", "return", "(", "b", "[", "0", "]", ",", "a", ",", "b", "[", "1", ":", "]", ")", "else", ":", "return", "(", "a", "[", "0", "]", ",", "a", "[", "1", ":", "]", ",", "b", ")" ]
popmin -> where i is min and a/b are the results of removing i from the relevant sequence .
train
false
35,601
@treeio_login_required @handle_response_format def receivable_delete(request, receivable_id, response_format='html'): receivable = get_object_or_404(Liability, pk=receivable_id) if (not request.user.profile.has_permission(receivable, mode='w')): return user_denied(request, "You don't have access to this Receivable", response_format) if request.POST: if ('delete' in request.POST): if ('trash' in request.POST): receivable.trash = True receivable.save() else: receivable.delete() return HttpResponseRedirect(reverse('finance_index_receivables')) elif ('cancel' in request.POST): return HttpResponseRedirect(reverse('finance_receivable_view', args=[receivable.id])) return render_to_response('finance/receivable_delete', {'liability': receivable}, context_instance=RequestContext(request), response_format=response_format)
[ "@", "treeio_login_required", "@", "handle_response_format", "def", "receivable_delete", "(", "request", ",", "receivable_id", ",", "response_format", "=", "'html'", ")", ":", "receivable", "=", "get_object_or_404", "(", "Liability", ",", "pk", "=", "receivable_id", ")", "if", "(", "not", "request", ".", "user", ".", "profile", ".", "has_permission", "(", "receivable", ",", "mode", "=", "'w'", ")", ")", ":", "return", "user_denied", "(", "request", ",", "\"You don't have access to this Receivable\"", ",", "response_format", ")", "if", "request", ".", "POST", ":", "if", "(", "'delete'", "in", "request", ".", "POST", ")", ":", "if", "(", "'trash'", "in", "request", ".", "POST", ")", ":", "receivable", ".", "trash", "=", "True", "receivable", ".", "save", "(", ")", "else", ":", "receivable", ".", "delete", "(", ")", "return", "HttpResponseRedirect", "(", "reverse", "(", "'finance_index_receivables'", ")", ")", "elif", "(", "'cancel'", "in", "request", ".", "POST", ")", ":", "return", "HttpResponseRedirect", "(", "reverse", "(", "'finance_receivable_view'", ",", "args", "=", "[", "receivable", ".", "id", "]", ")", ")", "return", "render_to_response", "(", "'finance/receivable_delete'", ",", "{", "'liability'", ":", "receivable", "}", ",", "context_instance", "=", "RequestContext", "(", "request", ")", ",", "response_format", "=", "response_format", ")" ]
receivable delete .
train
false
35,602
def get_draft_subtree_roots(draft_nodes): urls = [draft_node.url for draft_node in draft_nodes] for draft_node in draft_nodes: if (draft_node.parent_url not in urls): (yield draft_node)
[ "def", "get_draft_subtree_roots", "(", "draft_nodes", ")", ":", "urls", "=", "[", "draft_node", ".", "url", "for", "draft_node", "in", "draft_nodes", "]", "for", "draft_node", "in", "draft_nodes", ":", "if", "(", "draft_node", ".", "parent_url", "not", "in", "urls", ")", ":", "(", "yield", "draft_node", ")" ]
takes a list of draft_nodes .
train
false
35,603
def same_kind(src, dest): return ((DATETIME_UNITS[src] < 5) == (DATETIME_UNITS[dest] < 5))
[ "def", "same_kind", "(", "src", ",", "dest", ")", ":", "return", "(", "(", "DATETIME_UNITS", "[", "src", "]", "<", "5", ")", "==", "(", "DATETIME_UNITS", "[", "dest", "]", "<", "5", ")", ")" ]
whether the *src* and *dest* units are of the same kind .
train
false
35,605
def _update_with_callback(xblock, user, old_metadata=None, old_content=None): if callable(getattr(xblock, 'editor_saved', None)): if (old_metadata is None): old_metadata = own_metadata(xblock) if (old_content is None): old_content = xblock.get_explicitly_set_fields_by_scope(Scope.content) xblock.xmodule_runtime = StudioEditModuleRuntime(user) xblock.editor_saved(user, old_metadata, old_content) return modulestore().update_item(xblock, user.id)
[ "def", "_update_with_callback", "(", "xblock", ",", "user", ",", "old_metadata", "=", "None", ",", "old_content", "=", "None", ")", ":", "if", "callable", "(", "getattr", "(", "xblock", ",", "'editor_saved'", ",", "None", ")", ")", ":", "if", "(", "old_metadata", "is", "None", ")", ":", "old_metadata", "=", "own_metadata", "(", "xblock", ")", "if", "(", "old_content", "is", "None", ")", ":", "old_content", "=", "xblock", ".", "get_explicitly_set_fields_by_scope", "(", "Scope", ".", "content", ")", "xblock", ".", "xmodule_runtime", "=", "StudioEditModuleRuntime", "(", "user", ")", "xblock", ".", "editor_saved", "(", "user", ",", "old_metadata", ",", "old_content", ")", "return", "modulestore", "(", ")", ".", "update_item", "(", "xblock", ",", "user", ".", "id", ")" ]
updates the xblock in the modulestore .
train
false
35,606
def send_confirmation(): form_class = _security.send_confirmation_form if request.json: form = form_class(MultiDict(request.json)) else: form = form_class() if form.validate_on_submit(): send_confirmation_instructions(form.user) if (request.json is None): do_flash(*get_message('CONFIRMATION_REQUEST', email=form.user.email)) if request.json: return _render_json(form) return _security.render_template(config_value('SEND_CONFIRMATION_TEMPLATE'), send_confirmation_form=form, **_ctx('send_confirmation'))
[ "def", "send_confirmation", "(", ")", ":", "form_class", "=", "_security", ".", "send_confirmation_form", "if", "request", ".", "json", ":", "form", "=", "form_class", "(", "MultiDict", "(", "request", ".", "json", ")", ")", "else", ":", "form", "=", "form_class", "(", ")", "if", "form", ".", "validate_on_submit", "(", ")", ":", "send_confirmation_instructions", "(", "form", ".", "user", ")", "if", "(", "request", ".", "json", "is", "None", ")", ":", "do_flash", "(", "*", "get_message", "(", "'CONFIRMATION_REQUEST'", ",", "email", "=", "form", ".", "user", ".", "email", ")", ")", "if", "request", ".", "json", ":", "return", "_render_json", "(", "form", ")", "return", "_security", ".", "render_template", "(", "config_value", "(", "'SEND_CONFIRMATION_TEMPLATE'", ")", ",", "send_confirmation_form", "=", "form", ",", "**", "_ctx", "(", "'send_confirmation'", ")", ")" ]
view function which sends confirmation instructions .
train
true
35,608
def _get_read_time(read_name): time_list = [] remainder = _string_as_base_36(read_name[:6]) for denominator in _time_denominators: (this_term, remainder) = divmod(remainder, denominator) time_list.append(this_term) time_list.append(remainder) time_list[0] += 2000 return time_list
[ "def", "_get_read_time", "(", "read_name", ")", ":", "time_list", "=", "[", "]", "remainder", "=", "_string_as_base_36", "(", "read_name", "[", ":", "6", "]", ")", "for", "denominator", "in", "_time_denominators", ":", "(", "this_term", ",", "remainder", ")", "=", "divmod", "(", "remainder", ",", "denominator", ")", "time_list", ".", "append", "(", "this_term", ")", "time_list", ".", "append", "(", "remainder", ")", "time_list", "[", "0", "]", "+=", "2000", "return", "time_list" ]
extract time from first 6 characters of read name .
train
false
35,609
def rs_integrate(p, x): R = p.ring p1 = R.zero n = R.gens.index(x) mn = ([0] * R.ngens) mn[n] = 1 mn = tuple(mn) for expv in p: e = monomial_mul(expv, mn) p1[e] = (p[expv] / (expv[n] + 1)) return p1
[ "def", "rs_integrate", "(", "p", ",", "x", ")", ":", "R", "=", "p", ".", "ring", "p1", "=", "R", ".", "zero", "n", "=", "R", ".", "gens", ".", "index", "(", "x", ")", "mn", "=", "(", "[", "0", "]", "*", "R", ".", "ngens", ")", "mn", "[", "n", "]", "=", "1", "mn", "=", "tuple", "(", "mn", ")", "for", "expv", "in", "p", ":", "e", "=", "monomial_mul", "(", "expv", ",", "mn", ")", "p1", "[", "e", "]", "=", "(", "p", "[", "expv", "]", "/", "(", "expv", "[", "n", "]", "+", "1", ")", ")", "return", "p1" ]
integrate p with respect to x .
train
false
35,610
def min_width(string, cols, padding=' '): is_color = isinstance(string, ColoredString) stack = tsplit(str(string), NEWLINES) for (i, substring) in enumerate(stack): _sub = clean(substring).ljust((cols + 0), padding) if is_color: _sub = _sub.replace(clean(substring), substring) stack[i] = _sub return '\n'.join(stack)
[ "def", "min_width", "(", "string", ",", "cols", ",", "padding", "=", "' '", ")", ":", "is_color", "=", "isinstance", "(", "string", ",", "ColoredString", ")", "stack", "=", "tsplit", "(", "str", "(", "string", ")", ",", "NEWLINES", ")", "for", "(", "i", ",", "substring", ")", "in", "enumerate", "(", "stack", ")", ":", "_sub", "=", "clean", "(", "substring", ")", ".", "ljust", "(", "(", "cols", "+", "0", ")", ",", "padding", ")", "if", "is_color", ":", "_sub", "=", "_sub", ".", "replace", "(", "clean", "(", "substring", ")", ",", "substring", ")", "stack", "[", "i", "]", "=", "_sub", "return", "'\\n'", ".", "join", "(", "stack", ")" ]
returns given string with right padding .
train
true
35,612
def get_unique_filename(filename): return (u'%s__%s' % (uuid4(), filename))
[ "def", "get_unique_filename", "(", "filename", ")", ":", "return", "(", "u'%s__%s'", "%", "(", "uuid4", "(", ")", ",", "filename", ")", ")" ]
check if path is unique .
train
false
35,613
def _get_read_xy(read_name): number = _string_as_base_36(read_name[9:]) return divmod(number, 4096)
[ "def", "_get_read_xy", "(", "read_name", ")", ":", "number", "=", "_string_as_base_36", "(", "read_name", "[", "9", ":", "]", ")", "return", "divmod", "(", "number", ",", "4096", ")" ]
extract coordinates from last 5 characters of read name .
train
false
35,614
def _make_data_stub(client): if (client.emulator_host is None): return make_secure_stub(client.credentials, client.user_agent, bigtable_pb2.BigtableStub, DATA_API_HOST, extra_options=_GRPC_MAX_LENGTH_OPTIONS) else: return make_insecure_stub(bigtable_pb2.BigtableStub, client.emulator_host)
[ "def", "_make_data_stub", "(", "client", ")", ":", "if", "(", "client", ".", "emulator_host", "is", "None", ")", ":", "return", "make_secure_stub", "(", "client", ".", "credentials", ",", "client", ".", "user_agent", ",", "bigtable_pb2", ".", "BigtableStub", ",", "DATA_API_HOST", ",", "extra_options", "=", "_GRPC_MAX_LENGTH_OPTIONS", ")", "else", ":", "return", "make_insecure_stub", "(", "bigtable_pb2", ".", "BigtableStub", ",", "client", ".", "emulator_host", ")" ]
creates grpc stub to make requests to the data api .
train
false
35,616
def dual_decorator(func): @functools.wraps(func) def inner(*args, **kw): if ((len(args) == 1) and (not kw) and callable(args[0]) and (not ((type(args[0]) == type) and issubclass(args[0], BaseException)))): return func()(args[0]) else: return func(*args, **kw) return inner
[ "def", "dual_decorator", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "inner", "(", "*", "args", ",", "**", "kw", ")", ":", "if", "(", "(", "len", "(", "args", ")", "==", "1", ")", "and", "(", "not", "kw", ")", "and", "callable", "(", "args", "[", "0", "]", ")", "and", "(", "not", "(", "(", "type", "(", "args", "[", "0", "]", ")", "==", "type", ")", "and", "issubclass", "(", "args", "[", "0", "]", ",", "BaseException", ")", ")", ")", ")", ":", "return", "func", "(", ")", "(", "args", "[", "0", "]", ")", "else", ":", "return", "func", "(", "*", "args", ",", "**", "kw", ")", "return", "inner" ]
this is a decorator that converts a paramaterized decorator for no-param use .
train
false
35,617
def capture_warnings(capture): global _warnings_showwarning if capture: if (_warnings_showwarning is None): _warnings_showwarning = warnings.showwarning warnings.showwarning = idle_showwarning elif (_warnings_showwarning is not None): warnings.showwarning = _warnings_showwarning _warnings_showwarning = None
[ "def", "capture_warnings", "(", "capture", ")", ":", "global", "_warnings_showwarning", "if", "capture", ":", "if", "(", "_warnings_showwarning", "is", "None", ")", ":", "_warnings_showwarning", "=", "warnings", ".", "showwarning", "warnings", ".", "showwarning", "=", "idle_showwarning", "elif", "(", "_warnings_showwarning", "is", "not", "None", ")", ":", "warnings", ".", "showwarning", "=", "_warnings_showwarning", "_warnings_showwarning", "=", "None" ]
replace warning .
train
false
35,618
def cloud_add_link_task(cookie, tokens, source_url, save_path, vcode='', vcode_input=''): url = ''.join([const.PAN_URL, 'rest/2.0/services/cloud_dl?channel=chunlei&clienttype=0&web=1', '&bdstoken=', tokens['bdstoken']]) type_ = '' if source_url.startswith('ed2k'): type_ = '&type=3' if (not save_path.endswith('/')): save_path = (save_path + '/') data = ['method=add_task&app_id=250528', '&source_url=', encoder.encode_uri_component(source_url), '&save_path=', encoder.encode_uri_component(save_path), '&type=', type_] if vcode: data.append('&input=') data.append(vcode_input) data.append('&vcode=') data.append(vcode) data = ''.join(data) req = net.urlopen(url, headers={'Cookie': cookie.header_output()}, data=data.encode()) if req: content = req.data return json.loads(content.decode()) else: return None
[ "def", "cloud_add_link_task", "(", "cookie", ",", "tokens", ",", "source_url", ",", "save_path", ",", "vcode", "=", "''", ",", "vcode_input", "=", "''", ")", ":", "url", "=", "''", ".", "join", "(", "[", "const", ".", "PAN_URL", ",", "'rest/2.0/services/cloud_dl?channel=chunlei&clienttype=0&web=1'", ",", "'&bdstoken='", ",", "tokens", "[", "'bdstoken'", "]", "]", ")", "type_", "=", "''", "if", "source_url", ".", "startswith", "(", "'ed2k'", ")", ":", "type_", "=", "'&type=3'", "if", "(", "not", "save_path", ".", "endswith", "(", "'/'", ")", ")", ":", "save_path", "=", "(", "save_path", "+", "'/'", ")", "data", "=", "[", "'method=add_task&app_id=250528'", ",", "'&source_url='", ",", "encoder", ".", "encode_uri_component", "(", "source_url", ")", ",", "'&save_path='", ",", "encoder", ".", "encode_uri_component", "(", "save_path", ")", ",", "'&type='", ",", "type_", "]", "if", "vcode", ":", "data", ".", "append", "(", "'&input='", ")", "data", ".", "append", "(", "vcode_input", ")", "data", ".", "append", "(", "'&vcode='", ")", "data", ".", "append", "(", "vcode", ")", "data", "=", "''", ".", "join", "(", "data", ")", "req", "=", "net", ".", "urlopen", "(", "url", ",", "headers", "=", "{", "'Cookie'", ":", "cookie", ".", "header_output", "(", ")", "}", ",", "data", "=", "data", ".", "encode", "(", ")", ")", "if", "req", ":", "content", "=", "req", ".", "data", "return", "json", ".", "loads", "(", "content", ".", "decode", "(", ")", ")", "else", ":", "return", "None" ]
source_url - 可以是http/https/ftp等一般的链接 可以是emule这样的链接 path - 要保存到哪个目录 .
train
true
35,619
def run_once(f): @functools.wraps(f) def wrapper(): if (not wrapper.already_ran): f() wrapper.already_ran = True wrapper.already_ran = False return wrapper
[ "def", "run_once", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "wrapper", "(", ")", ":", "if", "(", "not", "wrapper", ".", "already_ran", ")", ":", "f", "(", ")", "wrapper", ".", "already_ran", "=", "True", "wrapper", ".", "already_ran", "=", "False", "return", "wrapper" ]
a decorator to ensure the decorated function is only executed once .
train
false
35,621
def pytest_ignore_collect(path, config): if ('appengine/standard' in str(path)): if six.PY3: return True if ('GAE_SDK_PATH' not in os.environ): return True return False
[ "def", "pytest_ignore_collect", "(", "path", ",", "config", ")", ":", "if", "(", "'appengine/standard'", "in", "str", "(", "path", ")", ")", ":", "if", "six", ".", "PY3", ":", "return", "True", "if", "(", "'GAE_SDK_PATH'", "not", "in", "os", ".", "environ", ")", ":", "return", "True", "return", "False" ]
ignore paths that would otherwise be collceted by the doctest plugin and lead to importerror due to missing dependencies .
train
false
35,622
def Geometric(name, p): return rv(name, GeometricDistribution, p)
[ "def", "Geometric", "(", "name", ",", "p", ")", ":", "return", "rv", "(", "name", ",", "GeometricDistribution", ",", "p", ")" ]
create a discrete random variable with a geometric distribution .
train
false
35,623
def load_pickle(filename): try: if pd: return (pd.read_pickle(filename), None) else: with open(filename, 'rb') as fid: data = pickle.load(fid) return (data, None) except Exception as err: return (None, str(err))
[ "def", "load_pickle", "(", "filename", ")", ":", "try", ":", "if", "pd", ":", "return", "(", "pd", ".", "read_pickle", "(", "filename", ")", ",", "None", ")", "else", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "fid", ":", "data", "=", "pickle", ".", "load", "(", "fid", ")", "return", "(", "data", ",", "None", ")", "except", "Exception", "as", "err", ":", "return", "(", "None", ",", "str", "(", "err", ")", ")" ]
load a pickle file as a dictionary .
train
true
35,624
def _get_html_and_errors(request, doc, rendering_params): (doc_html, ks_errors) = (doc.html, None) render_raw_fallback = False base_url = request.build_absolute_uri('/') if rendering_params['use_rendered']: if ((request.GET.get('bleach_new', False) is not False) and request.user.is_authenticated()): (doc_html, ks_errors) = kumascript.post(request, doc_html, request.LANGUAGE_CODE, True) else: cache_control = None if request.user.is_authenticated(): ua_cc = request.META.get('HTTP_CACHE_CONTROL') if (ua_cc == 'no-cache'): cache_control = 'no-cache' try: (r_body, r_errors) = doc.get_rendered(cache_control, base_url) if r_body: doc_html = r_body if r_errors: ks_errors = r_errors except DocumentRenderedContentNotAvailable: render_raw_fallback = True return (doc_html, ks_errors, render_raw_fallback)
[ "def", "_get_html_and_errors", "(", "request", ",", "doc", ",", "rendering_params", ")", ":", "(", "doc_html", ",", "ks_errors", ")", "=", "(", "doc", ".", "html", ",", "None", ")", "render_raw_fallback", "=", "False", "base_url", "=", "request", ".", "build_absolute_uri", "(", "'/'", ")", "if", "rendering_params", "[", "'use_rendered'", "]", ":", "if", "(", "(", "request", ".", "GET", ".", "get", "(", "'bleach_new'", ",", "False", ")", "is", "not", "False", ")", "and", "request", ".", "user", ".", "is_authenticated", "(", ")", ")", ":", "(", "doc_html", ",", "ks_errors", ")", "=", "kumascript", ".", "post", "(", "request", ",", "doc_html", ",", "request", ".", "LANGUAGE_CODE", ",", "True", ")", "else", ":", "cache_control", "=", "None", "if", "request", ".", "user", ".", "is_authenticated", "(", ")", ":", "ua_cc", "=", "request", ".", "META", ".", "get", "(", "'HTTP_CACHE_CONTROL'", ")", "if", "(", "ua_cc", "==", "'no-cache'", ")", ":", "cache_control", "=", "'no-cache'", "try", ":", "(", "r_body", ",", "r_errors", ")", "=", "doc", ".", "get_rendered", "(", "cache_control", ",", "base_url", ")", "if", "r_body", ":", "doc_html", "=", "r_body", "if", "r_errors", ":", "ks_errors", "=", "r_errors", "except", "DocumentRenderedContentNotAvailable", ":", "render_raw_fallback", "=", "True", "return", "(", "doc_html", ",", "ks_errors", ",", "render_raw_fallback", ")" ]
get the initial html for a document .
train
false
35,625
def filefind(filename, path_dirs=None): filename = filename.strip('"').strip("'") if (os.path.isabs(filename) and os.path.isfile(filename)): return filename if (path_dirs is None): path_dirs = ('',) elif isinstance(path_dirs, str): path_dirs = (path_dirs,) for path in path_dirs: if (path == '.'): path = os.getcwd() testname = expand_path(os.path.join(path, filename)) if os.path.isfile(testname): return os.path.abspath(testname) raise IOError(('File %r does not exist in any of the search paths: %r' % (filename, path_dirs)))
[ "def", "filefind", "(", "filename", ",", "path_dirs", "=", "None", ")", ":", "filename", "=", "filename", ".", "strip", "(", "'\"'", ")", ".", "strip", "(", "\"'\"", ")", "if", "(", "os", ".", "path", ".", "isabs", "(", "filename", ")", "and", "os", ".", "path", ".", "isfile", "(", "filename", ")", ")", ":", "return", "filename", "if", "(", "path_dirs", "is", "None", ")", ":", "path_dirs", "=", "(", "''", ",", ")", "elif", "isinstance", "(", "path_dirs", ",", "str", ")", ":", "path_dirs", "=", "(", "path_dirs", ",", ")", "for", "path", "in", "path_dirs", ":", "if", "(", "path", "==", "'.'", ")", ":", "path", "=", "os", ".", "getcwd", "(", ")", "testname", "=", "expand_path", "(", "os", ".", "path", ".", "join", "(", "path", ",", "filename", ")", ")", "if", "os", ".", "path", ".", "isfile", "(", "testname", ")", ":", "return", "os", ".", "path", ".", "abspath", "(", "testname", ")", "raise", "IOError", "(", "(", "'File %r does not exist in any of the search paths: %r'", "%", "(", "filename", ",", "path_dirs", ")", ")", ")" ]
find a file by looking through a sequence of paths .
train
false
35,626
def _generate_request_id_hash(): return hashlib.sha1(str(_request_id)).hexdigest()[:8].upper()
[ "def", "_generate_request_id_hash", "(", ")", ":", "return", "hashlib", ".", "sha1", "(", "str", "(", "_request_id", ")", ")", ".", "hexdigest", "(", ")", "[", ":", "8", "]", ".", "upper", "(", ")" ]
generates a hash of the current request id .
train
false
35,628
def list_from_multiproperty(*external_names): def list_from_multiproperty_lambda(unused_value, bulkload_state): result = [] for external_name in external_names: value = bulkload_state.current_dictionary.get(external_name) if value: result.append(value) return result return list_from_multiproperty_lambda
[ "def", "list_from_multiproperty", "(", "*", "external_names", ")", ":", "def", "list_from_multiproperty_lambda", "(", "unused_value", ",", "bulkload_state", ")", ":", "result", "=", "[", "]", "for", "external_name", "in", "external_names", ":", "value", "=", "bulkload_state", ".", "current_dictionary", ".", "get", "(", "external_name", ")", "if", "value", ":", "result", ".", "append", "(", "value", ")", "return", "result", "return", "list_from_multiproperty_lambda" ]
create a list from multiple properties .
train
false
35,629
def get_app_path(app_name, *joins): return get_pymodule_path(app_name, *joins)
[ "def", "get_app_path", "(", "app_name", ",", "*", "joins", ")", ":", "return", "get_pymodule_path", "(", "app_name", ",", "*", "joins", ")" ]
return path of given app .
train
false
35,630
def get_all_names(): for filename in glob.glob(os.path.join(REQ_DIR, 'requirements-*.txt-raw')): basename = os.path.basename(filename) (yield basename[len('requirements-'):(- len('.txt-raw'))])
[ "def", "get_all_names", "(", ")", ":", "for", "filename", "in", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "REQ_DIR", ",", "'requirements-*.txt-raw'", ")", ")", ":", "basename", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", "(", "yield", "basename", "[", "len", "(", "'requirements-'", ")", ":", "(", "-", "len", "(", "'.txt-raw'", ")", ")", "]", ")" ]
get all requirement names based on filenames .
train
false
35,631
def test_get_templates(): env = branca.utilities.get_templates() assert isinstance(env, jinja2.environment.Environment)
[ "def", "test_get_templates", "(", ")", ":", "env", "=", "branca", ".", "utilities", ".", "get_templates", "(", ")", "assert", "isinstance", "(", "env", ",", "jinja2", ".", "environment", ".", "Environment", ")" ]
test template getting .
train
false
35,633
def get_unique_counter_from_url(sp): pos = sp.rfind('%23') if (pos != (-1)): return int(sp[(pos + 3):]) else: return None
[ "def", "get_unique_counter_from_url", "(", "sp", ")", ":", "pos", "=", "sp", ".", "rfind", "(", "'%23'", ")", "if", "(", "pos", "!=", "(", "-", "1", ")", ")", ":", "return", "int", "(", "sp", "[", "(", "pos", "+", "3", ")", ":", "]", ")", "else", ":", "return", "None" ]
extract the unique counter from the url if it has one .
train
false
35,636
def dtlz4(individual, obj, alpha): xc = individual[:(obj - 1)] xm = individual[(obj - 1):] g = sum((((xi - 0.5) ** 2) for xi in xm)) f = [((1.0 + g) * reduce(mul, (cos(((0.5 * (xi ** alpha)) * pi)) for xi in xc), 1.0))] f.extend(((((1.0 + g) * reduce(mul, (cos(((0.5 * (xi ** alpha)) * pi)) for xi in xc[:m]), 1)) * sin(((0.5 * (xc[m] ** alpha)) * pi))) for m in range((obj - 2), (-1), (-1)))) return f
[ "def", "dtlz4", "(", "individual", ",", "obj", ",", "alpha", ")", ":", "xc", "=", "individual", "[", ":", "(", "obj", "-", "1", ")", "]", "xm", "=", "individual", "[", "(", "obj", "-", "1", ")", ":", "]", "g", "=", "sum", "(", "(", "(", "(", "xi", "-", "0.5", ")", "**", "2", ")", "for", "xi", "in", "xm", ")", ")", "f", "=", "[", "(", "(", "1.0", "+", "g", ")", "*", "reduce", "(", "mul", ",", "(", "cos", "(", "(", "(", "0.5", "*", "(", "xi", "**", "alpha", ")", ")", "*", "pi", ")", ")", "for", "xi", "in", "xc", ")", ",", "1.0", ")", ")", "]", "f", ".", "extend", "(", "(", "(", "(", "(", "1.0", "+", "g", ")", "*", "reduce", "(", "mul", ",", "(", "cos", "(", "(", "(", "0.5", "*", "(", "xi", "**", "alpha", ")", ")", "*", "pi", ")", ")", "for", "xi", "in", "xc", "[", ":", "m", "]", ")", ",", "1", ")", ")", "*", "sin", "(", "(", "(", "0.5", "*", "(", "xc", "[", "m", "]", "**", "alpha", ")", ")", "*", "pi", ")", ")", ")", "for", "m", "in", "range", "(", "(", "obj", "-", "2", ")", ",", "(", "-", "1", ")", ",", "(", "-", "1", ")", ")", ")", ")", "return", "f" ]
dtlz4 multiobjective function .
train
false
35,637
def get_release(artifactory_url, repository, group_id, artifact_id, packaging, version, target_dir='/tmp', target_file=None, classifier=None, username=None, password=None): log.debug('======================== MODULE FUNCTION: artifactory.get_release(artifactory_url=%s, repository=%s, group_id=%s, artifact_id=%s, packaging=%s, version=%s, target_dir=%s, classifier=%s)', artifactory_url, repository, group_id, artifact_id, packaging, version, target_dir, classifier) headers = {} if (username and password): headers['Authorization'] = 'Basic {0}'.format(base64.encodestring('{0}:{1}'.format(username, password)).replace('\n', '')) (release_url, file_name) = _get_release_url(repository, group_id, artifact_id, packaging, version, artifactory_url, classifier) target_file = __resolve_target_file(file_name, target_dir, target_file) return __save_artifact(release_url, target_file, headers)
[ "def", "get_release", "(", "artifactory_url", ",", "repository", ",", "group_id", ",", "artifact_id", ",", "packaging", ",", "version", ",", "target_dir", "=", "'/tmp'", ",", "target_file", "=", "None", ",", "classifier", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "log", ".", "debug", "(", "'======================== MODULE FUNCTION: artifactory.get_release(artifactory_url=%s, repository=%s, group_id=%s, artifact_id=%s, packaging=%s, version=%s, target_dir=%s, classifier=%s)'", ",", "artifactory_url", ",", "repository", ",", "group_id", ",", "artifact_id", ",", "packaging", ",", "version", ",", "target_dir", ",", "classifier", ")", "headers", "=", "{", "}", "if", "(", "username", "and", "password", ")", ":", "headers", "[", "'Authorization'", "]", "=", "'Basic {0}'", ".", "format", "(", "base64", ".", "encodestring", "(", "'{0}:{1}'", ".", "format", "(", "username", ",", "password", ")", ")", ".", "replace", "(", "'\\n'", ",", "''", ")", ")", "(", "release_url", ",", "file_name", ")", "=", "_get_release_url", "(", "repository", ",", "group_id", ",", "artifact_id", ",", "packaging", ",", "version", ",", "artifactory_url", ",", "classifier", ")", "target_file", "=", "__resolve_target_file", "(", "file_name", ",", "target_dir", ",", "target_file", ")", "return", "__save_artifact", "(", "release_url", ",", "target_file", ",", "headers", ")" ]
gets the specified release of the artifact artifactory_url url of artifactory instance repository release repository in artifactory to retrieve artifact from .
train
true
35,638
def sort(filename, key, outputFile, fields=None, watermark=((1024 * 1024) * 100)): if (fields is not None): assert set(key).issubset(set([f[0] for f in fields])) with FileRecordStream(filename) as f: if fields: fieldNames = [ff[0] for ff in fields] indices = [f.getFieldNames().index(name) for name in fieldNames] assert (len(indices) == len(fields)) else: fileds = f.getFields() fieldNames = f.getFieldNames() indices = None key = [fieldNames.index(name) for name in key] chunk = 0 records = [] for (i, r) in enumerate(f): if indices: temp = [] for i in indices: temp.append(r[i]) r = temp records.append(r) available_memory = psutil.avail_phymem() if (available_memory < watermark): _sortChunk(records, key, chunk, fields) records = [] chunk += 1 if (len(records) > 0): _sortChunk(records, key, chunk, fields) chunk += 1 _mergeFiles(key, chunk, outputFile, fields)
[ "def", "sort", "(", "filename", ",", "key", ",", "outputFile", ",", "fields", "=", "None", ",", "watermark", "=", "(", "(", "1024", "*", "1024", ")", "*", "100", ")", ")", ":", "if", "(", "fields", "is", "not", "None", ")", ":", "assert", "set", "(", "key", ")", ".", "issubset", "(", "set", "(", "[", "f", "[", "0", "]", "for", "f", "in", "fields", "]", ")", ")", "with", "FileRecordStream", "(", "filename", ")", "as", "f", ":", "if", "fields", ":", "fieldNames", "=", "[", "ff", "[", "0", "]", "for", "ff", "in", "fields", "]", "indices", "=", "[", "f", ".", "getFieldNames", "(", ")", ".", "index", "(", "name", ")", "for", "name", "in", "fieldNames", "]", "assert", "(", "len", "(", "indices", ")", "==", "len", "(", "fields", ")", ")", "else", ":", "fileds", "=", "f", ".", "getFields", "(", ")", "fieldNames", "=", "f", ".", "getFieldNames", "(", ")", "indices", "=", "None", "key", "=", "[", "fieldNames", ".", "index", "(", "name", ")", "for", "name", "in", "key", "]", "chunk", "=", "0", "records", "=", "[", "]", "for", "(", "i", ",", "r", ")", "in", "enumerate", "(", "f", ")", ":", "if", "indices", ":", "temp", "=", "[", "]", "for", "i", "in", "indices", ":", "temp", ".", "append", "(", "r", "[", "i", "]", ")", "r", "=", "temp", "records", ".", "append", "(", "r", ")", "available_memory", "=", "psutil", ".", "avail_phymem", "(", ")", "if", "(", "available_memory", "<", "watermark", ")", ":", "_sortChunk", "(", "records", ",", "key", ",", "chunk", ",", "fields", ")", "records", "=", "[", "]", "chunk", "+=", "1", "if", "(", "len", "(", "records", ")", ">", "0", ")", ":", "_sortChunk", "(", "records", ",", "key", ",", "chunk", ",", "fields", ")", "chunk", "+=", "1", "_mergeFiles", "(", "key", ",", "chunk", ",", "outputFile", ",", "fields", ")" ]
takes a list of integers and sorts them in ascending order .
train
true
35,639
def _parse_object_status(status): if (not isinstance(status, list)): status = [status] status_set = set() action_set = set() for val in status: for s in ('COMPLETE', 'FAILED', 'IN_PROGRESS'): index = val.rfind(s) if (index != (-1)): status_set.add(val[index:]) if (index > 1): action_set.add(val[:(index - 1)]) break return (action_set, status_set)
[ "def", "_parse_object_status", "(", "status", ")", ":", "if", "(", "not", "isinstance", "(", "status", ",", "list", ")", ")", ":", "status", "=", "[", "status", "]", "status_set", "=", "set", "(", ")", "action_set", "=", "set", "(", ")", "for", "val", "in", "status", ":", "for", "s", "in", "(", "'COMPLETE'", ",", "'FAILED'", ",", "'IN_PROGRESS'", ")", ":", "index", "=", "val", ".", "rfind", "(", "s", ")", "if", "(", "index", "!=", "(", "-", "1", ")", ")", ":", "status_set", ".", "add", "(", "val", "[", "index", ":", "]", ")", "if", "(", "index", ">", "1", ")", ":", "action_set", ".", "add", "(", "val", "[", ":", "(", "index", "-", "1", ")", "]", ")", "break", "return", "(", "action_set", ",", "status_set", ")" ]
parse input status into action and status if possible .
train
false
35,640
def expanduser(path): return path
[ "def", "expanduser", "(", "path", ")", ":", "return", "path" ]
expand ~ and ~user constructs .
train
false
35,641
def fix_addons_linter_output(validation, listed=True): if ('messages' in validation): return validation def _merged_messages(): for type_ in ('errors', 'notices', 'warnings'): for msg in validation[type_]: msg['uid'] = uuid.uuid4().hex msg['type'] = msg.pop('_type') msg['id'] = [msg.pop('code')] msg['tier'] = 1 (yield msg) identified_files = {name: {'path': path} for (name, path) in validation['metadata'].get('jsLibs', {}).items()} return {'success': (not validation['errors']), 'compatibility_summary': {'warnings': 0, 'errors': 0, 'notices': 0}, 'notices': validation['summary']['notices'], 'warnings': validation['summary']['warnings'], 'errors': validation['summary']['errors'], 'messages': list(_merged_messages()), 'metadata': {'listed': listed, 'identified_files': identified_files, 'processed_by_addons_linter': True, 'is_webextension': True}, 'signing_summary': {'low': 0, 'medium': 0, 'high': 0, 'trivial': 0}, 'detected_type': 'extension', 'ending_tier': 5}
[ "def", "fix_addons_linter_output", "(", "validation", ",", "listed", "=", "True", ")", ":", "if", "(", "'messages'", "in", "validation", ")", ":", "return", "validation", "def", "_merged_messages", "(", ")", ":", "for", "type_", "in", "(", "'errors'", ",", "'notices'", ",", "'warnings'", ")", ":", "for", "msg", "in", "validation", "[", "type_", "]", ":", "msg", "[", "'uid'", "]", "=", "uuid", ".", "uuid4", "(", ")", ".", "hex", "msg", "[", "'type'", "]", "=", "msg", ".", "pop", "(", "'_type'", ")", "msg", "[", "'id'", "]", "=", "[", "msg", ".", "pop", "(", "'code'", ")", "]", "msg", "[", "'tier'", "]", "=", "1", "(", "yield", "msg", ")", "identified_files", "=", "{", "name", ":", "{", "'path'", ":", "path", "}", "for", "(", "name", ",", "path", ")", "in", "validation", "[", "'metadata'", "]", ".", "get", "(", "'jsLibs'", ",", "{", "}", ")", ".", "items", "(", ")", "}", "return", "{", "'success'", ":", "(", "not", "validation", "[", "'errors'", "]", ")", ",", "'compatibility_summary'", ":", "{", "'warnings'", ":", "0", ",", "'errors'", ":", "0", ",", "'notices'", ":", "0", "}", ",", "'notices'", ":", "validation", "[", "'summary'", "]", "[", "'notices'", "]", ",", "'warnings'", ":", "validation", "[", "'summary'", "]", "[", "'warnings'", "]", ",", "'errors'", ":", "validation", "[", "'summary'", "]", "[", "'errors'", "]", ",", "'messages'", ":", "list", "(", "_merged_messages", "(", ")", ")", ",", "'metadata'", ":", "{", "'listed'", ":", "listed", ",", "'identified_files'", ":", "identified_files", ",", "'processed_by_addons_linter'", ":", "True", ",", "'is_webextension'", ":", "True", "}", ",", "'signing_summary'", ":", "{", "'low'", ":", "0", ",", "'medium'", ":", "0", ",", "'high'", ":", "0", ",", "'trivial'", ":", "0", "}", ",", "'detected_type'", ":", "'extension'", ",", "'ending_tier'", ":", "5", "}" ]
make sure the output from the addons-linter is the same as amo-validator for backwards compatibility reasons .
train
false
35,643
def loggedIn(avatar, group): j = avatar.join(group) j.addCallback(joinedGroup, avatar) return j
[ "def", "loggedIn", "(", "avatar", ",", "group", ")", ":", "j", "=", "avatar", ".", "join", "(", "group", ")", "j", ".", "addCallback", "(", "joinedGroup", ",", "avatar", ")", "return", "j" ]
logged in successfully .
train
false
35,645
def gatherResults(deferredList): d = DeferredList(deferredList, fireOnOneErrback=True) d.addCallback(_parseDListResult) return d
[ "def", "gatherResults", "(", "deferredList", ")", ":", "d", "=", "DeferredList", "(", "deferredList", ",", "fireOnOneErrback", "=", "True", ")", "d", ".", "addCallback", "(", "_parseDListResult", ")", "return", "d" ]
returns list with result of given l{deferred}s .
train
false
35,647
def grant_permission(username, resource=None, resource_type='keyspace', permission=None, contact_points=None, port=None, cql_user=None, cql_pass=None): permission_cql = ('grant {0}'.format(permission) if permission else 'grant all permissions') resource_cql = ('on {0} {1}'.format(resource_type, resource) if resource else 'on all keyspaces') query = '{0} {1} to {2}'.format(permission_cql, resource_cql, username) log.debug("Attempting to grant permissions with query '{0}'".format(query)) try: cql_query(query, contact_points, port, cql_user, cql_pass) except CommandExecutionError: log.critical('Could not grant permissions.') raise except BaseException as e: log.critical('Unexpected error while granting permissions: {0}'.format(str(e))) raise return True
[ "def", "grant_permission", "(", "username", ",", "resource", "=", "None", ",", "resource_type", "=", "'keyspace'", ",", "permission", "=", "None", ",", "contact_points", "=", "None", ",", "port", "=", "None", ",", "cql_user", "=", "None", ",", "cql_pass", "=", "None", ")", ":", "permission_cql", "=", "(", "'grant {0}'", ".", "format", "(", "permission", ")", "if", "permission", "else", "'grant all permissions'", ")", "resource_cql", "=", "(", "'on {0} {1}'", ".", "format", "(", "resource_type", ",", "resource", ")", "if", "resource", "else", "'on all keyspaces'", ")", "query", "=", "'{0} {1} to {2}'", ".", "format", "(", "permission_cql", ",", "resource_cql", ",", "username", ")", "log", ".", "debug", "(", "\"Attempting to grant permissions with query '{0}'\"", ".", "format", "(", "query", ")", ")", "try", ":", "cql_query", "(", "query", ",", "contact_points", ",", "port", ",", "cql_user", ",", "cql_pass", ")", "except", "CommandExecutionError", ":", "log", ".", "critical", "(", "'Could not grant permissions.'", ")", "raise", "except", "BaseException", "as", "e", ":", "log", ".", "critical", "(", "'Unexpected error while granting permissions: {0}'", ".", "format", "(", "str", "(", "e", ")", ")", ")", "raise", "return", "True" ]
grant permissions to a user .
train
true
35,648
def sentence(): sections = [u' '.join(random.sample(WORDS, random.randint(3, 12))) for i in range(random.randint(1, 5))] s = u', '.join(sections) return (u'%s%s%s' % (s[0].upper(), s[1:], random.choice('?.')))
[ "def", "sentence", "(", ")", ":", "sections", "=", "[", "u' '", ".", "join", "(", "random", ".", "sample", "(", "WORDS", ",", "random", ".", "randint", "(", "3", ",", "12", ")", ")", ")", "for", "i", "in", "range", "(", "random", ".", "randint", "(", "1", ",", "5", ")", ")", "]", "s", "=", "u', '", ".", "join", "(", "sections", ")", "return", "(", "u'%s%s%s'", "%", "(", "s", "[", "0", "]", ".", "upper", "(", ")", ",", "s", "[", "1", ":", "]", ",", "random", ".", "choice", "(", "'?.'", ")", ")", ")" ]
returns a randomly generated sentence of lorem ipsum text .
train
false
35,649
def report_issues(output, flaky_tests): jira_keys = frozenset().union(*(flaky.jira_keys for (_, flaky) in flaky_tests)) for jira_key in sorted(jira_keys): output.write('{}\n'.format(jira_key))
[ "def", "report_issues", "(", "output", ",", "flaky_tests", ")", ":", "jira_keys", "=", "frozenset", "(", ")", ".", "union", "(", "*", "(", "flaky", ".", "jira_keys", "for", "(", "_", ",", "flaky", ")", "in", "flaky_tests", ")", ")", "for", "jira_key", "in", "sorted", "(", "jira_keys", ")", ":", "output", ".", "write", "(", "'{}\\n'", ".", "format", "(", "jira_key", ")", ")" ]
print all issues for flaky tests .
train
false
35,650
@task def _migrate_collection_colors(ids, model): cls = FeedApp if (model == 'collection'): cls = FeedCollection for obj in cls.objects.filter(id__in=ids): if (obj.background_color and (not obj.color)): try: color = {'#CE001C': 'ruby', '#F78813': 'amber', '#00953F': 'emerald', '#0099D0': 'aquamarine', '#1E1E9C': 'sapphire', '#5A197E': 'amethyst', '#A20D55': 'garnet'}.get(obj.background_color, 'aquamarine') except KeyError: continue obj.update(color=color) log.info(('Migrated %s:%s from %s to %s' % (model, unicode(obj.id), obj.background_color, color)))
[ "@", "task", "def", "_migrate_collection_colors", "(", "ids", ",", "model", ")", ":", "cls", "=", "FeedApp", "if", "(", "model", "==", "'collection'", ")", ":", "cls", "=", "FeedCollection", "for", "obj", "in", "cls", ".", "objects", ".", "filter", "(", "id__in", "=", "ids", ")", ":", "if", "(", "obj", ".", "background_color", "and", "(", "not", "obj", ".", "color", ")", ")", ":", "try", ":", "color", "=", "{", "'#CE001C'", ":", "'ruby'", ",", "'#F78813'", ":", "'amber'", ",", "'#00953F'", ":", "'emerald'", ",", "'#0099D0'", ":", "'aquamarine'", ",", "'#1E1E9C'", ":", "'sapphire'", ",", "'#5A197E'", ":", "'amethyst'", ",", "'#A20D55'", ":", "'garnet'", "}", ".", "get", "(", "obj", ".", "background_color", ",", "'aquamarine'", ")", "except", "KeyError", ":", "continue", "obj", ".", "update", "(", "color", "=", "color", ")", "log", ".", "info", "(", "(", "'Migrated %s:%s from %s to %s'", "%", "(", "model", ",", "unicode", "(", "obj", ".", "id", ")", ",", "obj", ".", "background_color", ",", "color", ")", ")", ")" ]
migrate deprecated background color to color .
train
false
35,652
@register.simple_tag(takes_context=True) def post_process_fieldsets(context, fieldset): if fieldset.model_admin.fieldsets: return fieldset fields_to_include = set(fieldset.form.fields.keys()) for f in (u'id', u'DELETE', u'ORDER'): fields_to_include.discard(f) def _filter_recursive(fields): ret = [] for f in fields: if isinstance(f, (list, tuple)): sub = _filter_recursive(f) if sub: ret.append(sub) elif (f in fields_to_include): ret.append(f) fields_to_include.discard(f) return ret new_fields = _filter_recursive(fieldset.fields) for f in fields_to_include: new_fields.append(f) if context.get(u'request'): new_fields.extend(list(fieldset.model_admin.get_readonly_fields(context.get(u'request'), context.get(u'original')))) fieldset.fields = new_fields return u''
[ "@", "register", ".", "simple_tag", "(", "takes_context", "=", "True", ")", "def", "post_process_fieldsets", "(", "context", ",", "fieldset", ")", ":", "if", "fieldset", ".", "model_admin", ".", "fieldsets", ":", "return", "fieldset", "fields_to_include", "=", "set", "(", "fieldset", ".", "form", ".", "fields", ".", "keys", "(", ")", ")", "for", "f", "in", "(", "u'id'", ",", "u'DELETE'", ",", "u'ORDER'", ")", ":", "fields_to_include", ".", "discard", "(", "f", ")", "def", "_filter_recursive", "(", "fields", ")", ":", "ret", "=", "[", "]", "for", "f", "in", "fields", ":", "if", "isinstance", "(", "f", ",", "(", "list", ",", "tuple", ")", ")", ":", "sub", "=", "_filter_recursive", "(", "f", ")", "if", "sub", ":", "ret", ".", "append", "(", "sub", ")", "elif", "(", "f", "in", "fields_to_include", ")", ":", "ret", ".", "append", "(", "f", ")", "fields_to_include", ".", "discard", "(", "f", ")", "return", "ret", "new_fields", "=", "_filter_recursive", "(", "fieldset", ".", "fields", ")", "for", "f", "in", "fields_to_include", ":", "new_fields", ".", "append", "(", "f", ")", "if", "context", ".", "get", "(", "u'request'", ")", ":", "new_fields", ".", "extend", "(", "list", "(", "fieldset", ".", "model_admin", ".", "get_readonly_fields", "(", "context", ".", "get", "(", "u'request'", ")", ",", "context", ".", "get", "(", "u'original'", ")", ")", ")", ")", "fieldset", ".", "fields", "=", "new_fields", "return", "u''" ]
removes a few fields from feincms admin inlines .
train
false
35,653
def get_all_vlan_bindings(): LOG.debug(_('get_all_vlan_bindings() called')) session = db.get_session() try: bindings = session.query(network_models_v2.Vlan_Binding).all() return bindings except exc.NoResultFound: return []
[ "def", "get_all_vlan_bindings", "(", ")", ":", "LOG", ".", "debug", "(", "_", "(", "'get_all_vlan_bindings() called'", ")", ")", "session", "=", "db", ".", "get_session", "(", ")", "try", ":", "bindings", "=", "session", ".", "query", "(", "network_models_v2", ".", "Vlan_Binding", ")", ".", "all", "(", ")", "return", "bindings", "except", "exc", ".", "NoResultFound", ":", "return", "[", "]" ]
lists all the vlan to network associations .
train
false