id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
27,440
def getDjangoObjects(context):
    """Return the DjangoReferenceCollection cached on ``context.extra``.

    The collection is stored under the key ``'django_objects'``; it is
    created on first access and reused afterwards.
    """
    extra = context.extra
    if 'django_objects' not in extra:
        extra['django_objects'] = DjangoReferenceCollection()
    return extra['django_objects']
[ "def", "getDjangoObjects", "(", "context", ")", ":", "c", "=", "context", ".", "extra", "k", "=", "'django_objects'", "try", ":", "return", "c", "[", "k", "]", "except", "KeyError", ":", "c", "[", "k", "]", "=", "DjangoReferenceCollection", "(", ")", "...
returns a reference to the c{django_objects} on the context .
train
true
27,442
def externalize(taskclass_or_taskobject):
    """Return an externalized copy of a task class or task instance.

    The copy has its ``run`` attribute set to ``None``.  When the argument
    is a class the shallow copy *is* the original object, so a throwaway
    subclass is created instead, leaving the original class untouched.
    """
    # Shallow copy on Python 3, deep copy on Python 2 (compatibility shim).
    compatible_copy = (copy.copy if six.PY3 else copy.deepcopy)
    copied_value = compatible_copy(taskclass_or_taskobject)
    if (copied_value is taskclass_or_taskobject):
        # Argument was a class: copy returned the same object, so derive a
        # subclass and neuter ``run`` on that instead.
        clazz = taskclass_or_taskobject

        @_task_wraps(clazz)
        class _CopyOfClass(clazz, ):
            # Keep the throwaway subclass out of the task registry.
            _visible_in_registry = False
        _CopyOfClass.run = None
        return _CopyOfClass
    else:
        # Instance (or already-distinct copy): disable run directly.
        copied_value.run = None
        return copied_value
[ "def", "externalize", "(", "taskclass_or_taskobject", ")", ":", "compatible_copy", "=", "(", "copy", ".", "copy", "if", "six", ".", "PY3", "else", "copy", ".", "deepcopy", ")", "copied_value", "=", "compatible_copy", "(", "taskclass_or_taskobject", ")", "if", ...
returns an externalized version of a task .
train
true
27,444
def tabulate(table, wrap_column=0, output=sys.stdout):
    """Write ``table`` (list of rows; row 0 is the header) to ``output`` as
    left-justified, aligned columns.

    wrap_column: when > 0, columns wider than this are wrapped and the
    overflow of each wrapped cell is spilled into a freshly inserted row.
    NOTE: mutates ``table`` (pops the header, inserts overflow rows).
    Python 2 code (uses ``unicode``).
    """
    # Per-column widths; the last column is forced to its header's width.
    column_widths = [get_max_width(table, i) for i in range(len(table[0]))]
    column_widths[(-1)] = len(table[0][(-1)])
    if ((wrap_column is not None) and (wrap_column > 0)):
        # Clamp every column to the wrap limit.
        column_widths = [(width if (width <= wrap_column) else wrap_column) for width in column_widths]
    headers = table.pop(0)
    # Header line: two spaces of padding between columns, none after last.
    for i in range(len(headers)):
        padding = (2 if (i < (len(headers) - 1)) else 0)
        output.write(unicode(headers[i]).ljust((column_widths[i] + padding)))
    output.write(u'\n')
    # Underline row: dashes as wide as each non-empty header.
    for i in range(len(headers)):
        padding = (2 if (i < (len(headers) - 1)) else 0)
        if headers[i]:
            output.write((u'-' * len(headers[i])).ljust((column_widths[i] + padding)))
        else:
            output.write(u''.ljust((column_widths[i] + padding)))
    output.write(u'\n')
    added_row = False
    for (j, row) in enumerate(table):
        for i in range(len(row)):
            padding = (2 if (i < (len(row) - 1)) else 0)
            column = unicode(row[i])
            if ((wrap_column is not None) and (wrap_column != 0) and (len(column) > wrap_column)):
                # Keep the first wrapped chunk on this row; push the rest
                # into the next row (inserted at most once per source row).
                wrapped = textwrap.wrap(column, wrap_column)
                column = wrapped.pop(0)
                lines = u''.join(wrapped)
                if added_row:
                    table[(j + 1)][i] = lines
                else:
                    table.insert((j + 1), ([u''] * len(row)))
                    table[(j + 1)][i] = lines
                    added_row = True
            output.write(column.ljust((column_widths[i] + padding)))
        added_row = False
        output.write(u'\n')
    output.write(u'\n')
[ "def", "tabulate", "(", "table", ",", "wrap_column", "=", "0", ",", "output", "=", "sys", ".", "stdout", ")", ":", "column_widths", "=", "[", "get_max_width", "(", "table", ",", "i", ")", "for", "i", "in", "range", "(", "len", "(", "table", "[", "0...
format a table of rows as aligned columns and write it to the given output stream, optionally wrapping wide columns .
train
false
27,445
def subnets():
    """Return the IPv4 subnets this host belongs to.

    Thin wrapper that delegates to ``salt.utils.network.subnets``.
    """
    host_subnets = salt.utils.network.subnets()
    return host_subnets
[ "def", "subnets", "(", ")", ":", "return", "salt", ".", "utils", ".", "network", ".", "subnets", "(", ")" ]
returns a list of ipv4 subnets to which the host belongs .
train
false
27,446
def pportInp():
    """Read a single unsigned char from the parallel port at baseAddress."""
    value = port.DlPortReadPortUchar(baseAddress)
    return value
[ "def", "pportInp", "(", ")", ":", "return", "port", ".", "DlPortReadPortUchar", "(", "baseAddress", ")" ]
input from baseaddress .
train
false
27,447
def get_github_creds():
    """Return GitHub credentials as a (username, token) pair, or None.

    Checks the netrc entry for api.github.com first, then the JSON config
    file at ~/.config/edx-release; credentials from the config are only
    returned when both username and token are non-empty.
    """
    netrc_auth = requests.utils.get_netrc_auth(u'https://api.github.com')
    if netrc_auth:
        return netrc_auth
    config_file = path(u'~/.config/edx-release').expand()
    if not config_file.isfile():
        return None
    with open(config_file) as f:
        config = json.load(f)
    github_creds = config.get(u'credentials', {}).get(u'api.github.com', {})
    username = github_creds.get(u'username', u'')
    token = github_creds.get(u'token', u'')
    if username and token:
        return (username, token)
    return None
[ "def", "get_github_creds", "(", ")", ":", "netrc_auth", "=", "requests", ".", "utils", ".", "get_netrc_auth", "(", "u'https://api.github.com'", ")", "if", "netrc_auth", ":", "return", "netrc_auth", "config_file", "=", "path", "(", "u'~/.config/edx-release'", ")", ...
returns github credentials if they exist .
train
false
27,449
def list_terms(type):
    """Get the list of best terms of the given type from the
    'artist/list_terms' API endpoint.
    """
    endpoint = ('%s/%s' % ('artist', 'list_terms'))
    response = util.callm(endpoint, {'type': type})
    return response['response']['terms']
[ "def", "list_terms", "(", "type", ")", ":", "kwargs", "=", "{", "'type'", ":", "type", "}", "result", "=", "util", ".", "callm", "(", "(", "'%s/%s'", "%", "(", "'artist'", ",", "'list_terms'", ")", ")", ",", "kwargs", ")", "return", "result", "[", ...
get a list of best terms to use with search args: kwargs: type : the type of term to return .
train
false
27,450
def _type_reconstructor(reconstructor, reconstructor_args, state): obj = reconstructor(*reconstructor_args) if state: obj.__dict__.update(state) return type(obj)._intern(obj)
[ "def", "_type_reconstructor", "(", "reconstructor", ",", "reconstructor_args", ",", "state", ")", ":", "obj", "=", "reconstructor", "(", "*", "reconstructor_args", ")", "if", "state", ":", "obj", ".", "__dict__", ".", "update", "(", "state", ")", "return", "...
rebuild function for unpickling types .
train
false
27,451
def walk_trees(store, tree1_id, tree2_id, prune_identical=False):
    """Recursively walk the entries of two trees in parallel.

    Yields (entry1, entry2) TreeEntry pairs starting with the root pair and
    descending depth-first.  With prune_identical, subtree pairs that
    compare equal are skipped entirely.
    """
    # Synthesize directory modes for the root entries (None for a side
    # whose tree id is missing).
    mode1 = ((tree1_id and stat.S_IFDIR) or None)
    mode2 = ((tree2_id and stat.S_IFDIR) or None)
    todo = [(TreeEntry('', mode1, tree1_id), TreeEntry('', mode2, tree2_id))]
    while todo:
        (entry1, entry2) = todo.pop()
        is_tree1 = _is_tree(entry1)
        is_tree2 = _is_tree(entry2)
        if (prune_identical and is_tree1 and is_tree2 and (entry1 == entry2)):
            continue
        # Load tree objects only for the sides that are directories.
        tree1 = ((is_tree1 and store[entry1.sha]) or None)
        tree2 = ((is_tree2 and store[entry2.sha]) or None)
        path = (entry1.path or entry2.path)
        # reversed() so that the LIFO pop() visits children in order.
        todo.extend(reversed(_merge_entries(path, tree1, tree2)))
        (yield (entry1, entry2))
[ "def", "walk_trees", "(", "store", ",", "tree1_id", ",", "tree2_id", ",", "prune_identical", "=", "False", ")", ":", "mode1", "=", "(", "(", "tree1_id", "and", "stat", ".", "S_IFDIR", ")", "or", "None", ")", "mode2", "=", "(", "(", "tree2_id", "and", ...
recursively walk all the entries of two trees .
train
false
27,453
def _cmp_by_igp_cost(path1, path2):
    """Select the route with the lowest IGP cost to the next hop.

    Currently a placeholder: returning None signals that this comparison is
    inconclusive (no preference between path1 and path2).
    """
    return None
[ "def", "_cmp_by_igp_cost", "(", "path1", ",", "path2", ")", ":", "return", "None" ]
select the route with the lowest igp cost to the next hop .
train
false
27,454
def _read_string(fid, n_bytes, decode=True): s0 = fid.read(n_bytes) s = s0.split('\x00')[0] return (s.decode('utf-8') if decode else s)
[ "def", "_read_string", "(", "fid", ",", "n_bytes", ",", "decode", "=", "True", ")", ":", "s0", "=", "fid", ".", "read", "(", "n_bytes", ")", "s", "=", "s0", ".", "split", "(", "'\\x00'", ")", "[", "0", "]", "return", "(", "s", ".", "decode", "(...
read string .
train
false
27,455
def _define_atomic_inc_dec(module, op, ordering):
    """Define an LLVM function ``nrt_atomic_<op>`` in *module*.

    op is "add" or "sub"; the generated function applies ``<op> 1`` to the
    word its single pointer argument refers to, using the given memory
    *ordering* for the atomic path.
    """
    ftype = ir.FunctionType(_word_type, [_word_type.as_pointer()])
    fn_atomic = ir.Function(module, ftype, name='nrt_atomic_{0}'.format(op))
    [ptr] = fn_atomic.args
    bb = fn_atomic.append_basic_block()
    builder = ir.IRBuilder(bb)
    ONE = ir.Constant(_word_type, 1)
    if (not _disable_atomicity):
        # atomicrmw yields the OLD value; re-apply the op to return NEW.
        oldval = builder.atomic_rmw(op, ptr, ONE, ordering=ordering)
        res = getattr(builder, op)(oldval, ONE)
        builder.ret(res)
    else:
        # Non-atomic fallback: plain load / op / store.
        # NOTE(review): this branch returns the OLD value while the atomic
        # branch returns the NEW value — confirm which is intended.
        oldval = builder.load(ptr)
        newval = getattr(builder, op)(oldval, ONE)
        builder.store(newval, ptr)
        builder.ret(oldval)
    return fn_atomic
[ "def", "_define_atomic_inc_dec", "(", "module", ",", "op", ",", "ordering", ")", ":", "ftype", "=", "ir", ".", "FunctionType", "(", "_word_type", ",", "[", "_word_type", ".", "as_pointer", "(", ")", "]", ")", "fn_atomic", "=", "ir", ".", "Function", "(",...
define a llvm function for atomic increment/decrement to the given module argument op is the operation "add"/"sub" .
train
false
27,456
def channel_capacity(n, m, sum_x=1):
    """Channel-capacity style convex program (Boyd & Vandenberghe) in CVXPY.

    n, m: input/output alphabet sizes; sum_x: required total of the input
    distribution x.  Returns (status, optimal value, optimal x); NaNs when
    the solver does not report 'optimal'.  Python 2 code (print statement).

    NOTE(review): P is a matrix of ones rather than a stochastic channel
    matrix — presumably meant to be replaced by the caller; confirm.
    """
    if ((n * m) == 0):
        print 'The range of both input and output values must be greater than zero'
        return ('failed', np.nan, np.nan)
    P = np.ones((m, n))
    x = cvx.Variable(rows=n, cols=1)
    y = (P * x)
    # Objective pieces: linear term from P*log2(P) plus entropy of y.
    c = np.sum((P * np.log2(P)), axis=0)
    I = ((c * x) + cvx.sum_entries(cvx.entr(y)))
    # Maximize I by minimizing -I, with x a non-negative distribution.
    obj = cvx.Minimize((- I))
    constraints = [(cvx.sum_entries(x) == sum_x), (x >= 0)]
    prob = cvx.Problem(obj, constraints)
    prob.solve()
    if (prob.status == 'optimal'):
        return (prob.status, prob.value, x.value)
    else:
        return (prob.status, np.nan, np.nan)
[ "def", "channel_capacity", "(", "n", ",", "m", ",", "sum_x", "=", "1", ")", ":", "if", "(", "(", "n", "*", "m", ")", "==", "0", ")", ":", "print", "'The range of both input and output values must be greater than zero'", "return", "(", "'failed'", ",", "np", ...
boyd and vandenberghe .
train
false
27,457
def hex_array(data):
    """Render a bytes-like value as space-separated '0xNN' hex bytes."""
    formatted = ['0x%02x' % octet for octet in bytearray(data)]
    return ' '.join(formatted)
[ "def", "hex_array", "(", "data", ")", ":", "return", "' '", ".", "join", "(", "(", "(", "'0x%02x'", "%", "byte", ")", "for", "byte", "in", "bytearray", "(", "data", ")", ")", ")" ]
convert a bytes-like value to a space-separated string of '0xNN' hex byte values .
train
false
27,458
def hex_to_sha(hex):
    """Convert a 40-character hex sha into its 20-byte binary form.

    A TypeError from unhexlify on a bytes input is translated into a
    ValueError; for non-bytes input the TypeError propagates unchanged.
    """
    assert (len(hex) == 40), ('Incorrect length of hexsha: %s' % hex)
    try:
        return binascii.unhexlify(hex)
    except TypeError as exc:
        if isinstance(hex, bytes):
            raise ValueError(exc.args[0])
        raise
[ "def", "hex_to_sha", "(", "hex", ")", ":", "assert", "(", "len", "(", "hex", ")", "==", "40", ")", ",", "(", "'Incorrect length of hexsha: %s'", "%", "hex", ")", "try", ":", "return", "binascii", ".", "unhexlify", "(", "hex", ")", "except", "TypeError", ...
takes a hex sha and returns a binary sha .
train
false
27,459
def save_inventory(inventory_json, save_path):
    """Write the serialized inventory (bytes) to the inventory file.

    When save_path equals INVENTORY_FILENAME the target is located via
    file_find(); otherwise the file is created inside the save_path
    directory under the standard inventory filename.
    """
    if INVENTORY_FILENAME == save_path:
        target = file_find(save_path)
    else:
        target = os.path.join(save_path, INVENTORY_FILENAME)
    with open(target, 'wb') as handle:
        handle.write(inventory_json)
    logger.info('Inventory written')
[ "def", "save_inventory", "(", "inventory_json", ",", "save_path", ")", ":", "if", "(", "INVENTORY_FILENAME", "==", "save_path", ")", ":", "inventory_file", "=", "file_find", "(", "save_path", ")", "else", ":", "inventory_file", "=", "os", ".", "path", ".", "...
save an inventory dictionary .
train
false
27,460
def precedence_traditional(item):
    """Return the precedence of *item* under traditional mathematical
    notation rules.

    Big operators (integrals, sums, products, limits, derivatives) bind
    like Mul; unevaluated expressions defer to their wrapped argument;
    everything else uses the standard precedence() lookup.
    """
    from sympy import Integral, Sum, Product, Limit, Derivative
    from sympy.core.expr import UnevaluatedExpr

    if isinstance(item, UnevaluatedExpr):
        return precedence_traditional(item.args[0])
    if isinstance(item, (Integral, Sum, Product, Limit, Derivative)):
        return PRECEDENCE['Mul']
    return precedence(item)
[ "def", "precedence_traditional", "(", "item", ")", ":", "from", "sympy", "import", "Integral", ",", "Sum", ",", "Product", ",", "Limit", ",", "Derivative", "from", "sympy", ".", "core", ".", "expr", "import", "UnevaluatedExpr", "if", "isinstance", "(", "item...
returns the precedence of a given object according to the traditional rules of mathematics .
train
false
27,463
def disable_hpauth():
    """Disable authentication by clearing the module-level mb_auth flag."""
    global mb_auth
    mb_auth = False
[ "def", "disable_hpauth", "(", ")", ":", "global", "mb_auth", "mb_auth", "=", "False" ]
disable the authentication for musicbrainz xml api .
train
false
27,464
def processlist(**connection_args):
    """Retrieve 'SHOW FULL PROCESSLIST' from the MySQL server.

    Returns a list with one dict per server thread, keyed by the
    result-set column names; an empty list when no connection was made.
    """
    dbc = _connect(**connection_args)
    if dbc is None:
        return []
    cur = dbc.cursor()
    _execute(cur, 'SHOW FULL PROCESSLIST')
    columns = [desc[0] for desc in cur.description]
    rows = []
    for _ in range(cur.rowcount):
        rows.append(dict(zip(columns, cur.fetchone())))
    cur.close()
    return rows
[ "def", "processlist", "(", "**", "connection_args", ")", ":", "ret", "=", "[", "]", "dbc", "=", "_connect", "(", "**", "connection_args", ")", "if", "(", "dbc", "is", "None", ")", ":", "return", "[", "]", "cur", "=", "dbc", ".", "cursor", "(", ")",...
retrieves the processlist from the mysql server via "show full processlist" .
train
true
27,465
def gf_normal(f, p, K):
    """Normalize all coefficients of f into the domain K, truncated mod p."""
    coeffs = [K(coeff) for coeff in f]
    return gf_trunc(coeffs, p)
[ "def", "gf_normal", "(", "f", ",", "p", ",", "K", ")", ":", "return", "gf_trunc", "(", "list", "(", "map", "(", "K", ",", "f", ")", ")", ",", "p", ")" ]
normalize all coefficients in k .
train
false
27,468
@register.filter
def currency(value):
    """Format *value* as a locale-aware currency string (grouped digits).

    Falsy values are treated as zero.  On Windows the formatted string may
    come back as bytes and is re-decoded with the preferred encoding.
    """
    set_locale()
    amount = Decimal(value) if value else Decimal(0)
    formatted = locale.currency(amount, grouping=True)
    if platform.system() == u'Windows':
        try:
            formatted = str(formatted, encoding=locale.getpreferredencoding())
        except TypeError:
            # Already a text string; nothing to decode.
            pass
    return formatted
[ "@", "register", ".", "filter", "def", "currency", "(", "value", ")", ":", "set_locale", "(", ")", "if", "(", "not", "value", ")", ":", "value", "=", "0", "value", "=", "locale", ".", "currency", "(", "Decimal", "(", "value", ")", ",", "grouping", ...
format a value as currency according to locale .
train
false
27,469
def roberts(image, mask=None):
    """Find the edge magnitude of a 2-D image using the Roberts' Cross
    operator: the root-sum-square of the two diagonal responses, divided
    by sqrt(2)."""
    assert_nD(image, 2)
    pos = roberts_pos_diag(image, mask)
    neg = roberts_neg_diag(image, mask)
    magnitude = np.sqrt((pos ** 2) + (neg ** 2))
    magnitude /= np.sqrt(2)
    return magnitude
[ "def", "roberts", "(", "image", ",", "mask", "=", "None", ")", ":", "assert_nD", "(", "image", ",", "2", ")", "out", "=", "np", ".", "sqrt", "(", "(", "(", "roberts_pos_diag", "(", "image", ",", "mask", ")", "**", "2", ")", "+", "(", "roberts_neg...
find the edge magnitude using roberts cross operator .
train
false
27,470
def test_proxy_post():
    """Proxying test: POST through the /proxy/ view to a local test server
    and verify the form fields are echoed back in the response."""
    client = Client()
    client.login(username='test', password='test')
    (httpd, finish) = run_test_server()
    try:
        # Temporarily whitelist 127.0.0.1 (any port) for the proxy.
        finish_conf = proxy.conf.WHITELIST.set_for_testing('127\\.0\\.0\\.1:\\d*')
        try:
            response_post = client.post(('/proxy/127.0.0.1/%s/' % httpd.server_port), dict(foo='bar', foo2='bar'))
        finally:
            finish_conf()
        assert_true(('Hello there' in response_post.content))
        assert_true(('You requested: /.' in response_post.content))
        assert_true(('foo=bar' in response_post.content))
        assert_true(('foo2=bar' in response_post.content))
    finally:
        finish()
[ "def", "test_proxy_post", "(", ")", ":", "client", "=", "Client", "(", ")", "client", ".", "login", "(", "username", "=", "'test'", ",", "password", "=", "'test'", ")", "(", "httpd", ",", "finish", ")", "=", "run_test_server", "(", ")", "try", ":", "...
proxying test .
train
false
27,471
def extract_want_line_capabilities(text):
    """Extract a capabilities list from a want line.

    Returns (want line, capability list); lines with fewer than three
    space-separated fields carry no capabilities and are returned
    unchanged with an empty list.
    """
    fields = text.rstrip().split(' ')
    if len(fields) < 3:
        return (text, [])
    want_part = ' '.join(fields[:2])
    return (want_part, fields[2:])
[ "def", "extract_want_line_capabilities", "(", "text", ")", ":", "split_text", "=", "text", ".", "rstrip", "(", ")", ".", "split", "(", "' '", ")", "if", "(", "len", "(", "split_text", ")", "<", "3", ")", ":", "return", "(", "text", ",", "[", "]", "...
extract a capabilities list from a want line .
train
false
27,473
def _element_to_bson(key, value, check_keys, opts):
    """Encode a single key/value pair to BSON.

    Raises InvalidDocument for non-string keys and, when check_keys is
    set, for keys starting with '$' or containing '.'.
    """
    if not isinstance(key, string_type):
        raise InvalidDocument(('documents must have only string keys, key was %r' % (key,)))
    if check_keys and key.startswith('$'):
        raise InvalidDocument(("key %r must not start with '$'" % (key,)))
    if check_keys and ('.' in key):
        raise InvalidDocument(("key %r must not contain '.'" % (key,)))
    name = _make_name(key)
    return _name_value_to_bson(name, value, check_keys, opts)
[ "def", "_element_to_bson", "(", "key", ",", "value", ",", "check_keys", ",", "opts", ")", ":", "if", "(", "not", "isinstance", "(", "key", ",", "string_type", ")", ")", ":", "raise", "InvalidDocument", "(", "(", "'documents must have only string keys, key was %r...
encode a single key .
train
true
27,478
def CallWithRetryAsync(retry_policy, func, *args, **kwargs):
    """Wrap an asynchronous callback-style function with retry behavior.

    ``func`` must take a named ``callback`` argument (asserted below); its
    callback is intercepted so the retry manager can decide, from either
    the callback arguments or a raised exception, whether to re-invoke
    ``func`` or pass the result through to the caller's callback.
    """
    inner_callback = kwargs.get('callback', None)
    assert ('callback' in kwargs), 'CallWithRetryAsync requires a named "callback" argument that is not None.'
    retry_manager = retry_policy.CreateManager()

    def _OnCompletedCall(*callback_args, **callback_kwargs):
        """Called when the operation has completed.

        Determine whether to retry, based on the arguments to the callback;
        when not retrying, disable the exception-path retry and forward the
        result to the caller's callback.
        """
        retry_func = functools.partial(func, *args, **kwargs)
        if (not retry_manager.MaybeRetryOnResult(retry_func, *callback_args, **callback_kwargs)):
            exception_context.check_retry = False
            inner_callback(*callback_args, **callback_kwargs)

    def _OnException(type, value, tb):
        """Called if the operation raises an exception.

        Determine whether to retry or re-raise, based on the exception
        details; falls through (re-raises) once retrying is disabled.
        """
        if exception_context.check_retry:
            retry_func = functools.partial(func, *args, **kwargs)
            return retry_manager.MaybeRetryOnException(retry_func, type, value, tb)

    # Substitute our interceptor for the caller's callback before invoking.
    kwargs['callback'] = _OnCompletedCall
    exception_context = ExceptionStackContext(_OnException)
    exception_context.check_retry = True
    with exception_context:
        func(*args, **kwargs)
[ "def", "CallWithRetryAsync", "(", "retry_policy", ",", "func", ",", "*", "args", ",", "**", "kwargs", ")", ":", "inner_callback", "=", "kwargs", ".", "get", "(", "'callback'", ",", "None", ")", "assert", "(", "'callback'", "in", "kwargs", ")", ",", "'Cal...
this is a higher-order function that wraps an arbitrary asynchronous function in order to add retry functionality .
train
false
27,482
def _index(key, sequence, testfn=None, keyfn=None): index = 0 for element in sequence: value = element if keyfn: value = keyfn(value) if (((not testfn) and (value == key)) or (testfn and testfn(value, key))): return index index = (index + 1) return None
[ "def", "_index", "(", "key", ",", "sequence", ",", "testfn", "=", "None", ",", "keyfn", "=", "None", ")", ":", "index", "=", "0", "for", "element", "in", "sequence", ":", "value", "=", "element", "if", "keyfn", ":", "value", "=", "keyfn", "(", "val...
return the index of key within sequence .
train
false
27,483
def cgconfig_stop():
    """Stop the cgconfig service (delegates to service_cgconfig_control)."""
    return service_cgconfig_control('stop')
[ "def", "cgconfig_stop", "(", ")", ":", "return", "service_cgconfig_control", "(", "'stop'", ")" ]
stop cgconfig service .
train
false
27,484
def SSLeay_version(type):
    """Return a byte string describing the version of OpenSSL in use.

    *type* selects which component of the version information to return;
    it is passed straight through to the underlying SSLeay_version call.
    """
    return _ffi.string(_lib.SSLeay_version(type))
[ "def", "SSLeay_version", "(", "type", ")", ":", "return", "_ffi", ".", "string", "(", "_lib", ".", "SSLeay_version", "(", "type", ")", ")" ]
return a string describing the version of openssl in use .
train
false
27,485
def office():
    """RESTful CRUD controller (delegates to s3db.org_office_controller)."""
    return s3db.org_office_controller()
[ "def", "office", "(", ")", ":", "return", "s3db", ".", "org_office_controller", "(", ")" ]
restful crud controller .
train
false
27,486
@library.filter
def fe(format_string, *args, **kwargs):
    """Format a safe string with potentially unsafe arguments.

    Every positional and keyword argument is HTML-escaped before being
    substituted into format_string; the result is wrapped in Markup so it
    is not escaped again.  Python 2 code (uses ``unicode``).
    """
    args = [jinja2.escape(smart_text(v)) for v in args]
    for k in kwargs:
        kwargs[k] = jinja2.escape(smart_text(kwargs[k]))
    # Promote byte strings so .format() operates on unicode.
    if isinstance(format_string, str):
        format_string = unicode(format_string)
    return jinja2.Markup(format_string.format(*args, **kwargs))
[ "@", "library", ".", "filter", "def", "fe", "(", "format_string", ",", "*", "args", ",", "**", "kwargs", ")", ":", "args", "=", "[", "jinja2", ".", "escape", "(", "smart_text", "(", "v", ")", ")", "for", "v", "in", "args", "]", "for", "k", "in", ...
format a safe string with potentially unsafe arguments .
train
false
27,487
@register.filter(u'timesince', is_safe=False)
def timesince_filter(value, arg=None):
    """Format a date as the time since that date (optionally relative to
    *arg*); returns an empty string for falsy or unformattable input."""
    if not value:
        return u''
    try:
        return timesince(value, arg) if arg else timesince(value)
    except (ValueError, TypeError):
        return u''
[ "@", "register", ".", "filter", "(", "u'timesince'", ",", "is_safe", "=", "False", ")", "def", "timesince_filter", "(", "value", ",", "arg", "=", "None", ")", ":", "if", "(", "not", "value", ")", ":", "return", "u''", "try", ":", "if", "arg", ":", ...
formats a date as the time since that date .
train
false
27,489
def parse_test_output(txt):
    """Parse unittest runner output and return an (errors, failures) pair.

    Recognizes the three 'FAILED (...)' summary forms; anything else
    (e.g. 'OK') yields (0, 0).
    """
    both = re.search('^FAILED \\(errors=(\\d+), failures=(\\d+)\\)', txt, re.MULTILINE)
    if both:
        return (int(both.group(1)), int(both.group(2)))
    errors_only = re.search('^FAILED \\(errors=(\\d+)\\)', txt, re.MULTILINE)
    if errors_only:
        return (int(errors_only.group(1)), 0)
    failures_only = re.search('^FAILED \\(failures=(\\d+)\\)', txt, re.MULTILINE)
    if failures_only:
        return (0, int(failures_only.group(1)))
    return (0, 0)
[ "def", "parse_test_output", "(", "txt", ")", ":", "err_m", "=", "re", ".", "search", "(", "'^FAILED \\\\(errors=(\\\\d+)\\\\)'", ",", "txt", ",", "re", ".", "MULTILINE", ")", "if", "err_m", ":", "nerr", "=", "int", "(", "err_m", ".", "group", "(", "1", ...
parse the output of a test run and return errors .
train
false
27,490
def diff_filenames(*args):
    """Return the list of filenames touched by a diff, using
    'git diff-tree' with name-only, NUL-separated output."""
    raw = git.diff_tree(name_only=True, no_commit_id=True, r=True, z=True, *args)[STDOUT]
    return _parse_diff_filenames(raw)
[ "def", "diff_filenames", "(", "*", "args", ")", ":", "out", "=", "git", ".", "diff_tree", "(", "name_only", "=", "True", ",", "no_commit_id", "=", "True", ",", "r", "=", "True", ",", "z", "=", "True", ",", "*", "args", ")", "[", "STDOUT", "]", "r...
return a list of filenames that have been modified .
train
false
27,491
def arguments_format_string(args, types):
    """Build a printf() format string for the given argument descriptors,
    joining the per-argument conversion specs with ', '."""
    if not args:
        return ''
    return ', '.join(printf_format_for_type(arg['type'], types) for arg in args)
[ "def", "arguments_format_string", "(", "args", ",", "types", ")", ":", "if", "(", "len", "(", "args", ")", "==", "0", ")", ":", "return", "''", "parts", "=", "[", "printf_format_for_type", "(", "x", "[", "'type'", "]", ",", "types", ")", "for", "x", ...
returns a format string for printing the given arguments with printf() .
train
false
27,492
def interleave_planes(ipixels, apixels, ipsize, apsize):
    """Interleave two sample planes.

    Builds an array whose pixels consist of ipsize samples from ipixels
    followed by apsize samples from apixels, repeated for every pixel.
    """
    image_total = len(ipixels)
    alpha_total = len(apixels)
    combined_total = image_total + alpha_total
    combined_psize = ipsize + apsize
    # Seed an array of the right typecode and length; every slot is then
    # overwritten by the strided slice assignments below.
    out = array(ipixels.typecode)
    out.extend(ipixels)
    out.extend(apixels)
    for offset in range(ipsize):
        out[offset:combined_total:combined_psize] = ipixels[offset:image_total:ipsize]
    for offset in range(apsize):
        out[(offset + ipsize):combined_total:combined_psize] = apixels[offset:alpha_total:apsize]
    return out
[ "def", "interleave_planes", "(", "ipixels", ",", "apixels", ",", "ipsize", ",", "apsize", ")", ":", "itotal", "=", "len", "(", "ipixels", ")", "atotal", "=", "len", "(", "apixels", ")", "newtotal", "=", "(", "itotal", "+", "atotal", ")", "newpsize", "=...
interleave planes .
train
true
27,493
def make_inmemorystatepersister(test_case):
    """Create an InMemoryStatePersister for use in tests.

    Returns a (persister, get_state) pair; *test_case* is accepted for
    factory-interface compatibility but unused here.
    """
    state_persister = InMemoryStatePersister()
    return (state_persister, state_persister.get_state)
[ "def", "make_inmemorystatepersister", "(", "test_case", ")", ":", "state_persister", "=", "InMemoryStatePersister", "(", ")", "return", "(", "state_persister", ",", "state_persister", ".", "get_state", ")" ]
create a inmemorystatepersister for use in tests .
train
false
27,494
@register.simple_tag(takes_context=True)
def diff_expand_link(context, expanding, tooltip, expand_pos_1=None, expand_pos_2=None, text=None):
    """Render a diff expansion link.

    ``expanding == u'all'`` expands the whole chunk; any other value widens
    the current lines-of-context window by the two offsets and picks the
    matching expand icon.
    """
    if expanding == u'all':
        icon_class = u'rb-icon-diff-expand-all'
        position = None
    else:
        context_lines = context[u'lines_of_context']
        position = ((context_lines[0] + expand_pos_1),
                    (context_lines[1] + expand_pos_2))
        icon_class = (u'rb-icon-diff-expand-%s' % expanding)
    return _diff_expand_link(context, True, text, tooltip, position, icon_class)
[ "@", "register", ".", "simple_tag", "(", "takes_context", "=", "True", ")", "def", "diff_expand_link", "(", "context", ",", "expanding", ",", "tooltip", ",", "expand_pos_1", "=", "None", ",", "expand_pos_2", "=", "None", ",", "text", "=", "None", ")", ":",...
renders a diff expansion link .
train
false
27,496
def test_lex_fractions():
    """Make sure fractions tokenize to a (fraction numerator denominator)
    expression."""
    objs = tokenize('1/2')
    assert (objs == [HyExpression([HySymbol('fraction'), HyInteger(1), HyInteger(2)])])
[ "def", "test_lex_fractions", "(", ")", ":", "objs", "=", "tokenize", "(", "'1/2'", ")", "assert", "(", "objs", "==", "[", "HyExpression", "(", "[", "HySymbol", "(", "'fraction'", ")", ",", "HyInteger", "(", "1", ")", ",", "HyInteger", "(", "2", ")", ...
make sure that fractions are valid expressions .
train
false
27,497
def get_order_source_modifier_modules():
    """Return the configured order source modifier module instances,
    loaded from the SHUUP_ORDER_SOURCE_MODIFIER_MODULES setting."""
    return load_module_instances(u'SHUUP_ORDER_SOURCE_MODIFIER_MODULES', u'order_source_modifier_module')
[ "def", "get_order_source_modifier_modules", "(", ")", ":", "return", "load_module_instances", "(", "u'SHUUP_ORDER_SOURCE_MODIFIER_MODULES'", ",", "u'order_source_modifier_module'", ")" ]
get a list of configured order source modifier module instances .
train
false
27,498
def PrePlot(num=None, rows=None, cols=None):
    """Take hints about what's coming.

    num: how many lines will be plotted (seeds the color iterator).
    rows, cols: subplot grid shape; providing only one of them implies 1
    for the other.  Known shapes also set a preferred figure size, and the
    grid is remembered in module globals.
    """
    if num:
        _Brewer.InitializeIter(num)
    if ((rows is None) and (cols is None)):
        return
    if ((rows is not None) and (cols is None)):
        cols = 1
    if ((cols is not None) and (rows is None)):
        rows = 1
    # Preferred figure sizes (inches) for common grid shapes.
    size_map = {(1, 1): (8, 6), (1, 2): (14, 6), (1, 3): (14, 6), (2, 2): (10, 10), (2, 3): (16, 10), (3, 1): (8, 10)}
    if ((rows, cols) in size_map):
        fig = pyplot.gcf()
        fig.set_size_inches(*size_map[(rows, cols)])
    if ((rows > 1) or (cols > 1)):
        # Select the first cell of the grid.
        pyplot.subplot(rows, cols, 1)
    # Remember the grid shape for later subplot selection.
    global SUBPLOT_ROWS, SUBPLOT_COLS
    SUBPLOT_ROWS = rows
    SUBPLOT_COLS = cols
[ "def", "PrePlot", "(", "num", "=", "None", ",", "rows", "=", "None", ",", "cols", "=", "None", ")", ":", "if", "num", ":", "_Brewer", ".", "InitializeIter", "(", "num", ")", "if", "(", "(", "rows", "is", "None", ")", "and", "(", "cols", "is", "...
takes hints about whats coming .
train
false
27,499
def init(mpstate):
    """Module entry point: construct and return a SerialModule bound to
    *mpstate*."""
    return SerialModule(mpstate)
[ "def", "init", "(", "mpstate", ")", ":", "return", "SerialModule", "(", "mpstate", ")" ]
initialise the module and return a SerialModule instance bound to mpstate .
train
false
27,501
@Profiler.profile
def test_core(n):
    """Individual INSERT/commit pairs using the Core API: one transaction
    per row, n rows total."""
    for i in range(n):
        with engine.begin() as conn:
            conn.execute(Customer.__table__.insert(), dict(name=('customer name %d' % i), description=('customer description %d' % i)))
[ "@", "Profiler", ".", "profile", "def", "test_core", "(", "n", ")", ":", "for", "i", "in", "range", "(", "n", ")", ":", "with", "engine", ".", "begin", "(", ")", "as", "conn", ":", "conn", ".", "execute", "(", "Customer", ".", "__table__", ".", "...
individual insert/commit pairs using core .
train
false
27,502
def mock_render_template(*args, **kwargs):
    """Pretty-print the args and kwargs as the fake "rendered" output.

    NOTE(review): calling .decode() on pprint's result implies Python 2
    byte strings; under Python 3 this raises AttributeError — confirm.
    """
    return pprint.pformat((args, kwargs)).decode()
[ "def", "mock_render_template", "(", "*", "args", ",", "**", "kwargs", ")", ":", "return", "pprint", ".", "pformat", "(", "(", "args", ",", "kwargs", ")", ")", ".", "decode", "(", ")" ]
pretty-print the args and kwargs .
train
false
27,503
def gcd2(a, b):
    """Greatest common divisor using Euclid's algorithm (recursive form)."""
    if a:
        return gcd2(b % a, a)
    return b
[ "def", "gcd2", "(", "a", ",", "b", ")", ":", "while", "a", ":", "(", "a", ",", "b", ")", "=", "(", "(", "b", "%", "a", ")", ",", "a", ")", "return", "b" ]
greatest common divisor using euclids algorithm .
train
true
27,504
def cms_sign_text(text, signing_cert_file_name, signing_key_file_name):
    """Use openssl to sign *text*, producing a PEM-encoded (base64 DER)
    CMS document with the content attached and no certs/attributes.

    Raises subprocess.CalledProcessError when openssl exits non-zero or
    reports an error on stderr.
    """
    _ensure_subprocess()
    process = subprocess.Popen(['openssl', 'cms', '-sign', '-signer', signing_cert_file_name, '-inkey', signing_key_file_name, '-outform', 'PEM', '-nosmimecap', '-nodetach', '-nocerts', '-noattr'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (output, err) = process.communicate(text)
    retcode = process.poll()
    # Treat an 'Error' message on stderr as failure even with exit code 0.
    if (retcode or ('Error' in err)):
        LOG.error(('Signing error: %s' % err))
        raise subprocess.CalledProcessError(retcode, 'openssl')
    return output
[ "def", "cms_sign_text", "(", "text", ",", "signing_cert_file_name", ",", "signing_key_file_name", ")", ":", "_ensure_subprocess", "(", ")", "process", "=", "subprocess", ".", "Popen", "(", "[", "'openssl'", ",", "'cms'", ",", "'-sign'", ",", "'-signer'", ",", ...
uses openssl to sign a document produces a base64 encoding of a der formatted cms document URL .
train
false
27,505
def _ensure_empty_directory(path):
    """Ensure *path* is an empty directory, creating it (mode 0700) when
    it does not exist.

    Raises UsageError when path exists as a non-directory, is a non-empty
    directory, or cannot be created.
    """
    if not path.exists():
        try:
            path.makedirs()
            path.chmod(stat.S_IRWXU)
        except OSError as e:
            raise UsageError('Can not create {}. {}: {}.'.format(path.path, e.filename, e.strerror))
        return
    if not path.isdir():
        raise UsageError('{} is not a directory'.format(path.path))
    if path.listdir():
        raise UsageError('{} is not empty'.format(path.path))
[ "def", "_ensure_empty_directory", "(", "path", ")", ":", "if", "path", ".", "exists", "(", ")", ":", "if", "(", "not", "path", ".", "isdir", "(", ")", ")", ":", "raise", "UsageError", "(", "'{} is not a directory'", ".", "format", "(", "path", ".", "pa...
the path should not exist or it should be an empty directory .
train
false
27,508
def remove_config(chassis_id=None, community=None, contact=None, location=None, test=False, commit=True):
    """Remove configuration elements from the SNMP configuration.

    Only the keyword arguments that were given (truthy) are passed to the
    delete template; test/commit control the net.load_template run.
    """
    template_args = {'template_name': 'delete_snmp_config', 'test': test, 'commit': commit}
    optional = (('chassis_id', chassis_id), ('community', community),
                ('contact', contact), ('location', location))
    for key, value in optional:
        if value:
            template_args[key] = value
    return __salt__['net.load_template'](**template_args)
[ "def", "remove_config", "(", "chassis_id", "=", "None", ",", "community", "=", "None", ",", "contact", "=", "None", ",", "location", "=", "None", ",", "test", "=", "False", ",", "commit", "=", "True", ")", ":", "dic", "=", "{", "'template_name'", ":", ...
removes a configuration element from the snmp configuration .
train
true
27,509
def asrun(ascript):
    """Run the given AppleScript through ``osascript`` and return its
    standard output."""
    osa_proc = subprocess.Popen(['osascript', '-'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    (stdout_data, _) = osa_proc.communicate(ascript)
    return stdout_data
[ "def", "asrun", "(", "ascript", ")", ":", "osa", "=", "subprocess", ".", "Popen", "(", "[", "'osascript'", ",", "'-'", "]", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "return", "osa", ".", "commu...
run the given applescript and return the standard output and error .
train
false
27,510
def build_kubelet_args(facts):
    """Build node kubelet_args in the node-config facts.

    Adds cloud-provider/cloud-config flags matching the configured cloud
    provider kind (aws/openstack/gce) and node-labels derived from the
    node facts, then merges the result back into *facts*.  No-op when
    facts has no 'node' entry.
    """
    cloud_cfg_path = os.path.join(facts['common']['config_base'], 'cloudprovider')
    if ('node' in facts):
        kubelet_args = {}
        if ('cloudprovider' in facts):
            if ('kind' in facts['cloudprovider']):
                if (facts['cloudprovider']['kind'] == 'aws'):
                    kubelet_args['cloud-provider'] = ['aws']
                    kubelet_args['cloud-config'] = [(cloud_cfg_path + '/aws.conf')]
                if (facts['cloudprovider']['kind'] == 'openstack'):
                    kubelet_args['cloud-provider'] = ['openstack']
                    kubelet_args['cloud-config'] = [(cloud_cfg_path + '/openstack.conf')]
                if (facts['cloudprovider']['kind'] == 'gce'):
                    kubelet_args['cloud-provider'] = ['gce']
                    kubelet_args['cloud-config'] = [(cloud_cfg_path + '/gce.conf')]
        if (('labels' in facts['node']) and isinstance(facts['node']['labels'], dict)):
            # Render {'k': 'v', ...} as ['k=v', ...] for the kubelet flag.
            labels_str = list(map((lambda x: '='.join(x)), facts['node']['labels'].items()))
            # NOTE(review): a list never equals '' — this guard is always
            # True; the intent was probably to skip an empty label list.
            if (labels_str != ''):
                kubelet_args['node-labels'] = labels_str
        if (kubelet_args != {}):
            facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])
    return facts
[ "def", "build_kubelet_args", "(", "facts", ")", ":", "cloud_cfg_path", "=", "os", ".", "path", ".", "join", "(", "facts", "[", "'common'", "]", "[", "'config_base'", "]", ",", "'cloudprovider'", ")", "if", "(", "'node'", "in", "facts", ")", ":", "kubelet...
build node kubelet_args in the node-config .
train
false
27,512
def set_url_query_parameter(url, param_name, param_value):
    """Set or replace a single query parameter in *url* and return the
    resulting URL; other parameters are preserved.

    Python 2 code (``basestring``, ``urlparse`` module,
    ``urllib.urlencode``).
    """
    if (not isinstance(param_name, basestring)):
        raise Exception(('URL query parameter name must be a string, received %s' % param_name))
    (scheme, netloc, path, query_string, fragment) = urlparse.urlsplit(url)
    query_params = urlparse.parse_qs(query_string)
    # Overwrite any existing values for this parameter.
    query_params[param_name] = [param_value]
    new_query_string = urllib.urlencode(query_params, doseq=True)
    return urlparse.urlunsplit((scheme, netloc, path, new_query_string, fragment))
[ "def", "set_url_query_parameter", "(", "url", ",", "param_name", ",", "param_value", ")", ":", "if", "(", "not", "isinstance", "(", "param_name", ",", "basestring", ")", ")", ":", "raise", "Exception", "(", "(", "'URL query parameter name must be a string, received ...
set or replace a query parameter .
train
false
27,513
def find_lexer_for_filename(filename):
    """Get a Pygments lexer for *filename*.

    Custom extension overrides take priority; otherwise Pygments' own
    filename mapping is consulted, falling back to a plain TextLexer for
    unrecognized files.
    """
    filename = filename or ''
    (_, extension) = os.path.splitext(filename)
    if extension in custom_extension_lexer_mapping:
        return get_lexer_by_name(custom_extension_lexer_mapping[extension])
    try:
        return get_lexer_for_filename(filename)
    except ClassNotFound:
        return TextLexer()
[ "def", "find_lexer_for_filename", "(", "filename", ")", ":", "filename", "=", "(", "filename", "or", "''", ")", "(", "root", ",", "ext", ")", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "(", "ext", "in", "custom_extension_lexer_m...
get a pygments lexer given a filename .
train
true
27,515
@task(queue='web') def fileify(version_pk, commit): version = Version.objects.get(pk=version_pk) project = version.project if (not project.cdn_enabled): return if (not commit): log.info(LOG_TEMPLATE.format(project=project.slug, version=version.slug, msg='Imported File not being built because no commit information')) path = project.rtd_build_path(version.slug) if path: log.info(LOG_TEMPLATE.format(project=version.project.slug, version=version.slug, msg='Creating ImportedFiles')) _manage_imported_files(version, path, commit) else: log.info(LOG_TEMPLATE.format(project=project.slug, version=version.slug, msg='No ImportedFile files'))
[ "@", "task", "(", "queue", "=", "'web'", ")", "def", "fileify", "(", "version_pk", ",", "commit", ")", ":", "version", "=", "Version", ".", "objects", ".", "get", "(", "pk", "=", "version_pk", ")", "project", "=", "version", ".", "project", "if", "("...
create importedfile objects for all of a versions files .
train
false
27,516
def fmtTimeSpan(time, pad=0, point=0, short=False, after=False, unit=99): (type, point) = optimalPeriod(time, point, unit) time = convertSecondsTo(time, type) if (not point): time = int(round(time)) if short: fmt = shortTimeFmt(type) elif after: fmt = afterTimeTable[type](_pluralCount(time, point)) else: fmt = timeTable[type](_pluralCount(time, point)) timestr = ('%(a)d.%(b)df' % {'a': pad, 'b': point}) return locale.format_string(('%' + (fmt % timestr)), time)
[ "def", "fmtTimeSpan", "(", "time", ",", "pad", "=", "0", ",", "point", "=", "0", ",", "short", "=", "False", ",", "after", "=", "False", ",", "unit", "=", "99", ")", ":", "(", "type", ",", "point", ")", "=", "optimalPeriod", "(", "time", ",", "...
return a string representing a time span .
train
false
27,517
def next_setting_utc(hass, entity_id=None): entity_id = (entity_id or ENTITY_ID) state = hass.states.get(ENTITY_ID) try: return dt_util.parse_datetime(state.attributes[STATE_ATTR_NEXT_SETTING]) except (AttributeError, KeyError): return None
[ "def", "next_setting_utc", "(", "hass", ",", "entity_id", "=", "None", ")", ":", "entity_id", "=", "(", "entity_id", "or", "ENTITY_ID", ")", "state", "=", "hass", ".", "states", ".", "get", "(", "ENTITY_ID", ")", "try", ":", "return", "dt_util", ".", "...
utc datetime object of the next sun setting .
train
false
27,518
def remove_mock_addon(addon_config): settings.ADDONS_AVAILABLE_DICT.pop(addon_config.short_name, None) try: settings.ADDONS_AVAILABLE.remove(addon_config) except ValueError: pass try: settings.ADDONS_REQUESTED.remove(addon_config.short_name) except ValueError: pass
[ "def", "remove_mock_addon", "(", "addon_config", ")", ":", "settings", ".", "ADDONS_AVAILABLE_DICT", ".", "pop", "(", "addon_config", ".", "short_name", ",", "None", ")", "try", ":", "settings", ".", "ADDONS_AVAILABLE", ".", "remove", "(", "addon_config", ")", ...
given an addonconfig instance .
train
false
27,519
def _stitch_rows(raw_rows): try: max_len = max((len(x) for x in raw_rows)) for row in raw_rows: assert (len(row) == max_len) except AssertionError: for (idx, row) in enumerate(raw_rows): if (len(row) != max_len): assert ((len(row) + 2) == max_len) raw_rows[idx] = (([(' ' * len(row[0]))] + row) + [(' ' * len(row[0]))]) cmbn_rows = [] for (idx, row) in enumerate(raw_rows[0]): cmbn_row = ''.join((aln_row[idx] for aln_row in raw_rows)) cmbn_rows.append(cmbn_row) if (len(cmbn_rows) == 5): (cmbn_rows[0], cmbn_rows[1]) = _flip_codons(cmbn_rows[0], cmbn_rows[1]) (cmbn_rows[4], cmbn_rows[3]) = _flip_codons(cmbn_rows[4], cmbn_rows[3]) return cmbn_rows
[ "def", "_stitch_rows", "(", "raw_rows", ")", ":", "try", ":", "max_len", "=", "max", "(", "(", "len", "(", "x", ")", "for", "x", "in", "raw_rows", ")", ")", "for", "row", "in", "raw_rows", ":", "assert", "(", "len", "(", "row", ")", "==", "max_le...
stitches together the parsed alignment rows and returns them in a list .
train
false
27,521
def parse_short_time_label(label): (days, hours, minutes, seconds) = ('0', '0', '0', '0') if ('-' in label): (days, label) = label.split('-', 1) time_comp = label.split(':') if (len(time_comp) == 3): (hours, minutes, seconds) = time_comp elif (len(time_comp) == 2): (minutes, seconds) = time_comp else: raise ValueError(("Invalid time format, we expected '[[dd-]hh:]mm:ss' or 'mm:ss.ss': %s" % label)) try: time_sum = int(float(seconds)) time_sum += (int(minutes) * 60) time_sum += (int(hours) * 3600) time_sum += (int(days) * 86400) return time_sum except ValueError: raise ValueError(('Non-numeric value in time entry: %s' % label))
[ "def", "parse_short_time_label", "(", "label", ")", ":", "(", "days", ",", "hours", ",", "minutes", ",", "seconds", ")", "=", "(", "'0'", ",", "'0'", ",", "'0'", ",", "'0'", ")", "if", "(", "'-'", "in", "label", ")", ":", "(", "days", ",", "label...
provides the number of seconds corresponding to the formatting used for the cputime and etime fields of ps: [[dd-]hh:]mm:ss or mm:ss .
train
false
27,522
def newline_with_copy_margin(event): b = event.current_buffer cursor_start_pos = b.document.cursor_position_col b.newline(copy_margin=True) b.cursor_up(count=1) cursor_end_pos = b.document.cursor_position_col if (cursor_start_pos != cursor_end_pos): pos_diff = (cursor_start_pos - cursor_end_pos) b.cursor_right(count=pos_diff)
[ "def", "newline_with_copy_margin", "(", "event", ")", ":", "b", "=", "event", ".", "current_buffer", "cursor_start_pos", "=", "b", ".", "document", ".", "cursor_position_col", "b", ".", "newline", "(", "copy_margin", "=", "True", ")", "b", ".", "cursor_up", ...
preserve margin and cursor position when using control-o to insert a newline in emacs mode .
train
false
27,523
def get_mock_hadoop_cmd_args(): cmd_log = os.path.join(get_mock_dir(), 'cmd.log') if (not os.path.exists(cmd_log)): return [] with open(cmd_log) as f: return [shlex_split(cmd) for cmd in f]
[ "def", "get_mock_hadoop_cmd_args", "(", ")", ":", "cmd_log", "=", "os", ".", "path", ".", "join", "(", "get_mock_dir", "(", ")", ",", "'cmd.log'", ")", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "cmd_log", ")", ")", ":", "return", "[", ...
get a list for each invocation of hadoop .
train
false
27,524
def shell_escape(string): return re.sub('([^A-Za-z0-9_])', '\\\\\\1', string)
[ "def", "shell_escape", "(", "string", ")", ":", "return", "re", ".", "sub", "(", "'([^A-Za-z0-9_])'", ",", "'\\\\\\\\\\\\1'", ",", "string", ")" ]
quote meta-characters in the args for the unix shell .
train
false
27,525
def get_patches_from_dir(path): patches = [] for (root, subdirs, files) in core.walk(path): for name in [f for f in files if f.endswith(u'.patch')]: patches.append(core.decode(os.path.join(root, name))) return patches
[ "def", "get_patches_from_dir", "(", "path", ")", ":", "patches", "=", "[", "]", "for", "(", "root", ",", "subdirs", ",", "files", ")", "in", "core", ".", "walk", "(", "path", ")", ":", "for", "name", "in", "[", "f", "for", "f", "in", "files", "if...
find patches in a subdirectory .
train
false
27,527
def get_indexer_absolute_numbering(indexer_id, indexer, sceneAbsoluteNumber, fallback_to_xem=True, scene_season=None): if ((indexer_id is None) or (sceneAbsoluteNumber is None)): return sceneAbsoluteNumber indexer_id = int(indexer_id) indexer = int(indexer) if (scene_season is None): dbData = [x[u'doc'] for x in sickrage.srCore.mainDB.db.get_many(u'scene_numbering', indexer_id, with_doc=True) if ((x[u'doc'][u'indexer'] == indexer) and (x[u'doc'][u'scene_absolute_number'] == sceneAbsoluteNumber))] else: dbData = [x[u'doc'] for x in sickrage.srCore.mainDB.db.get_many(u'scene_numbering', indexer_id, with_doc=True) if ((x[u'doc'][u'indexer'] == indexer) and (x[u'doc'][u'scene_absolute_number'] == sceneAbsoluteNumber) and (x[u'doc'][u'scene_season'] == scene_season))] if dbData: return int((dbData[0][u'absolute_number'] or 0)) else: if fallback_to_xem: return get_indexer_absolute_numbering_for_xem(indexer_id, indexer, sceneAbsoluteNumber, scene_season) return sceneAbsoluteNumber
[ "def", "get_indexer_absolute_numbering", "(", "indexer_id", ",", "indexer", ",", "sceneAbsoluteNumber", ",", "fallback_to_xem", "=", "True", ",", "scene_season", "=", "None", ")", ":", "if", "(", "(", "indexer_id", "is", "None", ")", "or", "(", "sceneAbsoluteNum...
returns a tuple .
train
false
27,528
def checkEnvironment(): if (sys.version_info[0:2] < (2, 4)): fatal('This script must be run with Python 2.4 or later') if (platform.system() != 'Darwin'): fatal('This script should be run on a Mac OS X 10.4 (or later) system') if (int(platform.release().split('.')[0]) < 8): fatal('This script should be run on a Mac OS X 10.4 (or later) system') if (not os.path.exists(SDKPATH)): fatal(('Please install the latest version of Xcode and the %s SDK' % os.path.basename(SDKPATH[:(-4)]))) frameworks = {} for framework in ['Tcl', 'Tk']: fwpth = ('Library/Frameworks/%s.framework/Versions/Current' % framework) sysfw = os.path.join(SDKPATH, 'System', fwpth) libfw = os.path.join(SDKPATH, fwpth) usrfw = os.path.join(os.getenv('HOME'), fwpth) frameworks[framework] = os.readlink(sysfw) if (not os.path.exists(libfw)): fatal(('Please install a link to a current %s %s as %s so the user can override the system framework.' % (framework, frameworks[framework], libfw))) if (os.readlink(libfw) != os.readlink(sysfw)): fatal(('Version of %s must match %s' % (libfw, sysfw))) if os.path.exists(usrfw): fatal(('Please rename %s to avoid possible dynamic load issues.' 
% usrfw)) if (frameworks['Tcl'] != frameworks['Tk']): fatal('The Tcl and Tk frameworks are not the same version.') EXPECTED_SHARED_LIBS['_tkinter.so'] = [('/Library/Frameworks/Tcl.framework/Versions/%s/Tcl' % frameworks['Tcl']), ('/Library/Frameworks/Tk.framework/Versions/%s/Tk' % frameworks['Tk'])] environ_var_prefixes = ['CPATH', 'C_INCLUDE_', 'DYLD_', 'LANG', 'LC_', 'LD_', 'LIBRARY_', 'PATH', 'PYTHON'] for ev in list(os.environ): for prefix in environ_var_prefixes: if ev.startswith(prefix): print ('INFO: deleting environment variable %s=%s' % (ev, os.environ[ev])) del os.environ[ev] base_path = '/bin:/sbin:/usr/bin:/usr/sbin' if ('SDK_TOOLS_BIN' in os.environ): base_path = ((os.environ['SDK_TOOLS_BIN'] + ':') + base_path) OLD_DEVELOPER_TOOLS = '/Developer/Tools' if os.path.isdir(OLD_DEVELOPER_TOOLS): base_path = ((base_path + ':') + OLD_DEVELOPER_TOOLS) os.environ['PATH'] = base_path print ('Setting default PATH: %s' % os.environ['PATH']) runCommand('hg --version') runCommand('sphinx-build --version')
[ "def", "checkEnvironment", "(", ")", ":", "if", "(", "sys", ".", "version_info", "[", "0", ":", "2", "]", "<", "(", "2", ",", "4", ")", ")", ":", "fatal", "(", "'This script must be run with Python 2.4 or later'", ")", "if", "(", "platform", ".", "system...
check that were running on a supported system .
train
false
27,529
def pad_string(string): return string.ljust(512)
[ "def", "pad_string", "(", "string", ")", ":", "return", "string", ".", "ljust", "(", "512", ")" ]
pad a string for safe http error responses .
train
false
27,530
def getScreens(): if importCtypesFailed: return False count = CGDisplayCount() cocoa.CGGetActiveDisplayList(0, None, ctypes.byref(count)) displays = (CGDirectDisplayID * count.value)() cocoa.CGGetActiveDisplayList(count.value, displays, ctypes.byref(count)) return [id for id in displays]
[ "def", "getScreens", "(", ")", ":", "if", "importCtypesFailed", ":", "return", "False", "count", "=", "CGDisplayCount", "(", ")", "cocoa", ".", "CGGetActiveDisplayList", "(", "0", ",", "None", ",", "ctypes", ".", "byref", "(", "count", ")", ")", "displays"...
get a list of display ids from cocoa .
train
false
27,532
def set_dev_value(hass, address, channel, param, value, proxy=None): data = {ATTR_ADDRESS: address, ATTR_CHANNEL: channel, ATTR_PARAM: param, ATTR_VALUE: value, ATTR_PROXY: proxy} hass.services.call(DOMAIN, SERVICE_SET_DEV_VALUE, data)
[ "def", "set_dev_value", "(", "hass", ",", "address", ",", "channel", ",", "param", ",", "value", ",", "proxy", "=", "None", ")", ":", "data", "=", "{", "ATTR_ADDRESS", ":", "address", ",", "ATTR_CHANNEL", ":", "channel", ",", "ATTR_PARAM", ":", "param", ...
send virtual keypress to homematic controlller .
train
false
27,533
def applications(): apps = _call_system_profiler('SPApplicationsDataType') appdict = {} for a in apps: details = dict(a) details.pop('_name') if ('lastModified' in details): details['lastModified'] = details['lastModified'].strftime('%Y-%m-%d %H:%M:%S') if ('info' in details): try: details['info'] = '{0}: {1}'.format(details['info'][0], details['info'][1].strftime('%Y-%m-%d %H:%M:%S')) except (IndexError, AttributeError): pass if (a['_name'] not in appdict): appdict[a['_name']] = [] appdict[a['_name']].append(details) return appdict
[ "def", "applications", "(", ")", ":", "apps", "=", "_call_system_profiler", "(", "'SPApplicationsDataType'", ")", "appdict", "=", "{", "}", "for", "a", "in", "apps", ":", "details", "=", "dict", "(", "a", ")", "details", ".", "pop", "(", "'_name'", ")", ...
return the results of a call to system_profiler -xml -detail full spapplicationsdatatype as a dictionary .
train
true
27,534
def _find_elem_with_wait(context, by, wait_time=MAX_WAIT_TIME): return WebDriverWait(context.browser, wait_time).until(EC.presence_of_element_located(by))
[ "def", "_find_elem_with_wait", "(", "context", ",", "by", ",", "wait_time", "=", "MAX_WAIT_TIME", ")", ":", "return", "WebDriverWait", "(", "context", ".", "browser", ",", "wait_time", ")", ".", "until", "(", "EC", ".", "presence_of_element_located", "(", "by"...
tries to find an element with an explicit timeout .
train
false
27,536
def _lanscan_getnode(): return _find_mac('lanscan', '-ai', ['lan0'], (lambda i: 0))
[ "def", "_lanscan_getnode", "(", ")", ":", "return", "_find_mac", "(", "'lanscan'", ",", "'-ai'", ",", "[", "'lan0'", "]", ",", "(", "lambda", "i", ":", "0", ")", ")" ]
get the hardware address on unix by running lanscan .
train
false
27,538
def sqf_normal(a, b, c, steps=False): ABC = (A, B, C) = _remove_gcd(a, b, c) sq = tuple((square_factor(i) for i in ABC)) sqf = (A, B, C) = tuple([(i // (j ** 2)) for (i, j) in zip(ABC, sq)]) pc = igcd(A, B) A /= pc B /= pc pa = igcd(B, C) B /= pa C /= pa pb = igcd(A, C) A /= pb B /= pb A *= pa B *= pb C *= pc if steps: return (sq, sqf, (A, B, C)) else: return (A, B, C)
[ "def", "sqf_normal", "(", "a", ",", "b", ",", "c", ",", "steps", "=", "False", ")", ":", "ABC", "=", "(", "A", ",", "B", ",", "C", ")", "=", "_remove_gcd", "(", "a", ",", "b", ",", "c", ")", "sq", "=", "tuple", "(", "(", "square_factor", "(...
return a .
train
false
27,539
def _AddConditionalProperty(properties, condition, name, value): if (name not in properties): properties[name] = {} values = properties[name] if (value not in values): values[value] = [] conditions = values[value] conditions.append(condition)
[ "def", "_AddConditionalProperty", "(", "properties", ",", "condition", ",", "name", ",", "value", ")", ":", "if", "(", "name", "not", "in", "properties", ")", ":", "properties", "[", "name", "]", "=", "{", "}", "values", "=", "properties", "[", "name", ...
adds a property / conditional value pair to a dictionary .
train
false
27,540
def VerifyNoCollidingTargets(targets): used = {} for target in targets: (path, name) = target.rsplit(':', 1) (subdir, gyp) = os.path.split(path) if (not subdir): subdir = '.' key = ((subdir + ':') + name) if (key in used): raise GypError(('Duplicate target name "%s" in directory "%s" used both in "%s" and "%s".' % (name, subdir, gyp, used[key]))) used[key] = gyp
[ "def", "VerifyNoCollidingTargets", "(", "targets", ")", ":", "used", "=", "{", "}", "for", "target", "in", "targets", ":", "(", "path", ",", "name", ")", "=", "target", ".", "rsplit", "(", "':'", ",", "1", ")", "(", "subdir", ",", "gyp", ")", "=", ...
verify that no two targets in the same directory share the same name .
train
false
27,541
def make_sure_keen_schemas_match(source_collection, destination_collection, keen_client): source_schema = keen_client.get_collection(source_collection) destination_schema = keen_client.get_collection(destination_collection) return (source_schema == destination_schema)
[ "def", "make_sure_keen_schemas_match", "(", "source_collection", ",", "destination_collection", ",", "keen_client", ")", ":", "source_schema", "=", "keen_client", ".", "get_collection", "(", "source_collection", ")", "destination_schema", "=", "keen_client", ".", "get_col...
helper function to check if two given collections have matching schemas in keen .
train
false
27,542
@listens_for(Event, 'after_insert') def after_insert(mapper, connection, target): link_table = Version.__table__ version = Version.query.order_by(Version.id.desc()).first() if version: version_id = version.id connection.execute(link_table.update().where((link_table.c.id == version_id)).values(event_id=target.id))
[ "@", "listens_for", "(", "Event", ",", "'after_insert'", ")", "def", "after_insert", "(", "mapper", ",", "connection", ",", "target", ")", ":", "link_table", "=", "Version", ".", "__table__", "version", "=", "Version", ".", "query", ".", "order_by", "(", "...
update version after insert to db .
train
false
27,544
def _call_system_profiler(datatype): p = subprocess.Popen([PROFILER_BINARY, '-detailLevel', 'full', '-xml', datatype], stdout=subprocess.PIPE) (sysprofresults, sysprof_stderr) = p.communicate(input=None) if six.PY2: plist = plistlib.readPlistFromString(sysprofresults) else: plist = plistlib.readPlistFromBytes(sysprofresults) try: apps = plist[0]['_items'] except (IndexError, KeyError): apps = [] return apps
[ "def", "_call_system_profiler", "(", "datatype", ")", ":", "p", "=", "subprocess", ".", "Popen", "(", "[", "PROFILER_BINARY", ",", "'-detailLevel'", ",", "'full'", ",", "'-xml'", ",", "datatype", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "("...
call out to system_profiler .
train
true
27,546
def _parse_mime_message(mime_message): if isinstance(mime_message, email.Message.Message): return mime_message elif isinstance(mime_message, basestring): return email.message_from_string(mime_message) else: return email.message_from_file(mime_message)
[ "def", "_parse_mime_message", "(", "mime_message", ")", ":", "if", "isinstance", "(", "mime_message", ",", "email", ".", "Message", ".", "Message", ")", ":", "return", "mime_message", "elif", "isinstance", "(", "mime_message", ",", "basestring", ")", ":", "ret...
helper function converts a mime_message in to email .
train
false
27,548
@contextfunction def sales_opportunity_list(context, opportunities, skip_group=False): request = context['request'] response_format = 'html' if ('response_format' in context): response_format = context['response_format'] return Markup(render_to_string('sales/tags/opportunity_list', {'opportunities': opportunities, 'skip_group': skip_group}, context_instance=RequestContext(request), response_format=response_format))
[ "@", "contextfunction", "def", "sales_opportunity_list", "(", "context", ",", "opportunities", ",", "skip_group", "=", "False", ")", ":", "request", "=", "context", "[", "'request'", "]", "response_format", "=", "'html'", "if", "(", "'response_format'", "in", "c...
print a list of opportunitys .
train
false
27,549
def entry_probe_from_definition(df): template = Template(ENTRY_PROBE_TEMPLATE) mapping = {'__LIBRARY__': df.get('library', ''), '__NAME__': df['api'], '__ARGUMENTS_PUSH_ON_STACK__': push_on_stack_section(df['args'])} return template.substitute(mapping)
[ "def", "entry_probe_from_definition", "(", "df", ")", ":", "template", "=", "Template", "(", "ENTRY_PROBE_TEMPLATE", ")", "mapping", "=", "{", "'__LIBRARY__'", ":", "df", ".", "get", "(", "'library'", ",", "''", ")", ",", "'__NAME__'", ":", "df", "[", "'ap...
generates an entry dtrace probe from the given api definition .
train
false
27,551
@timethis def countdown(n): while (n > 0): n -= 1
[ "@", "timethis", "def", "countdown", "(", "n", ")", ":", "while", "(", "n", ">", "0", ")", ":", "n", "-=", "1" ]
counts down .
train
false
27,553
def IsDecltype(clean_lines, linenum, column): (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column) if (start_col < 0): return False if Search('\\bdecltype\\s*$', text[0:start_col]): return True return False
[ "def", "IsDecltype", "(", "clean_lines", ",", "linenum", ",", "column", ")", ":", "(", "text", ",", "_", ",", "start_col", ")", "=", "ReverseCloseExpression", "(", "clean_lines", ",", "linenum", ",", "column", ")", "if", "(", "start_col", "<", "0", ")", ...
check if the token ending on is decltype() .
train
true
27,555
def powdenest(eq, force=False, polar=False): from sympy.simplify.simplify import posify if force: (eq, rep) = posify(eq) return powdenest(eq, force=False).xreplace(rep) if polar: (eq, rep) = polarify(eq) return unpolarify(powdenest(unpolarify(eq, exponents_only=True)), rep) new = powsimp(sympify(eq)) return new.xreplace(Transform(_denest_pow, filter=(lambda m: (m.is_Pow or (m.func is exp)))))
[ "def", "powdenest", "(", "eq", ",", "force", "=", "False", ",", "polar", "=", "False", ")", ":", "from", "sympy", ".", "simplify", ".", "simplify", "import", "posify", "if", "force", ":", "(", "eq", ",", "rep", ")", "=", "posify", "(", "eq", ")", ...
collect exponents on powers as assumptions allow .
train
false
27,556
def get_diag(code, command): import tempfile import shutil code = (code + u'\n') try: tmpdir = tempfile.mkdtemp() (fd, diag_name) = tempfile.mkstemp(dir=tmpdir) f = os.fdopen(fd, 'w') f.write(code.encode('utf-8')) f.close() format = _draw_mode.lower() draw_name = ((diag_name + '.') + format) saved_argv = sys.argv argv = [diag_name, '-T', format, '-o', draw_name] if (_draw_mode == 'SVG'): argv += ['--ignore-pil'] command.main(argv) file_name = ((diag_name + '.') + _publish_mode.lower()) with io.open(file_name, 'rb') as f: data = f.read() f.close() finally: for file in os.listdir(tmpdir): os.unlink(((tmpdir + '/') + file)) shutil.rmtree(tmpdir) return data
[ "def", "get_diag", "(", "code", ",", "command", ")", ":", "import", "tempfile", "import", "shutil", "code", "=", "(", "code", "+", "u'\\n'", ")", "try", ":", "tmpdir", "=", "tempfile", ".", "mkdtemp", "(", ")", "(", "fd", ",", "diag_name", ")", "=", ...
generate diagramm and return data .
train
true
27,557
def is_blank_line(line, allow_spaces=0): if (not line): return 1 if allow_spaces: return (line.rstrip() == '') return ((line[0] == '\n') or (line[0] == '\r'))
[ "def", "is_blank_line", "(", "line", ",", "allow_spaces", "=", "0", ")", ":", "if", "(", "not", "line", ")", ":", "return", "1", "if", "allow_spaces", ":", "return", "(", "line", ".", "rstrip", "(", ")", "==", "''", ")", "return", "(", "(", "line",...
is_blank_line -> boolean return whether a line is blank .
train
false
27,558
def _ustr(obj): try: return str(obj) except UnicodeEncodeError as e: return unicode(obj)
[ "def", "_ustr", "(", "obj", ")", ":", "try", ":", "return", "str", "(", "obj", ")", "except", "UnicodeEncodeError", "as", "e", ":", "return", "unicode", "(", "obj", ")" ]
drop-in replacement for str that tries to be unicode friendly .
train
false
27,559
def create_superuser(): print("\nCreate a superuser below. The superuser is Player #1, the 'owner' account of the server.\n") django.core.management.call_command('createsuperuser', interactive=True)
[ "def", "create_superuser", "(", ")", ":", "print", "(", "\"\\nCreate a superuser below. The superuser is Player #1, the 'owner' account of the server.\\n\"", ")", "django", ".", "core", ".", "management", ".", "call_command", "(", "'createsuperuser'", ",", "interactive", "=",...
create the superuser player .
train
false
27,560
@preloaderStop def SelectSearchResult(listItems, **kwargs): return printList(listItems, showSelector=True, **kwargs)
[ "@", "preloaderStop", "def", "SelectSearchResult", "(", "listItems", ",", "**", "kwargs", ")", ":", "return", "printList", "(", "listItems", ",", "showSelector", "=", "True", ",", "**", "kwargs", ")" ]
select a search result .
train
false
27,561
def random_lower(t): return ''.join(((c.lower() if (random.random() > 0.5) else c) for c in t))
[ "def", "random_lower", "(", "t", ")", ":", "return", "''", ".", "join", "(", "(", "(", "c", ".", "lower", "(", ")", "if", "(", "random", ".", "random", "(", ")", ">", "0.5", ")", "else", "c", ")", "for", "c", "in", "t", ")", ")" ]
change random chars of the string to lower case .
train
false
27,564
def slice_(value, arg): try: bits = [] for x in arg.split(':'): if (len(x) == 0): bits.append(None) else: bits.append(int(x)) return value[slice(*bits)] except (ValueError, TypeError): return value
[ "def", "slice_", "(", "value", ",", "arg", ")", ":", "try", ":", "bits", "=", "[", "]", "for", "x", "in", "arg", ".", "split", "(", "':'", ")", ":", "if", "(", "len", "(", "x", ")", "==", "0", ")", ":", "bits", ".", "append", "(", "None", ...
returns a slice of the list .
train
false
27,565
def MkDirListWidget(w): msg = Tix.Message(w, relief=Tix.FLAT, width=240, anchor=Tix.N, text='The Tix DirList widget gives a graphical representation of the file system directory and makes it easy for the user to choose and access directories.') dirlist = Tix.DirList(w, options='hlist.padY 1 hlist.width 25 hlist.height 16') msg.pack(side=Tix.TOP, expand=1, fill=Tix.BOTH, padx=3, pady=3) dirlist.pack(side=Tix.TOP, padx=3, pady=3)
[ "def", "MkDirListWidget", "(", "w", ")", ":", "msg", "=", "Tix", ".", "Message", "(", "w", ",", "relief", "=", "Tix", ".", "FLAT", ",", "width", "=", "240", ",", "anchor", "=", "Tix", ".", "N", ",", "text", "=", "'The Tix DirList widget gives a graphic...
the tixdirlist widget gives a graphical representation of the file system directory and makes it easy for the user to choose and access directories .
train
false
27,566
def setup_stream_handlers(options): class StdoutFilter(logging.Filter, ): def filter(self, record): return (record.levelno in (logging.DEBUG, logging.INFO)) if log.handlers: for handler in log.handlers: log.removeHandler(handler) stdout_handler = logging.StreamHandler(sys.stdout) stdout_handler.setLevel(logging.WARNING) stdout_handler.addFilter(StdoutFilter()) if options.debug: stdout_handler.setLevel(logging.DEBUG) elif options.verbose: stdout_handler.setLevel(logging.INFO) else: stdout_handler.setLevel(logging.WARNING) log.addHandler(stdout_handler) stderr_handler = logging.StreamHandler(sys.stderr) stderr_handler.setLevel(logging.WARNING) log.addHandler(stderr_handler)
[ "def", "setup_stream_handlers", "(", "options", ")", ":", "class", "StdoutFilter", "(", "logging", ".", "Filter", ",", ")", ":", "def", "filter", "(", "self", ",", "record", ")", ":", "return", "(", "record", ".", "levelno", "in", "(", "logging", ".", ...
setup logging stream handlers according to the options .
train
false
27,567
def organizations_enabled(): return settings.FEATURES.get('ORGANIZATIONS_APP', False)
[ "def", "organizations_enabled", "(", ")", ":", "return", "settings", ".", "FEATURES", ".", "get", "(", "'ORGANIZATIONS_APP'", ",", "False", ")" ]
returns boolean indication if organizations app is enabled on not .
train
false
27,568
def parallel_dict_from_expr(exprs, **args): (reps, opt) = _parallel_dict_from_expr(exprs, build_options(args)) return (reps, opt.gens)
[ "def", "parallel_dict_from_expr", "(", "exprs", ",", "**", "args", ")", ":", "(", "reps", ",", "opt", ")", "=", "_parallel_dict_from_expr", "(", "exprs", ",", "build_options", "(", "args", ")", ")", "return", "(", "reps", ",", "opt", ".", "gens", ")" ]
transform expressions into a multinomial form .
train
false
27,569
def s3_utc(dt): if dt: if (dt.tzinfo is None): return dt.replace(tzinfo=dateutil.tz.tzutc()) return dt.astimezone(dateutil.tz.tzutc()) else: return None
[ "def", "s3_utc", "(", "dt", ")", ":", "if", "dt", ":", "if", "(", "dt", ".", "tzinfo", "is", "None", ")", ":", "return", "dt", ".", "replace", "(", "tzinfo", "=", "dateutil", ".", "tz", ".", "tzutc", "(", ")", ")", "return", "dt", ".", "astimez...
get a datetime object for the same date/time as the datetime object .
train
false
27,572
def left_multiply(J, d, copy=True): if (copy and (not isinstance(J, LinearOperator))): J = J.copy() if issparse(J): J.data *= np.repeat(d, np.diff(J.indptr)) elif isinstance(J, LinearOperator): J = left_multiplied_operator(J, d) else: J *= d[:, np.newaxis] return J
[ "def", "left_multiply", "(", "J", ",", "d", ",", "copy", "=", "True", ")", ":", "if", "(", "copy", "and", "(", "not", "isinstance", "(", "J", ",", "LinearOperator", ")", ")", ")", ":", "J", "=", "J", ".", "copy", "(", ")", "if", "issparse", "("...
compute diag(d) j .
train
false
27,573
def InterpolatePaths(path, labels): if ('%%LABEL%%' not in path): return dict([(path, None)]) else: paths = {} for label in labels: paths[path.replace('%%LABEL%%', label)] = label return paths
[ "def", "InterpolatePaths", "(", "path", ",", "labels", ")", ":", "if", "(", "'%%LABEL%%'", "not", "in", "path", ")", ":", "return", "dict", "(", "[", "(", "path", ",", "None", ")", "]", ")", "else", ":", "paths", "=", "{", "}", "for", "label", "i...
interpolate paths with %%label%% markers .
train
false
27,574
def latest_release_version(): version = latest_release_tag()[len('ckan-'):] return version
[ "def", "latest_release_version", "(", ")", ":", "version", "=", "latest_release_tag", "(", ")", "[", "len", "(", "'ckan-'", ")", ":", "]", "return", "version" ]
return the version number of the latest stable release .
train
false
27,576
def _make_array(val, copy=False): val = np.array(val, copy=copy, subok=True) if (not ((val.dtype == np.float64) or (val.dtype.kind in u'OSUa'))): val = np.asanyarray(val, dtype=np.float64) return val
[ "def", "_make_array", "(", "val", ",", "copy", "=", "False", ")", ":", "val", "=", "np", ".", "array", "(", "val", ",", "copy", "=", "copy", ",", "subok", "=", "True", ")", "if", "(", "not", "(", "(", "val", ".", "dtype", "==", "np", ".", "fl...
take val and convert/reshape to an array .
train
false
27,577
def clean_pyc(path): if (not os.access(path, os.W_OK)): warnings.warn('{0} is not writable so cannot delete stale *pyc files'.format(path)) return print 'Cleaning *pyc files (if writable) from: {0}'.format(path) for (root, __dirs, files) in os.walk(path): pyc_files = filter((lambda filename: filename.endswith('.pyc')), files) py_files = set(filter((lambda filename: filename.endswith('.py')), files)) excess_pyc_files = filter((lambda pyc_filename: (pyc_filename[:(-1)] not in py_files)), pyc_files) for excess_pyc_file in excess_pyc_files: full_path = os.path.join(root, excess_pyc_file) os.remove(full_path)
[ "def", "clean_pyc", "(", "path", ")", ":", "if", "(", "not", "os", ".", "access", "(", "path", ",", "os", ".", "W_OK", ")", ")", ":", "warnings", ".", "warn", "(", "'{0} is not writable so cannot delete stale *pyc files'", ".", "format", "(", "path", ")", ...
delete all *pyc files recursively in a path .
train
false
27,578
def test_run_tests_if_main(): x = [] def test_a(): x.append(True) @np.testing.dec.skipif(True) def test_b(): return try: __name__ = '__main__' run_tests_if_main(measure_mem=False) def test_c(): raise RuntimeError try: __name__ = '__main__' run_tests_if_main(measure_mem=False) except RuntimeError: pass else: raise RuntimeError('Error not raised') finally: del __name__ assert_true((len(x) == 2)) assert_true((x[0] and x[1]))
[ "def", "test_run_tests_if_main", "(", ")", ":", "x", "=", "[", "]", "def", "test_a", "(", ")", ":", "x", ".", "append", "(", "True", ")", "@", "np", ".", "testing", ".", "dec", ".", "skipif", "(", "True", ")", "def", "test_b", "(", ")", ":", "r...
test run_tests_if_main functionality .
train
false