Dataset columns (name, type, observed min/max from the viewer stats):

  id_within_dataset     int64    1 .. 55.5k
  snippet               string   length 19 .. 14.2k
  tokens                list     length 6 .. 1.63k
  nl                    string   length 6 .. 352
  split_within_dataset  string   1 value
  is_duplicated         bool     2 classes
5,260
def get_volume_type_qos_specs(volume_type_id):
    ctxt = context.get_admin_context()
    res = db.volume_type_qos_specs_get(ctxt, volume_type_id)
    return res
[ "def", "get_volume_type_qos_specs", "(", "volume_type_id", ")", ":", "ctxt", "=", "context", ".", "get_admin_context", "(", ")", "res", "=", "db", ".", "volume_type_qos_specs_get", "(", "ctxt", ",", "volume_type_id", ")", "return", "res" ]
get all qos specs for given volume type .
train
false
5,261
def is_mounted(src, mount_point, fstype, perm=None, verbose=True, fstype_mtab=None):
    if (perm is None):
        perm = ''
    if (fstype_mtab is None):
        fstype_mtab = fstype
    mount_point = os.path.realpath(mount_point)
    if (fstype not in ['nfs', 'smbfs', 'glusterfs', 'hugetlbfs', 'ubifs']):
        if src:
            src = os.path.realpath(src)
        else:
            src = ''
    mount_string = ('%s %s %s %s' % (src, mount_point, fstype_mtab, perm))
    if (mount_string.strip() in file('/etc/mtab').read()):
        logging.debug('%s is successfully mounted', src)
        return True
    else:
        if verbose:
            logging.error("Can't find mounted NFS share - /etc/mtab contents \n%s", file('/etc/mtab').read())
        return False
[ "def", "is_mounted", "(", "src", ",", "mount_point", ",", "fstype", ",", "perm", "=", "None", ",", "verbose", "=", "True", ",", "fstype_mtab", "=", "None", ")", ":", "if", "(", "perm", "is", "None", ")", ":", "perm", "=", "''", "if", "(", "fstype_mtab", "is", "None", ")", ":", "fstype_mtab", "=", "fstype", "mount_point", "=", "os", ".", "path", ".", "realpath", "(", "mount_point", ")", "if", "(", "fstype", "not", "in", "[", "'nfs'", ",", "'smbfs'", ",", "'glusterfs'", ",", "'hugetlbfs'", ",", "'ubifs'", "]", ")", ":", "if", "src", ":", "src", "=", "os", ".", "path", ".", "realpath", "(", "src", ")", "else", ":", "src", "=", "''", "mount_string", "=", "(", "'%s %s %s %s'", "%", "(", "src", ",", "mount_point", ",", "fstype_mtab", ",", "perm", ")", ")", "if", "(", "mount_string", ".", "strip", "(", ")", "in", "file", "(", "'/etc/mtab'", ")", ".", "read", "(", ")", ")", ":", "logging", ".", "debug", "(", "'%s is successfully mounted'", ",", "src", ")", "return", "True", "else", ":", "if", "verbose", ":", "logging", ".", "error", "(", "\"Can't find mounted NFS share - /etc/mtab contents \\n%s\"", ",", "file", "(", "'/etc/mtab'", ")", ".", "read", "(", ")", ")", "return", "False" ]
check mount status from /etc/mtab .
train
false
5,262
def timeline():
    result = Storage()
    inspection = []
    creation = []
    table = db.building_nzseel1
    dbresult = db((table.deleted == False)).select(table.date, table.estimated_damage, orderby=(~ table.date))
    inspection = getformatedData(dbresult)
    dbresult = db((table.deleted == False)).select(table.created_on, table.estimated_damage, orderby=(~ table.created_on))
    creation = getformatedData(dbresult)
    totals = [0, 0, 0, 0, 0, 0, 0, 0]
    for line in inspection:
        if (line[0][1] == 'Total'):
            for i in range(8):
                totals[i] += line[(i + 1)]
    return dict(inspection=inspection, creation=creation, totals=totals)
[ "def", "timeline", "(", ")", ":", "result", "=", "Storage", "(", ")", "inspection", "=", "[", "]", "creation", "=", "[", "]", "table", "=", "db", ".", "building_nzseel1", "dbresult", "=", "db", "(", "(", "table", ".", "deleted", "==", "False", ")", ")", ".", "select", "(", "table", ".", "date", ",", "table", ".", "estimated_damage", ",", "orderby", "=", "(", "~", "table", ".", "date", ")", ")", "inspection", "=", "getformatedData", "(", "dbresult", ")", "dbresult", "=", "db", "(", "(", "table", ".", "deleted", "==", "False", ")", ")", ".", "select", "(", "table", ".", "created_on", ",", "table", ".", "estimated_damage", ",", "orderby", "=", "(", "~", "table", ".", "created_on", ")", ")", "creation", "=", "getformatedData", "(", "dbresult", ")", "totals", "=", "[", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", "]", "for", "line", "in", "inspection", ":", "if", "(", "line", "[", "0", "]", "[", "1", "]", "==", "'Total'", ")", ":", "for", "i", "in", "range", "(", "8", ")", ":", "totals", "[", "i", "]", "+=", "line", "[", "(", "i", "+", "1", ")", "]", "return", "dict", "(", "inspection", "=", "inspection", ",", "creation", "=", "creation", ",", "totals", "=", "totals", ")" ]
shows a users timeline or if no user is logged in it will redirect to the public timeline .
train
false
5,264
def textFiles():
    file_name = (Directory.GetCurrentDirectory() + '\\fooGARBAGE.dll')
    file = open(file_name, 'w')
    print >>file, garbage
    file.close()
    createAssembly('TXTDLL', 'TXTDLL', 7)
    File.Move('fooTXTDLL.dll', 'fooTXTDLL.txt')
[ "def", "textFiles", "(", ")", ":", "file_name", "=", "(", "Directory", ".", "GetCurrentDirectory", "(", ")", "+", "'\\\\fooGARBAGE.dll'", ")", "file", "=", "open", "(", "file_name", ",", "'w'", ")", "print", ">>", "file", ",", "garbage", "file", ".", "close", "(", ")", "createAssembly", "(", "'TXTDLL'", ",", "'TXTDLL'", ",", "7", ")", "File", ".", "Move", "(", "'fooTXTDLL.dll'", ",", "'fooTXTDLL.txt'", ")" ]
creates * .
train
false
5,265
def _list_readline(x):
    x = iter(x)
    def readline():
        return next(x)
    return readline
[ "def", "_list_readline", "(", "x", ")", ":", "x", "=", "iter", "(", "x", ")", "def", "readline", "(", ")", ":", "return", "next", "(", "x", ")", "return", "readline" ]
given a list .
train
false
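A quick self-contained check of the snippet above (the function body is copied verbatim; the input list is just an illustration):

    def _list_readline(x):
        x = iter(x)
        def readline():
            return next(x)
        return readline

    readline = _list_readline(['line 1\n', 'line 2\n'])
    assert readline() == 'line 1\n'  # each call returns the next item
    assert readline() == 'line 2\n'  # one more call would raise StopIteration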
5,266
def fix_ext_py(filename):
    if filename.endswith(('.pyc', '.pyo')):
        filename = filename[:(-1)]
    return filename
[ "def", "fix_ext_py", "(", "filename", ")", ":", "if", "filename", ".", "endswith", "(", "(", "'.pyc'", ",", "'.pyo'", ")", ")", ":", "filename", "=", "filename", "[", ":", "(", "-", "1", ")", "]", "return", "filename" ]
given a .
train
false
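The truncated nl entry obscures what the function does: given a .pyc/.pyo path it drops the trailing letter to recover the .py name. A minimal check (the paths are made up):

    def fix_ext_py(filename):
        if filename.endswith(('.pyc', '.pyo')):
            filename = filename[:-1]
        return filename

    assert fix_ext_py('pkg/mod.pyc') == 'pkg/mod.py'  # trailing 'c' dropped
    assert fix_ext_py('pkg/mod.py') == 'pkg/mod.py'   # source paths pass through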
5,267
def compute_hash(localfn):
    with open(localfn, u'rb') as f:
        h = hashlib.md5()
        block = f.read(conf.compute_hash_block_size)
        while block:
            h.update(block)
            block = f.read(conf.compute_hash_block_size)
    return h.hexdigest()
[ "def", "compute_hash", "(", "localfn", ")", ":", "with", "open", "(", "localfn", ",", "u'rb'", ")", "as", "f", ":", "h", "=", "hashlib", ".", "md5", "(", ")", "block", "=", "f", ".", "read", "(", "conf", ".", "compute_hash_block_size", ")", "while", "block", ":", "h", ".", "update", "(", "block", ")", "block", "=", "f", ".", "read", "(", "conf", ".", "compute_hash_block_size", ")", "return", "h", ".", "hexdigest", "(", ")" ]
computes the md5 hash for a file .
train
false
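The same chunked-read hashing pattern without the snippet's conf dependency; the 64 KiB block size here is an arbitrary stand-in for conf.compute_hash_block_size:

    import hashlib

    def compute_hash(localfn, block_size=2 ** 16):
        # read in fixed-size blocks so arbitrarily large files hash in constant memory
        with open(localfn, 'rb') as f:
            h = hashlib.md5()
            block = f.read(block_size)
            while block:
                h.update(block)
                block = f.read(block_size)
        return h.hexdigest()

    # block-by-block hashing matches hashing the whole file at once:
    # compute_hash(path) == hashlib.md5(open(path, 'rb').read()).hexdigest()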
5,268
def list_upgrades(refresh=True, **kwargs):
    if salt.utils.is_true(refresh):
        refresh_db()
    upgrades = {}
    lines = __salt__['cmd.run_stdout']('/opt/csw/bin/pkgutil -A --parse').splitlines()
    for line in lines:
        comps = line.split(' DCTB ')
        if (comps[2] == 'SAME'):
            continue
        if (comps[2] == 'not installed'):
            continue
        upgrades[comps[0]] = comps[1]
    return upgrades
[ "def", "list_upgrades", "(", "refresh", "=", "True", ",", "**", "kwargs", ")", ":", "if", "salt", ".", "utils", ".", "is_true", "(", "refresh", ")", ":", "refresh_db", "(", ")", "upgrades", "=", "{", "}", "lines", "=", "__salt__", "[", "'cmd.run_stdout'", "]", "(", "'/opt/csw/bin/pkgutil -A --parse'", ")", ".", "splitlines", "(", ")", "for", "line", "in", "lines", ":", "comps", "=", "line", ".", "split", "(", "' DCTB '", ")", "if", "(", "comps", "[", "2", "]", "==", "'SAME'", ")", ":", "continue", "if", "(", "comps", "[", "2", "]", "==", "'not installed'", ")", ":", "continue", "upgrades", "[", "comps", "[", "0", "]", "]", "=", "comps", "[", "1", "]", "return", "upgrades" ]
list all available package upgrades on this system cli example: .
train
true
5,269
def is_redis_available():
    redis_conn = connect_to_redis()
    try:
        return redis_conn.ping()
    except Exception:
        log.exception(u'Redis is not available')
        return False
[ "def", "is_redis_available", "(", ")", ":", "redis_conn", "=", "connect_to_redis", "(", ")", "try", ":", "return", "redis_conn", ".", "ping", "(", ")", "except", "Exception", ":", "log", ".", "exception", "(", "u'Redis is not available'", ")", "return", "False" ]
check whether redis is available .
train
false
5,270
def normalized(normalize):
    global _normalized
    _normalized = normalize
[ "def", "normalized", "(", "normalize", ")", ":", "global", "_normalized", "_normalized", "=", "normalize" ]
set flag controlling normalization of hadamard gates by 1/sqrt(2) .
train
false
5,271
def _parse_main(path=MAIN_CF):
    with salt.utils.fopen(path, 'r') as fh_:
        full_conf = fh_.read()
    conf_list = []
    for line in full_conf.splitlines():
        if (not line.strip()):
            conf_list.append(line)
            continue
        if re.match(SWWS, line):
            if (not conf_list):
                conf_list.append(line)
                continue
            if (not isinstance(conf_list[(-1)], str)):
                conf_list[(-1)] = ''
            conf_list[(-1)] = '\n'.join([conf_list[(-1)], line])
        else:
            conf_list.append(line)
    pairs = {}
    for line in conf_list:
        if (not line.strip()):
            continue
        if line.startswith('#'):
            continue
        comps = line.split('=')
        pairs[comps[0].strip()] = '='.join(comps[1:]).strip()
    return (pairs, conf_list)
[ "def", "_parse_main", "(", "path", "=", "MAIN_CF", ")", ":", "with", "salt", ".", "utils", ".", "fopen", "(", "path", ",", "'r'", ")", "as", "fh_", ":", "full_conf", "=", "fh_", ".", "read", "(", ")", "conf_list", "=", "[", "]", "for", "line", "in", "full_conf", ".", "splitlines", "(", ")", ":", "if", "(", "not", "line", ".", "strip", "(", ")", ")", ":", "conf_list", ".", "append", "(", "line", ")", "continue", "if", "re", ".", "match", "(", "SWWS", ",", "line", ")", ":", "if", "(", "not", "conf_list", ")", ":", "conf_list", ".", "append", "(", "line", ")", "continue", "if", "(", "not", "isinstance", "(", "conf_list", "[", "(", "-", "1", ")", "]", ",", "str", ")", ")", ":", "conf_list", "[", "(", "-", "1", ")", "]", "=", "''", "conf_list", "[", "(", "-", "1", ")", "]", "=", "'\\n'", ".", "join", "(", "[", "conf_list", "[", "(", "-", "1", ")", "]", ",", "line", "]", ")", "else", ":", "conf_list", ".", "append", "(", "line", ")", "pairs", "=", "{", "}", "for", "line", "in", "conf_list", ":", "if", "(", "not", "line", ".", "strip", "(", ")", ")", ":", "continue", "if", "line", ".", "startswith", "(", "'#'", ")", ":", "continue", "comps", "=", "line", ".", "split", "(", "'='", ")", "pairs", "[", "comps", "[", "0", "]", ".", "strip", "(", ")", "]", "=", "'='", ".", "join", "(", "comps", "[", "1", ":", "]", ")", ".", "strip", "(", ")", "return", "(", "pairs", ",", "conf_list", ")" ]
parse files in the style of main .
train
false
5,272
def stage_platform_hpp(zmqroot):
    platform_hpp = pjoin(zmqroot, 'src', 'platform.hpp')
    if os.path.exists(platform_hpp):
        info('already have platform.hpp')
        return
    if (os.name == 'nt'):
        platform_dir = pjoin(zmqroot, 'builds', 'msvc')
    else:
        info('attempting ./configure to generate platform.hpp')
        p = Popen('./configure', cwd=zmqroot, shell=True, stdout=PIPE, stderr=PIPE)
        (o, e) = p.communicate()
        if p.returncode:
            warn(('failed to configure libzmq:\n%s' % e))
            if (sys.platform == 'darwin'):
                platform_dir = pjoin(HERE, 'include_darwin')
            elif sys.platform.startswith('freebsd'):
                platform_dir = pjoin(HERE, 'include_freebsd')
            elif sys.platform.startswith('linux-armv'):
                platform_dir = pjoin(HERE, 'include_linux-armv')
            else:
                platform_dir = pjoin(HERE, 'include_linux')
        else:
            return
    info(('staging platform.hpp from: %s' % platform_dir))
    shutil.copy(pjoin(platform_dir, 'platform.hpp'), platform_hpp)
[ "def", "stage_platform_hpp", "(", "zmqroot", ")", ":", "platform_hpp", "=", "pjoin", "(", "zmqroot", ",", "'src'", ",", "'platform.hpp'", ")", "if", "os", ".", "path", ".", "exists", "(", "platform_hpp", ")", ":", "info", "(", "'already have platform.hpp'", ")", "return", "if", "(", "os", ".", "name", "==", "'nt'", ")", ":", "platform_dir", "=", "pjoin", "(", "zmqroot", ",", "'builds'", ",", "'msvc'", ")", "else", ":", "info", "(", "'attempting ./configure to generate platform.hpp'", ")", "p", "=", "Popen", "(", "'./configure'", ",", "cwd", "=", "zmqroot", ",", "shell", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "(", "o", ",", "e", ")", "=", "p", ".", "communicate", "(", ")", "if", "p", ".", "returncode", ":", "warn", "(", "(", "'failed to configure libzmq:\\n%s'", "%", "e", ")", ")", "if", "(", "sys", ".", "platform", "==", "'darwin'", ")", ":", "platform_dir", "=", "pjoin", "(", "HERE", ",", "'include_darwin'", ")", "elif", "sys", ".", "platform", ".", "startswith", "(", "'freebsd'", ")", ":", "platform_dir", "=", "pjoin", "(", "HERE", ",", "'include_freebsd'", ")", "elif", "sys", ".", "platform", ".", "startswith", "(", "'linux-armv'", ")", ":", "platform_dir", "=", "pjoin", "(", "HERE", ",", "'include_linux-armv'", ")", "else", ":", "platform_dir", "=", "pjoin", "(", "HERE", ",", "'include_linux'", ")", "else", ":", "return", "info", "(", "(", "'staging platform.hpp from: %s'", "%", "platform_dir", ")", ")", "shutil", ".", "copy", "(", "pjoin", "(", "platform_dir", ",", "'platform.hpp'", ")", ",", "platform_hpp", ")" ]
stage platform .
train
true
5,274
def collect_user_config_js(addon_configs):
    js_modules = []
    for addon_config in addon_configs:
        js_path = paths.resolve_addon_path(addon_config, 'user-cfg.js')
        if js_path:
            js_modules.append(js_path)
    return js_modules
[ "def", "collect_user_config_js", "(", "addon_configs", ")", ":", "js_modules", "=", "[", "]", "for", "addon_config", "in", "addon_configs", ":", "js_path", "=", "paths", ".", "resolve_addon_path", "(", "addon_config", ",", "'user-cfg.js'", ")", "if", "js_path", ":", "js_modules", ".", "append", "(", "js_path", ")", "return", "js_modules" ]
collect webpack bundles for each of the addons user-cfg .
train
false
5,275
def _matrix_mask(data, mask):
    if (mask is None):
        mask = np.zeros(data.shape, np.bool)
    if isinstance(mask, np.ndarray):
        if (mask.shape != data.shape):
            raise ValueError('Mask must have the same shape as data.')
        mask = pd.DataFrame(mask, index=data.index, columns=data.columns, dtype=np.bool)
    elif isinstance(mask, pd.DataFrame):
        if ((not mask.index.equals(data.index)) and mask.columns.equals(data.columns)):
            err = 'Mask must have the same index and columns as data.'
            raise ValueError(err)
    mask = (mask | pd.isnull(data))
    return mask
[ "def", "_matrix_mask", "(", "data", ",", "mask", ")", ":", "if", "(", "mask", "is", "None", ")", ":", "mask", "=", "np", ".", "zeros", "(", "data", ".", "shape", ",", "np", ".", "bool", ")", "if", "isinstance", "(", "mask", ",", "np", ".", "ndarray", ")", ":", "if", "(", "mask", ".", "shape", "!=", "data", ".", "shape", ")", ":", "raise", "ValueError", "(", "'Mask must have the same shape as data.'", ")", "mask", "=", "pd", ".", "DataFrame", "(", "mask", ",", "index", "=", "data", ".", "index", ",", "columns", "=", "data", ".", "columns", ",", "dtype", "=", "np", ".", "bool", ")", "elif", "isinstance", "(", "mask", ",", "pd", ".", "DataFrame", ")", ":", "if", "(", "(", "not", "mask", ".", "index", ".", "equals", "(", "data", ".", "index", ")", ")", "and", "mask", ".", "columns", ".", "equals", "(", "data", ".", "columns", ")", ")", ":", "err", "=", "'Mask must have the same index and columns as data.'", "raise", "ValueError", "(", "err", ")", "mask", "=", "(", "mask", "|", "pd", ".", "isnull", "(", "data", ")", ")", "return", "mask" ]
ensure that data and mask are compatabile and add missing values .
train
false
5,277
def _format_align(body, spec_dict):
    if ((len(body) > 0) and (body[0] in '-+')):
        sign = body[0]
        body = body[1:]
    else:
        sign = ''
    if (sign != '-'):
        if (spec_dict['sign'] in ' +'):
            sign = spec_dict['sign']
        else:
            sign = ''
    minimumwidth = spec_dict['minimumwidth']
    fill = spec_dict['fill']
    padding = (fill * max((minimumwidth - len((sign + body))), 0))
    align = spec_dict['align']
    if (align == '<'):
        result = ((padding + sign) + body)
    elif (align == '>'):
        result = ((sign + body) + padding)
    elif (align == '='):
        result = ((sign + padding) + body)
    else:
        half = (len(padding) // 2)
        result = (((padding[:half] + sign) + body) + padding[half:])
    if spec_dict['unicode']:
        result = unicode(result)
    return result
[ "def", "_format_align", "(", "body", ",", "spec_dict", ")", ":", "if", "(", "(", "len", "(", "body", ")", ">", "0", ")", "and", "(", "body", "[", "0", "]", "in", "'-+'", ")", ")", ":", "sign", "=", "body", "[", "0", "]", "body", "=", "body", "[", "1", ":", "]", "else", ":", "sign", "=", "''", "if", "(", "sign", "!=", "'-'", ")", ":", "if", "(", "spec_dict", "[", "'sign'", "]", "in", "' +'", ")", ":", "sign", "=", "spec_dict", "[", "'sign'", "]", "else", ":", "sign", "=", "''", "minimumwidth", "=", "spec_dict", "[", "'minimumwidth'", "]", "fill", "=", "spec_dict", "[", "'fill'", "]", "padding", "=", "(", "fill", "*", "max", "(", "(", "minimumwidth", "-", "len", "(", "(", "sign", "+", "body", ")", ")", ")", ",", "0", ")", ")", "align", "=", "spec_dict", "[", "'align'", "]", "if", "(", "align", "==", "'<'", ")", ":", "result", "=", "(", "(", "padding", "+", "sign", ")", "+", "body", ")", "elif", "(", "align", "==", "'>'", ")", ":", "result", "=", "(", "(", "sign", "+", "body", ")", "+", "padding", ")", "elif", "(", "align", "==", "'='", ")", ":", "result", "=", "(", "(", "sign", "+", "padding", ")", "+", "body", ")", "else", ":", "half", "=", "(", "len", "(", "padding", ")", "//", "2", ")", "result", "=", "(", "(", "(", "padding", "[", ":", "half", "]", "+", "sign", ")", "+", "body", ")", "+", "padding", "[", "half", ":", "]", ")", "if", "spec_dict", "[", "'unicode'", "]", ":", "result", "=", "unicode", "(", "result", ")", "return", "result" ]
given an unpadded .
train
false
5,278
def _resolve_requirements_chain(requirements):
    chain = []
    if isinstance(requirements, string_types):
        requirements = [requirements]
    for req_file in requirements:
        chain.append(req_file)
        chain.extend(_resolve_requirements_chain(_find_req(req_file)))
    return chain
[ "def", "_resolve_requirements_chain", "(", "requirements", ")", ":", "chain", "=", "[", "]", "if", "isinstance", "(", "requirements", ",", "string_types", ")", ":", "requirements", "=", "[", "requirements", "]", "for", "req_file", "in", "requirements", ":", "chain", ".", "append", "(", "req_file", ")", "chain", ".", "extend", "(", "_resolve_requirements_chain", "(", "_find_req", "(", "req_file", ")", ")", ")", "return", "chain" ]
return an array of requirements file paths that can be used to complete the no_chown==false && user != none conundrum .
train
true
5,283
def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-08, epsrel=1.49e-08):
    def ranges0(*args):
        return [qfun(args[1], args[0]), rfun(args[1], args[0])]
    def ranges1(*args):
        return [gfun(args[0]), hfun(args[0])]
    ranges = [ranges0, ranges1, [a, b]]
    return nquad(func, ranges, args=args)
[ "def", "tplquad", "(", "func", ",", "a", ",", "b", ",", "gfun", ",", "hfun", ",", "qfun", ",", "rfun", ",", "args", "=", "(", ")", ",", "epsabs", "=", "1.49e-08", ",", "epsrel", "=", "1.49e-08", ")", ":", "def", "ranges0", "(", "*", "args", ")", ":", "return", "[", "qfun", "(", "args", "[", "1", "]", ",", "args", "[", "0", "]", ")", ",", "rfun", "(", "args", "[", "1", "]", ",", "args", "[", "0", "]", ")", "]", "def", "ranges1", "(", "*", "args", ")", ":", "return", "[", "gfun", "(", "args", "[", "0", "]", ")", ",", "hfun", "(", "args", "[", "0", "]", ")", "]", "ranges", "=", "[", "ranges0", ",", "ranges1", ",", "[", "a", ",", "b", "]", "]", "return", "nquad", "(", "func", ",", "ranges", ",", "args", "=", "args", ")" ]
compute a triple integral .
train
false
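For orientation, this mirrors scipy.integrate.tplquad: the innermost (z) bounds may depend on (x, y) and the y bounds on x, which is exactly what the ranges0/ranges1 closures encode. A sanity check against a known value, assuming the SciPy API:

    from scipy import integrate

    # volume of the unit cube: integrand 1, all bounds constant
    result, abserr = integrate.tplquad(
        lambda z, y, x: 1.0,
        0, 1,                            # x limits
        lambda x: 0, lambda x: 1,        # y limits (may depend on x)
        lambda x, y: 0, lambda x, y: 1)  # z limits (may depend on x and y)
    assert abs(result - 1.0) < 1e-8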
5,284
def make_query_from_filter(sample_filter, require_meter=True):
    q = {}
    if sample_filter.user:
        q['user_id'] = sample_filter.user
    if sample_filter.project:
        q['project_id'] = sample_filter.project
    if sample_filter.meter:
        q['counter_name'] = sample_filter.meter
    elif require_meter:
        raise RuntimeError('Missing required meter specifier')
    ts_range = make_timestamp_range(sample_filter.start_timestamp, sample_filter.end_timestamp, sample_filter.start_timestamp_op, sample_filter.end_timestamp_op)
    if ts_range:
        q['timestamp'] = ts_range
    if sample_filter.resource:
        q['resource_id'] = sample_filter.resource
    if sample_filter.source:
        q['source'] = sample_filter.source
    if sample_filter.message_id:
        q['message_id'] = sample_filter.message_id
    q.update(dict(((('resource_%s' % k), v) for (k, v) in six.iteritems(improve_keys(sample_filter.metaquery, metaquery=True)))))
    return q
[ "def", "make_query_from_filter", "(", "sample_filter", ",", "require_meter", "=", "True", ")", ":", "q", "=", "{", "}", "if", "sample_filter", ".", "user", ":", "q", "[", "'user_id'", "]", "=", "sample_filter", ".", "user", "if", "sample_filter", ".", "project", ":", "q", "[", "'project_id'", "]", "=", "sample_filter", ".", "project", "if", "sample_filter", ".", "meter", ":", "q", "[", "'counter_name'", "]", "=", "sample_filter", ".", "meter", "elif", "require_meter", ":", "raise", "RuntimeError", "(", "'Missing required meter specifier'", ")", "ts_range", "=", "make_timestamp_range", "(", "sample_filter", ".", "start_timestamp", ",", "sample_filter", ".", "end_timestamp", ",", "sample_filter", ".", "start_timestamp_op", ",", "sample_filter", ".", "end_timestamp_op", ")", "if", "ts_range", ":", "q", "[", "'timestamp'", "]", "=", "ts_range", "if", "sample_filter", ".", "resource", ":", "q", "[", "'resource_id'", "]", "=", "sample_filter", ".", "resource", "if", "sample_filter", ".", "source", ":", "q", "[", "'source'", "]", "=", "sample_filter", ".", "source", "if", "sample_filter", ".", "message_id", ":", "q", "[", "'message_id'", "]", "=", "sample_filter", ".", "message_id", "q", ".", "update", "(", "dict", "(", "(", "(", "(", "'resource_%s'", "%", "k", ")", ",", "v", ")", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "improve_keys", "(", "sample_filter", ".", "metaquery", ",", "metaquery", "=", "True", ")", ")", ")", ")", ")", "return", "q" ]
return a query dictionary based on the settings in the filter .
train
false
5,285
def memoize_method(method):
    def wrapper(self, *args, **kwargs):
        dct = self.__dict__.setdefault('_memoize_method_dct', {})
        key = (args, frozenset(kwargs.items()))
        try:
            return dct[key]
        except KeyError:
            result = method(self, *args, **kwargs)
            dct[key] = result
            return result
    return wrapper
[ "def", "memoize_method", "(", "method", ")", ":", "def", "wrapper", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", ":", "dct", "=", "self", ".", "__dict__", ".", "setdefault", "(", "'_memoize_method_dct'", ",", "{", "}", ")", "key", "=", "(", "args", ",", "frozenset", "(", "kwargs", ".", "items", "(", ")", ")", ")", "try", ":", "return", "dct", "[", "key", "]", "except", "KeyError", ":", "result", "=", "method", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", "dct", "[", "key", "]", "=", "result", "return", "result", "return", "wrapper" ]
a normal memoize function .
train
false
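Usage sketch for the decorator above (copied verbatim, applied to a made-up Fibonacci class). Note the cache key is (args, kwargs) without the method itself, so two memoized methods on one instance can collide if called with identical arguments:

    def memoize_method(method):
        def wrapper(self, *args, **kwargs):
            dct = self.__dict__.setdefault('_memoize_method_dct', {})
            key = (args, frozenset(kwargs.items()))
            try:
                return dct[key]
            except KeyError:
                result = method(self, *args, **kwargs)
                dct[key] = result
                return result
        return wrapper

    class Fib(object):
        calls = 0

        @memoize_method
        def fib(self, n):
            Fib.calls += 1
            return n if n < 2 else self.fib(n - 1) + self.fib(n - 2)

    f = Fib()
    assert f.fib(20) == 6765
    assert Fib.calls == 21  # linear call count; without the cache it would be exponential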
5,286
def headers_to_container_info(headers, status_int=HTTP_OK):
    headers = dict(((k.lower(), v) for (k, v) in dict(headers).iteritems()))
    return {
        'status': status_int,
        'read_acl': headers.get('x-container-read'),
        'write_acl': headers.get('x-container-write'),
        'sync_key': headers.get('x-container-sync-key'),
        'object_count': headers.get('x-container-object-count'),
        'bytes': headers.get('x-container-bytes-used'),
        'versions': headers.get('x-versions-location'),
        'cors': {
            'allow_origin': headers.get('x-container-meta-access-control-allow-origin'),
            'allow_headers': headers.get('x-container-meta-access-control-allow-headers'),
            'expose_headers': headers.get('x-container-meta-access-control-expose-headers'),
            'max_age': headers.get('x-container-meta-access-control-max-age'),
        },
        'meta': dict(((key[17:], value) for (key, value) in headers.iteritems() if key.startswith('x-container-meta-'))),
    }
[ "def", "headers_to_container_info", "(", "headers", ",", "status_int", "=", "HTTP_OK", ")", ":", "headers", "=", "dict", "(", "(", "(", "k", ".", "lower", "(", ")", ",", "v", ")", "for", "(", "k", ",", "v", ")", "in", "dict", "(", "headers", ")", ".", "iteritems", "(", ")", ")", ")", "return", "{", "'status'", ":", "status_int", ",", "'read_acl'", ":", "headers", ".", "get", "(", "'x-container-read'", ")", ",", "'write_acl'", ":", "headers", ".", "get", "(", "'x-container-write'", ")", ",", "'sync_key'", ":", "headers", ".", "get", "(", "'x-container-sync-key'", ")", ",", "'object_count'", ":", "headers", ".", "get", "(", "'x-container-object-count'", ")", ",", "'bytes'", ":", "headers", ".", "get", "(", "'x-container-bytes-used'", ")", ",", "'versions'", ":", "headers", ".", "get", "(", "'x-versions-location'", ")", ",", "'cors'", ":", "{", "'allow_origin'", ":", "headers", ".", "get", "(", "'x-container-meta-access-control-allow-origin'", ")", ",", "'allow_headers'", ":", "headers", ".", "get", "(", "'x-container-meta-access-control-allow-headers'", ")", ",", "'expose_headers'", ":", "headers", ".", "get", "(", "'x-container-meta-access-control-expose-headers'", ")", ",", "'max_age'", ":", "headers", ".", "get", "(", "'x-container-meta-access-control-max-age'", ")", "}", ",", "'meta'", ":", "dict", "(", "(", "(", "key", "[", "17", ":", "]", ",", "value", ")", "for", "(", "key", ",", "value", ")", "in", "headers", ".", "iteritems", "(", ")", "if", "key", ".", "startswith", "(", "'x-container-meta-'", ")", ")", ")", "}" ]
construct a cacheable dict of container info based on response headers .
train
false
5,288
def incr_mean_variance_axis(X, axis, last_mean, last_var, last_n):
    _raise_error_wrong_axis(axis)
    if isinstance(X, sp.csr_matrix):
        if (axis == 0):
            return _incr_mean_var_axis0(X, last_mean=last_mean, last_var=last_var, last_n=last_n)
        else:
            return _incr_mean_var_axis0(X.T, last_mean=last_mean, last_var=last_var, last_n=last_n)
    elif isinstance(X, sp.csc_matrix):
        if (axis == 0):
            return _incr_mean_var_axis0(X, last_mean=last_mean, last_var=last_var, last_n=last_n)
        else:
            return _incr_mean_var_axis0(X.T, last_mean=last_mean, last_var=last_var, last_n=last_n)
    else:
        _raise_typeerror(X)
[ "def", "incr_mean_variance_axis", "(", "X", ",", "axis", ",", "last_mean", ",", "last_var", ",", "last_n", ")", ":", "_raise_error_wrong_axis", "(", "axis", ")", "if", "isinstance", "(", "X", ",", "sp", ".", "csr_matrix", ")", ":", "if", "(", "axis", "==", "0", ")", ":", "return", "_incr_mean_var_axis0", "(", "X", ",", "last_mean", "=", "last_mean", ",", "last_var", "=", "last_var", ",", "last_n", "=", "last_n", ")", "else", ":", "return", "_incr_mean_var_axis0", "(", "X", ".", "T", ",", "last_mean", "=", "last_mean", ",", "last_var", "=", "last_var", ",", "last_n", "=", "last_n", ")", "elif", "isinstance", "(", "X", ",", "sp", ".", "csc_matrix", ")", ":", "if", "(", "axis", "==", "0", ")", ":", "return", "_incr_mean_var_axis0", "(", "X", ",", "last_mean", "=", "last_mean", ",", "last_var", "=", "last_var", ",", "last_n", "=", "last_n", ")", "else", ":", "return", "_incr_mean_var_axis0", "(", "X", ".", "T", ",", "last_mean", "=", "last_mean", ",", "last_var", "=", "last_var", ",", "last_n", "=", "last_n", ")", "else", ":", "_raise_typeerror", "(", "X", ")" ]
compute incremental mean and variance along an axis on a csr or csc matrix .
train
false
5,290
def Bar(xs, ys, **options):
    options = _UnderrideColor(options)
    options = _Underride(options, linewidth=0, alpha=0.6)
    pyplot.bar(xs, ys, **options)
[ "def", "Bar", "(", "xs", ",", "ys", ",", "**", "options", ")", ":", "options", "=", "_UnderrideColor", "(", "options", ")", "options", "=", "_Underride", "(", "options", ",", "linewidth", "=", "0", ",", "alpha", "=", "0.6", ")", "pyplot", ".", "bar", "(", "xs", ",", "ys", ",", "**", "options", ")" ]
create a bar chart using :class:barbuilder <bokeh .
train
false
5,291
@pytest.fixture
def redirect_webengine_data(data_tmpdir, monkeypatch):
    monkeypatch.setenv('XDG_DATA_HOME', str(data_tmpdir))
    monkeypatch.setenv('HOME', str(data_tmpdir))
[ "@", "pytest", ".", "fixture", "def", "redirect_webengine_data", "(", "data_tmpdir", ",", "monkeypatch", ")", ":", "monkeypatch", ".", "setenv", "(", "'XDG_DATA_HOME'", ",", "str", "(", "data_tmpdir", ")", ")", "monkeypatch", ".", "setenv", "(", "'HOME'", ",", "str", "(", "data_tmpdir", ")", ")" ]
set xdg_data_home and home to a temp location .
train
false
5,292
def get_params(section_div, params_class):
    parameters = []
    params_row = section_div.find('tr', attrs={'class': params_class})
    if (not params_row):
        return
    params_cells = params_row.find('td', {'class': 'field-body'})
    for tag in params_cells.children:
        try:
            t = tag.text
            t.strip()
            if t.startswith('\n'):
                t = t.lstrip()
            t = ((' ' * 4) + t)
            t = t.replace('\n', ' ')
            t.rstrip()
            t = (t + '\n')
            parameters.append(t)
        except AttributeError:
            pass
    return ''.join(parameters).rstrip()
[ "def", "get_params", "(", "section_div", ",", "params_class", ")", ":", "parameters", "=", "[", "]", "params_row", "=", "section_div", ".", "find", "(", "'tr'", ",", "attrs", "=", "{", "'class'", ":", "params_class", "}", ")", "if", "(", "not", "params_row", ")", ":", "return", "params_cells", "=", "params_row", ".", "find", "(", "'td'", ",", "{", "'class'", ":", "'field-body'", "}", ")", "for", "tag", "in", "params_cells", ".", "children", ":", "try", ":", "t", "=", "tag", ".", "text", "t", ".", "strip", "(", ")", "if", "t", ".", "startswith", "(", "'\\n'", ")", ":", "t", "=", "t", ".", "lstrip", "(", ")", "t", "=", "(", "(", "' '", "*", "4", ")", "+", "t", ")", "t", "=", "t", ".", "replace", "(", "'\\n'", ",", "' '", ")", "t", ".", "rstrip", "(", ")", "t", "=", "(", "t", "+", "'\\n'", ")", "parameters", ".", "append", "(", "t", ")", "except", "AttributeError", ":", "pass", "return", "''", ".", "join", "(", "parameters", ")", ".", "rstrip", "(", ")" ]
parse and return the parameters or returns of the documentation topic .
train
false
5,293
def _convert_to_idn(url):
    parts = list(urlparse.urlsplit(url))
    try:
        parts[1].encode(u'ascii')
    except UnicodeEncodeError:
        host = parts[1].rsplit(u':', 1)
        newhost = []
        port = u''
        if (len(host) == 2):
            port = host.pop()
        for h in host[0].split(u'.'):
            newhost.append(h.encode(u'idna').decode(u'utf-8'))
        parts[1] = u'.'.join(newhost)
        if port:
            parts[1] += (u':' + port)
        return urlparse.urlunsplit(parts)
    else:
        return url
[ "def", "_convert_to_idn", "(", "url", ")", ":", "parts", "=", "list", "(", "urlparse", ".", "urlsplit", "(", "url", ")", ")", "try", ":", "parts", "[", "1", "]", ".", "encode", "(", "u'ascii'", ")", "except", "UnicodeEncodeError", ":", "host", "=", "parts", "[", "1", "]", ".", "rsplit", "(", "u':'", ",", "1", ")", "newhost", "=", "[", "]", "port", "=", "u''", "if", "(", "len", "(", "host", ")", "==", "2", ")", ":", "port", "=", "host", ".", "pop", "(", ")", "for", "h", "in", "host", "[", "0", "]", ".", "split", "(", "u'.'", ")", ":", "newhost", ".", "append", "(", "h", ".", "encode", "(", "u'idna'", ")", ".", "decode", "(", "u'utf-8'", ")", ")", "parts", "[", "1", "]", "=", "u'.'", ".", "join", "(", "newhost", ")", "if", "port", ":", "parts", "[", "1", "]", "+=", "(", "u':'", "+", "port", ")", "return", "urlparse", ".", "urlunsplit", "(", "parts", ")", "else", ":", "return", "url" ]
convert a url to idn notation .
train
false
5,294
def DeletionTest(f):
    @functools.wraps(f)
    def Decorator(testinstance):
        if testinstance.TEST_DELETION:
            return f(testinstance)
        else:
            return testinstance.skipTest('Tests that use deletion are disabled for this data store.')
    return Decorator
[ "def", "DeletionTest", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "Decorator", "(", "testinstance", ")", ":", "if", "testinstance", ".", "TEST_DELETION", ":", "return", "f", "(", "testinstance", ")", "else", ":", "return", "testinstance", ".", "skipTest", "(", "'Tests that use deletion are disabled for this data store.'", ")", "return", "Decorator" ]
this indicates a test that uses deletion .
train
false
5,295
def _nan_equal(a, b):
    if (type(a) is not type(b)):
        return False
    if isinstance(a, float):
        return (math.isnan(a) and math.isnan(b))
    aexp = a.as_tuple()[2]
    bexp = b.as_tuple()[2]
    return ((aexp == bexp) and (aexp in ('n', 'N')))
[ "def", "_nan_equal", "(", "a", ",", "b", ")", ":", "if", "(", "type", "(", "a", ")", "is", "not", "type", "(", "b", ")", ")", ":", "return", "False", "if", "isinstance", "(", "a", ",", "float", ")", ":", "return", "(", "math", ".", "isnan", "(", "a", ")", "and", "math", ".", "isnan", "(", "b", ")", ")", "aexp", "=", "a", ".", "as_tuple", "(", ")", "[", "2", "]", "bexp", "=", "b", ".", "as_tuple", "(", ")", "[", "2", "]", "return", "(", "(", "aexp", "==", "bexp", ")", "and", "(", "aexp", "in", "(", "'n'", ",", "'N'", ")", ")", ")" ]
return true if a and b are both the same kind of nan .
train
false
5,296
def _count0Bits(num):
    num = long(num)
    if (num < 0):
        raise ValueError(('Only positive Numbers please: %s' % num))
    ret = 0
    while (num > 0):
        if ((num & 1) == 1):
            break
        num = (num >> 1)
        ret += 1
    return ret
[ "def", "_count0Bits", "(", "num", ")", ":", "num", "=", "long", "(", "num", ")", "if", "(", "num", "<", "0", ")", ":", "raise", "ValueError", "(", "(", "'Only positive Numbers please: %s'", "%", "num", ")", ")", "ret", "=", "0", "while", "(", "num", ">", "0", ")", ":", "if", "(", "(", "num", "&", "1", ")", "==", "1", ")", ":", "break", "num", "=", "(", "num", ">>", "1", ")", "ret", "+=", "1", "return", "ret" ]
find the highest bit set to 0 in an integer .
train
false
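The loop counts how many consecutive low-order bits are zero before the first set bit. A Python 3 sketch of the same logic (the row's long() is Python 2):

    def _count0Bits(num):
        num = int(num)
        if num < 0:
            raise ValueError('Only positive Numbers please: %s' % num)
        ret = 0
        while num > 0:
            if (num & 1) == 1:
                break  # hit the lowest set bit
            num = num >> 1
            ret += 1
        return ret

    assert _count0Bits(0b10100000) == 5  # five trailing zero bits
    assert _count0Bits(1) == 0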
5,298
def hlen(key, host=None, port=None, db=None, password=None):
    server = _connect(host, port, db, password)
    return server.hlen(key)
[ "def", "hlen", "(", "key", ",", "host", "=", "None", ",", "port", "=", "None", ",", "db", "=", "None", ",", "password", "=", "None", ")", ":", "server", "=", "_connect", "(", "host", ",", "port", ",", "db", ",", "password", ")", "return", "server", ".", "hlen", "(", "key", ")" ]
returns number of fields of a hash .
train
true
5,299
def _termination_condition(t, k, g, n, s, alpha, delta):
    diff = (k - solow_steady_state(g, n, s, alpha, delta))
    return diff
[ "def", "_termination_condition", "(", "t", ",", "k", ",", "g", ",", "n", ",", "s", ",", "alpha", ",", "delta", ")", ":", "diff", "=", "(", "k", "-", "solow_steady_state", "(", "g", ",", "n", ",", "s", ",", "alpha", ",", "delta", ")", ")", "return", "diff" ]
terminate solver when we get close to steady state .
train
false
5,300
def all_pairs_dijkstra_path(G, cutoff=None, weight='weight'):
    path = single_source_dijkstra_path
    return {n: path(G, n, cutoff=cutoff, weight=weight) for n in G}
[ "def", "all_pairs_dijkstra_path", "(", "G", ",", "cutoff", "=", "None", ",", "weight", "=", "'weight'", ")", ":", "path", "=", "single_source_dijkstra_path", "return", "{", "n", ":", "path", "(", "G", ",", "n", ",", "cutoff", "=", "cutoff", ",", "weight", "=", "weight", ")", "for", "n", "in", "G", "}" ]
compute shortest paths between all nodes in a weighted graph .
train
false
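Assuming this is the networkx implementation, a small usage example; depending on the networkx version the call returns a dict or an iterator of (node, paths) pairs, so wrapping in dict() covers both:

    import networkx as nx

    G = nx.Graph()
    G.add_weighted_edges_from([('a', 'b', 1), ('b', 'c', 1), ('a', 'c', 5)])

    paths = dict(nx.all_pairs_dijkstra_path(G))
    assert paths['a']['c'] == ['a', 'b', 'c']  # the weight-5 direct edge is avoided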
5,301
def _feed_stream(feeder, in_stream, out_stream, block_size=BLOCK_SIZE):
    while True:
        chunk = in_stream.read(block_size)
        if (not chunk):
            break
        converted = feeder.feed(chunk)
        out_stream.write(converted)
    converted = feeder.feed()
    out_stream.write(converted)
[ "def", "_feed_stream", "(", "feeder", ",", "in_stream", ",", "out_stream", ",", "block_size", "=", "BLOCK_SIZE", ")", ":", "while", "True", ":", "chunk", "=", "in_stream", ".", "read", "(", "block_size", ")", "if", "(", "not", "chunk", ")", ":", "break", "converted", "=", "feeder", ".", "feed", "(", "chunk", ")", "out_stream", ".", "write", "(", "converted", ")", "converted", "=", "feeder", ".", "feed", "(", ")", "out_stream", ".", "write", "(", "converted", ")" ]
uses feeder to read and convert from in_stream and write to out_stream .
train
true
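The feeder protocol is inferred from the snippet: feed(chunk) converts a block, and a final no-argument feed() flushes whatever the feeder buffered. A toy feeder makes the flow visible (UpperFeeder is invented for this example):

    import io

    BLOCK_SIZE = 8192

    class UpperFeeder(object):
        def feed(self, chunk=b''):
            return chunk.upper()  # stateless toy conversion; real feeders may buffer

    def _feed_stream(feeder, in_stream, out_stream, block_size=BLOCK_SIZE):
        while True:
            chunk = in_stream.read(block_size)
            if not chunk:
                break
            out_stream.write(feeder.feed(chunk))
        out_stream.write(feeder.feed())  # final flush

    src, dst = io.BytesIO(b'abc'), io.BytesIO()
    _feed_stream(UpperFeeder(), src, dst, block_size=2)
    assert dst.getvalue() == b'ABC'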
5,302
def _bit_list_to_bytes(bit_list):
    num_bits = len(bit_list)
    byte_vals = bytearray()
    for start in six.moves.xrange(0, num_bits, 8):
        curr_bits = bit_list[start:(start + 8)]
        char_val = sum(((val * digit) for (val, digit) in zip(_POW2, curr_bits)))
        byte_vals.append(char_val)
    return bytes(byte_vals)
[ "def", "_bit_list_to_bytes", "(", "bit_list", ")", ":", "num_bits", "=", "len", "(", "bit_list", ")", "byte_vals", "=", "bytearray", "(", ")", "for", "start", "in", "six", ".", "moves", ".", "xrange", "(", "0", ",", "num_bits", ",", "8", ")", ":", "curr_bits", "=", "bit_list", "[", "start", ":", "(", "start", "+", "8", ")", "]", "char_val", "=", "sum", "(", "(", "(", "val", "*", "digit", ")", "for", "(", "val", ",", "digit", ")", "in", "zip", "(", "_POW2", ",", "curr_bits", ")", ")", ")", "byte_vals", ".", "append", "(", "char_val", ")", "return", "bytes", "(", "byte_vals", ")" ]
converts an iterable of 1s and 0s to bytes .
train
true
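_POW2 is not shown in the row; assuming MSB-first bit order it would be the eight powers of two from 128 down to 1, which is how oauth2client (the apparent source) defines it:

    import six

    _POW2 = (128, 64, 32, 16, 8, 4, 2, 1)  # assumed MSB-first bit weights

    def _bit_list_to_bytes(bit_list):
        num_bits = len(bit_list)
        byte_vals = bytearray()
        for start in six.moves.xrange(0, num_bits, 8):
            curr_bits = bit_list[start:start + 8]
            char_val = sum(val * digit for val, digit in zip(_POW2, curr_bits))
            byte_vals.append(char_val)
        return bytes(byte_vals)

    # 0b01000001 == 65 == ASCII 'A'
    assert _bit_list_to_bytes([0, 1, 0, 0, 0, 0, 0, 1]) == b'A'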
5,303
def extract_module_locals(depth=0):
    f = sys._getframe((depth + 1))
    global_ns = f.f_globals
    module = sys.modules[global_ns['__name__']]
    return (module, f.f_locals)
[ "def", "extract_module_locals", "(", "depth", "=", "0", ")", ":", "f", "=", "sys", ".", "_getframe", "(", "(", "depth", "+", "1", ")", ")", "global_ns", "=", "f", ".", "f_globals", "module", "=", "sys", ".", "modules", "[", "global_ns", "[", "'__name__'", "]", "]", "return", "(", "module", ",", "f", ".", "f_locals", ")" ]
returns of the function depth frames away from the caller .
train
true
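"depth frames away" counts up the call stack: depth=0 resolves to the frame of whoever called extract_module_locals. A self-contained check:

    import sys

    def extract_module_locals(depth=0):
        f = sys._getframe(depth + 1)
        global_ns = f.f_globals
        module = sys.modules[global_ns['__name__']]
        return (module, f.f_locals)

    def demo():
        local_var = 42
        return extract_module_locals()  # depth=0 -> demo's own frame

    module, local_ns = demo()
    assert module is sys.modules[__name__]
    assert local_ns['local_var'] == 42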
5,305
def de_mean(x):
    x_bar = mean(x)
    return [(x_i - x_bar) for x_i in x]
[ "def", "de_mean", "(", "x", ")", ":", "x_bar", "=", "mean", "(", "x", ")", "return", "[", "(", "x_i", "-", "x_bar", ")", "for", "x_i", "in", "x", "]" ]
translate x by subtracting its mean .
train
false
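mean() is assumed to be the usual arithmetic mean, defined alongside in the source toolkit. The de-meaned result always sums to zero:

    def mean(x):
        return sum(x) / len(x)

    def de_mean(x):
        x_bar = mean(x)
        return [x_i - x_bar for x_i in x]

    assert de_mean([1, 2, 3, 4]) == [-1.5, -0.5, 0.5, 1.5]  # mean 2.5 subtracted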
5,308
def retrieve_flags(flag_dict, flag_filter):
    return [(f[0], f[1]) for f in list(flag_dict.items())
            if (isinstance(f[0], (str, bytes)) and f[0].startswith(flag_filter))]
[ "def", "retrieve_flags", "(", "flag_dict", ",", "flag_filter", ")", ":", "return", "[", "(", "f", "[", "0", "]", ",", "f", "[", "1", "]", ")", "for", "f", "in", "list", "(", "flag_dict", ".", "items", "(", ")", ")", "if", "(", "isinstance", "(", "f", "[", "0", "]", ",", "(", "str", ",", "bytes", ")", ")", "and", "f", "[", "0", "]", ".", "startswith", "(", "flag_filter", ")", ")", "]" ]
read the flags from a dictionary and return them in a usable form .
train
true
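The isinstance() guard matters because flag dictionaries of this shape (pefile uses the pattern) map both name -> value and value -> name; only the string keys should survive the filter. SECTION_FLAGS below is a made-up two-entry illustration:

    SECTION_FLAGS = {
        'IMAGE_SCN_MEM_READ': 0x40000000,
        0x40000000: 'IMAGE_SCN_MEM_READ',   # reverse entry, dropped by isinstance()
        'IMAGE_SCN_MEM_WRITE': 0x80000000,
        0x80000000: 'IMAGE_SCN_MEM_WRITE',
    }

    def retrieve_flags(flag_dict, flag_filter):
        return [(f[0], f[1]) for f in list(flag_dict.items())
                if isinstance(f[0], (str, bytes)) and f[0].startswith(flag_filter)]

    assert sorted(retrieve_flags(SECTION_FLAGS, 'IMAGE_SCN_')) == [
        ('IMAGE_SCN_MEM_READ', 0x40000000),
        ('IMAGE_SCN_MEM_WRITE', 0x80000000),
    ]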
5,309
def combine_play_stats(games):
    return reduce((lambda p1, p2: (p1 + p2)), [g.drives.players() for g in games if (g is not None)])
[ "def", "combine_play_stats", "(", "games", ")", ":", "return", "reduce", "(", "(", "lambda", "p1", ",", "p2", ":", "(", "p1", "+", "p2", ")", ")", ",", "[", "g", ".", "drives", ".", "players", "(", ")", "for", "g", "in", "games", "if", "(", "g", "is", "not", "None", ")", "]", ")" ]
combines a list of games into one big player sequence containing play level statistics .
train
false
5,310
def Trim(t, p=0.01):
    n = int((p * len(t)))
    t = sorted(t)[n:(- n)]
    return t
[ "def", "Trim", "(", "t", ",", "p", "=", "0.01", ")", ":", "n", "=", "int", "(", "(", "p", "*", "len", "(", "t", ")", ")", ")", "t", "=", "sorted", "(", "t", ")", "[", "n", ":", "(", "-", "n", ")", "]", "return", "t" ]
trims the largest and smallest elements of t .
train
false
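Worked example, plus an edge case worth knowing: when p * len(t) rounds down to zero, the slice [0:-0] is empty, so short inputs with the default p return []:

    def Trim(t, p=0.01):
        n = int(p * len(t))
        t = sorted(t)[n:-n]
        return t

    assert Trim(list(range(10)), p=0.1) == [1, 2, 3, 4, 5, 6, 7, 8]  # min and max dropped
    assert Trim([1, 2, 3]) == []  # n == 0, so [0:-0] slices away everything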
5,312
def _sqrt_symbolic_denest(a, b, r):
    (a, b, r) = map(sympify, (a, b, r))
    rval = _sqrt_match(r)
    if (not rval):
        return None
    (ra, rb, rr) = rval
    if rb:
        y = Dummy('y', positive=True)
        try:
            newa = Poly(a.subs(sqrt(rr), (((y ** 2) - ra) / rb)), y)
        except PolynomialError:
            return None
        if (newa.degree() == 2):
            (ca, cb, cc) = newa.all_coeffs()
            cb += b
            if _mexpand(((cb ** 2) - ((4 * ca) * cc))).equals(0):
                z = sqrt((ca * ((sqrt(r) + (cb / (2 * ca))) ** 2)))
                if z.is_number:
                    z = _mexpand(Mul._from_args(z.as_content_primitive()))
                return z
[ "def", "_sqrt_symbolic_denest", "(", "a", ",", "b", ",", "r", ")", ":", "(", "a", ",", "b", ",", "r", ")", "=", "map", "(", "sympify", ",", "(", "a", ",", "b", ",", "r", ")", ")", "rval", "=", "_sqrt_match", "(", "r", ")", "if", "(", "not", "rval", ")", ":", "return", "None", "(", "ra", ",", "rb", ",", "rr", ")", "=", "rval", "if", "rb", ":", "y", "=", "Dummy", "(", "'y'", ",", "positive", "=", "True", ")", "try", ":", "newa", "=", "Poly", "(", "a", ".", "subs", "(", "sqrt", "(", "rr", ")", ",", "(", "(", "(", "y", "**", "2", ")", "-", "ra", ")", "/", "rb", ")", ")", ",", "y", ")", "except", "PolynomialError", ":", "return", "None", "if", "(", "newa", ".", "degree", "(", ")", "==", "2", ")", ":", "(", "ca", ",", "cb", ",", "cc", ")", "=", "newa", ".", "all_coeffs", "(", ")", "cb", "+=", "b", "if", "_mexpand", "(", "(", "(", "cb", "**", "2", ")", "-", "(", "(", "4", "*", "ca", ")", "*", "cc", ")", ")", ")", ".", "equals", "(", "0", ")", ":", "z", "=", "sqrt", "(", "(", "ca", "*", "(", "(", "sqrt", "(", "r", ")", "+", "(", "cb", "/", "(", "2", "*", "ca", ")", ")", ")", "**", "2", ")", ")", ")", "if", "z", ".", "is_number", ":", "z", "=", "_mexpand", "(", "Mul", ".", "_from_args", "(", "z", ".", "as_content_primitive", "(", ")", ")", ")", "return", "z" ]
given an expression .
train
false
5,313
def staff():
    s3.filter = (FS('type') == 1)

    def prep(r):
        table = r.table
        tablename = r.tablename
        get_vars = r.get_vars
        crud_strings = s3.crud_strings
        crud_strings[tablename] = crud_strings['hrm_staff']
        resource = r.resource
        if ('expiring' in get_vars):
            query = (FS('end_date') < (request.utcnow + datetime.timedelta(weeks=4)))
            resource.add_filter(query)
            crud_strings[tablename].title_list = T('Staff with Contracts Expiring in the next Month')
            resource.configure(sortby=table.end_date, insertable=False)
            list_fields = [(T('Contract End Date'), 'end_date'), 'person_id', 'job_title_id', 'organisation_id', 'site_id']
            if settings.get_hrm_staff_departments():
                list_fields.insert(4, 'department_id')
            resource.configure(list_fields=list_fields)
        elif (r.representation == 'xls'):
            s3db.hrm_xls_list_fields(r, vol=False)
        else:
            list_fields = ['person_id', 'job_title_id', 'organisation_id', 'site_id', (T('Email'), 'email.value'), (settings.get_ui_label_mobile_phone(), 'phone.value')]
            if settings.get_hrm_staff_departments():
                list_fields.insert(3, 'department_id')
            if settings.get_hrm_use_trainings():
                list_fields.append('person_id$training.course_id')
            if settings.get_hrm_use_certificates():
                list_fields.append('person_id$certification.certificate_id')
            list_fields.append((T('Contract End Date'), 'end_date'))
            list_fields.append('status')
            resource.configure(list_fields=list_fields)
        if r.interactive:
            if r.id:
                if (r.method not in ('profile', 'delete')):
                    vars = {'human_resource.id': r.id, 'group': 'staff'}
                    args = []
                    if (r.representation == 'iframe'):
                        vars['format'] = 'iframe'
                        args = [r.method]
                    redirect(URL(f='person', vars=vars, args=args))
            elif (r.method == 'import'):
                redirect(URL(f='person', args='import', vars={'group': 'staff'}))
            elif ((not r.component) and (r.method != 'delete')):
                field = table.site_id
                site_id = get_vars.get('site_id', None)
                if site_id:
                    field.default = site_id
                    field.writable = False
                field.comment = DIV(DIV(_class='tooltip', _title=('%s|%s' % (settings.get_org_site_label(), T('The facility where this position is based.')))))
                table.status.writable = table.status.readable = False
                dob = s3db.pr_person.date_of_birth
                dob.widget = S3CalendarWidget(past_months=972, future_months=(-192))
        return True
    s3.prep = prep

    def postp(r, output):
        if r.interactive:
            if (not r.component):
                s3_action_buttons(r, deletable=settings.get_hrm_deletable())
                if (('msg' in settings.modules) and settings.get_hrm_compose_button() and auth.permission.has_permission('update', c='hrm', f='compose')):
                    s3.actions.append({'url': URL(f='compose', vars={'human_resource.id': '[id]'}), '_class': 'action-btn send', 'label': str(T('Send Message'))})
        elif (r.representation == 'plain'):
            output = s3db.hrm_map_popup(r)
        return output
    s3.postp = postp

    return s3_rest_controller('hrm', 'human_resource')
[ "def", "staff", "(", ")", ":", "s3", ".", "filter", "=", "(", "FS", "(", "'type'", ")", "==", "1", ")", "def", "prep", "(", "r", ")", ":", "table", "=", "r", ".", "table", "tablename", "=", "r", ".", "tablename", "get_vars", "=", "r", ".", "get_vars", "crud_strings", "=", "s3", ".", "crud_strings", "crud_strings", "[", "tablename", "]", "=", "crud_strings", "[", "'hrm_staff'", "]", "resource", "=", "r", ".", "resource", "if", "(", "'expiring'", "in", "get_vars", ")", ":", "query", "=", "(", "FS", "(", "'end_date'", ")", "<", "(", "request", ".", "utcnow", "+", "datetime", ".", "timedelta", "(", "weeks", "=", "4", ")", ")", ")", "resource", ".", "add_filter", "(", "query", ")", "crud_strings", "[", "tablename", "]", ".", "title_list", "=", "T", "(", "'Staff with Contracts Expiring in the next Month'", ")", "resource", ".", "configure", "(", "sortby", "=", "table", ".", "end_date", ",", "insertable", "=", "False", ")", "list_fields", "=", "[", "(", "T", "(", "'Contract End Date'", ")", ",", "'end_date'", ")", ",", "'person_id'", ",", "'job_title_id'", ",", "'organisation_id'", ",", "'site_id'", "]", "if", "settings", ".", "get_hrm_staff_departments", "(", ")", ":", "list_fields", ".", "insert", "(", "4", ",", "'department_id'", ")", "resource", ".", "configure", "(", "list_fields", "=", "list_fields", ")", "elif", "(", "r", ".", "representation", "==", "'xls'", ")", ":", "s3db", ".", "hrm_xls_list_fields", "(", "r", ",", "vol", "=", "False", ")", "else", ":", "list_fields", "=", "[", "'person_id'", ",", "'job_title_id'", ",", "'organisation_id'", ",", "'site_id'", ",", "(", "T", "(", "'Email'", ")", ",", "'email.value'", ")", ",", "(", "settings", ".", "get_ui_label_mobile_phone", "(", ")", ",", "'phone.value'", ")", "]", "if", "settings", ".", "get_hrm_staff_departments", "(", ")", ":", "list_fields", ".", "insert", "(", "3", ",", "'department_id'", ")", "if", "settings", ".", "get_hrm_use_trainings", "(", ")", ":", "list_fields", ".", "append", "(", "'person_id$training.course_id'", ")", "if", "settings", ".", "get_hrm_use_certificates", "(", ")", ":", "list_fields", ".", "append", "(", "'person_id$certification.certificate_id'", ")", "list_fields", ".", "append", "(", "(", "T", "(", "'Contract End Date'", ")", ",", "'end_date'", ")", ")", "list_fields", ".", "append", "(", "'status'", ")", "resource", ".", "configure", "(", "list_fields", "=", "list_fields", ")", "if", "r", ".", "interactive", ":", "if", "r", ".", "id", ":", "if", "(", "r", ".", "method", "not", "in", "(", "'profile'", ",", "'delete'", ")", ")", ":", "vars", "=", "{", "'human_resource.id'", ":", "r", ".", "id", ",", "'group'", ":", "'staff'", "}", "args", "=", "[", "]", "if", "(", "r", ".", "representation", "==", "'iframe'", ")", ":", "vars", "[", "'format'", "]", "=", "'iframe'", "args", "=", "[", "r", ".", "method", "]", "redirect", "(", "URL", "(", "f", "=", "'person'", ",", "vars", "=", "vars", ",", "args", "=", "args", ")", ")", "elif", "(", "r", ".", "method", "==", "'import'", ")", ":", "redirect", "(", "URL", "(", "f", "=", "'person'", ",", "args", "=", "'import'", ",", "vars", "=", "{", "'group'", ":", "'staff'", "}", ")", ")", "elif", "(", "(", "not", "r", ".", "component", ")", "and", "(", "r", ".", "method", "!=", "'delete'", ")", ")", ":", "field", "=", "table", ".", "site_id", "site_id", "=", "get_vars", ".", "get", "(", "'site_id'", ",", "None", ")", "if", "site_id", ":", "field", ".", "default", "=", "site_id", "field", ".", "writable", "=", "False", "field", ".", "comment", "=", "DIV", "(", "DIV", "(", 
"_class", "=", "'tooltip'", ",", "_title", "=", "(", "'%s|%s'", "%", "(", "settings", ".", "get_org_site_label", "(", ")", ",", "T", "(", "'The facility where this position is based.'", ")", ")", ")", ")", ")", "table", ".", "status", ".", "writable", "=", "table", ".", "status", ".", "readable", "=", "False", "dob", "=", "s3db", ".", "pr_person", ".", "date_of_birth", "dob", ".", "widget", "=", "S3CalendarWidget", "(", "past_months", "=", "972", ",", "future_months", "=", "(", "-", "192", ")", ")", "return", "True", "s3", ".", "prep", "=", "prep", "def", "postp", "(", "r", ",", "output", ")", ":", "if", "r", ".", "interactive", ":", "if", "(", "not", "r", ".", "component", ")", ":", "s3_action_buttons", "(", "r", ",", "deletable", "=", "settings", ".", "get_hrm_deletable", "(", ")", ")", "if", "(", "(", "'msg'", "in", "settings", ".", "modules", ")", "and", "settings", ".", "get_hrm_compose_button", "(", ")", "and", "auth", ".", "permission", ".", "has_permission", "(", "'update'", ",", "c", "=", "'hrm'", ",", "f", "=", "'compose'", ")", ")", ":", "s3", ".", "actions", ".", "append", "(", "{", "'url'", ":", "URL", "(", "f", "=", "'compose'", ",", "vars", "=", "{", "'human_resource.id'", ":", "'[id]'", "}", ")", ",", "'_class'", ":", "'action-btn send'", ",", "'label'", ":", "str", "(", "T", "(", "'Send Message'", ")", ")", "}", ")", "elif", "(", "r", ".", "representation", "==", "'plain'", ")", ":", "output", "=", "s3db", ".", "hrm_map_popup", "(", "r", ")", "return", "output", "s3", ".", "postp", "=", "postp", "return", "s3_rest_controller", "(", "'hrm'", ",", "'human_resource'", ")" ]
rest controller for budget_staff .
train
false
5,314
def supports_caller(func):
    def wrap_stackframe(context, *args, **kwargs):
        context.caller_stack._push_frame()
        try:
            return func(context, *args, **kwargs)
        finally:
            context.caller_stack._pop_frame()
    return wrap_stackframe
[ "def", "supports_caller", "(", "func", ")", ":", "def", "wrap_stackframe", "(", "context", ",", "*", "args", ",", "**", "kwargs", ")", ":", "context", ".", "caller_stack", ".", "_push_frame", "(", ")", "try", ":", "return", "func", "(", "context", ",", "*", "args", ",", "**", "kwargs", ")", "finally", ":", "context", ".", "caller_stack", ".", "_pop_frame", "(", ")", "return", "wrap_stackframe" ]
apply a caller_stack compatibility decorator to a plain python function .
train
true
5,315
def request_minion_cachedir(minion_id, opts=None, fingerprint='', pubkey=None, provider=None, base=None):
    if (base is None):
        base = __opts__['cachedir']
    if ((not fingerprint) and (pubkey is not None)):
        fingerprint = salt.utils.pem_finger(key=pubkey, sum_type=((opts and opts.get('hash_type')) or 'sha256'))
    init_cachedir(base)
    data = {'minion_id': minion_id, 'fingerprint': fingerprint, 'provider': provider}
    fname = '{0}.p'.format(minion_id)
    path = os.path.join(base, 'requested', fname)
    with salt.utils.fopen(path, 'w') as fh_:
        msgpack.dump(data, fh_)
[ "def", "request_minion_cachedir", "(", "minion_id", ",", "opts", "=", "None", ",", "fingerprint", "=", "''", ",", "pubkey", "=", "None", ",", "provider", "=", "None", ",", "base", "=", "None", ")", ":", "if", "(", "base", "is", "None", ")", ":", "base", "=", "__opts__", "[", "'cachedir'", "]", "if", "(", "(", "not", "fingerprint", ")", "and", "(", "pubkey", "is", "not", "None", ")", ")", ":", "fingerprint", "=", "salt", ".", "utils", ".", "pem_finger", "(", "key", "=", "pubkey", ",", "sum_type", "=", "(", "(", "opts", "and", "opts", ".", "get", "(", "'hash_type'", ")", ")", "or", "'sha256'", ")", ")", "init_cachedir", "(", "base", ")", "data", "=", "{", "'minion_id'", ":", "minion_id", ",", "'fingerprint'", ":", "fingerprint", ",", "'provider'", ":", "provider", "}", "fname", "=", "'{0}.p'", ".", "format", "(", "minion_id", ")", "path", "=", "os", ".", "path", ".", "join", "(", "base", ",", "'requested'", ",", "fname", ")", "with", "salt", ".", "utils", ".", "fopen", "(", "path", ",", "'w'", ")", "as", "fh_", ":", "msgpack", ".", "dump", "(", "data", ",", "fh_", ")" ]
creates an entry in the requested/ cachedir .
train
false
5,316
@_built_in_directive
def session(context_name='session', request=None, **kwargs):
    return (request and request.context.get(context_name, None))
[ "@", "_built_in_directive", "def", "session", "(", "context_name", "=", "'session'", ",", "request", "=", "None", ",", "**", "kwargs", ")", ":", "return", "(", "request", "and", "request", ".", "context", ".", "get", "(", "context_name", ",", "None", ")", ")" ]
returns a :class:session for context-management .
train
false
5,317
def _parse_igd_profile(profile_xml):
    try:
        dom = parseString(profile_xml)
    except ExpatError as e:
        raise IGDError('Unable to parse IGD reply: {0} \n\n\n {1}'.format(profile_xml, e))
    service_types = dom.getElementsByTagName('serviceType')
    for service in service_types:
        if ((_get_first_child_data(service).find('WANIPConnection') > 0) or (_get_first_child_data(service).find('WANPPPConnection') > 0)):
            try:
                control_url = _get_first_child_data(service.parentNode.getElementsByTagName('controlURL')[0])
                upnp_schema = _get_first_child_data(service).split(':')[(-2)]
                return (control_url, upnp_schema)
            except IndexError:
                pass
    raise IGDError('Could not find a control url or UPNP schema in IGD response.')
[ "def", "_parse_igd_profile", "(", "profile_xml", ")", ":", "try", ":", "dom", "=", "parseString", "(", "profile_xml", ")", "except", "ExpatError", "as", "e", ":", "raise", "IGDError", "(", "'Unable to parse IGD reply: {0} \\n\\n\\n {1}'", ".", "format", "(", "profile_xml", ",", "e", ")", ")", "service_types", "=", "dom", ".", "getElementsByTagName", "(", "'serviceType'", ")", "for", "service", "in", "service_types", ":", "if", "(", "(", "_get_first_child_data", "(", "service", ")", ".", "find", "(", "'WANIPConnection'", ")", ">", "0", ")", "or", "(", "_get_first_child_data", "(", "service", ")", ".", "find", "(", "'WANPPPConnection'", ")", ">", "0", ")", ")", ":", "try", ":", "control_url", "=", "_get_first_child_data", "(", "service", ".", "parentNode", ".", "getElementsByTagName", "(", "'controlURL'", ")", "[", "0", "]", ")", "upnp_schema", "=", "_get_first_child_data", "(", "service", ")", ".", "split", "(", "':'", ")", "[", "(", "-", "2", ")", "]", "return", "(", "control_url", ",", "upnp_schema", ")", "except", "IndexError", ":", "pass", "raise", "IGDError", "(", "'Could not find a control url or UPNP schema in IGD response.'", ")" ]
traverse the profile xml dom looking for either wanipconnection or wanpppconnection and return the controlurl and the service xml schema .
train
false
5,318
def get_area_classified():
    df = fd.get_stock_basics()
    df = df[['name', 'area']]
    df.reset_index(level=0, inplace=True)
    df = df.sort('area').reset_index(drop=True)
    return df
[ "def", "get_area_classified", "(", ")", ":", "df", "=", "fd", ".", "get_stock_basics", "(", ")", "df", "=", "df", "[", "[", "'name'", ",", "'area'", "]", "]", "df", ".", "reset_index", "(", "level", "=", "0", ",", "inplace", "=", "True", ")", "df", "=", "df", ".", "sort", "(", "'area'", ")", ".", "reset_index", "(", "drop", "=", "True", ")", "return", "df" ]
return dataframe code : stock code name : stock name area : region name .
train
false
5,319
@inspect_command(alias=u'dump_reserved')
def reserved(state, **kwargs):
    reserved_tasks = (state.tset(worker_state.reserved_requests) - state.tset(worker_state.active_requests))
    if (not reserved_tasks):
        return []
    return [request.info() for request in reserved_tasks]
[ "@", "inspect_command", "(", "alias", "=", "u'dump_reserved'", ")", "def", "reserved", "(", "state", ",", "**", "kwargs", ")", ":", "reserved_tasks", "=", "(", "state", ".", "tset", "(", "worker_state", ".", "reserved_requests", ")", "-", "state", ".", "tset", "(", "worker_state", ".", "active_requests", ")", ")", "if", "(", "not", "reserved_tasks", ")", ":", "return", "[", "]", "return", "[", "request", ".", "info", "(", ")", "for", "request", "in", "reserved_tasks", "]" ]
list of currently reserved tasks .
train
false
5,324
def set_log_level(verbose, match=None, return_old=False):
    if isinstance(verbose, bool):
        verbose = ('info' if verbose else 'warning')
    if isinstance(verbose, string_types):
        verbose = verbose.lower()
        if (verbose not in logging_types):
            raise ValueError(('Invalid argument "%s"' % verbose))
        verbose = logging_types[verbose]
    else:
        raise TypeError('verbose must be a bool or string')
    logger = logging.getLogger('vispy')
    old_verbose = logger.level
    old_match = _lh._vispy_set_match(match)
    logger.setLevel(verbose)
    if (verbose <= logging.DEBUG):
        _lf._vispy_set_prepend(True)
    else:
        _lf._vispy_set_prepend(False)
    out = None
    if return_old:
        out = (old_verbose, old_match)
    return out
[ "def", "set_log_level", "(", "verbose", ",", "match", "=", "None", ",", "return_old", "=", "False", ")", ":", "if", "isinstance", "(", "verbose", ",", "bool", ")", ":", "verbose", "=", "(", "'info'", "if", "verbose", "else", "'warning'", ")", "if", "isinstance", "(", "verbose", ",", "string_types", ")", ":", "verbose", "=", "verbose", ".", "lower", "(", ")", "if", "(", "verbose", "not", "in", "logging_types", ")", ":", "raise", "ValueError", "(", "(", "'Invalid argument \"%s\"'", "%", "verbose", ")", ")", "verbose", "=", "logging_types", "[", "verbose", "]", "else", ":", "raise", "TypeError", "(", "'verbose must be a bool or string'", ")", "logger", "=", "logging", ".", "getLogger", "(", "'vispy'", ")", "old_verbose", "=", "logger", ".", "level", "old_match", "=", "_lh", ".", "_vispy_set_match", "(", "match", ")", "logger", ".", "setLevel", "(", "verbose", ")", "if", "(", "verbose", "<=", "logging", ".", "DEBUG", ")", ":", "_lf", ".", "_vispy_set_prepend", "(", "True", ")", "else", ":", "_lf", ".", "_vispy_set_prepend", "(", "False", ")", "out", "=", "None", "if", "return_old", ":", "out", "=", "(", "old_verbose", ",", "old_match", ")", "return", "out" ]
use this method to set log level to something other than the default debug .
train
true
5,325
def horse():
    with expected_warnings(['Possible precision loss', 'Possible sign loss']):
        return img_as_bool(load('horse.png', as_grey=True))
[ "def", "horse", "(", ")", ":", "with", "expected_warnings", "(", "[", "'Possible precision loss'", ",", "'Possible sign loss'", "]", ")", ":", "return", "img_as_bool", "(", "load", "(", "'horse.png'", ",", "as_grey", "=", "True", ")", ")" ]
black and white silhouette of a horse .
train
false
5,327
def _check_arg_length(fname, args, max_fname_arg_count, compat_args): if (max_fname_arg_count < 0): raise ValueError("'max_fname_arg_count' must be non-negative") if (len(args) > len(compat_args)): max_arg_count = (len(compat_args) + max_fname_arg_count) actual_arg_count = (len(args) + max_fname_arg_count) argument = ('argument' if (max_arg_count == 1) else 'arguments') raise TypeError('{fname}() takes at most {max_arg} {argument} ({given_arg} given)'.format(fname=fname, max_arg=max_arg_count, argument=argument, given_arg=actual_arg_count))
[ "def", "_check_arg_length", "(", "fname", ",", "args", ",", "max_fname_arg_count", ",", "compat_args", ")", ":", "if", "(", "max_fname_arg_count", "<", "0", ")", ":", "raise", "ValueError", "(", "\"'max_fname_arg_count' must be non-negative\"", ")", "if", "(", "len", "(", "args", ")", ">", "len", "(", "compat_args", ")", ")", ":", "max_arg_count", "=", "(", "len", "(", "compat_args", ")", "+", "max_fname_arg_count", ")", "actual_arg_count", "=", "(", "len", "(", "args", ")", "+", "max_fname_arg_count", ")", "argument", "=", "(", "'argument'", "if", "(", "max_arg_count", "==", "1", ")", "else", "'arguments'", ")", "raise", "TypeError", "(", "'{fname}() takes at most {max_arg} {argument} ({given_arg} given)'", ".", "format", "(", "fname", "=", "fname", ",", "max_arg", "=", "max_arg_count", ",", "argument", "=", "argument", ",", "given_arg", "=", "actual_arg_count", ")", ")" ]
checks whether args has length of at most compat_args ; raises a typeerror styled after the built-in message when too many arguments are given .
train
true
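a small sketch of the validator failing , assuming the snippet is in scope ( it is internal to pandas , so calling it directly is illustrative only ) :
compat_args = {'axis': None}   # one compatible positional argument
try:
    _check_arg_length('func', (1, 2), max_fname_arg_count=0, compat_args=compat_args)
except TypeError as err:
    print(err)                 # func() takes at most 1 argument (2 given)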
5,328
def test_unit_state(): field = UnitStateField(required=False) assert field.clean(str(FUZZY)) assert field.clean(str(TRANSLATED)) assert field.clean(str(UNTRANSLATED)) assert field.clean(True) assert (not field.clean('True')) assert (not field.clean(False)) assert (not field.clean('False'))
[ "def", "test_unit_state", "(", ")", ":", "field", "=", "UnitStateField", "(", "required", "=", "False", ")", "assert", "field", ".", "clean", "(", "str", "(", "FUZZY", ")", ")", "assert", "field", ".", "clean", "(", "str", "(", "TRANSLATED", ")", ")", "assert", "field", ".", "clean", "(", "str", "(", "UNTRANSLATED", ")", ")", "assert", "field", ".", "clean", "(", "True", ")", "assert", "(", "not", "field", ".", "clean", "(", "'True'", ")", ")", "assert", "(", "not", "field", ".", "clean", "(", "False", ")", ")", "assert", "(", "not", "field", ".", "clean", "(", "'False'", ")", ")" ]
tests how checkbox states map to booleans .
train
false
5,330
def setup_version_redirection(config): settings = config.get_settings() redirect_enabled = settings['version_prefix_redirect_enabled'] version_prefix_redirection_enabled = asbool(redirect_enabled) route_prefix = config.route_prefix config.registry.route_prefix = route_prefix if (not version_prefix_redirection_enabled): return def _redirect_to_version_view(request): if (request.method.lower() == 'options'): return utils.reapply_cors(request, Response()) querystring = request.url[(request.url.rindex(request.path) + len(request.path)):] redirect = ('/%s%s%s' % (route_prefix, request.path, querystring)) raise HTTPTemporaryRedirect(redirect) config.route_prefix = None config.add_route(name='redirect_to_version', pattern='/{path:(?!v[0-9]+)[^\\r\\n]*}') config.add_view(view=_redirect_to_version_view, route_name='redirect_to_version', permission=NO_PERMISSION_REQUIRED) config.route_prefix = route_prefix
[ "def", "setup_version_redirection", "(", "config", ")", ":", "settings", "=", "config", ".", "get_settings", "(", ")", "redirect_enabled", "=", "settings", "[", "'version_prefix_redirect_enabled'", "]", "version_prefix_redirection_enabled", "=", "asbool", "(", "redirect_enabled", ")", "route_prefix", "=", "config", ".", "route_prefix", "config", ".", "registry", ".", "route_prefix", "=", "route_prefix", "if", "(", "not", "version_prefix_redirection_enabled", ")", ":", "return", "def", "_redirect_to_version_view", "(", "request", ")", ":", "if", "(", "request", ".", "method", ".", "lower", "(", ")", "==", "'options'", ")", ":", "return", "utils", ".", "reapply_cors", "(", "request", ",", "Response", "(", ")", ")", "querystring", "=", "request", ".", "url", "[", "(", "request", ".", "url", ".", "rindex", "(", "request", ".", "path", ")", "+", "len", "(", "request", ".", "path", ")", ")", ":", "]", "redirect", "=", "(", "'/%s%s%s'", "%", "(", "route_prefix", ",", "request", ".", "path", ",", "querystring", ")", ")", "raise", "HTTPTemporaryRedirect", "(", "redirect", ")", "config", ".", "route_prefix", "=", "None", "config", ".", "add_route", "(", "name", "=", "'redirect_to_version'", ",", "pattern", "=", "'/{path:(?!v[0-9]+)[^\\\\r\\\\n]*}'", ")", "config", ".", "add_view", "(", "view", "=", "_redirect_to_version_view", ",", "route_name", "=", "'redirect_to_version'", ",", "permission", "=", "NO_PERMISSION_REQUIRED", ")", "config", ".", "route_prefix", "=", "route_prefix" ]
add a view which redirects to the current version of the api .
train
false
5,331
def _coeff_isneg(a): if a.is_Mul: a = a.args[0] return (a.is_Number and a.is_negative)
[ "def", "_coeff_isneg", "(", "a", ")", ":", "if", "a", ".", "is_Mul", ":", "a", "=", "a", ".", "args", "[", "0", "]", "return", "(", "a", ".", "is_Number", "and", "a", ".", "is_negative", ")" ]
return true if the leading number is negative .
train
false
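a quick sketch with sympy , assuming the helper is in scope ( it lives inside sympy's core ) :
from sympy.abc import x
print(_coeff_isneg(-3*x))   # True: the leading coefficient -3 is negative
print(_coeff_isneg(3*x))    # False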
5,332
def p_command_gosub(p): p[0] = ('GOSUB', int(p[2]))
[ "def", "p_command_gosub", "(", "p", ")", ":", "p", "[", "0", "]", "=", "(", "'GOSUB'", ",", "int", "(", "p", "[", "2", "]", ")", ")" ]
command : gosub integer .
train
false
5,333
def sanitize_file_name2(name, substitute='_'): if isbytestring(name): return sanitize_file_name(name, substitute=substitute) return sanitize_file_name_unicode(name, substitute=substitute)
[ "def", "sanitize_file_name2", "(", "name", ",", "substitute", "=", "'_'", ")", ":", "if", "isbytestring", "(", "name", ")", ":", "return", "sanitize_file_name", "(", "name", ",", "substitute", "=", "substitute", ")", "return", "sanitize_file_name_unicode", "(", "name", ",", "substitute", "=", "substitute", ")" ]
sanitize filenames by removing invalid chars , dispatching on byte strings versus unicode .
train
false
5,334
def _summary_judment(rec): if config['import']['quiet']: if (rec == Recommendation.strong): return importer.action.APPLY else: action = config['import']['quiet_fallback'].as_choice({'skip': importer.action.SKIP, 'asis': importer.action.ASIS}) elif (rec == Recommendation.none): action = config['import']['none_rec_action'].as_choice({'skip': importer.action.SKIP, 'asis': importer.action.ASIS, 'ask': None}) else: return None if (action == importer.action.SKIP): print_('Skipping.') elif (action == importer.action.ASIS): print_('Importing as-is.') return action
[ "def", "_summary_judment", "(", "rec", ")", ":", "if", "config", "[", "'import'", "]", "[", "'quiet'", "]", ":", "if", "(", "rec", "==", "Recommendation", ".", "strong", ")", ":", "return", "importer", ".", "action", ".", "APPLY", "else", ":", "action", "=", "config", "[", "'import'", "]", "[", "'quiet_fallback'", "]", ".", "as_choice", "(", "{", "'skip'", ":", "importer", ".", "action", ".", "SKIP", ",", "'asis'", ":", "importer", ".", "action", ".", "ASIS", "}", ")", "elif", "(", "rec", "==", "Recommendation", ".", "none", ")", ":", "action", "=", "config", "[", "'import'", "]", "[", "'none_rec_action'", "]", ".", "as_choice", "(", "{", "'skip'", ":", "importer", ".", "action", ".", "SKIP", ",", "'asis'", ":", "importer", ".", "action", ".", "ASIS", ",", "'ask'", ":", "None", "}", ")", "else", ":", "return", "None", "if", "(", "action", "==", "importer", ".", "action", ".", "SKIP", ")", ":", "print_", "(", "'Skipping.'", ")", "elif", "(", "action", "==", "importer", ".", "action", ".", "ASIS", ")", ":", "print_", "(", "'Importing as-is.'", ")", "return", "action" ]
determines whether a decision should be made without even asking the user .
train
false
5,335
@handle_response_format @treeio_login_required @module_admin_required() def page_edit(request, page_id, response_format='html'): page = get_object_or_404(Page, pk=page_id) if request.POST: form = PageForm(request.POST, instance=page) if form.is_valid(): page = form.save() return HttpResponseRedirect(reverse('core_admin_page_view', args=[page.id])) else: form = PageForm(instance=page) return render_to_response('core/administration/page_edit', {'page': page, 'form': form}, context_instance=RequestContext(request), response_format=response_format)
[ "@", "handle_response_format", "@", "treeio_login_required", "@", "module_admin_required", "(", ")", "def", "page_edit", "(", "request", ",", "page_id", ",", "response_format", "=", "'html'", ")", ":", "page", "=", "get_object_or_404", "(", "Page", ",", "pk", "=", "page_id", ")", "if", "request", ".", "POST", ":", "form", "=", "PageForm", "(", "request", ".", "POST", ",", "instance", "=", "page", ")", "if", "form", ".", "is_valid", "(", ")", ":", "page", "=", "form", ".", "save", "(", ")", "return", "HttpResponseRedirect", "(", "reverse", "(", "'core_admin_page_view'", ",", "args", "=", "[", "page", ".", "id", "]", ")", ")", "else", ":", "form", "=", "PageForm", "(", "instance", "=", "page", ")", "return", "render_to_response", "(", "'core/administration/page_edit'", ",", "{", "'page'", ":", "page", ",", "'form'", ":", "form", "}", ",", "context_instance", "=", "RequestContext", "(", "request", ")", ",", "response_format", "=", "response_format", ")" ]
static page edit .
train
false
5,336
def pixelCollision(rect1, rect2, hitmask1, hitmask2): rect = rect1.clip(rect2) if ((rect.width == 0) or (rect.height == 0)): return False (x1, y1) = ((rect.x - rect1.x), (rect.y - rect1.y)) (x2, y2) = ((rect.x - rect2.x), (rect.y - rect2.y)) for x in range(rect.width): for y in range(rect.height): if (hitmask1[(x1 + x)][(y1 + y)] and hitmask2[(x2 + x)][(y2 + y)]): return True return False
[ "def", "pixelCollision", "(", "rect1", ",", "rect2", ",", "hitmask1", ",", "hitmask2", ")", ":", "rect", "=", "rect1", ".", "clip", "(", "rect2", ")", "if", "(", "(", "rect", ".", "width", "==", "0", ")", "or", "(", "rect", ".", "height", "==", "0", ")", ")", ":", "return", "False", "(", "x1", ",", "y1", ")", "=", "(", "(", "rect", ".", "x", "-", "rect1", ".", "x", ")", ",", "(", "rect", ".", "y", "-", "rect1", ".", "y", ")", ")", "(", "x2", ",", "y2", ")", "=", "(", "(", "rect", ".", "x", "-", "rect2", ".", "x", ")", ",", "(", "rect", ".", "y", "-", "rect2", ".", "y", ")", ")", "for", "x", "in", "range", "(", "rect", ".", "width", ")", ":", "for", "y", "in", "range", "(", "rect", ".", "height", ")", ":", "if", "(", "hitmask1", "[", "(", "x1", "+", "x", ")", "]", "[", "(", "y1", "+", "y", ")", "]", "and", "hitmask2", "[", "(", "x2", "+", "x", ")", "]", "[", "(", "y2", "+", "y", ")", "]", ")", ":", "return", "True", "return", "False" ]
checks if two objects collide and not just their rects .
train
false
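an illustrative sketch with pygame ; the 2x2 hitmasks here are hypothetical and indexed [x][y] as the snippet expects :
import pygame
r1, r2 = pygame.Rect(0, 0, 2, 2), pygame.Rect(1, 1, 2, 2)  # rects overlap at pixel (1, 1)
opaque = [[True, True], [True, True]]                      # fully opaque 2x2 sprites
print(pixelCollision(r1, r2, opaque, opaque))              # True: the shared pixel is set in both masks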
5,337
def correlation_sums(indicators, max_dim): corrsums = np.zeros((1, max_dim)) (corrsums[(0, 0)], indicators) = correlation_sum(indicators, 1) for i in range(1, max_dim): (corrsums[(0, i)], indicators) = correlation_sum(indicators, 2) return corrsums
[ "def", "correlation_sums", "(", "indicators", ",", "max_dim", ")", ":", "corrsums", "=", "np", ".", "zeros", "(", "(", "1", ",", "max_dim", ")", ")", "(", "corrsums", "[", "(", "0", ",", "0", ")", "]", ",", "indicators", ")", "=", "correlation_sum", "(", "indicators", ",", "1", ")", "for", "i", "in", "range", "(", "1", ",", "max_dim", ")", ":", "(", "corrsums", "[", "(", "0", ",", "i", ")", "]", ",", "indicators", ")", "=", "correlation_sum", "(", "indicators", ",", "2", ")", "return", "corrsums" ]
calculate all correlation sums for embedding dimensions 1:max_dim . parameters : indicators , 2d array , matrix of distance threshold indicators ; max_dim , integer , maximum embedding dimension . returns : corrsums , 1d array , correlation sums .
train
false
5,338
def parse_http(pkt): payload = pkt.getlayer(Raw).load (usr, pswd) = (None, None) if (('username' in payload) or ('password' in payload)): usr = re.search('username=(.*?)(&|$| )', payload) pswd = re.search('password=(.*?)(&|$| )', payload) if (usr is not None): usr = usr.groups(0)[0] if (pswd is not None): pswd = pswd.groups(0)[0] elif ('Authorization:' in payload): pw = re.search('Authorization: Basic (.*)', payload) if (pw.groups(0) is not None): usr = b64decode(pw.groups(0)[0]) return (usr, pswd)
[ "def", "parse_http", "(", "pkt", ")", ":", "payload", "=", "pkt", ".", "getlayer", "(", "Raw", ")", ".", "load", "(", "usr", ",", "pswd", ")", "=", "(", "None", ",", "None", ")", "if", "(", "(", "'username'", "in", "payload", ")", "or", "(", "'password'", "in", "payload", ")", ")", ":", "usr", "=", "re", ".", "search", "(", "'username=(.*?)(&|$| )'", ",", "payload", ")", "pswd", "=", "re", ".", "search", "(", "'password=(.*?)(&|$| )'", ",", "payload", ")", "if", "(", "usr", "is", "not", "None", ")", ":", "usr", "=", "usr", ".", "groups", "(", "0", ")", "[", "0", "]", "if", "(", "pswd", "is", "not", "None", ")", ":", "pswd", "=", "pswd", ".", "groups", "(", "0", ")", "[", "0", "]", "elif", "(", "'Authorization:'", "in", "payload", ")", ":", "pw", "=", "re", ".", "search", "(", "'Authorization: Basic (.*)'", ",", "payload", ")", "if", "(", "pw", ".", "groups", "(", "0", ")", "is", "not", "None", ")", ":", "usr", "=", "b64decode", "(", "pw", ".", "groups", "(", "0", ")", "[", "0", "]", ")", "return", "(", "usr", ",", "pswd", ")" ]
parse out the username/password from an http request .
train
false
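a sketch with scapy , assuming python 2 semantics ( the snippet mixes str payloads and b64decode ) :
from scapy.all import IP, TCP, Raw
pkt = IP()/TCP()/Raw(load='username=admin&password=hunter2')
print(parse_http(pkt))   # ('admin', 'hunter2')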
5,339
def ClientInit(): if (stats.STATS is None): stats.STATS = stats.StatsCollector() config_lib.SetPlatformArchContext() config_lib.ParseConfigCommandLine() log.LogInit() registry.Init()
[ "def", "ClientInit", "(", ")", ":", "if", "(", "stats", ".", "STATS", "is", "None", ")", ":", "stats", ".", "STATS", "=", "stats", ".", "StatsCollector", "(", ")", "config_lib", ".", "SetPlatformArchContext", "(", ")", "config_lib", ".", "ParseConfigCommandLine", "(", ")", "log", ".", "LogInit", "(", ")", "registry", ".", "Init", "(", ")" ]
run all startup routines for the client .
train
false
5,340
@cache_permission def can_author_translation(user, project): return check_permission(user, project, 'trans.author_translation')
[ "@", "cache_permission", "def", "can_author_translation", "(", "user", ",", "project", ")", ":", "return", "check_permission", "(", "user", ",", "project", ",", "'trans.author_translation'", ")" ]
checks whether user can author translation on given project .
train
false
5,341
def get_about_url(): return get_url('ABOUT')
[ "def", "get_about_url", "(", ")", ":", "return", "get_url", "(", "'ABOUT'", ")" ]
look up and return the about page url .
train
false
5,342
def markembling(talkative=True): colour = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) if talkative: print ('Your randomly created colour is: #%x%x%x' % colour) return colour
[ "def", "markembling", "(", "talkative", "=", "True", ")", ":", "colour", "=", "(", "random", ".", "randint", "(", "0", ",", "255", ")", ",", "random", ".", "randint", "(", "0", ",", "255", ")", ",", "random", ".", "randint", "(", "0", ",", "255", ")", ")", "if", "talkative", ":", "print", "(", "'Your randomly created colour is: #%x%x%x'", "%", "colour", ")", "return", "colour" ]
generate a random colour as a tuple and optionally print it .
train
false
5,343
def application_to_app_label(application): if isinstance(application, string_types): app_label = application.split('.')[(-1)] else: app_label = application.__name__.split('.')[(-1)] return app_label
[ "def", "application_to_app_label", "(", "application", ")", ":", "if", "isinstance", "(", "application", ",", "string_types", ")", ":", "app_label", "=", "application", ".", "split", "(", "'.'", ")", "[", "(", "-", "1", ")", "]", "else", ":", "app_label", "=", "application", ".", "__name__", ".", "split", "(", "'.'", ")", "[", "(", "-", "1", ")", "]", "return", "app_label" ]
works out the app label from either a dotted-path string or an application module .
train
false
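a sketch of both branches , assuming the snippet shares a scope with six.string_types as in the original module :
from six import string_types
print(application_to_app_label('django.contrib.auth'))   # 'auth'
import collections
print(application_to_app_label(collections))             # 'collections'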
5,344
def attachNBD(cow, flags=''): cow = abspath(cow) log('* Checking for unused /dev/nbdX device ') for i in range(0, 63): nbd = ('/dev/nbd%d' % i) if (call(['pgrep', '-f', nbd]) == 0): continue srun('modprobe nbd max-part=64') srun(('qemu-nbd %s -c %s %s' % (flags, nbd, cow))) print () return nbd raise Exception('Error: could not find unused /dev/nbdX device')
[ "def", "attachNBD", "(", "cow", ",", "flags", "=", "''", ")", ":", "cow", "=", "abspath", "(", "cow", ")", "log", "(", "'* Checking for unused /dev/nbdX device '", ")", "for", "i", "in", "range", "(", "0", ",", "63", ")", ":", "nbd", "=", "(", "'/dev/nbd%d'", "%", "i", ")", "if", "(", "call", "(", "[", "'pgrep'", ",", "'-f'", ",", "nbd", "]", ")", "==", "0", ")", ":", "continue", "srun", "(", "'modprobe nbd max-part=64'", ")", "srun", "(", "(", "'qemu-nbd %s -c %s %s'", "%", "(", "flags", ",", "nbd", ",", "cow", ")", ")", ")", "print", "(", ")", "return", "nbd", "raise", "Exception", "(", "'Error: could not find unused /dev/nbdX device'", ")" ]
attempt to attach a cow disk image and return its nbd device . flags : additional flags for qemu-nbd .
train
false
5,345
def explode_tokens(tokenlist): if getattr(tokenlist, u'exploded', False): return tokenlist result = [] for (token, string) in tokenlist: for c in string: result.append((token, c)) return _ExplodedList(result)
[ "def", "explode_tokens", "(", "tokenlist", ")", ":", "if", "getattr", "(", "tokenlist", ",", "u'exploded'", ",", "False", ")", ":", "return", "tokenlist", "result", "=", "[", "]", "for", "(", "token", ",", "string", ")", "in", "tokenlist", ":", "for", "c", "in", "string", ":", "result", ".", "append", "(", "(", "token", ",", "c", ")", ")", "return", "_ExplodedList", "(", "result", ")" ]
turn a list of (token, string) tuples into another list where each string is exactly one character .
train
true
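a sketch of the behaviour , assuming prompt_toolkit's _ExplodedList ( a list subclass flagged as already exploded ) is in scope :
tokens = [('Token.Keyword', 'if'), ('Token.Text', ' x')]
print(list(explode_tokens(tokens)))
# [('Token.Keyword', 'i'), ('Token.Keyword', 'f'), ('Token.Text', ' '), ('Token.Text', 'x')]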
5,346
def instance_extra_update_by_uuid(context, instance_uuid, updates): return IMPL.instance_extra_update_by_uuid(context, instance_uuid, updates)
[ "def", "instance_extra_update_by_uuid", "(", "context", ",", "instance_uuid", ",", "updates", ")", ":", "return", "IMPL", ".", "instance_extra_update_by_uuid", "(", "context", ",", "instance_uuid", ",", "updates", ")" ]
update the instance extra record by instance uuid .
train
false
5,348
def find_kink(curvature, angle): kinks = [] kink_index = [i for i in range(len(curvature)) if (abs(curvature[i]) < angle)] return kink_index
[ "def", "find_kink", "(", "curvature", ",", "angle", ")", ":", "kinks", "=", "[", "]", "kink_index", "=", "[", "i", "for", "i", "in", "range", "(", "len", "(", "curvature", ")", ")", "if", "(", "abs", "(", "curvature", "[", "i", "]", ")", "<", "angle", ")", "]", "return", "kink_index" ]
return the indices of contour points whose absolute curvature is below the given angle threshold .
train
false
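a self-contained sketch : indices 1 and 3 are the only points with |curvature| below the 0.1 threshold :
curvature = [0.30, 0.02, 0.45, -0.01]
print(find_kink(curvature, angle=0.1))   # [1, 3]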
5,350
@decorator def gzip(f, *args, **kwargs): data = f(*args, **kwargs) if isinstance(data, Response): content = data.data else: content = data gzip_buffer = BytesIO() gzip_file = gzip2.GzipFile(mode='wb', compresslevel=4, fileobj=gzip_buffer) gzip_file.write(content) gzip_file.close() gzip_data = gzip_buffer.getvalue() if isinstance(data, Response): data.data = gzip_data data.headers['Content-Encoding'] = 'gzip' data.headers['Content-Length'] = str(len(data.data)) return data return gzip_data
[ "@", "decorator", "def", "gzip", "(", "f", ",", "*", "args", ",", "**", "kwargs", ")", ":", "data", "=", "f", "(", "*", "args", ",", "**", "kwargs", ")", "if", "isinstance", "(", "data", ",", "Response", ")", ":", "content", "=", "data", ".", "data", "else", ":", "content", "=", "data", "gzip_buffer", "=", "BytesIO", "(", ")", "gzip_file", "=", "gzip2", ".", "GzipFile", "(", "mode", "=", "'wb'", ",", "compresslevel", "=", "4", ",", "fileobj", "=", "gzip_buffer", ")", "gzip_file", ".", "write", "(", "content", ")", "gzip_file", ".", "close", "(", ")", "gzip_data", "=", "gzip_buffer", ".", "getvalue", "(", ")", "if", "isinstance", "(", "data", ",", "Response", ")", ":", "data", ".", "data", "=", "gzip_data", "data", ".", "headers", "[", "'Content-Encoding'", "]", "=", "'gzip'", "data", ".", "headers", "[", "'Content-Length'", "]", "=", "str", "(", "len", "(", "data", ".", "data", ")", ")", "return", "data", "return", "gzip_data" ]
decorator that gzip-compresses the data returned by the wrapped function ; when a response object is returned , its content-encoding and content-length headers are updated .
train
true
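a usage sketch with flask , assuming the decorator library and the snippet's helpers ( Response , BytesIO , gzip2 aliasing the stdlib gzip module ) are in scope :
from flask import Flask, Response
app = Flask(__name__)

@app.route('/data')
@gzip
def data():
    return Response('x' * 10000)   # compressed and re-labelled before being sent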
5,352
def _create_image_assemble_error_status(status, ret, image_logs): comment = 'An error occurred while importing your image' out = None is_invalid = True status['out'] = '' try: is_invalid = False status['out'] += ('\n' + ret) for err_log in image_logs: if isinstance(err_log, dict): if ('errorDetail' in err_log): if ('code' in err_log['errorDetail']): msg = '\n{0}\n{1}: {2}'.format(err_log['error'], err_log['errorDetail']['code'], err_log['errorDetail']['message']) else: msg = '\n{0}\n{1}'.format(err_log['error'], err_log['errorDetail']['message']) comment += msg except Exception: is_invalid = True trace = traceback.format_exc() out = 'An error occurred while parsing error output:\n{0}'.format(trace) if is_invalid: _invalid(status, out=out, comment=comment) return status
[ "def", "_create_image_assemble_error_status", "(", "status", ",", "ret", ",", "image_logs", ")", ":", "comment", "=", "'An error occurred while importing your image'", "out", "=", "None", "is_invalid", "=", "True", "status", "[", "'out'", "]", "=", "''", "try", ":", "is_invalid", "=", "False", "status", "[", "'out'", "]", "+=", "(", "'\\n'", "+", "ret", ")", "for", "err_log", "in", "image_logs", ":", "if", "isinstance", "(", "err_log", ",", "dict", ")", ":", "if", "(", "'errorDetail'", "in", "err_log", ")", ":", "if", "(", "'code'", "in", "err_log", "[", "'errorDetail'", "]", ")", ":", "msg", "=", "'\\n{0}\\n{1}: {2}'", ".", "format", "(", "err_log", "[", "'error'", "]", ",", "err_log", "[", "'errorDetail'", "]", "[", "'code'", "]", ",", "err_log", "[", "'errorDetail'", "]", "[", "'message'", "]", ")", "else", ":", "msg", "=", "'\\n{0}\\n{1}'", ".", "format", "(", "err_log", "[", "'error'", "]", ",", "err_log", "[", "'errorDetail'", "]", "[", "'message'", "]", ")", "comment", "+=", "msg", "except", "Exception", ":", "is_invalid", "=", "True", "trace", "=", "traceback", ".", "format_exc", "(", ")", "out", "=", "'An error occurred while parsing error output:\\n{0}'", ".", "format", "(", "trace", ")", "if", "is_invalid", ":", "_invalid", "(", "status", ",", "out", "=", "out", ",", "comment", "=", "comment", ")", "return", "status" ]
given docker import output and its image logs , assemble an error status , collecting any errorDetail messages into the comment .
train
false
5,353
def _one_vs_one_coef(dual_coef, n_support, support_vectors): n_class = (dual_coef.shape[0] + 1) coef = [] sv_locs = np.cumsum(np.hstack([[0], n_support])) for class1 in range(n_class): sv1 = support_vectors[sv_locs[class1]:sv_locs[(class1 + 1)], :] for class2 in range((class1 + 1), n_class): sv2 = support_vectors[sv_locs[class2]:sv_locs[(class2 + 1)], :] alpha1 = dual_coef[(class2 - 1), sv_locs[class1]:sv_locs[(class1 + 1)]] alpha2 = dual_coef[class1, sv_locs[class2]:sv_locs[(class2 + 1)]] coef.append((safe_sparse_dot(alpha1, sv1) + safe_sparse_dot(alpha2, sv2))) return coef
[ "def", "_one_vs_one_coef", "(", "dual_coef", ",", "n_support", ",", "support_vectors", ")", ":", "n_class", "=", "(", "dual_coef", ".", "shape", "[", "0", "]", "+", "1", ")", "coef", "=", "[", "]", "sv_locs", "=", "np", ".", "cumsum", "(", "np", ".", "hstack", "(", "[", "[", "0", "]", ",", "n_support", "]", ")", ")", "for", "class1", "in", "range", "(", "n_class", ")", ":", "sv1", "=", "support_vectors", "[", "sv_locs", "[", "class1", "]", ":", "sv_locs", "[", "(", "class1", "+", "1", ")", "]", ",", ":", "]", "for", "class2", "in", "range", "(", "(", "class1", "+", "1", ")", ",", "n_class", ")", ":", "sv2", "=", "support_vectors", "[", "sv_locs", "[", "class2", "]", ":", "sv_locs", "[", "(", "class2", "+", "1", ")", "]", ",", ":", "]", "alpha1", "=", "dual_coef", "[", "(", "class2", "-", "1", ")", ",", "sv_locs", "[", "class1", "]", ":", "sv_locs", "[", "(", "class1", "+", "1", ")", "]", "]", "alpha2", "=", "dual_coef", "[", "class1", ",", "sv_locs", "[", "class2", "]", ":", "sv_locs", "[", "(", "class2", "+", "1", ")", "]", "]", "coef", ".", "append", "(", "(", "safe_sparse_dot", "(", "alpha1", ",", "sv1", ")", "+", "safe_sparse_dot", "(", "alpha2", ",", "sv2", ")", ")", ")", "return", "coef" ]
generate primal coefficients from dual coefficients for the one-vs-one multi class libsvm in the case of a linear kernel .
train
false
5,354
def test_feature_max_length_on_scenario_outline_keys(): feature1 = Feature.from_string(FEATURE8) feature2 = Feature.from_string(FEATURE9) assert_equals(feature1.max_length, 68) assert_equals(feature2.max_length, 68)
[ "def", "test_feature_max_length_on_scenario_outline_keys", "(", ")", ":", "feature1", "=", "Feature", ".", "from_string", "(", "FEATURE8", ")", "feature2", "=", "Feature", ".", "from_string", "(", "FEATURE9", ")", "assert_equals", "(", "feature1", ".", "max_length", ",", "68", ")", "assert_equals", "(", "feature2", ".", "max_length", ",", "68", ")" ]
the max length of a feature when the table keys of the scenario outlines are longer than the rest of the content .
train
false
5,355
def cleandir(dirname): try: shutil.rmtree(dirname) except OSError as exception: if (exception.errno != errno.ENOENT): raise exception
[ "def", "cleandir", "(", "dirname", ")", ":", "try", ":", "shutil", ".", "rmtree", "(", "dirname", ")", "except", "OSError", "as", "exception", ":", "if", "(", "exception", ".", "errno", "!=", "errno", ".", "ENOENT", ")", ":", "raise", "exception" ]
removes a complete directory tree , ignoring the error when it does not exist .
train
false
5,356
def compress_tokens(tokens): result = [tokens[0]] for tok in tokens[1:]: if ((not result[(-1)].post_tags) and (not tok.pre_tags) and (result[(-1)].annotation == tok.annotation)): compress_merge_back(result, tok) else: result.append(tok) return result
[ "def", "compress_tokens", "(", "tokens", ")", ":", "result", "=", "[", "tokens", "[", "0", "]", "]", "for", "tok", "in", "tokens", "[", "1", ":", "]", ":", "if", "(", "(", "not", "result", "[", "(", "-", "1", ")", "]", ".", "post_tags", ")", "and", "(", "not", "tok", ".", "pre_tags", ")", "and", "(", "result", "[", "(", "-", "1", ")", "]", ".", "annotation", "==", "tok", ".", "annotation", ")", ")", ":", "compress_merge_back", "(", "result", ",", "tok", ")", "else", ":", "result", ".", "append", "(", "tok", ")", "return", "result" ]
combine adjacent tokens when there is no html between the tokens .
train
true
5,357
def DA_DESeq2(input_path, out_path, mapping_fp, mapping_category, subcategory_1, subcategory_2, DESeq2_diagnostic_plots): tmp_bt = load_table(input_path) (tmp_pmf, _) = parse_mapping_file_to_dict(mapping_fp) check_mapping_file_category(tmp_bt, mapping_fp, mapping_category, subcategory_1, subcategory_2) tmp_bt.add_metadata(tmp_pmf, 'sample') (base_fname, ext) = splitext(out_path) outfile_diagnostic = join((base_fname + '_diagnostic_plots.pdf')) with tempfile.NamedTemporaryFile(dir=get_qiime_temp_dir(), prefix='QIIME-differential-abundance-temp-table-', suffix='.biom') as temp_fh: temp_fh.write(tmp_bt.to_json('forR')) temp_fh.flush() run_DESeq2(temp_fh.name, out_path, mapping_category, subcategory_1, subcategory_2, DESeq2_diagnostic_plots, outfile_diagnostic)
[ "def", "DA_DESeq2", "(", "input_path", ",", "out_path", ",", "mapping_fp", ",", "mapping_category", ",", "subcategory_1", ",", "subcategory_2", ",", "DESeq2_diagnostic_plots", ")", ":", "tmp_bt", "=", "load_table", "(", "input_path", ")", "(", "tmp_pmf", ",", "_", ")", "=", "parse_mapping_file_to_dict", "(", "mapping_fp", ")", "check_mapping_file_category", "(", "tmp_bt", ",", "mapping_fp", ",", "mapping_category", ",", "subcategory_1", ",", "subcategory_2", ")", "tmp_bt", ".", "add_metadata", "(", "tmp_pmf", ",", "'sample'", ")", "(", "base_fname", ",", "ext", ")", "=", "splitext", "(", "out_path", ")", "outfile_diagnostic", "=", "join", "(", "(", "base_fname", "+", "'_diagnostic_plots.pdf'", ")", ")", "with", "tempfile", ".", "NamedTemporaryFile", "(", "dir", "=", "get_qiime_temp_dir", "(", ")", ",", "prefix", "=", "'QIIME-differential-abundance-temp-table-'", ",", "suffix", "=", "'.biom'", ")", "as", "temp_fh", ":", "temp_fh", ".", "write", "(", "tmp_bt", ".", "to_json", "(", "'forR'", ")", ")", "temp_fh", ".", "flush", "(", ")", "run_DESeq2", "(", "temp_fh", ".", "name", ",", "out_path", ",", "mapping_category", ",", "subcategory_1", ",", "subcategory_2", ",", "DESeq2_diagnostic_plots", ",", "outfile_diagnostic", ")" ]
perform deseq2 negative binomial wald differential abundance test on a raw abundance otu matrix .
train
false
5,358
def alerting_authority(): return s3_rest_controller()
[ "def", "alerting_authority", "(", ")", ":", "return", "s3_rest_controller", "(", ")" ]
restful crud controller .
train
false
5,359
def validate_argmax_with_skipna(skipna, args, kwargs): (skipna, args) = process_skipna(skipna, args) validate_argmax(args, kwargs) return skipna
[ "def", "validate_argmax_with_skipna", "(", "skipna", ",", "args", ",", "kwargs", ")", ":", "(", "skipna", ",", "args", ")", "=", "process_skipna", "(", "skipna", ",", "args", ")", "validate_argmax", "(", "args", ",", "kwargs", ")", "return", "skipna" ]
if series.argmax is called via numpy , the skipna parameter may actually be the out argument ; process it and validate the remaining args .
train
true
5,360
def image_has_transparent_pixels(img): img = image_from_data(img) if img.isNull(): return False return imageops.has_transparent_pixels(img)
[ "def", "image_has_transparent_pixels", "(", "img", ")", ":", "img", "=", "image_from_data", "(", "img", ")", "if", "img", ".", "isNull", "(", ")", ":", "return", "False", "return", "imageops", ".", "has_transparent_pixels", "(", "img", ")" ]
return true iff the image has at least one semi-transparent pixel .
train
false
5,361
def idz_findrank(eps, m, n, matveca): (k, ra, ier) = _id.idz_findrank(eps, m, n, matveca) if ier: raise _RETCODE_ERROR return k
[ "def", "idz_findrank", "(", "eps", ",", "m", ",", "n", ",", "matveca", ")", ":", "(", "k", ",", "ra", ",", "ier", ")", "=", "_id", ".", "idz_findrank", "(", "eps", ",", "m", ",", "n", ",", "matveca", ")", "if", "ier", ":", "raise", "_RETCODE_ERROR", "return", "k" ]
estimate rank of a complex matrix to a specified relative precision using random matrix-vector multiplication .
train
false
5,362
def _apikey(): return __opts__.get('bamboohr', {}).get('apikey', None)
[ "def", "_apikey", "(", ")", ":", "return", "__opts__", ".", "get", "(", "'bamboohr'", ",", "{", "}", ")", ".", "get", "(", "'apikey'", ",", "None", ")" ]
get the api key .
train
false
5,363
def file_list_emptydirs(load): return _file_lists(load, 'empty_dirs')
[ "def", "file_list_emptydirs", "(", "load", ")", ":", "return", "_file_lists", "(", "load", ",", "'empty_dirs'", ")" ]
return a list of all empty directories on the master .
train
false
5,365
def recompose_dwi(in_dwi, in_bval, in_corrected, out_file=None): import numpy as np import nibabel as nb import os.path as op if (out_file is None): (fname, ext) = op.splitext(op.basename(in_dwi)) if (ext == u'.gz'): (fname, ext2) = op.splitext(fname) ext = (ext2 + ext) out_file = op.abspath((u'%s_eccorrect%s' % (fname, ext))) im = nb.load(in_dwi) dwidata = im.get_data() bvals = np.loadtxt(in_bval) dwis = np.where((bvals != 0))[0].tolist() if (len(dwis) != len(in_corrected)): raise RuntimeError(u'Length of DWIs in b-values table and aftercorrection should match') for (bindex, dwi) in zip(dwis, in_corrected): dwidata[..., bindex] = nb.load(dwi).get_data() nb.Nifti1Image(dwidata, im.affine, im.header).to_filename(out_file) return out_file
[ "def", "recompose_dwi", "(", "in_dwi", ",", "in_bval", ",", "in_corrected", ",", "out_file", "=", "None", ")", ":", "import", "numpy", "as", "np", "import", "nibabel", "as", "nb", "import", "os", ".", "path", "as", "op", "if", "(", "out_file", "is", "None", ")", ":", "(", "fname", ",", "ext", ")", "=", "op", ".", "splitext", "(", "op", ".", "basename", "(", "in_dwi", ")", ")", "if", "(", "ext", "==", "u'.gz'", ")", ":", "(", "fname", ",", "ext2", ")", "=", "op", ".", "splitext", "(", "fname", ")", "ext", "=", "(", "ext2", "+", "ext", ")", "out_file", "=", "op", ".", "abspath", "(", "(", "u'%s_eccorrect%s'", "%", "(", "fname", ",", "ext", ")", ")", ")", "im", "=", "nb", ".", "load", "(", "in_dwi", ")", "dwidata", "=", "im", ".", "get_data", "(", ")", "bvals", "=", "np", ".", "loadtxt", "(", "in_bval", ")", "dwis", "=", "np", ".", "where", "(", "(", "bvals", "!=", "0", ")", ")", "[", "0", "]", ".", "tolist", "(", ")", "if", "(", "len", "(", "dwis", ")", "!=", "len", "(", "in_corrected", ")", ")", ":", "raise", "RuntimeError", "(", "u'Length of DWIs in b-values table and aftercorrection should match'", ")", "for", "(", "bindex", ",", "dwi", ")", "in", "zip", "(", "dwis", ",", "in_corrected", ")", ":", "dwidata", "[", "...", ",", "bindex", "]", "=", "nb", ".", "load", "(", "dwi", ")", ".", "get_data", "(", ")", "nb", ".", "Nifti1Image", "(", "dwidata", ",", "im", ".", "affine", ",", "im", ".", "header", ")", ".", "to_filename", "(", "out_file", ")", "return", "out_file" ]
recompose the dmri data according to the b-values table after ec correction .
train
false
5,367
def listdir(path): if (not hasattr(sys, 'frozen')): return os.listdir(path) (zipPath, archivePath) = splitZip(path) if (archivePath is None): return os.listdir(path) with zipfile.ZipFile(zipPath, 'r') as zipobj: contents = zipobj.namelist() results = set() for name in contents: if (name.startswith(archivePath) and (len(name) > len(archivePath))): name = name[len(archivePath):].split('/')[0] results.add(name) return list(results)
[ "def", "listdir", "(", "path", ")", ":", "if", "(", "not", "hasattr", "(", "sys", ",", "'frozen'", ")", ")", ":", "return", "os", ".", "listdir", "(", "path", ")", "(", "zipPath", ",", "archivePath", ")", "=", "splitZip", "(", "path", ")", "if", "(", "archivePath", "is", "None", ")", ":", "return", "os", ".", "listdir", "(", "path", ")", "with", "zipfile", ".", "ZipFile", "(", "zipPath", ",", "'r'", ")", "as", "zipobj", ":", "contents", "=", "zipobj", ".", "namelist", "(", ")", "results", "=", "set", "(", ")", "for", "name", "in", "contents", ":", "if", "(", "name", ".", "startswith", "(", "archivePath", ")", "and", "(", "len", "(", "name", ")", ">", "len", "(", "archivePath", ")", ")", ")", ":", "name", "=", "name", "[", "len", "(", "archivePath", ")", ":", "]", ".", "split", "(", "'/'", ")", "[", "0", "]", "results", ".", "add", "(", "name", ")", "return", "list", "(", "results", ")" ]
list directory contents , transparently handling paths that point inside a zip archive when running frozen .
train
false
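a sketch of the zip-aware branch , assuming the sibling helper splitZip ( not shown ) splits 'demo.zip/pkg' into the archive path and the inner prefix ; setting sys.frozen by hand is only for illustration :
import sys, zipfile
with zipfile.ZipFile('demo.zip', 'w') as z:
    z.writestr('pkg/a.py', '')
    z.writestr('pkg/sub/b.py', '')
sys.frozen = True                       # normally set by the freezer (py2exe etc.)
print(sorted(listdir('demo.zip/pkg')))  # ['a.py', 'sub']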
5,368
def quota_get_all_by_project(context, project_id): return IMPL.quota_get_all_by_project(context, project_id)
[ "def", "quota_get_all_by_project", "(", "context", ",", "project_id", ")", ":", "return", "IMPL", ".", "quota_get_all_by_project", "(", "context", ",", "project_id", ")" ]
retrieve all quotas associated with a given project .
train
false
5,369
def get_tab_by_locator(tab_list, usage_key_string): tab_location = UsageKey.from_string(usage_key_string) item = modulestore().get_item(tab_location) static_tab = StaticTab(name=item.display_name, url_slug=item.location.name) return CourseTabList.get_tab_by_id(tab_list, static_tab.tab_id)
[ "def", "get_tab_by_locator", "(", "tab_list", ",", "usage_key_string", ")", ":", "tab_location", "=", "UsageKey", ".", "from_string", "(", "usage_key_string", ")", "item", "=", "modulestore", "(", ")", ".", "get_item", "(", "tab_location", ")", "static_tab", "=", "StaticTab", "(", "name", "=", "item", ".", "display_name", ",", "url_slug", "=", "item", ".", "location", ".", "name", ")", "return", "CourseTabList", ".", "get_tab_by_id", "(", "tab_list", ",", "static_tab", ".", "tab_id", ")" ]
look for a tab with the specified locator .
train
false
5,370
def archive_as_tarball(source_dir, dest_dir, tarball_name=None, compression='bz2', verbose=True): tarball_name = get_archive_tarball_name(source_dir, tarball_name, compression) if (not os.path.isabs(tarball_name)): tarball_path = os.path.join(dest_dir, tarball_name) else: tarball_path = tarball_name if verbose: logging.debug(('Archiving %s as %s' % (source_dir, tarball_path))) os.chdir(os.path.dirname(source_dir)) tarball = tarfile.TarFile(name=tarball_path, mode='w') tarball = tarball.open(name=tarball_path, mode=('w:%s' % compression)) tarball.add(os.path.basename(source_dir)) tarball.close()
[ "def", "archive_as_tarball", "(", "source_dir", ",", "dest_dir", ",", "tarball_name", "=", "None", ",", "compression", "=", "'bz2'", ",", "verbose", "=", "True", ")", ":", "tarball_name", "=", "get_archive_tarball_name", "(", "source_dir", ",", "tarball_name", ",", "compression", ")", "if", "(", "not", "os", ".", "path", ".", "isabs", "(", "tarball_name", ")", ")", ":", "tarball_path", "=", "os", ".", "path", ".", "join", "(", "dest_dir", ",", "tarball_name", ")", "else", ":", "tarball_path", "=", "tarball_name", "if", "verbose", ":", "logging", ".", "debug", "(", "(", "'Archiving %s as %s'", "%", "(", "source_dir", ",", "tarball_path", ")", ")", ")", "os", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "source_dir", ")", ")", "tarball", "=", "tarfile", ".", "TarFile", "(", "name", "=", "tarball_path", ",", "mode", "=", "'w'", ")", "tarball", "=", "tarball", ".", "open", "(", "name", "=", "tarball_path", ",", "mode", "=", "(", "'w:%s'", "%", "compression", ")", ")", "tarball", ".", "add", "(", "os", ".", "path", ".", "basename", "(", "source_dir", ")", ")", "tarball", ".", "close", "(", ")" ]
saves the given source directory to the given destination as a tarball ; if the name of the archive is omitted , it is derived from the source directory .
train
false
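a usage sketch ; get_archive_tarball_name is a sibling helper that derives the name from the source directory when omitted :
archive_as_tarball('/tmp/mydata', '/tmp/backups')
# expected to produce /tmp/backups/mydata.tar.bz2 with bz2 compression (the default)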
5,372
def test_bc_init(): ratio = 1.0 bc = BalanceCascade(ratio=ratio, random_state=RND_SEED) assert_equal(bc.ratio, ratio) assert_equal(bc.bootstrap, True) assert_equal(bc.n_max_subset, None) assert_equal(bc.random_state, RND_SEED)
[ "def", "test_bc_init", "(", ")", ":", "ratio", "=", "1.0", "bc", "=", "BalanceCascade", "(", "ratio", "=", "ratio", ",", "random_state", "=", "RND_SEED", ")", "assert_equal", "(", "bc", ".", "ratio", ",", "ratio", ")", "assert_equal", "(", "bc", ".", "bootstrap", ",", "True", ")", "assert_equal", "(", "bc", ".", "n_max_subset", ",", "None", ")", "assert_equal", "(", "bc", ".", "random_state", ",", "RND_SEED", ")" ]
test the initialisation of the object .
train
false
5,373
def maxzerodown(x): x = np.asarray(x) cond1 = (x[:(-1)] > 0) cond2 = (x[1:] < 0) allzeros = (np.nonzero(((cond1 & cond2) | (x[1:] == 0)))[0] + 1) if (x[(-1)] <= 0): maxz = max(allzeros) else: maxz = None return (maxz, allzeros)
[ "def", "maxzerodown", "(", "x", ")", ":", "x", "=", "np", ".", "asarray", "(", "x", ")", "cond1", "=", "(", "x", "[", ":", "(", "-", "1", ")", "]", ">", "0", ")", "cond2", "=", "(", "x", "[", "1", ":", "]", "<", "0", ")", "allzeros", "=", "(", "np", ".", "nonzero", "(", "(", "(", "cond1", "&", "cond2", ")", "|", "(", "x", "[", "1", ":", "]", "==", "0", ")", ")", ")", "[", "0", "]", "+", "1", ")", "if", "(", "x", "[", "(", "-", "1", ")", "]", "<=", "0", ")", ":", "maxz", "=", "max", "(", "allzeros", ")", "else", ":", "maxz", "=", "None", "return", "(", "maxz", ",", "allzeros", ")" ]
find all down zero crossings (positive to negative) and return the index of the highest ; not used anymore .
train
false
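a self-contained sketch : the signal crosses from positive to negative just after indices 0 and 2 :
import numpy as np
x = np.array([1.0, -1.0, 2.0, -2.0])
print(maxzerodown(x))   # (3, array([1, 3]))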
5,375
def p_statement_bad(p): print ('MALFORMED STATEMENT AT LINE %s' % p[1]) p[0] = None p.parser.error = 1
[ "def", "p_statement_bad", "(", "p", ")", ":", "print", "(", "'MALFORMED STATEMENT AT LINE %s'", "%", "p", "[", "1", "]", ")", "p", "[", "0", "]", "=", "None", "p", ".", "parser", ".", "error", "=", "1" ]
statement : integer error newline .
train
false
5,377
def limit_domains(url, limit_dict): for (domain, limiter) in limit_dict.items(): if (domain in url): limiter() break
[ "def", "limit_domains", "(", "url", ",", "limit_dict", ")", ":", "for", "(", "domain", ",", "limiter", ")", "in", "limit_dict", ".", "items", "(", ")", ":", "if", "(", "domain", "in", "url", ")", ":", "limiter", "(", ")", "break" ]
if this url matches a domain in limit_dict , call that domain's rate limiter .
train
false
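a self-contained sketch using sleep-based limiters ; the domains and delays are hypothetical :
from time import sleep
limiters = {'api.github.com': lambda: sleep(1.0), 'example.com': lambda: sleep(0.1)}
limit_domains('https://api.github.com/repos', limiters)   # sleeps 1s, then returns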
5,379
def test_fontconfig_preamble(): plt.rcParams[u'text.usetex'] = True tm1 = TexManager() font_config1 = tm1.get_font_config() plt.rcParams[u'text.latex.preamble'] = [u'\\usepackage{txfonts}'] tm2 = TexManager() font_config2 = tm2.get_font_config() assert (font_config1 != font_config2)
[ "def", "test_fontconfig_preamble", "(", ")", ":", "plt", ".", "rcParams", "[", "u'text.usetex'", "]", "=", "True", "tm1", "=", "TexManager", "(", ")", "font_config1", "=", "tm1", ".", "get_font_config", "(", ")", "plt", ".", "rcParams", "[", "u'text.latex.preamble'", "]", "=", "[", "u'\\\\usepackage{txfonts}'", "]", "tm2", "=", "TexManager", "(", ")", "font_config2", "=", "tm2", ".", "get_font_config", "(", ")", "assert", "(", "font_config1", "!=", "font_config2", ")" ]
test that the preamble is included in _fontconfig .
train
false
5,380
def _partial_fit_binary(estimator, X, y): estimator.partial_fit(X, y, np.array((0, 1))) return estimator
[ "def", "_partial_fit_binary", "(", "estimator", ",", "X", ",", "y", ")", ":", "estimator", ".", "partial_fit", "(", "X", ",", "y", ",", "np", ".", "array", "(", "(", "0", ",", "1", ")", ")", ")", "return", "estimator" ]
partially fit a single binary estimator .
train
false
5,382
def new(rsa_key): return PKCS115_SigScheme(rsa_key)
[ "def", "new", "(", "rsa_key", ")", ":", "return", "PKCS115_SigScheme", "(", "rsa_key", ")" ]
return a pkcs#1 v1.5 signature scheme object for the given rsa key .
train
false
5,383
def is_form_submitted(): return (request and (request.method in ('PUT', 'POST')))
[ "def", "is_form_submitted", "(", ")", ":", "return", "(", "request", "and", "(", "request", ".", "method", "in", "(", "'PUT'", ",", "'POST'", ")", ")", ")" ]
check if current method is put or post .
train
false
5,384
def SafeFormatMessage(eventLogRecord, logType=None): if (logType is None): logType = 'Application' try: return FormatMessage(eventLogRecord, logType) except win32api.error: if (eventLogRecord.StringInserts is None): desc = '' else: desc = u', '.join(eventLogRecord.StringInserts) return (u'<The description for Event ID ( %d ) in Source ( %r ) could not be found. It contains the following insertion string(s):%r.>' % (winerror.HRESULT_CODE(eventLogRecord.EventID), eventLogRecord.SourceName, desc))
[ "def", "SafeFormatMessage", "(", "eventLogRecord", ",", "logType", "=", "None", ")", ":", "if", "(", "logType", "is", "None", ")", ":", "logType", "=", "'Application'", "try", ":", "return", "FormatMessage", "(", "eventLogRecord", ",", "logType", ")", "except", "win32api", ".", "error", ":", "if", "(", "eventLogRecord", ".", "StringInserts", "is", "None", ")", ":", "desc", "=", "''", "else", ":", "desc", "=", "u', '", ".", "join", "(", "eventLogRecord", ".", "StringInserts", ")", "return", "(", "u'<The description for Event ID ( %d ) in Source ( %r ) could not be found. It contains the following insertion string(s):%r.>'", "%", "(", "winerror", ".", "HRESULT_CODE", "(", "eventLogRecord", ".", "EventID", ")", ",", "eventLogRecord", ".", "SourceName", ",", "desc", ")", ")" ]
as for formatmessage , but returns a fallback description instead of failing when the event message cannot be found .
train
false
5,385
def DropPrivileges(): if config_lib.CONFIG['Server.username']: try: os.setuid(pwd.getpwnam(config_lib.CONFIG['Server.username']).pw_uid) except (KeyError, OSError): logging.exception('Unable to switch to user %s', config_lib.CONFIG['Server.username']) raise
[ "def", "DropPrivileges", "(", ")", ":", "if", "config_lib", ".", "CONFIG", "[", "'Server.username'", "]", ":", "try", ":", "os", ".", "setuid", "(", "pwd", ".", "getpwnam", "(", "config_lib", ".", "CONFIG", "[", "'Server.username'", "]", ")", ".", "pw_uid", ")", "except", "(", "KeyError", ",", "OSError", ")", ":", "logging", ".", "exception", "(", "'Unable to switch to user %s'", ",", "config_lib", ".", "CONFIG", "[", "'Server.username'", "]", ")", "raise" ]
attempt to drop privileges if required .
train
false
5,386
def gitlab_merge_request(registry, xml_parent, data): ghprb = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.gitlab.GitlabBuildTrigger') if (not data.get('cron', None)): raise jenkins_jobs.errors.JenkinsJobsException('gitlab-merge-request is missing "cron"') if (not data.get('project-path', None)): raise jenkins_jobs.errors.JenkinsJobsException('gitlab-merge-request is missing "project-path"') XML.SubElement(ghprb, 'spec').text = data.get('cron') XML.SubElement(ghprb, '__cron').text = data.get('cron') XML.SubElement(ghprb, '__projectPath').text = data.get('project-path')
[ "def", "gitlab_merge_request", "(", "registry", ",", "xml_parent", ",", "data", ")", ":", "ghprb", "=", "XML", ".", "SubElement", "(", "xml_parent", ",", "'org.jenkinsci.plugins.gitlab.GitlabBuildTrigger'", ")", "if", "(", "not", "data", ".", "get", "(", "'cron'", ",", "None", ")", ")", ":", "raise", "jenkins_jobs", ".", "errors", ".", "JenkinsJobsException", "(", "'gitlab-merge-request is missing \"cron\"'", ")", "if", "(", "not", "data", ".", "get", "(", "'project-path'", ",", "None", ")", ")", ":", "raise", "jenkins_jobs", ".", "errors", ".", "JenkinsJobsException", "(", "'gitlab-merge-request is missing \"project-path\"'", ")", "XML", ".", "SubElement", "(", "ghprb", ",", "'spec'", ")", ".", "text", "=", "data", ".", "get", "(", "'cron'", ")", "XML", ".", "SubElement", "(", "ghprb", ",", "'__cron'", ")", ".", "text", "=", "data", ".", "get", "(", "'cron'", ")", "XML", ".", "SubElement", "(", "ghprb", ",", "'__projectPath'", ")", ".", "text", "=", "data", ".", "get", "(", "'project-path'", ")" ]
yaml: gitlab-merge-request build merge requests in gitlab and report results .
train
false
5,387
def convert_LinkProperty(model, prop, kwargs): kwargs['validators'].append(validators.url()) return get_TextField(kwargs)
[ "def", "convert_LinkProperty", "(", "model", ",", "prop", ",", "kwargs", ")", ":", "kwargs", "[", "'validators'", "]", ".", "append", "(", "validators", ".", "url", "(", ")", ")", "return", "get_TextField", "(", "kwargs", ")" ]
returns a form field for a db.LinkProperty , adding url validation .
train
false
5,388
def is_secure_transport(uri): if os.environ.get(u'OAUTHLIB_INSECURE_TRANSPORT'): return True return uri.lower().startswith(u'https://')
[ "def", "is_secure_transport", "(", "uri", ")", ":", "if", "os", ".", "environ", ".", "get", "(", "u'OAUTHLIB_INSECURE_TRANSPORT'", ")", ":", "return", "True", "return", "uri", ".", "lower", "(", ")", ".", "startswith", "(", "u'https://'", ")" ]
check if the uri is over ssl .
train
false
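a self-contained sketch of the check and its environment override :
import os
print(is_secure_transport('https://api.example.com/token'))   # True
print(is_secure_transport('http://api.example.com/token'))    # False
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
print(is_secure_transport('http://api.example.com/token'))    # True: override for local testing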
5,390
def percentile(image, selem, out=None, mask=None, shift_x=False, shift_y=False, p0=0): return _apply(percentile_cy._percentile, image, selem, out=out, mask=mask, shift_x=shift_x, shift_y=shift_y, p0=p0, p1=0.0)
[ "def", "percentile", "(", "image", ",", "selem", ",", "out", "=", "None", ",", "mask", "=", "None", ",", "shift_x", "=", "False", ",", "shift_y", "=", "False", ",", "p0", "=", "0", ")", ":", "return", "_apply", "(", "percentile_cy", ".", "_percentile", ",", "image", ",", "selem", ",", "out", "=", "out", ",", "mask", "=", "mask", ",", "shift_x", "=", "shift_x", ",", "shift_y", "=", "shift_y", ",", "p0", "=", "p0", ",", "p1", "=", "0.0", ")" ]
return the local percentile (given by p0) of an image over the given structuring element .
train
false