id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
19,563
def get_url_key(server_url): return (ASSOCIATIONS_KEY_PREFIX + server_url)
[ "def", "get_url_key", "(", "server_url", ")", ":", "return", "(", "ASSOCIATIONS_KEY_PREFIX", "+", "server_url", ")" ]
returns the url key for the given server_url .
train
false
19,564
def ensure_sequence_filter(data): if (not isinstance(data, (list, tuple, set, dict))): return [data] return data
[ "def", "ensure_sequence_filter", "(", "data", ")", ":", "if", "(", "not", "isinstance", "(", "data", ",", "(", "list", ",", "tuple", ",", "set", ",", "dict", ")", ")", ")", ":", "return", "[", "data", "]", "return", "data" ]
ensure sequenced data .
train
true
19,567
def retrieve_password_from_keyring(credential_id, username): try: import keyring return keyring.get_password(credential_id, username) except ImportError: log.error('USE_KEYRING configured as a password, but no keyring module is installed') return False
[ "def", "retrieve_password_from_keyring", "(", "credential_id", ",", "username", ")", ":", "try", ":", "import", "keyring", "return", "keyring", ".", "get_password", "(", "credential_id", ",", "username", ")", "except", "ImportError", ":", "log", ".", "error", "(", "'USE_KEYRING configured as a password, but no keyring module is installed'", ")", "return", "False" ]
retrieve particular users password for a specified credential set from system keyring .
train
true
19,568
def kill_process_tree(logger, pid): try: root_process = psutil.Process(pid) except psutil.NoSuchProcess: logger.warn(u'PID: {} does not exist'.format(pid)) return descendant_processes = [x for x in root_process.children(recursive=True) if x.is_running()] if (len(descendant_processes) != 0): logger.warn(u'Terminating descendant processes of {} PID: {}'.format(root_process.cmdline(), root_process.pid)) temp_processes = descendant_processes[:] for descendant in temp_processes: logger.warn(u'Terminating descendant process {} PID: {}'.format(descendant.cmdline(), descendant.pid)) try: kill_using_shell(descendant.pid, signal.SIGTERM) except psutil.NoSuchProcess: descendant_processes.remove(descendant) logger.warn(u'Waiting up to {}s for processes to exit...'.format(TIME_TO_WAIT_AFTER_SIGTERM)) try: psutil.wait_procs(descendant_processes, TIME_TO_WAIT_AFTER_SIGTERM) logger.warn(u'Done waiting') except psutil.TimeoutExpired: logger.warn(u'Ran out of time while waiting for processes to exit') descendant_processes = [x for x in root_process.children(recursive=True) if x.is_running()] if (len(descendant_processes) > 0): temp_processes = descendant_processes[:] for descendant in temp_processes: logger.warn(u'Killing descendant process {} PID: {}'.format(descendant.cmdline(), descendant.pid)) try: kill_using_shell(descendant.pid, signal.SIGTERM) descendant.wait() except psutil.NoSuchProcess: descendant_processes.remove(descendant) logger.warn(u'Killed all descendant processes of {} PID: {}'.format(root_process.cmdline(), root_process.pid)) else: logger.debug(u'There are no descendant processes to kill')
[ "def", "kill_process_tree", "(", "logger", ",", "pid", ")", ":", "try", ":", "root_process", "=", "psutil", ".", "Process", "(", "pid", ")", "except", "psutil", ".", "NoSuchProcess", ":", "logger", ".", "warn", "(", "u'PID: {} does not exist'", ".", "format", "(", "pid", ")", ")", "return", "descendant_processes", "=", "[", "x", "for", "x", "in", "root_process", ".", "children", "(", "recursive", "=", "True", ")", "if", "x", ".", "is_running", "(", ")", "]", "if", "(", "len", "(", "descendant_processes", ")", "!=", "0", ")", ":", "logger", ".", "warn", "(", "u'Terminating descendant processes of {} PID: {}'", ".", "format", "(", "root_process", ".", "cmdline", "(", ")", ",", "root_process", ".", "pid", ")", ")", "temp_processes", "=", "descendant_processes", "[", ":", "]", "for", "descendant", "in", "temp_processes", ":", "logger", ".", "warn", "(", "u'Terminating descendant process {} PID: {}'", ".", "format", "(", "descendant", ".", "cmdline", "(", ")", ",", "descendant", ".", "pid", ")", ")", "try", ":", "kill_using_shell", "(", "descendant", ".", "pid", ",", "signal", ".", "SIGTERM", ")", "except", "psutil", ".", "NoSuchProcess", ":", "descendant_processes", ".", "remove", "(", "descendant", ")", "logger", ".", "warn", "(", "u'Waiting up to {}s for processes to exit...'", ".", "format", "(", "TIME_TO_WAIT_AFTER_SIGTERM", ")", ")", "try", ":", "psutil", ".", "wait_procs", "(", "descendant_processes", ",", "TIME_TO_WAIT_AFTER_SIGTERM", ")", "logger", ".", "warn", "(", "u'Done waiting'", ")", "except", "psutil", ".", "TimeoutExpired", ":", "logger", ".", "warn", "(", "u'Ran out of time while waiting for processes to exit'", ")", "descendant_processes", "=", "[", "x", "for", "x", "in", "root_process", ".", "children", "(", "recursive", "=", "True", ")", "if", "x", ".", "is_running", "(", ")", "]", "if", "(", "len", "(", "descendant_processes", ")", ">", "0", ")", ":", "temp_processes", "=", "descendant_processes", "[", ":", "]", "for", "descendant", "in", 
"temp_processes", ":", "logger", ".", "warn", "(", "u'Killing descendant process {} PID: {}'", ".", "format", "(", "descendant", ".", "cmdline", "(", ")", ",", "descendant", ".", "pid", ")", ")", "try", ":", "kill_using_shell", "(", "descendant", ".", "pid", ",", "signal", ".", "SIGTERM", ")", "descendant", ".", "wait", "(", ")", "except", "psutil", ".", "NoSuchProcess", ":", "descendant_processes", ".", "remove", "(", "descendant", ")", "logger", ".", "warn", "(", "u'Killed all descendant processes of {} PID: {}'", ".", "format", "(", "root_process", ".", "cmdline", "(", ")", ",", "root_process", ".", "pid", ")", ")", "else", ":", "logger", ".", "debug", "(", "u'There are no descendant processes to kill'", ")" ]
signal a process and all of its children .
train
false
19,572
def parse_qual_scores(qual_files): qual_mappings = {} for qual_file in qual_files: qual_mappings.update(parse_qual_score(qual_file)) return qual_mappings
[ "def", "parse_qual_scores", "(", "qual_files", ")", ":", "qual_mappings", "=", "{", "}", "for", "qual_file", "in", "qual_files", ":", "qual_mappings", ".", "update", "(", "parse_qual_score", "(", "qual_file", ")", ")", "return", "qual_mappings" ]
load qual scores into dict of {id:qual_scores} .
train
false
19,573
def safe_close(fd): try: os.close(fd) except Exception: LOGGER.exception('Error while closing FD')
[ "def", "safe_close", "(", "fd", ")", ":", "try", ":", "os", ".", "close", "(", "fd", ")", "except", "Exception", ":", "LOGGER", ".", "exception", "(", "'Error while closing FD'", ")" ]
close a file descriptor .
train
false
19,574
def test_smote_bad_ratio(): ratio = (-1.0) smote = SMOTETomek(ratio=ratio) assert_raises(ValueError, smote.fit, X, Y) ratio = 100.0 smote = SMOTETomek(ratio=ratio) assert_raises(ValueError, smote.fit, X, Y) ratio = 'rnd' smote = SMOTETomek(ratio=ratio) assert_raises(ValueError, smote.fit, X, Y) ratio = [0.5, 0.5] smote = SMOTETomek(ratio=ratio) assert_raises(ValueError, smote.fit, X, Y)
[ "def", "test_smote_bad_ratio", "(", ")", ":", "ratio", "=", "(", "-", "1.0", ")", "smote", "=", "SMOTETomek", "(", "ratio", "=", "ratio", ")", "assert_raises", "(", "ValueError", ",", "smote", ".", "fit", ",", "X", ",", "Y", ")", "ratio", "=", "100.0", "smote", "=", "SMOTETomek", "(", "ratio", "=", "ratio", ")", "assert_raises", "(", "ValueError", ",", "smote", ".", "fit", ",", "X", ",", "Y", ")", "ratio", "=", "'rnd'", "smote", "=", "SMOTETomek", "(", "ratio", "=", "ratio", ")", "assert_raises", "(", "ValueError", ",", "smote", ".", "fit", ",", "X", ",", "Y", ")", "ratio", "=", "[", "0.5", ",", "0.5", "]", "smote", "=", "SMOTETomek", "(", "ratio", "=", "ratio", ")", "assert_raises", "(", "ValueError", ",", "smote", ".", "fit", ",", "X", ",", "Y", ")" ]
test either if an error is raised with a wrong decimal value for the ratio .
train
false
19,575
def semi_rel2reldict(pairs, window=5, trace=False): result = [] while (len(pairs) > 2): reldict = defaultdict(str) reldict['lcon'] = _join(pairs[0][0][(- window):]) reldict['subjclass'] = pairs[0][1].label() reldict['subjtext'] = _join(pairs[0][1].leaves()) reldict['subjsym'] = list2sym(pairs[0][1].leaves()) reldict['filler'] = _join(pairs[1][0]) reldict['untagged_filler'] = _join(pairs[1][0], untag=True) reldict['objclass'] = pairs[1][1].label() reldict['objtext'] = _join(pairs[1][1].leaves()) reldict['objsym'] = list2sym(pairs[1][1].leaves()) reldict['rcon'] = _join(pairs[2][0][:window]) if trace: print(('(%s(%s, %s)' % (reldict['untagged_filler'], reldict['subjclass'], reldict['objclass']))) result.append(reldict) pairs = pairs[1:] return result
[ "def", "semi_rel2reldict", "(", "pairs", ",", "window", "=", "5", ",", "trace", "=", "False", ")", ":", "result", "=", "[", "]", "while", "(", "len", "(", "pairs", ")", ">", "2", ")", ":", "reldict", "=", "defaultdict", "(", "str", ")", "reldict", "[", "'lcon'", "]", "=", "_join", "(", "pairs", "[", "0", "]", "[", "0", "]", "[", "(", "-", "window", ")", ":", "]", ")", "reldict", "[", "'subjclass'", "]", "=", "pairs", "[", "0", "]", "[", "1", "]", ".", "label", "(", ")", "reldict", "[", "'subjtext'", "]", "=", "_join", "(", "pairs", "[", "0", "]", "[", "1", "]", ".", "leaves", "(", ")", ")", "reldict", "[", "'subjsym'", "]", "=", "list2sym", "(", "pairs", "[", "0", "]", "[", "1", "]", ".", "leaves", "(", ")", ")", "reldict", "[", "'filler'", "]", "=", "_join", "(", "pairs", "[", "1", "]", "[", "0", "]", ")", "reldict", "[", "'untagged_filler'", "]", "=", "_join", "(", "pairs", "[", "1", "]", "[", "0", "]", ",", "untag", "=", "True", ")", "reldict", "[", "'objclass'", "]", "=", "pairs", "[", "1", "]", "[", "1", "]", ".", "label", "(", ")", "reldict", "[", "'objtext'", "]", "=", "_join", "(", "pairs", "[", "1", "]", "[", "1", "]", ".", "leaves", "(", ")", ")", "reldict", "[", "'objsym'", "]", "=", "list2sym", "(", "pairs", "[", "1", "]", "[", "1", "]", ".", "leaves", "(", ")", ")", "reldict", "[", "'rcon'", "]", "=", "_join", "(", "pairs", "[", "2", "]", "[", "0", "]", "[", ":", "window", "]", ")", "if", "trace", ":", "print", "(", "(", "'(%s(%s, %s)'", "%", "(", "reldict", "[", "'untagged_filler'", "]", ",", "reldict", "[", "'subjclass'", "]", ",", "reldict", "[", "'objclass'", "]", ")", ")", ")", "result", ".", "append", "(", "reldict", ")", "pairs", "=", "pairs", "[", "1", ":", "]", "return", "result" ]
converts the pairs generated by tree2semi_rel into a reldict: a dictionary which stores information about the subject and object nes plus the filler between them .
train
false
19,578
@register.inclusion_tag('inclusion.html') def inclusion_no_params(): return {'result': 'inclusion_no_params - Expected result'}
[ "@", "register", ".", "inclusion_tag", "(", "'inclusion.html'", ")", "def", "inclusion_no_params", "(", ")", ":", "return", "{", "'result'", ":", "'inclusion_no_params - Expected result'", "}" ]
expected inclusion_no_params __doc__ .
train
false
19,579
def nova_except_format(logical_line): if logical_line.startswith('except:'): (yield (6, "N201: no 'except:' at least use 'except Exception:'"))
[ "def", "nova_except_format", "(", "logical_line", ")", ":", "if", "logical_line", ".", "startswith", "(", "'except:'", ")", ":", "(", "yield", "(", "6", ",", "\"N201: no 'except:' at least use 'except Exception:'\"", ")", ")" ]
check for except: .
train
false
19,580
def datetime_from_rfc822(datetime_str): return datetime.fromtimestamp(mktime_tz(parsedate_tz(datetime_str)), pytz.utc)
[ "def", "datetime_from_rfc822", "(", "datetime_str", ")", ":", "return", "datetime", ".", "fromtimestamp", "(", "mktime_tz", "(", "parsedate_tz", "(", "datetime_str", ")", ")", ",", "pytz", ".", "utc", ")" ]
turns an rfc822 formatted date into a datetime object .
train
false
19,581
def write_record(record, fileobj): if hasattr(record, u'quality'): recstr = u'@{name}\n{sequence}\n+\n{quality}\n'.format(name=record.name, sequence=record.sequence, quality=record.quality) else: recstr = u'>{name}\n{sequence}\n'.format(name=record.name, sequence=record.sequence) try: fileobj.write(bytes(recstr, u'ascii')) except TypeError: fileobj.write(recstr)
[ "def", "write_record", "(", "record", ",", "fileobj", ")", ":", "if", "hasattr", "(", "record", ",", "u'quality'", ")", ":", "recstr", "=", "u'@{name}\\n{sequence}\\n+\\n{quality}\\n'", ".", "format", "(", "name", "=", "record", ".", "name", ",", "sequence", "=", "record", ".", "sequence", ",", "quality", "=", "record", ".", "quality", ")", "else", ":", "recstr", "=", "u'>{name}\\n{sequence}\\n'", ".", "format", "(", "name", "=", "record", ".", "name", ",", "sequence", "=", "record", ".", "sequence", ")", "try", ":", "fileobj", ".", "write", "(", "bytes", "(", "recstr", ",", "u'ascii'", ")", ")", "except", "TypeError", ":", "fileobj", ".", "write", "(", "recstr", ")" ]
add a record to the logs buffer .
train
false
19,582
def test_ad_hoc_cov(): tempdir = _TempDir() out_fname = op.join(tempdir, 'test-cov.fif') evoked = read_evokeds(ave_fname)[0] cov = make_ad_hoc_cov(evoked.info) cov.save(out_fname) assert_true(('Covariance' in repr(cov))) cov2 = read_cov(out_fname) assert_array_almost_equal(cov['data'], cov2['data'])
[ "def", "test_ad_hoc_cov", "(", ")", ":", "tempdir", "=", "_TempDir", "(", ")", "out_fname", "=", "op", ".", "join", "(", "tempdir", ",", "'test-cov.fif'", ")", "evoked", "=", "read_evokeds", "(", "ave_fname", ")", "[", "0", "]", "cov", "=", "make_ad_hoc_cov", "(", "evoked", ".", "info", ")", "cov", ".", "save", "(", "out_fname", ")", "assert_true", "(", "(", "'Covariance'", "in", "repr", "(", "cov", ")", ")", ")", "cov2", "=", "read_cov", "(", "out_fname", ")", "assert_array_almost_equal", "(", "cov", "[", "'data'", "]", ",", "cov2", "[", "'data'", "]", ")" ]
test ad hoc cov creation and i/o .
train
false
19,584
def clip_dump(request): print '\n[INFO] Starting Clipboard Dump Service in VM/Device' try: data = {} if (request.method == 'POST'): tools_dir = os.path.join(settings.BASE_DIR, 'DynamicAnalyzer/tools/') adb = getADB(tools_dir) args = [adb, '-s', getIdentifier(), 'shell', 'am', 'startservice', 'opensecurity.clipdump/.ClipDumper'] try: subprocess.call(args) data = {'status': 'success'} except: PrintException('[ERROR] Dumping Clipboard') data = {'status': 'error'} else: data = {'status': 'failed'} return HttpResponse(json.dumps(data), content_type='application/json') except: PrintException('[ERROR] Dumping Clipboard') return HttpResponseRedirect('/error/')
[ "def", "clip_dump", "(", "request", ")", ":", "print", "'\\n[INFO] Starting Clipboard Dump Service in VM/Device'", "try", ":", "data", "=", "{", "}", "if", "(", "request", ".", "method", "==", "'POST'", ")", ":", "tools_dir", "=", "os", ".", "path", ".", "join", "(", "settings", ".", "BASE_DIR", ",", "'DynamicAnalyzer/tools/'", ")", "adb", "=", "getADB", "(", "tools_dir", ")", "args", "=", "[", "adb", ",", "'-s'", ",", "getIdentifier", "(", ")", ",", "'shell'", ",", "'am'", ",", "'startservice'", ",", "'opensecurity.clipdump/.ClipDumper'", "]", "try", ":", "subprocess", ".", "call", "(", "args", ")", "data", "=", "{", "'status'", ":", "'success'", "}", "except", ":", "PrintException", "(", "'[ERROR] Dumping Clipboard'", ")", "data", "=", "{", "'status'", ":", "'error'", "}", "else", ":", "data", "=", "{", "'status'", ":", "'failed'", "}", "return", "HttpResponse", "(", "json", ".", "dumps", "(", "data", ")", ",", "content_type", "=", "'application/json'", ")", "except", ":", "PrintException", "(", "'[ERROR] Dumping Clipboard'", ")", "return", "HttpResponseRedirect", "(", "'/error/'", ")" ]
dump android clipboard .
train
false
19,585
def modified(*packages, **flags): return __salt__['lowpkg.modified'](*packages, **flags)
[ "def", "modified", "(", "*", "packages", ",", "**", "flags", ")", ":", "return", "__salt__", "[", "'lowpkg.modified'", "]", "(", "*", "packages", ",", "**", "flags", ")" ]
checks to see if the page has been modified since the version in the requesters cache .
train
false
19,588
def time_validator(optdict, name, value): return optik_ext.check_time(None, name, value)
[ "def", "time_validator", "(", "optdict", ",", "name", ",", "value", ")", ":", "return", "optik_ext", ".", "check_time", "(", "None", ",", "name", ",", "value", ")" ]
validate and return a time object for option of type time .
train
false
19,589
def parse_size(size_input): prefixes = [None, u'K', u'M', u'G', u'T', u'P'] try: return int(size_input) except ValueError: size_input = size_input.upper().rstrip(u'IB') (value, unit) = (float(size_input[:(-1)]), size_input[(-1):]) if (unit not in prefixes): raise ValueError(u"should be in format '0-x (KiB, MiB, GiB, TiB, PiB)'") return int(((1024 ** prefixes.index(unit)) * value))
[ "def", "parse_size", "(", "size_input", ")", ":", "prefixes", "=", "[", "None", ",", "u'K'", ",", "u'M'", ",", "u'G'", ",", "u'T'", ",", "u'P'", "]", "try", ":", "return", "int", "(", "size_input", ")", "except", "ValueError", ":", "size_input", "=", "size_input", ".", "upper", "(", ")", ".", "rstrip", "(", "u'IB'", ")", "(", "value", ",", "unit", ")", "=", "(", "float", "(", "size_input", "[", ":", "(", "-", "1", ")", "]", ")", ",", "size_input", "[", "(", "-", "1", ")", ":", "]", ")", "if", "(", "unit", "not", "in", "prefixes", ")", ":", "raise", "ValueError", "(", "u\"should be in format '0-x (KiB, MiB, GiB, TiB, PiB)'\"", ")", "return", "int", "(", "(", "(", "1024", "**", "prefixes", ".", "index", "(", "unit", ")", ")", "*", "value", ")", ")" ]
parses a size specification .
train
false
19,590
@decorators.memoize def _check_mdata_get(): return salt.utils.which('mdata-get')
[ "@", "decorators", ".", "memoize", "def", "_check_mdata_get", "(", ")", ":", "return", "salt", ".", "utils", ".", "which", "(", "'mdata-get'", ")" ]
looks to see if mdata-get is present on the system .
train
false
19,591
def _unique_descendants(node): results = [] current = node while (current and _istree(current) and (len(current) == 1)): current = current[0] results.append(current) return results
[ "def", "_unique_descendants", "(", "node", ")", ":", "results", "=", "[", "]", "current", "=", "node", "while", "(", "current", "and", "_istree", "(", "current", ")", "and", "(", "len", "(", "current", ")", "==", "1", ")", ")", ":", "current", "=", "current", "[", "0", "]", "results", ".", "append", "(", "current", ")", "return", "results" ]
returns the list of all nodes descended from the given node .
train
false
19,592
def save_untouched_background(background_url, event_id): upload_path = UPLOAD_PATHS['event']['background_url'].format(event_id=event_id) return save_event_image(background_url, upload_path)
[ "def", "save_untouched_background", "(", "background_url", ",", "event_id", ")", ":", "upload_path", "=", "UPLOAD_PATHS", "[", "'event'", "]", "[", "'background_url'", "]", ".", "format", "(", "event_id", "=", "event_id", ")", "return", "save_event_image", "(", "background_url", ",", "upload_path", ")" ]
save the untouched background image .
train
false
19,593
def test_shorter_than(): assert (hug.types.shorter_than(10)('hi there') == 'hi there') assert (hug.types.shorter_than(10)(1) == '1') assert (hug.types.shorter_than(10)('') == '') assert ('10' in hug.types.shorter_than(10).__doc__) with pytest.raises(ValueError): assert hug.types.shorter_than(10)('there is quite a bit of text here, in fact way more than allowed')
[ "def", "test_shorter_than", "(", ")", ":", "assert", "(", "hug", ".", "types", ".", "shorter_than", "(", "10", ")", "(", "'hi there'", ")", "==", "'hi there'", ")", "assert", "(", "hug", ".", "types", ".", "shorter_than", "(", "10", ")", "(", "1", ")", "==", "'1'", ")", "assert", "(", "hug", ".", "types", ".", "shorter_than", "(", "10", ")", "(", "''", ")", "==", "''", ")", "assert", "(", "'10'", "in", "hug", ".", "types", ".", "shorter_than", "(", "10", ")", ".", "__doc__", ")", "with", "pytest", ".", "raises", "(", "ValueError", ")", ":", "assert", "hug", ".", "types", ".", "shorter_than", "(", "10", ")", "(", "'there is quite a bit of text here, in fact way more than allowed'", ")" ]
tests that hugs shorter than type successfully limits the values passed in .
train
false
19,594
def long2str(l): if (type(l) not in (types.IntType, types.LongType)): raise ValueError, 'the input must be an integer' if (l < 0): raise ValueError, 'the input must be greater than 0' s = '' while l: s = (s + chr((l & 255L))) l >>= 8 return s
[ "def", "long2str", "(", "l", ")", ":", "if", "(", "type", "(", "l", ")", "not", "in", "(", "types", ".", "IntType", ",", "types", ".", "LongType", ")", ")", ":", "raise", "ValueError", ",", "'the input must be an integer'", "if", "(", "l", "<", "0", ")", ":", "raise", "ValueError", ",", "'the input must be greater than 0'", "s", "=", "''", "while", "l", ":", "s", "=", "(", "s", "+", "chr", "(", "(", "l", "&", "255", "L", ")", ")", ")", "l", ">>=", "8", "return", "s" ]
convert an integer to a string .
train
true
19,597
def _add_sub_elements_from_dict(parent, sub_dict): for (key, value) in sub_dict.items(): if isinstance(value, list): for repeated_element in value: sub_element = ET.SubElement(parent, key) _add_element_attrs(sub_element, repeated_element.get('attrs', {})) children = repeated_element.get('children', None) if isinstance(children, dict): _add_sub_elements_from_dict(sub_element, children) elif isinstance(children, str): sub_element.text = children else: sub_element = ET.SubElement(parent, key) _add_element_attrs(sub_element, value.get('attrs', {})) children = value.get('children', None) if isinstance(children, dict): _add_sub_elements_from_dict(sub_element, children) elif isinstance(children, str): sub_element.text = children
[ "def", "_add_sub_elements_from_dict", "(", "parent", ",", "sub_dict", ")", ":", "for", "(", "key", ",", "value", ")", "in", "sub_dict", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "list", ")", ":", "for", "repeated_element", "in", "value", ":", "sub_element", "=", "ET", ".", "SubElement", "(", "parent", ",", "key", ")", "_add_element_attrs", "(", "sub_element", ",", "repeated_element", ".", "get", "(", "'attrs'", ",", "{", "}", ")", ")", "children", "=", "repeated_element", ".", "get", "(", "'children'", ",", "None", ")", "if", "isinstance", "(", "children", ",", "dict", ")", ":", "_add_sub_elements_from_dict", "(", "sub_element", ",", "children", ")", "elif", "isinstance", "(", "children", ",", "str", ")", ":", "sub_element", ".", "text", "=", "children", "else", ":", "sub_element", "=", "ET", ".", "SubElement", "(", "parent", ",", "key", ")", "_add_element_attrs", "(", "sub_element", ",", "value", ".", "get", "(", "'attrs'", ",", "{", "}", ")", ")", "children", "=", "value", ".", "get", "(", "'children'", ",", "None", ")", "if", "isinstance", "(", "children", ",", "dict", ")", ":", "_add_sub_elements_from_dict", "(", "sub_element", ",", "children", ")", "elif", "isinstance", "(", "children", ",", "str", ")", ":", "sub_element", ".", "text", "=", "children" ]
add subelements to the parent element .
train
false
19,598
def ogrinspect(*args, **kwargs): return '\n'.join((s for s in _ogrinspect(*args, **kwargs)))
[ "def", "ogrinspect", "(", "*", "args", ",", "**", "kwargs", ")", ":", "return", "'\\n'", ".", "join", "(", "(", "s", "for", "s", "in", "_ogrinspect", "(", "*", "args", ",", "**", "kwargs", ")", ")", ")" ]
given a data source and a string model name this function will generate a geodjango model .
train
false
19,599
def register_fileformat(fileformat): try: fileformat.get_class() FILE_FORMATS[fileformat.format_id] = fileformat for autoload in fileformat.autoload: FILE_DETECT.append((autoload, fileformat)) except (AttributeError, ImportError): add_configuration_error(u'File format: {0}'.format(fileformat.format_id), traceback.format_exc()) return fileformat
[ "def", "register_fileformat", "(", "fileformat", ")", ":", "try", ":", "fileformat", ".", "get_class", "(", ")", "FILE_FORMATS", "[", "fileformat", ".", "format_id", "]", "=", "fileformat", "for", "autoload", "in", "fileformat", ".", "autoload", ":", "FILE_DETECT", ".", "append", "(", "(", "autoload", ",", "fileformat", ")", ")", "except", "(", "AttributeError", ",", "ImportError", ")", ":", "add_configuration_error", "(", "u'File format: {0}'", ".", "format", "(", "fileformat", ".", "format_id", ")", ",", "traceback", ".", "format_exc", "(", ")", ")", "return", "fileformat" ]
registers fileformat in dictionary .
train
false
19,600
def load_mpqa(loc='./data/'): (pos, neg) = ([], []) with open((loc + 'mpqa.pos'), 'rb') as f: for line in f: text = line.strip() if (len(text) > 0): pos.append(text) with open((loc + 'mpqa.neg'), 'rb') as f: for line in f: text = line.strip() if (len(text) > 0): neg.append(text) return (pos, neg)
[ "def", "load_mpqa", "(", "loc", "=", "'./data/'", ")", ":", "(", "pos", ",", "neg", ")", "=", "(", "[", "]", ",", "[", "]", ")", "with", "open", "(", "(", "loc", "+", "'mpqa.pos'", ")", ",", "'rb'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "text", "=", "line", ".", "strip", "(", ")", "if", "(", "len", "(", "text", ")", ">", "0", ")", ":", "pos", ".", "append", "(", "text", ")", "with", "open", "(", "(", "loc", "+", "'mpqa.neg'", ")", ",", "'rb'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "text", "=", "line", ".", "strip", "(", ")", "if", "(", "len", "(", "text", ")", ">", "0", ")", ":", "neg", ".", "append", "(", "text", ")", "return", "(", "pos", ",", "neg", ")" ]
load the mpqa dataset .
train
false
19,601
def _course_notifications_json_get(course_action_state_id): try: action_state = CourseRerunState.objects.find_first(id=course_action_state_id) except CourseActionStateItemNotFoundError: return HttpResponseBadRequest() action_state_info = {'action': action_state.action, 'state': action_state.state, 'should_display': action_state.should_display} return JsonResponse(action_state_info)
[ "def", "_course_notifications_json_get", "(", "course_action_state_id", ")", ":", "try", ":", "action_state", "=", "CourseRerunState", ".", "objects", ".", "find_first", "(", "id", "=", "course_action_state_id", ")", "except", "CourseActionStateItemNotFoundError", ":", "return", "HttpResponseBadRequest", "(", ")", "action_state_info", "=", "{", "'action'", ":", "action_state", ".", "action", ",", "'state'", ":", "action_state", ".", "state", ",", "'should_display'", ":", "action_state", ".", "should_display", "}", "return", "JsonResponse", "(", "action_state_info", ")" ]
return the action and the action state for the given id .
train
false
19,602
def test_import_vispy_no_pyopengl(): allmodnames = loaded_vispy_modules('vispy.gloo.gl.gl2', 2, True) assert_not_in('OpenGL', allmodnames) allmodnames = loaded_vispy_modules('vispy.app', 2, True) assert_not_in('OpenGL', allmodnames) allmodnames = loaded_vispy_modules('vispy.scene', 2, True) assert_not_in('OpenGL', allmodnames)
[ "def", "test_import_vispy_no_pyopengl", "(", ")", ":", "allmodnames", "=", "loaded_vispy_modules", "(", "'vispy.gloo.gl.gl2'", ",", "2", ",", "True", ")", "assert_not_in", "(", "'OpenGL'", ",", "allmodnames", ")", "allmodnames", "=", "loaded_vispy_modules", "(", "'vispy.app'", ",", "2", ",", "True", ")", "assert_not_in", "(", "'OpenGL'", ",", "allmodnames", ")", "allmodnames", "=", "loaded_vispy_modules", "(", "'vispy.scene'", ",", "2", ",", "True", ")", "assert_not_in", "(", "'OpenGL'", ",", "allmodnames", ")" ]
importing vispy .
train
false
19,603
def clear_user_permission_cache(user): from django.core.cache import cache for key in PERMISSION_KEYS: cache.delete(get_cache_key(user, key), version=get_cache_permission_version())
[ "def", "clear_user_permission_cache", "(", "user", ")", ":", "from", "django", ".", "core", ".", "cache", "import", "cache", "for", "key", "in", "PERMISSION_KEYS", ":", "cache", ".", "delete", "(", "get_cache_key", "(", "user", ",", "key", ")", ",", "version", "=", "get_cache_permission_version", "(", ")", ")" ]
cleans permission cache for given user .
train
false
19,604
def postBuild(site): pass
[ "def", "postBuild", "(", "site", ")", ":", "pass" ]
called after building the site .
train
false
19,605
def test_fix_types(): for (fname, change) in ((hp_fif_fname, True), (test_fif_fname, False), (ctf_fname, False)): raw = read_raw_fif(fname) mag_picks = pick_types(raw.info, meg='mag') other_picks = np.setdiff1d(np.arange(len(raw.ch_names)), mag_picks) if change: for ii in mag_picks: raw.info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T2 orig_types = np.array([ch['coil_type'] for ch in raw.info['chs']]) raw.fix_mag_coil_types() new_types = np.array([ch['coil_type'] for ch in raw.info['chs']]) if (not change): assert_array_equal(orig_types, new_types) else: assert_array_equal(orig_types[other_picks], new_types[other_picks]) assert_true((orig_types[mag_picks] != new_types[mag_picks]).all()) assert_true((new_types[mag_picks] == FIFF.FIFFV_COIL_VV_MAG_T3).all())
[ "def", "test_fix_types", "(", ")", ":", "for", "(", "fname", ",", "change", ")", "in", "(", "(", "hp_fif_fname", ",", "True", ")", ",", "(", "test_fif_fname", ",", "False", ")", ",", "(", "ctf_fname", ",", "False", ")", ")", ":", "raw", "=", "read_raw_fif", "(", "fname", ")", "mag_picks", "=", "pick_types", "(", "raw", ".", "info", ",", "meg", "=", "'mag'", ")", "other_picks", "=", "np", ".", "setdiff1d", "(", "np", ".", "arange", "(", "len", "(", "raw", ".", "ch_names", ")", ")", ",", "mag_picks", ")", "if", "change", ":", "for", "ii", "in", "mag_picks", ":", "raw", ".", "info", "[", "'chs'", "]", "[", "ii", "]", "[", "'coil_type'", "]", "=", "FIFF", ".", "FIFFV_COIL_VV_MAG_T2", "orig_types", "=", "np", ".", "array", "(", "[", "ch", "[", "'coil_type'", "]", "for", "ch", "in", "raw", ".", "info", "[", "'chs'", "]", "]", ")", "raw", ".", "fix_mag_coil_types", "(", ")", "new_types", "=", "np", ".", "array", "(", "[", "ch", "[", "'coil_type'", "]", "for", "ch", "in", "raw", ".", "info", "[", "'chs'", "]", "]", ")", "if", "(", "not", "change", ")", ":", "assert_array_equal", "(", "orig_types", ",", "new_types", ")", "else", ":", "assert_array_equal", "(", "orig_types", "[", "other_picks", "]", ",", "new_types", "[", "other_picks", "]", ")", "assert_true", "(", "(", "orig_types", "[", "mag_picks", "]", "!=", "new_types", "[", "mag_picks", "]", ")", ".", "all", "(", ")", ")", "assert_true", "(", "(", "new_types", "[", "mag_picks", "]", "==", "FIFF", ".", "FIFFV_COIL_VV_MAG_T3", ")", ".", "all", "(", ")", ")" ]
test fixing of channel types .
train
false
19,606
def get_if_raw_hwaddr(ifname): NULL_MAC_ADDRESS = ('\x00' * 6) if (ifname == LOOPBACK_NAME): return (ARPHDR_LOOPBACK, NULL_MAC_ADDRESS) try: fd = os.popen(('%s %s' % (conf.prog.ifconfig, ifname))) except OSError as msg: raise Scapy_Exception(('Failed to execute ifconfig: (%s)' % msg)) addresses = [l for l in fd.readlines() if ((l.find('ether') >= 0) or (l.find('lladdr') >= 0) or (l.find('address') >= 0))] if (not addresses): raise Scapy_Exception(('No MAC address found on %s !' % ifname)) mac = addresses[0].split(' ')[1] mac = [chr(int(b, 16)) for b in mac.split(':')] return (ARPHDR_ETHER, ''.join(mac))
[ "def", "get_if_raw_hwaddr", "(", "ifname", ")", ":", "NULL_MAC_ADDRESS", "=", "(", "'\\x00'", "*", "6", ")", "if", "(", "ifname", "==", "LOOPBACK_NAME", ")", ":", "return", "(", "ARPHDR_LOOPBACK", ",", "NULL_MAC_ADDRESS", ")", "try", ":", "fd", "=", "os", ".", "popen", "(", "(", "'%s %s'", "%", "(", "conf", ".", "prog", ".", "ifconfig", ",", "ifname", ")", ")", ")", "except", "OSError", "as", "msg", ":", "raise", "Scapy_Exception", "(", "(", "'Failed to execute ifconfig: (%s)'", "%", "msg", ")", ")", "addresses", "=", "[", "l", "for", "l", "in", "fd", ".", "readlines", "(", ")", "if", "(", "(", "l", ".", "find", "(", "'ether'", ")", ">=", "0", ")", "or", "(", "l", ".", "find", "(", "'lladdr'", ")", ">=", "0", ")", "or", "(", "l", ".", "find", "(", "'address'", ")", ">=", "0", ")", ")", "]", "if", "(", "not", "addresses", ")", ":", "raise", "Scapy_Exception", "(", "(", "'No MAC address found on %s !'", "%", "ifname", ")", ")", "mac", "=", "addresses", "[", "0", "]", ".", "split", "(", "' '", ")", "[", "1", "]", "mac", "=", "[", "chr", "(", "int", "(", "b", ",", "16", ")", ")", "for", "b", "in", "mac", ".", "split", "(", "':'", ")", "]", "return", "(", "ARPHDR_ETHER", ",", "''", ".", "join", "(", "mac", ")", ")" ]
returns the packed mac address configured on ifname .
train
true
19,607
def _MustBreakBefore(prev_token, cur_token): if prev_token.is_comment: return True if (cur_token.is_string and prev_token.is_string and IsSurroundedByBrackets(cur_token)): return True return pytree_utils.GetNodeAnnotation(cur_token.node, pytree_utils.Annotation.MUST_SPLIT, default=False)
[ "def", "_MustBreakBefore", "(", "prev_token", ",", "cur_token", ")", ":", "if", "prev_token", ".", "is_comment", ":", "return", "True", "if", "(", "cur_token", ".", "is_string", "and", "prev_token", ".", "is_string", "and", "IsSurroundedByBrackets", "(", "cur_token", ")", ")", ":", "return", "True", "return", "pytree_utils", ".", "GetNodeAnnotation", "(", "cur_token", ".", "node", ",", "pytree_utils", ".", "Annotation", ".", "MUST_SPLIT", ",", "default", "=", "False", ")" ]
return true if a line break is required before the current token .
train
false
19,608
def topic_is_unread(topic, topicsread, user, forumsread=None): if (not user.is_authenticated): return False read_cutoff = (time_utcnow() - timedelta(days=flaskbb_config['TRACKER_LENGTH'])) if (flaskbb_config['TRACKER_LENGTH'] == 0): return False if (topic.last_post.date_created < read_cutoff): return False if (topicsread is None): if (forumsread and (forumsread.cleared is not None)): return (forumsread.cleared < topic.last_post.date_created) return True return (topicsread.last_read < topic.last_post.date_created)
[ "def", "topic_is_unread", "(", "topic", ",", "topicsread", ",", "user", ",", "forumsread", "=", "None", ")", ":", "if", "(", "not", "user", ".", "is_authenticated", ")", ":", "return", "False", "read_cutoff", "=", "(", "time_utcnow", "(", ")", "-", "timedelta", "(", "days", "=", "flaskbb_config", "[", "'TRACKER_LENGTH'", "]", ")", ")", "if", "(", "flaskbb_config", "[", "'TRACKER_LENGTH'", "]", "==", "0", ")", ":", "return", "False", "if", "(", "topic", ".", "last_post", ".", "date_created", "<", "read_cutoff", ")", ":", "return", "False", "if", "(", "topicsread", "is", "None", ")", ":", "if", "(", "forumsread", "and", "(", "forumsread", ".", "cleared", "is", "not", "None", ")", ")", ":", "return", "(", "forumsread", ".", "cleared", "<", "topic", ".", "last_post", ".", "date_created", ")", "return", "True", "return", "(", "topicsread", ".", "last_read", "<", "topic", ".", "last_post", ".", "date_created", ")" ]
checks if a topic is unread .
train
false
19,609
def mood(x, y, axis=0): x = np.asarray(x, dtype=float) y = np.asarray(y, dtype=float) if (axis is None): x = x.flatten() y = y.flatten() axis = 0 res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if (ax != axis)]) if (not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if (ax != axis)]))): raise ValueError('Dimensions of x and y on all axes except `axis` should match') n = x.shape[axis] m = y.shape[axis] N = (m + n) if (N < 3): raise ValueError('Not enough observations.') xy = np.concatenate((x, y), axis=axis) if (axis != 0): xy = np.rollaxis(xy, axis) xy = xy.reshape(xy.shape[0], (-1)) all_ranks = np.zeros_like(xy) for j in range(xy.shape[1]): all_ranks[:, j] = stats.rankdata(xy[:, j]) Ri = all_ranks[:n] M = np.sum(((Ri - ((N + 1.0) / 2)) ** 2), axis=0) mnM = ((n * ((N * N) - 1.0)) / 12) varM = (((((m * n) * (N + 1.0)) * (N + 2)) * (N - 2)) / 180) z = ((M - mnM) / sqrt(varM)) z_pos = (z > 0) pval = np.zeros_like(z) pval[z_pos] = (2 * distributions.norm.sf(z[z_pos])) pval[(~ z_pos)] = (2 * distributions.norm.cdf(z[(~ z_pos)])) if (res_shape == ()): z = z[0] pval = pval[0] else: z.shape = res_shape pval.shape = res_shape return (z, pval)
[ "def", "mood", "(", "x", ",", "y", ",", "axis", "=", "0", ")", ":", "x", "=", "np", ".", "asarray", "(", "x", ",", "dtype", "=", "float", ")", "y", "=", "np", ".", "asarray", "(", "y", ",", "dtype", "=", "float", ")", "if", "(", "axis", "is", "None", ")", ":", "x", "=", "x", ".", "flatten", "(", ")", "y", "=", "y", ".", "flatten", "(", ")", "axis", "=", "0", "res_shape", "=", "tuple", "(", "[", "x", ".", "shape", "[", "ax", "]", "for", "ax", "in", "range", "(", "len", "(", "x", ".", "shape", ")", ")", "if", "(", "ax", "!=", "axis", ")", "]", ")", "if", "(", "not", "(", "res_shape", "==", "tuple", "(", "[", "y", ".", "shape", "[", "ax", "]", "for", "ax", "in", "range", "(", "len", "(", "y", ".", "shape", ")", ")", "if", "(", "ax", "!=", "axis", ")", "]", ")", ")", ")", ":", "raise", "ValueError", "(", "'Dimensions of x and y on all axes except `axis` should match'", ")", "n", "=", "x", ".", "shape", "[", "axis", "]", "m", "=", "y", ".", "shape", "[", "axis", "]", "N", "=", "(", "m", "+", "n", ")", "if", "(", "N", "<", "3", ")", ":", "raise", "ValueError", "(", "'Not enough observations.'", ")", "xy", "=", "np", ".", "concatenate", "(", "(", "x", ",", "y", ")", ",", "axis", "=", "axis", ")", "if", "(", "axis", "!=", "0", ")", ":", "xy", "=", "np", ".", "rollaxis", "(", "xy", ",", "axis", ")", "xy", "=", "xy", ".", "reshape", "(", "xy", ".", "shape", "[", "0", "]", ",", "(", "-", "1", ")", ")", "all_ranks", "=", "np", ".", "zeros_like", "(", "xy", ")", "for", "j", "in", "range", "(", "xy", ".", "shape", "[", "1", "]", ")", ":", "all_ranks", "[", ":", ",", "j", "]", "=", "stats", ".", "rankdata", "(", "xy", "[", ":", ",", "j", "]", ")", "Ri", "=", "all_ranks", "[", ":", "n", "]", "M", "=", "np", ".", "sum", "(", "(", "(", "Ri", "-", "(", "(", "N", "+", "1.0", ")", "/", "2", ")", ")", "**", "2", ")", ",", "axis", "=", "0", ")", "mnM", "=", "(", "(", "n", "*", "(", "(", "N", "*", "N", ")", "-", "1.0", ")", ")", "/", "12", ")", "varM", "=", "(", "(", "(", "(", "(", "m", 
"*", "n", ")", "*", "(", "N", "+", "1.0", ")", ")", "*", "(", "N", "+", "2", ")", ")", "*", "(", "N", "-", "2", ")", ")", "/", "180", ")", "z", "=", "(", "(", "M", "-", "mnM", ")", "/", "sqrt", "(", "varM", ")", ")", "z_pos", "=", "(", "z", ">", "0", ")", "pval", "=", "np", ".", "zeros_like", "(", "z", ")", "pval", "[", "z_pos", "]", "=", "(", "2", "*", "distributions", ".", "norm", ".", "sf", "(", "z", "[", "z_pos", "]", ")", ")", "pval", "[", "(", "~", "z_pos", ")", "]", "=", "(", "2", "*", "distributions", ".", "norm", ".", "cdf", "(", "z", "[", "(", "~", "z_pos", ")", "]", ")", ")", "if", "(", "res_shape", "==", "(", ")", ")", ":", "z", "=", "z", "[", "0", "]", "pval", "=", "pval", "[", "0", "]", "else", ":", "z", ".", "shape", "=", "res_shape", "pval", ".", "shape", "=", "res_shape", "return", "(", "z", ",", "pval", ")" ]
returns imperative .
train
false
19,610
def _isValidDate(date_string): from datetime import date (year, month, day) = map(int, date_string.split('-')) if (year < 1900): raise ValidationError, gettext('Year must be 1900 or later.') try: date(year, month, day) except ValueError as e: msg = (gettext('Invalid date: %s') % gettext(str(e))) raise ValidationError, msg
[ "def", "_isValidDate", "(", "date_string", ")", ":", "from", "datetime", "import", "date", "(", "year", ",", "month", ",", "day", ")", "=", "map", "(", "int", ",", "date_string", ".", "split", "(", "'-'", ")", ")", "if", "(", "year", "<", "1900", ")", ":", "raise", "ValidationError", ",", "gettext", "(", "'Year must be 1900 or later.'", ")", "try", ":", "date", "(", "year", ",", "month", ",", "day", ")", "except", "ValueError", "as", "e", ":", "msg", "=", "(", "gettext", "(", "'Invalid date: %s'", ")", "%", "gettext", "(", "str", "(", "e", ")", ")", ")", "raise", "ValidationError", ",", "msg" ]
a helper function used by isvalidansidate and isvalidansidatetime to check if the date is valid .
train
false
19,611
def test_git_require_sudo_user(gituser): from fabtools.require.git import working_copy (username, groupname) = gituser with cd('/tmp'): try: working_copy(REMOTE_URL, path='wc_nobody', use_sudo=True, user=username) assert is_dir('wc_nobody') assert is_dir('wc_nobody/.git') with cd('wc_nobody'): remotes = sudo('git remote -v', user=username) assert (remotes == 'origin DCTB https://github.com/disko/fabtools.git (fetch)\r\norigin DCTB https://github.com/disko/fabtools.git (push)') assert (_current_branch() == 'master') assert (owner('wc_nobody') == username) assert (group('wc_nobody') == groupname) finally: run_as_root('rm -rf wc_nobody')
[ "def", "test_git_require_sudo_user", "(", "gituser", ")", ":", "from", "fabtools", ".", "require", ".", "git", "import", "working_copy", "(", "username", ",", "groupname", ")", "=", "gituser", "with", "cd", "(", "'/tmp'", ")", ":", "try", ":", "working_copy", "(", "REMOTE_URL", ",", "path", "=", "'wc_nobody'", ",", "use_sudo", "=", "True", ",", "user", "=", "username", ")", "assert", "is_dir", "(", "'wc_nobody'", ")", "assert", "is_dir", "(", "'wc_nobody/.git'", ")", "with", "cd", "(", "'wc_nobody'", ")", ":", "remotes", "=", "sudo", "(", "'git remote -v'", ",", "user", "=", "username", ")", "assert", "(", "remotes", "==", "'origin DCTB https://github.com/disko/fabtools.git (fetch)\\r\\norigin DCTB https://github.com/disko/fabtools.git (push)'", ")", "assert", "(", "_current_branch", "(", ")", "==", "'master'", ")", "assert", "(", "owner", "(", "'wc_nobody'", ")", "==", "username", ")", "assert", "(", "group", "(", "'wc_nobody'", ")", "==", "groupname", ")", "finally", ":", "run_as_root", "(", "'rm -rf wc_nobody'", ")" ]
test working_copy() with sudo as a user .
train
false
19,612
def getNewRepository(): return ExportRepository()
[ "def", "getNewRepository", "(", ")", ":", "return", "ExportRepository", "(", ")" ]
get new repository .
train
false
19,613
def package_update_all(m): retvals = {'rc': 0, 'stdout': '', 'stderr': ''} if (m.params['type'] == 'patch'): cmdname = 'patch' else: cmdname = 'update' cmd = get_cmd(m, cmdname) retvals['cmd'] = cmd (result, retvals['rc'], retvals['stdout'], retvals['stderr']) = parse_zypper_xml(m, cmd) return (result, retvals)
[ "def", "package_update_all", "(", "m", ")", ":", "retvals", "=", "{", "'rc'", ":", "0", ",", "'stdout'", ":", "''", ",", "'stderr'", ":", "''", "}", "if", "(", "m", ".", "params", "[", "'type'", "]", "==", "'patch'", ")", ":", "cmdname", "=", "'patch'", "else", ":", "cmdname", "=", "'update'", "cmd", "=", "get_cmd", "(", "m", ",", "cmdname", ")", "retvals", "[", "'cmd'", "]", "=", "cmd", "(", "result", ",", "retvals", "[", "'rc'", "]", ",", "retvals", "[", "'stdout'", "]", ",", "retvals", "[", "'stderr'", "]", ")", "=", "parse_zypper_xml", "(", "m", ",", "cmd", ")", "return", "(", "result", ",", "retvals", ")" ]
run update or patch on all available packages .
train
false
19,615
def autovary(ignore=None, debug=False): request = cherrypy.serving.request req_h = request.headers request.headers = MonitoredHeaderMap() request.headers.update(req_h) if (ignore is None): ignore = set(['Content-Disposition', 'Content-Length', 'Content-Type']) def set_response_header(): resp_h = cherrypy.serving.response.headers v = set([e.value for e in resp_h.elements('Vary')]) if debug: cherrypy.log(('Accessed headers: %s' % request.headers.accessed_headers), 'TOOLS.AUTOVARY') v = v.union(request.headers.accessed_headers) v = v.difference(ignore) v = list(v) v.sort() resp_h['Vary'] = ', '.join(v) request.hooks.attach('before_finalize', set_response_header, 95)
[ "def", "autovary", "(", "ignore", "=", "None", ",", "debug", "=", "False", ")", ":", "request", "=", "cherrypy", ".", "serving", ".", "request", "req_h", "=", "request", ".", "headers", "request", ".", "headers", "=", "MonitoredHeaderMap", "(", ")", "request", ".", "headers", ".", "update", "(", "req_h", ")", "if", "(", "ignore", "is", "None", ")", ":", "ignore", "=", "set", "(", "[", "'Content-Disposition'", ",", "'Content-Length'", ",", "'Content-Type'", "]", ")", "def", "set_response_header", "(", ")", ":", "resp_h", "=", "cherrypy", ".", "serving", ".", "response", ".", "headers", "v", "=", "set", "(", "[", "e", ".", "value", "for", "e", "in", "resp_h", ".", "elements", "(", "'Vary'", ")", "]", ")", "if", "debug", ":", "cherrypy", ".", "log", "(", "(", "'Accessed headers: %s'", "%", "request", ".", "headers", ".", "accessed_headers", ")", ",", "'TOOLS.AUTOVARY'", ")", "v", "=", "v", ".", "union", "(", "request", ".", "headers", ".", "accessed_headers", ")", "v", "=", "v", ".", "difference", "(", "ignore", ")", "v", "=", "list", "(", "v", ")", "v", ".", "sort", "(", ")", "resp_h", "[", "'Vary'", "]", "=", "', '", ".", "join", "(", "v", ")", "request", ".", "hooks", ".", "attach", "(", "'before_finalize'", ",", "set_response_header", ",", "95", ")" ]
auto-populate the vary response header based on request .
train
false
19,617
def normcase(s): return s
[ "def", "normcase", "(", "s", ")", ":", "return", "s" ]
normalize the case of a pathname .
train
false
19,618
def package_relationships_list(context, data_dict): model = context['model'] api = context.get('api_version') id = _get_or_bust(data_dict, 'id') id2 = data_dict.get('id2') rel = data_dict.get('rel') ref_package_by = ('id' if (api == 2) else 'name') pkg1 = model.Package.get(id) pkg2 = None if (not pkg1): raise NotFound('First package named in request was not found.') if id2: pkg2 = model.Package.get(id2) if (not pkg2): raise NotFound('Second package named in address was not found.') if (rel == 'relationships'): rel = None _check_access('package_relationships_list', context, data_dict) relationships = pkg1.get_relationships(with_package=pkg2, type=rel) if (rel and (not relationships)): raise NotFound(('Relationship "%s %s %s" not found.' % (id, rel, id2))) relationship_dicts = [rel.as_dict(pkg1, ref_package_by=ref_package_by) for rel in relationships] return relationship_dicts
[ "def", "package_relationships_list", "(", "context", ",", "data_dict", ")", ":", "model", "=", "context", "[", "'model'", "]", "api", "=", "context", ".", "get", "(", "'api_version'", ")", "id", "=", "_get_or_bust", "(", "data_dict", ",", "'id'", ")", "id2", "=", "data_dict", ".", "get", "(", "'id2'", ")", "rel", "=", "data_dict", ".", "get", "(", "'rel'", ")", "ref_package_by", "=", "(", "'id'", "if", "(", "api", "==", "2", ")", "else", "'name'", ")", "pkg1", "=", "model", ".", "Package", ".", "get", "(", "id", ")", "pkg2", "=", "None", "if", "(", "not", "pkg1", ")", ":", "raise", "NotFound", "(", "'First package named in request was not found.'", ")", "if", "id2", ":", "pkg2", "=", "model", ".", "Package", ".", "get", "(", "id2", ")", "if", "(", "not", "pkg2", ")", ":", "raise", "NotFound", "(", "'Second package named in address was not found.'", ")", "if", "(", "rel", "==", "'relationships'", ")", ":", "rel", "=", "None", "_check_access", "(", "'package_relationships_list'", ",", "context", ",", "data_dict", ")", "relationships", "=", "pkg1", ".", "get_relationships", "(", "with_package", "=", "pkg2", ",", "type", "=", "rel", ")", "if", "(", "rel", "and", "(", "not", "relationships", ")", ")", ":", "raise", "NotFound", "(", "(", "'Relationship \"%s %s %s\" not found.'", "%", "(", "id", ",", "rel", ",", "id2", ")", ")", ")", "relationship_dicts", "=", "[", "rel", ".", "as_dict", "(", "pkg1", ",", "ref_package_by", "=", "ref_package_by", ")", "for", "rel", "in", "relationships", "]", "return", "relationship_dicts" ]
return a dataset s relationships .
train
false
19,619
def _project_onto_surface(rrs, surf, project_rrs=False): surf_geom = _get_tri_supp_geom(surf) coords = np.empty((len(rrs), 3)) tri_idx = np.empty((len(rrs),), int) for (ri, rr) in enumerate(rrs): tri_idx[ri] = _find_nearest_tri_pt(rr, surf_geom)[2] coords[ri] = _triangle_coords(rr, surf_geom, tri_idx[ri]) weights = np.array([((1.0 - coords[:, 0]) - coords[:, 1]), coords[:, 0], coords[:, 1]]) out = (weights, tri_idx) if project_rrs: out += (np.einsum('ij,jik->jk', weights, surf['rr'][surf['tris'][tri_idx]]),) return out
[ "def", "_project_onto_surface", "(", "rrs", ",", "surf", ",", "project_rrs", "=", "False", ")", ":", "surf_geom", "=", "_get_tri_supp_geom", "(", "surf", ")", "coords", "=", "np", ".", "empty", "(", "(", "len", "(", "rrs", ")", ",", "3", ")", ")", "tri_idx", "=", "np", ".", "empty", "(", "(", "len", "(", "rrs", ")", ",", ")", ",", "int", ")", "for", "(", "ri", ",", "rr", ")", "in", "enumerate", "(", "rrs", ")", ":", "tri_idx", "[", "ri", "]", "=", "_find_nearest_tri_pt", "(", "rr", ",", "surf_geom", ")", "[", "2", "]", "coords", "[", "ri", "]", "=", "_triangle_coords", "(", "rr", ",", "surf_geom", ",", "tri_idx", "[", "ri", "]", ")", "weights", "=", "np", ".", "array", "(", "[", "(", "(", "1.0", "-", "coords", "[", ":", ",", "0", "]", ")", "-", "coords", "[", ":", ",", "1", "]", ")", ",", "coords", "[", ":", ",", "0", "]", ",", "coords", "[", ":", ",", "1", "]", "]", ")", "out", "=", "(", "weights", ",", "tri_idx", ")", "if", "project_rrs", ":", "out", "+=", "(", "np", ".", "einsum", "(", "'ij,jik->jk'", ",", "weights", ",", "surf", "[", "'rr'", "]", "[", "surf", "[", "'tris'", "]", "[", "tri_idx", "]", "]", ")", ",", ")", "return", "out" ]
project points onto surface .
train
false
19,620
def _convert_listlike(arg, unit='ns', box=True, errors='raise', name=None): if (isinstance(arg, (list, tuple)) or (not hasattr(arg, 'dtype'))): arg = np.array(list(arg), dtype='O') if is_timedelta64_dtype(arg): value = arg.astype('timedelta64[ns]') elif is_integer_dtype(arg): value = arg.astype('timedelta64[{0}]'.format(unit)).astype('timedelta64[ns]', copy=False) else: try: value = tslib.array_to_timedelta64(_ensure_object(arg), unit=unit, errors=errors) value = value.astype('timedelta64[ns]', copy=False) except ValueError: if (errors == 'ignore'): return arg else: raise if box: from pandas import TimedeltaIndex value = TimedeltaIndex(value, unit='ns', name=name) return value
[ "def", "_convert_listlike", "(", "arg", ",", "unit", "=", "'ns'", ",", "box", "=", "True", ",", "errors", "=", "'raise'", ",", "name", "=", "None", ")", ":", "if", "(", "isinstance", "(", "arg", ",", "(", "list", ",", "tuple", ")", ")", "or", "(", "not", "hasattr", "(", "arg", ",", "'dtype'", ")", ")", ")", ":", "arg", "=", "np", ".", "array", "(", "list", "(", "arg", ")", ",", "dtype", "=", "'O'", ")", "if", "is_timedelta64_dtype", "(", "arg", ")", ":", "value", "=", "arg", ".", "astype", "(", "'timedelta64[ns]'", ")", "elif", "is_integer_dtype", "(", "arg", ")", ":", "value", "=", "arg", ".", "astype", "(", "'timedelta64[{0}]'", ".", "format", "(", "unit", ")", ")", ".", "astype", "(", "'timedelta64[ns]'", ",", "copy", "=", "False", ")", "else", ":", "try", ":", "value", "=", "tslib", ".", "array_to_timedelta64", "(", "_ensure_object", "(", "arg", ")", ",", "unit", "=", "unit", ",", "errors", "=", "errors", ")", "value", "=", "value", ".", "astype", "(", "'timedelta64[ns]'", ",", "copy", "=", "False", ")", "except", "ValueError", ":", "if", "(", "errors", "==", "'ignore'", ")", ":", "return", "arg", "else", ":", "raise", "if", "box", ":", "from", "pandas", "import", "TimedeltaIndex", "value", "=", "TimedeltaIndex", "(", "value", ",", "unit", "=", "'ns'", ",", "name", "=", "name", ")", "return", "value" ]
convert a list of objects to a timedelta index object .
train
false
19,621
def telnet_connect(ip_addr): try: return telnetlib.Telnet(ip_addr, TELNET_PORT, TELNET_TIMEOUT) except socket.timeout: sys.exit('Connection timed-out')
[ "def", "telnet_connect", "(", "ip_addr", ")", ":", "try", ":", "return", "telnetlib", ".", "Telnet", "(", "ip_addr", ",", "TELNET_PORT", ",", "TELNET_TIMEOUT", ")", "except", "socket", ".", "timeout", ":", "sys", ".", "exit", "(", "'Connection timed-out'", ")" ]
establish telnet connection .
train
false
19,623
def chain_files(paths): for path in paths: with open(path, 'r') as f: for line in f: (yield line)
[ "def", "chain_files", "(", "paths", ")", ":", "for", "path", "in", "paths", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "(", "yield", "line", ")" ]
iterate through many files .
train
false
19,624
def pacf_ols(x, nlags=40): (xlags, x0) = lagmat(x, nlags, original='sep') xlags = add_constant(xlags) pacf = [1.0] for k in range(1, (nlags + 1)): res = OLS(x0[k:], xlags[k:, :(k + 1)]).fit() pacf.append(res.params[(-1)]) return np.array(pacf)
[ "def", "pacf_ols", "(", "x", ",", "nlags", "=", "40", ")", ":", "(", "xlags", ",", "x0", ")", "=", "lagmat", "(", "x", ",", "nlags", ",", "original", "=", "'sep'", ")", "xlags", "=", "add_constant", "(", "xlags", ")", "pacf", "=", "[", "1.0", "]", "for", "k", "in", "range", "(", "1", ",", "(", "nlags", "+", "1", ")", ")", ":", "res", "=", "OLS", "(", "x0", "[", "k", ":", "]", ",", "xlags", "[", "k", ":", ",", ":", "(", "k", "+", "1", ")", "]", ")", ".", "fit", "(", ")", "pacf", ".", "append", "(", "res", ".", "params", "[", "(", "-", "1", ")", "]", ")", "return", "np", ".", "array", "(", "pacf", ")" ]
calculate partial autocorrelations parameters x : 1d array observations of time series for which pacf is calculated nlags : int number of lags for which pacf is returned .
train
false
19,626
def DiffAnys(obj1, obj2, looseMatch=False, ignoreArrayOrder=True): differ = Differ(looseMatch=looseMatch, ignoreArrayOrder=ignoreArrayOrder) return differ.DiffAnyObjects(obj1, obj2)
[ "def", "DiffAnys", "(", "obj1", ",", "obj2", ",", "looseMatch", "=", "False", ",", "ignoreArrayOrder", "=", "True", ")", ":", "differ", "=", "Differ", "(", "looseMatch", "=", "looseMatch", ",", "ignoreArrayOrder", "=", "ignoreArrayOrder", ")", "return", "differ", ".", "DiffAnyObjects", "(", "obj1", ",", "obj2", ")" ]
diff any two objects .
train
true
19,628
def get_disk_backing_file(path, basename=True): backing_file = images.qemu_img_info(path).backing_file if (backing_file and basename): backing_file = os.path.basename(backing_file) return backing_file
[ "def", "get_disk_backing_file", "(", "path", ",", "basename", "=", "True", ")", ":", "backing_file", "=", "images", ".", "qemu_img_info", "(", "path", ")", ".", "backing_file", "if", "(", "backing_file", "and", "basename", ")", ":", "backing_file", "=", "os", ".", "path", ".", "basename", "(", "backing_file", ")", "return", "backing_file" ]
get the backing file of a disk image .
train
false
19,629
def standalone_html_page_for_models(models, resources, title): return file_html(models, resources, title)
[ "def", "standalone_html_page_for_models", "(", "models", ",", "resources", ",", "title", ")", ":", "return", "file_html", "(", "models", ",", "resources", ",", "title", ")" ]
return an html document that renders zero or more bokeh documents or models .
train
false
19,631
def faulty(): fmadm = _check_fmadm() cmd = '{cmd} faulty'.format(cmd=fmadm) res = __salt__['cmd.run_all'](cmd) result = {} if (res['stdout'] == ''): result = False else: result = _parse_fmadm_faulty(res['stdout']) return result
[ "def", "faulty", "(", ")", ":", "fmadm", "=", "_check_fmadm", "(", ")", "cmd", "=", "'{cmd} faulty'", ".", "format", "(", "cmd", "=", "fmadm", ")", "res", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ")", "result", "=", "{", "}", "if", "(", "res", "[", "'stdout'", "]", "==", "''", ")", ":", "result", "=", "False", "else", ":", "result", "=", "_parse_fmadm_faulty", "(", "res", "[", "'stdout'", "]", ")", "return", "result" ]
display list of faulty resources cli example: .
train
true
19,634
def fixup_building_sdist(): if ('sdist' in sys.argv): if ('--install-js' in sys.argv): print("Removing '--install-js' incompatible with 'sdist'") sys.argv.remove('--install-js') if ('--build-js' not in sys.argv): print("Adding '--build-js' required for 'sdist'") sys.argv.append('--build-js')
[ "def", "fixup_building_sdist", "(", ")", ":", "if", "(", "'sdist'", "in", "sys", ".", "argv", ")", ":", "if", "(", "'--install-js'", "in", "sys", ".", "argv", ")", ":", "print", "(", "\"Removing '--install-js' incompatible with 'sdist'\"", ")", "sys", ".", "argv", ".", "remove", "(", "'--install-js'", ")", "if", "(", "'--build-js'", "not", "in", "sys", ".", "argv", ")", ":", "print", "(", "\"Adding '--build-js' required for 'sdist'\"", ")", "sys", ".", "argv", ".", "append", "(", "'--build-js'", ")" ]
check for sdist and ensure we always build bokehjs when packaging source distributions do not ship with bokehjs source code .
train
true
19,635
def safe_find_sr(session): sr_ref = _find_sr(session) if (sr_ref is None): raise exception.StorageRepositoryNotFound() return sr_ref
[ "def", "safe_find_sr", "(", "session", ")", ":", "sr_ref", "=", "_find_sr", "(", "session", ")", "if", "(", "sr_ref", "is", "None", ")", ":", "raise", "exception", ".", "StorageRepositoryNotFound", "(", ")", "return", "sr_ref" ]
same as _find_sr except raises a notfound exception if sr cannot be determined .
train
false
19,636
def strides_from_shape(ndim, shape, itemsize, layout): if (ndim == 0): return () if (layout == 'C'): strides = (list(shape[1:]) + [itemsize]) for i in range((ndim - 2), (-1), (-1)): strides[i] *= strides[(i + 1)] else: strides = ([itemsize] + list(shape[:(-1)])) for i in range(1, ndim): strides[i] *= strides[(i - 1)] return strides
[ "def", "strides_from_shape", "(", "ndim", ",", "shape", ",", "itemsize", ",", "layout", ")", ":", "if", "(", "ndim", "==", "0", ")", ":", "return", "(", ")", "if", "(", "layout", "==", "'C'", ")", ":", "strides", "=", "(", "list", "(", "shape", "[", "1", ":", "]", ")", "+", "[", "itemsize", "]", ")", "for", "i", "in", "range", "(", "(", "ndim", "-", "2", ")", ",", "(", "-", "1", ")", ",", "(", "-", "1", ")", ")", ":", "strides", "[", "i", "]", "*=", "strides", "[", "(", "i", "+", "1", ")", "]", "else", ":", "strides", "=", "(", "[", "itemsize", "]", "+", "list", "(", "shape", "[", ":", "(", "-", "1", ")", "]", ")", ")", "for", "i", "in", "range", "(", "1", ",", "ndim", ")", ":", "strides", "[", "i", "]", "*=", "strides", "[", "(", "i", "-", "1", ")", "]", "return", "strides" ]
calculate strides of a contiguous array .
train
false
19,637
def getNewRepository(): return ExportRepository()
[ "def", "getNewRepository", "(", ")", ":", "return", "ExportRepository", "(", ")" ]
get new repository .
train
false
19,639
def _objective_func(f, x_full, k_params, alpha, *args): x_params = x_full[:k_params] x_added = x_full[k_params:] return (f(x_params, *args) + (alpha * x_added).sum())
[ "def", "_objective_func", "(", "f", ",", "x_full", ",", "k_params", ",", "alpha", ",", "*", "args", ")", ":", "x_params", "=", "x_full", "[", ":", "k_params", "]", "x_added", "=", "x_full", "[", "k_params", ":", "]", "return", "(", "f", "(", "x_params", ",", "*", "args", ")", "+", "(", "alpha", "*", "x_added", ")", ".", "sum", "(", ")", ")" ]
the regularized objective function .
train
false
19,640
def make_links(parent, selector=None): elem = SubTemplateElement(parent, ('{%s}link' % XMLNS_ATOM), selector=selector) elem.set('rel') elem.set('type') elem.set('href') return elem
[ "def", "make_links", "(", "parent", ",", "selector", "=", "None", ")", ":", "elem", "=", "SubTemplateElement", "(", "parent", ",", "(", "'{%s}link'", "%", "XMLNS_ATOM", ")", ",", "selector", "=", "selector", ")", "elem", ".", "set", "(", "'rel'", ")", "elem", ".", "set", "(", "'type'", ")", "elem", ".", "set", "(", "'href'", ")", "return", "elem" ]
make links using the given traceback .
train
false
19,641
@utils.arg('domain', metavar='<domain>', help=_('DNS domain.')) @deprecated_network def do_dns_delete_domain(cs, args): cs.dns_domains.delete(args.domain)
[ "@", "utils", ".", "arg", "(", "'domain'", ",", "metavar", "=", "'<domain>'", ",", "help", "=", "_", "(", "'DNS domain.'", ")", ")", "@", "deprecated_network", "def", "do_dns_delete_domain", "(", "cs", ",", "args", ")", ":", "cs", ".", "dns_domains", ".", "delete", "(", "args", ".", "domain", ")" ]
delete the specified dns domain .
train
false
19,642
def obfuscateNum(N, mod): d = random.randint(1, mod) left = int((N / d)) right = d remainder = (N % d) return ('(%s*%s+%s)' % (left, right, remainder))
[ "def", "obfuscateNum", "(", "N", ",", "mod", ")", ":", "d", "=", "random", ".", "randint", "(", "1", ",", "mod", ")", "left", "=", "int", "(", "(", "N", "/", "d", ")", ")", "right", "=", "d", "remainder", "=", "(", "N", "%", "d", ")", "return", "(", "'(%s*%s+%s)'", "%", "(", "left", ",", "right", ",", "remainder", ")", ")" ]
take a number and modulus and return an obsucfated form .
train
false
19,643
def status_load(): data = status() if ('LOADPCT' in data): load = data['LOADPCT'].split() if (load[1].lower() == 'percent'): return float(load[0]) return {'Error': 'Load not available.'}
[ "def", "status_load", "(", ")", ":", "data", "=", "status", "(", ")", "if", "(", "'LOADPCT'", "in", "data", ")", ":", "load", "=", "data", "[", "'LOADPCT'", "]", ".", "split", "(", ")", "if", "(", "load", "[", "1", "]", ".", "lower", "(", ")", "==", "'percent'", ")", ":", "return", "float", "(", "load", "[", "0", "]", ")", "return", "{", "'Error'", ":", "'Load not available.'", "}" ]
return load cli example: .
train
true
19,644
def patient(): tablename = 'patient_patient' s3db.table('patient_patient') s3db.configure(tablename, create_next=URL(args=['[id]', 'relative'])) def prep(r): if r.id: s3db.configure('patient_relative', create_next=URL(args=[str(r.id), 'home'])) return True s3.prep = prep def postp(r, output): s3_action_buttons(r, deletable=False) return output s3.postp = postp tabs = [(T('Basic Details'), None), (T('Accompanying Relative'), 'relative'), (T('Home'), 'home')] rheader = (lambda r: patient_rheader(r, tabs=tabs)) output = s3_rest_controller(rheader=rheader) return output
[ "def", "patient", "(", ")", ":", "tablename", "=", "'patient_patient'", "s3db", ".", "table", "(", "'patient_patient'", ")", "s3db", ".", "configure", "(", "tablename", ",", "create_next", "=", "URL", "(", "args", "=", "[", "'[id]'", ",", "'relative'", "]", ")", ")", "def", "prep", "(", "r", ")", ":", "if", "r", ".", "id", ":", "s3db", ".", "configure", "(", "'patient_relative'", ",", "create_next", "=", "URL", "(", "args", "=", "[", "str", "(", "r", ".", "id", ")", ",", "'home'", "]", ")", ")", "return", "True", "s3", ".", "prep", "=", "prep", "def", "postp", "(", "r", ",", "output", ")", ":", "s3_action_buttons", "(", "r", ",", "deletable", "=", "False", ")", "return", "output", "s3", ".", "postp", "=", "postp", "tabs", "=", "[", "(", "T", "(", "'Basic Details'", ")", ",", "None", ")", ",", "(", "T", "(", "'Accompanying Relative'", ")", ",", "'relative'", ")", ",", "(", "T", "(", "'Home'", ")", ",", "'home'", ")", "]", "rheader", "=", "(", "lambda", "r", ":", "patient_rheader", "(", "r", ",", "tabs", "=", "tabs", ")", ")", "output", "=", "s3_rest_controller", "(", "rheader", "=", "rheader", ")", "return", "output" ]
restful crud controller .
train
false
19,649
def get_shell_type(): out = run_shell_command('echo $host.name', shell=True)[0] if (out.strip() == 'ConsoleHost'): return 'powershell' out = run_shell_command('echo $0', shell=True)[0] if (out.strip() == '$0'): return 'cmd' return 'sh'
[ "def", "get_shell_type", "(", ")", ":", "out", "=", "run_shell_command", "(", "'echo $host.name'", ",", "shell", "=", "True", ")", "[", "0", "]", "if", "(", "out", ".", "strip", "(", ")", "==", "'ConsoleHost'", ")", ":", "return", "'powershell'", "out", "=", "run_shell_command", "(", "'echo $0'", ",", "shell", "=", "True", ")", "[", "0", "]", "if", "(", "out", ".", "strip", "(", ")", "==", "'$0'", ")", ":", "return", "'cmd'", "return", "'sh'" ]
finds the current shell type based on the outputs of common pre-defined variables in them .
train
false
19,650
def targets_equal(keys, a, b): for key in keys: if ((key in b) and (a[key] != b[key])): return False return True
[ "def", "targets_equal", "(", "keys", ",", "a", ",", "b", ")", ":", "for", "key", "in", "keys", ":", "if", "(", "(", "key", "in", "b", ")", "and", "(", "a", "[", "key", "]", "!=", "b", "[", "key", "]", ")", ")", ":", "return", "False", "return", "True" ]
method compare two mount targets by specified attributes .
train
false
19,651
def cond(predicate, consequence, alternative=None): if predicate: return consequence else: return alternative
[ "def", "cond", "(", "predicate", ",", "consequence", ",", "alternative", "=", "None", ")", ":", "if", "predicate", ":", "return", "consequence", "else", ":", "return", "alternative" ]
function replacement for if-else to use in expressions .
train
false
19,652
def enclosing_scope(scope): return scope.rpartition(u'.')[0]
[ "def", "enclosing_scope", "(", "scope", ")", ":", "return", "scope", ".", "rpartition", "(", "u'.'", ")", "[", "0", "]" ]
utility function to return the scope immediately enclosing a given scope .
train
false
19,653
def space_eval(space, hp_assignment): space = pyll.as_apply(space) nodes = pyll.toposort(space) memo = {} for node in nodes: if (node.name == 'hyperopt_param'): label = node.arg['label'].eval() if (label in hp_assignment): memo[node] = hp_assignment[label] rval = pyll.rec_eval(space, memo=memo) return rval
[ "def", "space_eval", "(", "space", ",", "hp_assignment", ")", ":", "space", "=", "pyll", ".", "as_apply", "(", "space", ")", "nodes", "=", "pyll", ".", "toposort", "(", "space", ")", "memo", "=", "{", "}", "for", "node", "in", "nodes", ":", "if", "(", "node", ".", "name", "==", "'hyperopt_param'", ")", ":", "label", "=", "node", ".", "arg", "[", "'label'", "]", ".", "eval", "(", ")", "if", "(", "label", "in", "hp_assignment", ")", ":", "memo", "[", "node", "]", "=", "hp_assignment", "[", "label", "]", "rval", "=", "pyll", ".", "rec_eval", "(", "space", ",", "memo", "=", "memo", ")", "return", "rval" ]
compute a point in a search space from a hyperparameter assignment .
train
false
19,655
def ComputeRebalanceSize(mapping, server_id): loc = data_store.DB.Location() if (not os.path.exists(loc)): return 0 if (not os.path.isdir(loc)): return 0 return _RecComputeRebalanceSize(mapping, server_id, loc, '')
[ "def", "ComputeRebalanceSize", "(", "mapping", ",", "server_id", ")", ":", "loc", "=", "data_store", ".", "DB", ".", "Location", "(", ")", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "loc", ")", ")", ":", "return", "0", "if", "(", "not", "os", ".", "path", ".", "isdir", "(", "loc", ")", ")", ":", "return", "0", "return", "_RecComputeRebalanceSize", "(", "mapping", ",", "server_id", ",", "loc", ",", "''", ")" ]
compute size of files that need to be moved .
train
false
19,656
def test_get_init_3(): with make_tempfile(join(TMP_TEST_DIR, '__init__.pyc')): nt.assert_is_none(mp.get_init(TMP_TEST_DIR))
[ "def", "test_get_init_3", "(", ")", ":", "with", "make_tempfile", "(", "join", "(", "TMP_TEST_DIR", ",", "'__init__.pyc'", ")", ")", ":", "nt", ".", "assert_is_none", "(", "mp", ".", "get_init", "(", "TMP_TEST_DIR", ")", ")" ]
get_init cant find __init__ .
train
false
19,659
def _verify_views(): global VERIFIED_VIEWS if (VERIFIED_VIEWS or __opts__.get('couchbase.skip_verify_views', False)): return cb_ = _get_connection() ddoc = {'views': {'jids': {'map': "function (doc, meta) { if (meta.id.indexOf('/') === -1 && doc.load){ emit(meta.id, null) } }"}, 'jid_returns': {'map': "function (doc, meta) { if (meta.id.indexOf('/') > -1){ key_parts = meta.id.split('/'); emit(key_parts[0], key_parts[1]); } }"}}} try: curr_ddoc = cb_.design_get(DESIGN_NAME, use_devmode=False).value if (curr_ddoc['views'] == ddoc['views']): VERIFIED_VIEWS = True return except couchbase.exceptions.HTTPError: pass cb_.design_create(DESIGN_NAME, ddoc, use_devmode=False) VERIFIED_VIEWS = True
[ "def", "_verify_views", "(", ")", ":", "global", "VERIFIED_VIEWS", "if", "(", "VERIFIED_VIEWS", "or", "__opts__", ".", "get", "(", "'couchbase.skip_verify_views'", ",", "False", ")", ")", ":", "return", "cb_", "=", "_get_connection", "(", ")", "ddoc", "=", "{", "'views'", ":", "{", "'jids'", ":", "{", "'map'", ":", "\"function (doc, meta) { if (meta.id.indexOf('/') === -1 && doc.load){ emit(meta.id, null) } }\"", "}", ",", "'jid_returns'", ":", "{", "'map'", ":", "\"function (doc, meta) { if (meta.id.indexOf('/') > -1){ key_parts = meta.id.split('/'); emit(key_parts[0], key_parts[1]); } }\"", "}", "}", "}", "try", ":", "curr_ddoc", "=", "cb_", ".", "design_get", "(", "DESIGN_NAME", ",", "use_devmode", "=", "False", ")", ".", "value", "if", "(", "curr_ddoc", "[", "'views'", "]", "==", "ddoc", "[", "'views'", "]", ")", ":", "VERIFIED_VIEWS", "=", "True", "return", "except", "couchbase", ".", "exceptions", ".", "HTTPError", ":", "pass", "cb_", ".", "design_create", "(", "DESIGN_NAME", ",", "ddoc", ",", "use_devmode", "=", "False", ")", "VERIFIED_VIEWS", "=", "True" ]
verify that you have the views you need .
train
true
19,660
def package_install(name, **kwargs): cmd = ('pkg_install ' + name) if kwargs.get('version', False): cmd += (' ' + kwargs['version']) (out, err) = DETAILS['server'].sendline(cmd) return parse(out)
[ "def", "package_install", "(", "name", ",", "**", "kwargs", ")", ":", "cmd", "=", "(", "'pkg_install '", "+", "name", ")", "if", "kwargs", ".", "get", "(", "'version'", ",", "False", ")", ":", "cmd", "+=", "(", "' '", "+", "kwargs", "[", "'version'", "]", ")", "(", "out", ",", "err", ")", "=", "DETAILS", "[", "'server'", "]", ".", "sendline", "(", "cmd", ")", "return", "parse", "(", "out", ")" ]
install a "package" on the rest server .
train
true
19,661
def _is_device(path): out = __salt__['cmd.run_all']('file -i {0}'.format(path)) _verify_run(out) return (re.split('\\s+', out['stdout'])[1][:(-1)] == 'inode/blockdevice')
[ "def", "_is_device", "(", "path", ")", ":", "out", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "'file -i {0}'", ".", "format", "(", "path", ")", ")", "_verify_run", "(", "out", ")", "return", "(", "re", ".", "split", "(", "'\\\\s+'", ",", "out", "[", "'stdout'", "]", ")", "[", "1", "]", "[", ":", "(", "-", "1", ")", "]", "==", "'inode/blockdevice'", ")" ]
return true if path is a physical device .
train
true
19,662
def sizeof_fmt(num): for x in ['bytes', 'KB', 'MB', 'GB', 'TB']: if (num < 1024.0): return ('%3.1f %s' % (num, x)) num /= 1024.0
[ "def", "sizeof_fmt", "(", "num", ")", ":", "for", "x", "in", "[", "'bytes'", ",", "'KB'", ",", "'MB'", ",", "'GB'", ",", "'TB'", "]", ":", "if", "(", "num", "<", "1024.0", ")", ":", "return", "(", "'%3.1f %s'", "%", "(", "num", ",", "x", ")", ")", "num", "/=", "1024.0" ]
return a human-readable string representation of a filesize arguments: size -- size in bytes .
train
false
19,663
@pytest.fixture(autouse=True, scope='session') def translations_directory(request): from django.conf import settings settings.POOTLE_TRANSLATION_DIRECTORY = tempfile.mkdtemp() def rm_tmp_dir(): shutil.rmtree(settings.POOTLE_TRANSLATION_DIRECTORY) request.addfinalizer(rm_tmp_dir)
[ "@", "pytest", ".", "fixture", "(", "autouse", "=", "True", ",", "scope", "=", "'session'", ")", "def", "translations_directory", "(", "request", ")", ":", "from", "django", ".", "conf", "import", "settings", "settings", ".", "POOTLE_TRANSLATION_DIRECTORY", "=", "tempfile", ".", "mkdtemp", "(", ")", "def", "rm_tmp_dir", "(", ")", ":", "shutil", ".", "rmtree", "(", "settings", ".", "POOTLE_TRANSLATION_DIRECTORY", ")", "request", ".", "addfinalizer", "(", "rm_tmp_dir", ")" ]
used by pootleenv .
train
false
19,664
@pytest.mark.parametrize('parallel', [True, False]) def test_inf_nan(parallel, read_basic): text = dedent(' A\n nan\n +nan\n -nan\n inf\n infinity\n +inf\n +infinity\n -inf\n -infinity\n ') expected = Table({'A': [np.nan, np.nan, np.nan, np.inf, np.inf, np.inf, np.inf, (- np.inf), (- np.inf)]}) table = read_basic(text, parallel=parallel) assert (table['A'].dtype.kind == 'f') assert_table_equal(table, expected)
[ "@", "pytest", ".", "mark", ".", "parametrize", "(", "'parallel'", ",", "[", "True", ",", "False", "]", ")", "def", "test_inf_nan", "(", "parallel", ",", "read_basic", ")", ":", "text", "=", "dedent", "(", "' A\\n nan\\n +nan\\n -nan\\n inf\\n infinity\\n +inf\\n +infinity\\n -inf\\n -infinity\\n '", ")", "expected", "=", "Table", "(", "{", "'A'", ":", "[", "np", ".", "nan", ",", "np", ".", "nan", ",", "np", ".", "nan", ",", "np", ".", "inf", ",", "np", ".", "inf", ",", "np", ".", "inf", ",", "np", ".", "inf", ",", "(", "-", "np", ".", "inf", ")", ",", "(", "-", "np", ".", "inf", ")", "]", "}", ")", "table", "=", "read_basic", "(", "text", ",", "parallel", "=", "parallel", ")", "assert", "(", "table", "[", "'A'", "]", ".", "dtype", ".", "kind", "==", "'f'", ")", "assert_table_equal", "(", "table", ",", "expected", ")" ]
test that inf and nan-like values are correctly parsed on all platforms .
train
false
19,666
def test_tokenize_file(filename, expected): tokens = Tokenizer.tokenize(filename) if (len(tokens) != len(expected)): print 'Tokens and expected are different lengths\n' return False for idx in range(len(expected)): if (tokens[idx]['type'] != expected[idx]): print (('Difference at index: ' + str(idx)) + '\n') print ('Expected: ' + str(expected[idx])) print ('Received: ' + str(tokens[idx])) return False print 'Tokenizer tests pass\n' return True
[ "def", "test_tokenize_file", "(", "filename", ",", "expected", ")", ":", "tokens", "=", "Tokenizer", ".", "tokenize", "(", "filename", ")", "if", "(", "len", "(", "tokens", ")", "!=", "len", "(", "expected", ")", ")", ":", "print", "'Tokens and expected are different lengths\\n'", "return", "False", "for", "idx", "in", "range", "(", "len", "(", "expected", ")", ")", ":", "if", "(", "tokens", "[", "idx", "]", "[", "'type'", "]", "!=", "expected", "[", "idx", "]", ")", ":", "print", "(", "(", "'Difference at index: '", "+", "str", "(", "idx", ")", ")", "+", "'\\n'", ")", "print", "(", "'Expected: '", "+", "str", "(", "expected", "[", "idx", "]", ")", ")", "print", "(", "'Received: '", "+", "str", "(", "tokens", "[", "idx", "]", ")", ")", "return", "False", "print", "'Tokenizer tests pass\\n'", "return", "True" ]
parse the file and verify that the types are what we expect .
train
false
19,668
def enum_formatter(view, value): return value.name
[ "def", "enum_formatter", "(", "view", ",", "value", ")", ":", "return", "value", ".", "name" ]
return the name of the enumerated member .
train
false
19,669
@must_be_valid_project @must_have_permission(ADMIN) def project_generate_private_link_post(auth, node, **kwargs): node_ids = request.json.get('node_ids', []) name = request.json.get('name', '') anonymous = request.json.get('anonymous', False) if (node._id not in node_ids): node_ids.insert(0, node._id) nodes = [Node.load(node_id) for node_id in node_ids] try: new_link = new_private_link(name=name, user=auth.user, nodes=nodes, anonymous=anonymous) except ValidationError as e: raise HTTPError(http.BAD_REQUEST, data=dict(message_long=e.message)) return new_link
[ "@", "must_be_valid_project", "@", "must_have_permission", "(", "ADMIN", ")", "def", "project_generate_private_link_post", "(", "auth", ",", "node", ",", "**", "kwargs", ")", ":", "node_ids", "=", "request", ".", "json", ".", "get", "(", "'node_ids'", ",", "[", "]", ")", "name", "=", "request", ".", "json", ".", "get", "(", "'name'", ",", "''", ")", "anonymous", "=", "request", ".", "json", ".", "get", "(", "'anonymous'", ",", "False", ")", "if", "(", "node", ".", "_id", "not", "in", "node_ids", ")", ":", "node_ids", ".", "insert", "(", "0", ",", "node", ".", "_id", ")", "nodes", "=", "[", "Node", ".", "load", "(", "node_id", ")", "for", "node_id", "in", "node_ids", "]", "try", ":", "new_link", "=", "new_private_link", "(", "name", "=", "name", ",", "user", "=", "auth", ".", "user", ",", "nodes", "=", "nodes", ",", "anonymous", "=", "anonymous", ")", "except", "ValidationError", "as", "e", ":", "raise", "HTTPError", "(", "http", ".", "BAD_REQUEST", ",", "data", "=", "dict", "(", "message_long", "=", "e", ".", "message", ")", ")", "return", "new_link" ]
creata a new private link object and add it to the node and its selected children .
train
false
19,670
def newkeys(nbits, accurate=True, poolsize=1): if (nbits < 16): raise ValueError('Key too small') if (poolsize < 1): raise ValueError(('Pool size (%i) should be >= 1' % poolsize)) if (poolsize > 1): from rsa import parallel import functools getprime_func = functools.partial(parallel.getprime, poolsize=poolsize) else: getprime_func = rsa.prime.getprime (p, q, e, d) = gen_keys(nbits, getprime_func) n = (p * q) return (PublicKey(n, e), PrivateKey(n, e, d, p, q))
[ "def", "newkeys", "(", "nbits", ",", "accurate", "=", "True", ",", "poolsize", "=", "1", ")", ":", "if", "(", "nbits", "<", "16", ")", ":", "raise", "ValueError", "(", "'Key too small'", ")", "if", "(", "poolsize", "<", "1", ")", ":", "raise", "ValueError", "(", "(", "'Pool size (%i) should be >= 1'", "%", "poolsize", ")", ")", "if", "(", "poolsize", ">", "1", ")", ":", "from", "rsa", "import", "parallel", "import", "functools", "getprime_func", "=", "functools", ".", "partial", "(", "parallel", ".", "getprime", ",", "poolsize", "=", "poolsize", ")", "else", ":", "getprime_func", "=", "rsa", ".", "prime", ".", "getprime", "(", "p", ",", "q", ",", "e", ",", "d", ")", "=", "gen_keys", "(", "nbits", ",", "getprime_func", ")", "n", "=", "(", "p", "*", "q", ")", "return", "(", "PublicKey", "(", "n", ",", "e", ")", ",", "PrivateKey", "(", "n", ",", "e", ",", "d", ",", "p", ",", "q", ")", ")" ]
generates public and private keys .
train
false
19,671
def PrintSchemaTree(schema, se_class, se_tree, se_oid, level): se_obj = schema.get_obj(se_class, se_oid) if (se_obj != None): print (('| ' * (level - 1)) + ('+---' * (level > 0))), ', '.join(se_obj.names), ('(%s)' % se_obj.oid) for sub_se_oid in se_tree[se_oid]: print ('| ' * (level + 1)) PrintSchemaTree(schema, se_class, se_tree, sub_se_oid, (level + 1))
[ "def", "PrintSchemaTree", "(", "schema", ",", "se_class", ",", "se_tree", ",", "se_oid", ",", "level", ")", ":", "se_obj", "=", "schema", ".", "get_obj", "(", "se_class", ",", "se_oid", ")", "if", "(", "se_obj", "!=", "None", ")", ":", "print", "(", "(", "'| '", "*", "(", "level", "-", "1", ")", ")", "+", "(", "'+---'", "*", "(", "level", ">", "0", ")", ")", ")", ",", "', '", ".", "join", "(", "se_obj", ".", "names", ")", ",", "(", "'(%s)'", "%", "se_obj", ".", "oid", ")", "for", "sub_se_oid", "in", "se_tree", "[", "se_oid", "]", ":", "print", "(", "'| '", "*", "(", "level", "+", "1", ")", ")", "PrintSchemaTree", "(", "schema", ",", "se_class", ",", "se_tree", ",", "sub_se_oid", ",", "(", "level", "+", "1", ")", ")" ]
ascii text output for console .
train
false
19,672
@pytest.fixture def webengineview(): QtWebEngineWidgets = pytest.importorskip('PyQt5.QtWebEngineWidgets') return QtWebEngineWidgets.QWebEngineView()
[ "@", "pytest", ".", "fixture", "def", "webengineview", "(", ")", ":", "QtWebEngineWidgets", "=", "pytest", ".", "importorskip", "(", "'PyQt5.QtWebEngineWidgets'", ")", "return", "QtWebEngineWidgets", ".", "QWebEngineView", "(", ")" ]
get a qwebengineview if qtwebengine is available .
train
false
19,673
def get_best_layout(n_plots): assert (n_plots > 0) n_rows = 1 n_cols = np.ceil(((n_plots * 1.0) / n_rows)) n_cols = int(n_cols) half_perimeter = (n_cols + 1) max_row = np.sqrt(n_plots) max_row = np.round(max_row) max_row = int(max_row) for l in xrange(1, (max_row + 1)): width = np.ceil(((n_plots * 1.0) / l)) width = int(width) if (half_perimeter >= (width + l)): n_rows = l n_cols = np.ceil(((n_plots * 1.0) / n_rows)) n_cols = int(n_cols) half_perimeter = (n_rows + n_cols) return (n_rows, n_cols)
[ "def", "get_best_layout", "(", "n_plots", ")", ":", "assert", "(", "n_plots", ">", "0", ")", "n_rows", "=", "1", "n_cols", "=", "np", ".", "ceil", "(", "(", "(", "n_plots", "*", "1.0", ")", "/", "n_rows", ")", ")", "n_cols", "=", "int", "(", "n_cols", ")", "half_perimeter", "=", "(", "n_cols", "+", "1", ")", "max_row", "=", "np", ".", "sqrt", "(", "n_plots", ")", "max_row", "=", "np", ".", "round", "(", "max_row", ")", "max_row", "=", "int", "(", "max_row", ")", "for", "l", "in", "xrange", "(", "1", ",", "(", "max_row", "+", "1", ")", ")", ":", "width", "=", "np", ".", "ceil", "(", "(", "(", "n_plots", "*", "1.0", ")", "/", "l", ")", ")", "width", "=", "int", "(", "width", ")", "if", "(", "half_perimeter", ">=", "(", "width", "+", "l", ")", ")", ":", "n_rows", "=", "l", "n_cols", "=", "np", ".", "ceil", "(", "(", "(", "n_plots", "*", "1.0", ")", "/", "n_rows", ")", ")", "n_cols", "=", "int", "(", "n_cols", ")", "half_perimeter", "=", "(", "n_rows", "+", "n_cols", ")", "return", "(", "n_rows", ",", "n_cols", ")" ]
find the best basic layout for a given number of plots .
train
false
19,674
def ForceUnicode(value): value = escape.to_unicode(value) return (_force_unicode_re.search(value) and (not _gsm_re.search(value)))
[ "def", "ForceUnicode", "(", "value", ")", ":", "value", "=", "escape", ".", "to_unicode", "(", "value", ")", "return", "(", "_force_unicode_re", ".", "search", "(", "value", ")", "and", "(", "not", "_gsm_re", ".", "search", "(", "value", ")", ")", ")" ]
returns true if the value contains only gsm chars .
train
false
19,675
def _cast_number(value): m = FLOAT_REGEX.search(value) if (m is not None): return float(value) return long(value)
[ "def", "_cast_number", "(", "value", ")", ":", "m", "=", "FLOAT_REGEX", ".", "search", "(", "value", ")", "if", "(", "m", "is", "not", "None", ")", ":", "return", "float", "(", "value", ")", "return", "long", "(", "value", ")" ]
convert numbers as string to an int or float .
train
false
19,676
def create_conference(user, users, title): id = md5() id.update(str(datetime.now())) id = ((user + '_') + id.hexdigest()) users.append(user) conferences = get_memcached(get_key('conferences')) if (id in conferences.keys()): return get_new_message_for_user(user) conferences[id] = {} conferences[id]['users'] = {} conferences[id]['info'] = {'creator': user, 'title': title, 'creation_date': datetime.now()} set_memcached(get_key('conferences'), conferences) add_users_in_conference(id, user, users) return get_new_message_for_user(user)
[ "def", "create_conference", "(", "user", ",", "users", ",", "title", ")", ":", "id", "=", "md5", "(", ")", "id", ".", "update", "(", "str", "(", "datetime", ".", "now", "(", ")", ")", ")", "id", "=", "(", "(", "user", "+", "'_'", ")", "+", "id", ".", "hexdigest", "(", ")", ")", "users", ".", "append", "(", "user", ")", "conferences", "=", "get_memcached", "(", "get_key", "(", "'conferences'", ")", ")", "if", "(", "id", "in", "conferences", ".", "keys", "(", ")", ")", ":", "return", "get_new_message_for_user", "(", "user", ")", "conferences", "[", "id", "]", "=", "{", "}", "conferences", "[", "id", "]", "[", "'users'", "]", "=", "{", "}", "conferences", "[", "id", "]", "[", "'info'", "]", "=", "{", "'creator'", ":", "user", ",", "'title'", ":", "title", ",", "'creation_date'", ":", "datetime", ".", "now", "(", ")", "}", "set_memcached", "(", "get_key", "(", "'conferences'", ")", ",", "conferences", ")", "add_users_in_conference", "(", "id", ",", "user", ",", "users", ")", "return", "get_new_message_for_user", "(", "user", ")" ]
create conference :type user: basestring :type users: baselist .
train
false
19,677
def current_branch(): head = git.git_path(u'HEAD') try: key = core.stat(head).st_mtime if (_current_branch.key == key): return _current_branch.value except OSError: key = 0 (status, data, err) = git.rev_parse(u'HEAD', symbolic_full_name=True) if (status != 0): data = _read_git_head(head) for refs_prefix in (u'refs/heads/', u'refs/remotes/', u'refs/tags/'): if data.startswith(refs_prefix): value = data[len(refs_prefix):] _current_branch.key = key _current_branch.value = value return value return data
[ "def", "current_branch", "(", ")", ":", "head", "=", "git", ".", "git_path", "(", "u'HEAD'", ")", "try", ":", "key", "=", "core", ".", "stat", "(", "head", ")", ".", "st_mtime", "if", "(", "_current_branch", ".", "key", "==", "key", ")", ":", "return", "_current_branch", ".", "value", "except", "OSError", ":", "key", "=", "0", "(", "status", ",", "data", ",", "err", ")", "=", "git", ".", "rev_parse", "(", "u'HEAD'", ",", "symbolic_full_name", "=", "True", ")", "if", "(", "status", "!=", "0", ")", ":", "data", "=", "_read_git_head", "(", "head", ")", "for", "refs_prefix", "in", "(", "u'refs/heads/'", ",", "u'refs/remotes/'", ",", "u'refs/tags/'", ")", ":", "if", "data", ".", "startswith", "(", "refs_prefix", ")", ":", "value", "=", "data", "[", "len", "(", "refs_prefix", ")", ":", "]", "_current_branch", ".", "key", "=", "key", "_current_branch", ".", "value", "=", "value", "return", "value", "return", "data" ]
return the current branch .
train
false
19,678
def _strcoll(a, b): return ((a > b) - (a < b))
[ "def", "_strcoll", "(", "a", ",", "b", ")", ":", "return", "(", "(", "a", ">", "b", ")", "-", "(", "a", "<", "b", ")", ")" ]
strcoll -> int .
train
false
19,679
def makeStatefulDispatcher(name, template): def dispatcher(self, *args, **kwargs): func = getattr(self, ((('_' + name) + '_') + self._state), None) if (func is None): raise RuntimeError(('%r has no %s method in state %s' % (self, name, self._state))) return func(*args, **kwargs) dispatcher.__doc__ = template.__doc__ return dispatcher
[ "def", "makeStatefulDispatcher", "(", "name", ",", "template", ")", ":", "def", "dispatcher", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", ":", "func", "=", "getattr", "(", "self", ",", "(", "(", "(", "'_'", "+", "name", ")", "+", "'_'", ")", "+", "self", ".", "_state", ")", ",", "None", ")", "if", "(", "func", "is", "None", ")", ":", "raise", "RuntimeError", "(", "(", "'%r has no %s method in state %s'", "%", "(", "self", ",", "name", ",", "self", ".", "_state", ")", ")", ")", "return", "func", "(", "*", "args", ",", "**", "kwargs", ")", "dispatcher", ".", "__doc__", "=", "template", ".", "__doc__", "return", "dispatcher" ]
given a i{dispatch} name and a function .
train
false
19,680
def yaml_load(source, loader=yaml.Loader): def construct_yaml_str(self, node): u'Override the default string handling function to always return Unicode objects.' return self.construct_scalar(node) class Loader(loader, ): u'Define a custom loader to leave the global loader unaltered.' Loader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str) return yaml.load(source, Loader)
[ "def", "yaml_load", "(", "source", ",", "loader", "=", "yaml", ".", "Loader", ")", ":", "def", "construct_yaml_str", "(", "self", ",", "node", ")", ":", "return", "self", ".", "construct_scalar", "(", "node", ")", "class", "Loader", "(", "loader", ",", ")", ":", "Loader", ".", "add_constructor", "(", "u'tag:yaml.org,2002:str'", ",", "construct_yaml_str", ")", "return", "yaml", ".", "load", "(", "source", ",", "Loader", ")" ]
wrap pyyamls loader so we can extend it to suit our needs .
train
true
19,681
def test_alias_magic(): ip = get_ipython() mm = ip.magics_manager ip.run_line_magic('alias_magic', 'timeit_alias timeit') nt.assert_in('timeit_alias', mm.magics['line']) nt.assert_in('timeit_alias', mm.magics['cell']) ip.run_line_magic('alias_magic', '--cell timeit_cell_alias timeit') nt.assert_not_in('timeit_cell_alias', mm.magics['line']) nt.assert_in('timeit_cell_alias', mm.magics['cell']) ip.run_line_magic('alias_magic', '--line env_alias env') nt.assert_equal(ip.run_line_magic('env', ''), ip.run_line_magic('env_alias', ''))
[ "def", "test_alias_magic", "(", ")", ":", "ip", "=", "get_ipython", "(", ")", "mm", "=", "ip", ".", "magics_manager", "ip", ".", "run_line_magic", "(", "'alias_magic'", ",", "'timeit_alias timeit'", ")", "nt", ".", "assert_in", "(", "'timeit_alias'", ",", "mm", ".", "magics", "[", "'line'", "]", ")", "nt", ".", "assert_in", "(", "'timeit_alias'", ",", "mm", ".", "magics", "[", "'cell'", "]", ")", "ip", ".", "run_line_magic", "(", "'alias_magic'", ",", "'--cell timeit_cell_alias timeit'", ")", "nt", ".", "assert_not_in", "(", "'timeit_cell_alias'", ",", "mm", ".", "magics", "[", "'line'", "]", ")", "nt", ".", "assert_in", "(", "'timeit_cell_alias'", ",", "mm", ".", "magics", "[", "'cell'", "]", ")", "ip", ".", "run_line_magic", "(", "'alias_magic'", ",", "'--line env_alias env'", ")", "nt", ".", "assert_equal", "(", "ip", ".", "run_line_magic", "(", "'env'", ",", "''", ")", ",", "ip", ".", "run_line_magic", "(", "'env_alias'", ",", "''", ")", ")" ]
test %alias_magic .
train
false
19,682
def delif(br=None, iface=None): return _os_dispatch('delif', br, iface)
[ "def", "delif", "(", "br", "=", "None", ",", "iface", "=", "None", ")", ":", "return", "_os_dispatch", "(", "'delif'", ",", "br", ",", "iface", ")" ]
removes an interface from a bridge cli example: .
train
false
19,684
def _to_hass_brightness(brightness): return int((brightness * 255))
[ "def", "_to_hass_brightness", "(", "brightness", ")", ":", "return", "int", "(", "(", "brightness", "*", "255", ")", ")" ]
convert percentage to home assistant brightness units .
train
false
19,685
def get_disk_bus_for_disk_dev(virt_type, disk_dev): if disk_dev.startswith('hd'): return 'ide' elif disk_dev.startswith('sd'): if (virt_type == 'xen'): return 'xen' else: return 'scsi' elif disk_dev.startswith('vd'): return 'virtio' elif disk_dev.startswith('fd'): return 'fdc' elif disk_dev.startswith('xvd'): return 'xen' elif disk_dev.startswith('ubd'): return 'uml' else: msg = (_("Unable to determine disk bus for '%s'") % disk_dev[:1]) raise exception.InternalError(msg)
[ "def", "get_disk_bus_for_disk_dev", "(", "virt_type", ",", "disk_dev", ")", ":", "if", "disk_dev", ".", "startswith", "(", "'hd'", ")", ":", "return", "'ide'", "elif", "disk_dev", ".", "startswith", "(", "'sd'", ")", ":", "if", "(", "virt_type", "==", "'xen'", ")", ":", "return", "'xen'", "else", ":", "return", "'scsi'", "elif", "disk_dev", ".", "startswith", "(", "'vd'", ")", ":", "return", "'virtio'", "elif", "disk_dev", ".", "startswith", "(", "'fd'", ")", ":", "return", "'fdc'", "elif", "disk_dev", ".", "startswith", "(", "'xvd'", ")", ":", "return", "'xen'", "elif", "disk_dev", ".", "startswith", "(", "'ubd'", ")", ":", "return", "'uml'", "else", ":", "msg", "=", "(", "_", "(", "\"Unable to determine disk bus for '%s'\"", ")", "%", "disk_dev", "[", ":", "1", "]", ")", "raise", "exception", ".", "InternalError", "(", "msg", ")" ]
determine the disk bus for a disk dev .
train
false
19,686
def _api_watched_now(name, output, kwargs): sabnzbd.dirscanner.dirscan() return report(output)
[ "def", "_api_watched_now", "(", "name", ",", "output", ",", "kwargs", ")", ":", "sabnzbd", ".", "dirscanner", ".", "dirscan", "(", ")", "return", "report", "(", "output", ")" ]
api: accepts output .
train
false
19,687
def expectation(expr, condition=None, numsamples=None, evaluate=True, **kwargs): if (not random_symbols(expr)): return expr if numsamples: return sampling_E(expr, condition, numsamples=numsamples) if (condition is not None): return expectation(given(expr, condition), evaluate=evaluate) if expr.is_Add: return Add(*[expectation(arg, evaluate=evaluate) for arg in expr.args]) result = pspace(expr).integrate(expr) if (evaluate and hasattr(result, 'doit')): return result.doit(**kwargs) else: return result
[ "def", "expectation", "(", "expr", ",", "condition", "=", "None", ",", "numsamples", "=", "None", ",", "evaluate", "=", "True", ",", "**", "kwargs", ")", ":", "if", "(", "not", "random_symbols", "(", "expr", ")", ")", ":", "return", "expr", "if", "numsamples", ":", "return", "sampling_E", "(", "expr", ",", "condition", ",", "numsamples", "=", "numsamples", ")", "if", "(", "condition", "is", "not", "None", ")", ":", "return", "expectation", "(", "given", "(", "expr", ",", "condition", ")", ",", "evaluate", "=", "evaluate", ")", "if", "expr", ".", "is_Add", ":", "return", "Add", "(", "*", "[", "expectation", "(", "arg", ",", "evaluate", "=", "evaluate", ")", "for", "arg", "in", "expr", ".", "args", "]", ")", "result", "=", "pspace", "(", "expr", ")", ".", "integrate", "(", "expr", ")", "if", "(", "evaluate", "and", "hasattr", "(", "result", ",", "'doit'", ")", ")", ":", "return", "result", ".", "doit", "(", "**", "kwargs", ")", "else", ":", "return", "result" ]
returns the expected value of a random expression parameters expr : expr containing randomsymbols the expression of which you want to compute the expectation value given : expr containing randomsymbols a conditional expression .
train
false
19,688
def pad_sequence(sequence, n, pad_left=False, pad_right=False,
                 left_pad_symbol=None, right_pad_symbol=None):
    """Return an iterator over *sequence* padded for n-gram extraction.

    When requested, ``n - 1`` copies of the pad symbol are prepended
    and/or appended, so that every item of the original sequence can
    anchor a full n-gram.
    """
    pad_width = n - 1
    padded = iter(sequence)
    if pad_left:
        padded = chain([left_pad_symbol] * pad_width, padded)
    if pad_right:
        padded = chain(padded, [right_pad_symbol] * pad_width)
    return padded
[ "def", "pad_sequence", "(", "sequence", ",", "n", ",", "pad_left", "=", "False", ",", "pad_right", "=", "False", ",", "left_pad_symbol", "=", "None", ",", "right_pad_symbol", "=", "None", ")", ":", "sequence", "=", "iter", "(", "sequence", ")", "if", "pad_left", ":", "sequence", "=", "chain", "(", "(", "(", "left_pad_symbol", ",", ")", "*", "(", "n", "-", "1", ")", ")", ",", "sequence", ")", "if", "pad_right", ":", "sequence", "=", "chain", "(", "sequence", ",", "(", "(", "right_pad_symbol", ",", ")", "*", "(", "n", "-", "1", ")", ")", ")", "return", "sequence" ]
returns a padded sequence of items before ngram extraction .
train
false
19,689
def _nthroot_solve(p, n, prec):
    """Helper for ``nthroot``: denest ``p**Rational(1, n)`` using the
    minimal polynomial of the root, returning the denested value or
    None when no denesting is found."""
    from sympy.polys.numberfields import _minimal_polynomial_sq
    from sympy.solvers import solve
    # Strip factors of 2 from n by repeatedly denesting a square root.
    while ((n % 2) == 0):
        p = sqrtdenest(sqrt(p))
        n = (n // 2)
    if (n == 1):
        # All the work was done by the square-root denesting above.
        return p
    pn = (p ** Rational(1, n))
    x = Symbol('x')
    f = _minimal_polynomial_sq(p, n, x)
    if (f is None):
        # No minimal polynomial could be built; give up (returns None).
        return None
    sols = solve(f, x)
    for sol in sols:
        # Select the root numerically closest to p**(1/n) at the
        # requested precision, then verify it symbolically.
        if (abs((sol - pn)).n() < (1.0 / (10 ** prec))):
            sol = sqrtdenest(sol)
            if (_mexpand((sol ** n)) == p):
                return sol
[ "def", "_nthroot_solve", "(", "p", ",", "n", ",", "prec", ")", ":", "from", "sympy", ".", "polys", ".", "numberfields", "import", "_minimal_polynomial_sq", "from", "sympy", ".", "solvers", "import", "solve", "while", "(", "(", "n", "%", "2", ")", "==", "0", ")", ":", "p", "=", "sqrtdenest", "(", "sqrt", "(", "p", ")", ")", "n", "=", "(", "n", "//", "2", ")", "if", "(", "n", "==", "1", ")", ":", "return", "p", "pn", "=", "(", "p", "**", "Rational", "(", "1", ",", "n", ")", ")", "x", "=", "Symbol", "(", "'x'", ")", "f", "=", "_minimal_polynomial_sq", "(", "p", ",", "n", ",", "x", ")", "if", "(", "f", "is", "None", ")", ":", "return", "None", "sols", "=", "solve", "(", "f", ",", "x", ")", "for", "sol", "in", "sols", ":", "if", "(", "abs", "(", "(", "sol", "-", "pn", ")", ")", ".", "n", "(", ")", "<", "(", "1.0", "/", "(", "10", "**", "prec", ")", ")", ")", ":", "sol", "=", "sqrtdenest", "(", "sol", ")", "if", "(", "_mexpand", "(", "(", "sol", "**", "n", ")", ")", "==", "p", ")", ":", "return", "sol" ]
helper function for nthroot it denests p**rational using its minimal polynomial .
train
false
19,690
@py.test.mark.parametrize(
    'item_name',
    [item.name for item in six._urllib_error_moved_attributes],
)
def test_move_items_urllib_error(item_name):
    """Ensure that every six.moves.urllib.error attribute loads correctly."""
    # dir() on the lazy module is only reliable on Python >= 2.6.
    if sys.version_info[:2] >= (2, 6):
        assert item_name in dir(six.moves.urllib.error)
    # Accessing the attribute must not raise.
    getattr(six.moves.urllib.error, item_name)
[ "@", "py", ".", "test", ".", "mark", ".", "parametrize", "(", "'item_name'", ",", "[", "item", ".", "name", "for", "item", "in", "six", ".", "_urllib_error_moved_attributes", "]", ")", "def", "test_move_items_urllib_error", "(", "item_name", ")", ":", "if", "(", "sys", ".", "version_info", "[", ":", "2", "]", ">=", "(", "2", ",", "6", ")", ")", ":", "assert", "(", "item_name", "in", "dir", "(", "six", ".", "moves", ".", "urllib", ".", "error", ")", ")", "getattr", "(", "six", ".", "moves", ".", "urllib", ".", "error", ",", "item_name", ")" ]
ensure that everything loads correctly .
train
false
19,691
def _get_cls(name, fallback_cls=conf.raw_layer):
    """Return the class named *name* from this module's globals,
    or *fallback_cls* when no such name exists."""
    namespace = globals()
    return namespace.get(name, fallback_cls)
[ "def", "_get_cls", "(", "name", ",", "fallback_cls", "=", "conf", ".", "raw_layer", ")", ":", "return", "globals", "(", ")", ".", "get", "(", "name", ",", "fallback_cls", ")" ]
returns class named "name" if it exists .
train
false
19,693
def _aberth(f, fp, x0, tol=1e-15, maxiter=50): N = len(x0) x = array(x0, complex) beta = np.empty_like(x0) for iteration in range(maxiter): alpha = ((- f(x)) / fp(x)) for k in range(N): beta[k] = np.sum((1 / (x[k] - x[(k + 1):]))) beta[k] += np.sum((1 / (x[k] - x[:k]))) x += (alpha / (1 + (alpha * beta))) if (not all(np.isfinite(x))): raise RuntimeError('Root-finding calculation failed') if all((abs(alpha) <= tol)): break else: raise Exception('Zeros failed to converge') return x
[ "def", "_aberth", "(", "f", ",", "fp", ",", "x0", ",", "tol", "=", "1e-15", ",", "maxiter", "=", "50", ")", ":", "N", "=", "len", "(", "x0", ")", "x", "=", "array", "(", "x0", ",", "complex", ")", "beta", "=", "np", ".", "empty_like", "(", "x0", ")", "for", "iteration", "in", "range", "(", "maxiter", ")", ":", "alpha", "=", "(", "(", "-", "f", "(", "x", ")", ")", "/", "fp", "(", "x", ")", ")", "for", "k", "in", "range", "(", "N", ")", ":", "beta", "[", "k", "]", "=", "np", ".", "sum", "(", "(", "1", "/", "(", "x", "[", "k", "]", "-", "x", "[", "(", "k", "+", "1", ")", ":", "]", ")", ")", ")", "beta", "[", "k", "]", "+=", "np", ".", "sum", "(", "(", "1", "/", "(", "x", "[", "k", "]", "-", "x", "[", ":", "k", "]", ")", ")", ")", "x", "+=", "(", "alpha", "/", "(", "1", "+", "(", "alpha", "*", "beta", ")", ")", ")", "if", "(", "not", "all", "(", "np", ".", "isfinite", "(", "x", ")", ")", ")", ":", "raise", "RuntimeError", "(", "'Root-finding calculation failed'", ")", "if", "all", "(", "(", "abs", "(", "alpha", ")", "<=", "tol", ")", ")", ":", "break", "else", ":", "raise", "Exception", "(", "'Zeros failed to converge'", ")", "return", "x" ]
given a function f .
train
false
19,694
def find_nearest_pickleable_exception(exc):
    """Walk the MRO of *exc*'s class and return an instance of the nearest
    ancestor that can be constructed from ``exc.args`` and pickled.

    Returns None when no suitable ancestor is found, when the class has
    no usable MRO, or when an unwanted base class is reached first.
    """
    cls = exc.__class__
    getmro_ = getattr(cls, 'mro', None)
    if not getmro_:
        # Old-style class: no mro() method.  Fall back to inspect.getmro
        # if the class at least has bases, otherwise give up.
        if not getattr(cls, '__bases__', ()):
            return None
        getmro_ = lambda: inspect.getmro(cls)
    for supercls in getmro_():
        if supercls in unwanted_base_classes:
            # Reached a base too generic to be a useful substitute.
            return None
        try:
            exc_args = getattr(exc, 'args', [])
            superexc = supercls(*exc_args)
            # Candidate must actually round-trip through pickle.
            pickle.dumps(superexc)
        except Exception:
            # Bug fix: was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt raised during construction
            # or pickling.  Any ordinary failure just means "try the
            # next ancestor".
            pass
        else:
            return superexc
    return None
[ "def", "find_nearest_pickleable_exception", "(", "exc", ")", ":", "cls", "=", "exc", ".", "__class__", "getmro_", "=", "getattr", "(", "cls", ",", "'mro'", ",", "None", ")", "if", "(", "not", "getmro_", ")", ":", "if", "(", "not", "getattr", "(", "cls", ",", "'__bases__'", ",", "(", ")", ")", ")", ":", "return", "getmro_", "=", "(", "lambda", ":", "inspect", ".", "getmro", "(", "cls", ")", ")", "for", "supercls", "in", "getmro_", "(", ")", ":", "if", "(", "supercls", "in", "unwanted_base_classes", ")", ":", "return", "try", ":", "exc_args", "=", "getattr", "(", "exc", ",", "'args'", ",", "[", "]", ")", "superexc", "=", "supercls", "(", "*", "exc_args", ")", "pickle", ".", "dumps", "(", "superexc", ")", "except", ":", "pass", "else", ":", "return", "superexc" ]
with an exception instance .
train
false