id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
23,769
def default_ssl_connection_selector(connection, app_blame, client_hello): return None
[ "def", "default_ssl_connection_selector", "(", "connection", ",", "app_blame", ",", "client_hello", ")", ":", "return", "None" ]
returns a nogotofail .
train
false
23,771
def appproperties(): appprops = makeelement('Properties', nsprefix='ep') appprops = etree.fromstring('<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Properties xmlns="http://schemas.openxmlformats.org/officeDocument/2006/extended-properties" xmlns:vt="http://schemas.openxmlformats.org/officeDocument/2006/docPropsVTypes"></Properties>') props = {'Template': 'Normal.dotm', 'TotalTime': '6', 'Pages': '1', 'Words': '83', 'Characters': '475', 'Application': 'Microsoft Word 12.0.0', 'DocSecurity': '0', 'Lines': '12', 'Paragraphs': '8', 'ScaleCrop': 'false', 'LinksUpToDate': 'false', 'CharactersWithSpaces': '583', 'SharedDoc': 'false', 'HyperlinksChanged': 'false', 'AppVersion': '12.0000'} for prop in props: appprops.append(makeelement(prop, tagtext=props[prop], nsprefix=None)) return appprops
[ "def", "appproperties", "(", ")", ":", "appprops", "=", "makeelement", "(", "'Properties'", ",", "nsprefix", "=", "'ep'", ")", "appprops", "=", "etree", ".", "fromstring", "(", "'<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><Properties xmlns=\"http://schemas.openxmlformats.org/officeDocument/2006/extended-properties\" xmlns:vt=\"http://schemas.openxmlformats.org/officeDocument/2006/docPropsVTypes\"></Properties>'", ")", "props", "=", "{", "'Template'", ":", "'Normal.dotm'", ",", "'TotalTime'", ":", "'6'", ",", "'Pages'", ":", "'1'", ",", "'Words'", ":", "'83'", ",", "'Characters'", ":", "'475'", ",", "'Application'", ":", "'Microsoft Word 12.0.0'", ",", "'DocSecurity'", ":", "'0'", ",", "'Lines'", ":", "'12'", ",", "'Paragraphs'", ":", "'8'", ",", "'ScaleCrop'", ":", "'false'", ",", "'LinksUpToDate'", ":", "'false'", ",", "'CharactersWithSpaces'", ":", "'583'", ",", "'SharedDoc'", ":", "'false'", ",", "'HyperlinksChanged'", ":", "'false'", ",", "'AppVersion'", ":", "'12.0000'", "}", "for", "prop", "in", "props", ":", "appprops", ".", "append", "(", "makeelement", "(", "prop", ",", "tagtext", "=", "props", "[", "prop", "]", ",", "nsprefix", "=", "None", ")", ")", "return", "appprops" ]
create app-specific properties .
train
true
23,772
def request_host(request): url = request.get_full_url() host = _rfc3986.urlsplit(url)[1] if (host is None): host = request.get_header('Host', '') return cut_port_re.sub('', host, 1)
[ "def", "request_host", "(", "request", ")", ":", "url", "=", "request", ".", "get_full_url", "(", ")", "host", "=", "_rfc3986", ".", "urlsplit", "(", "url", ")", "[", "1", "]", "if", "(", "host", "is", "None", ")", ":", "host", "=", "request", ".", "get_header", "(", "'Host'", ",", "''", ")", "return", "cut_port_re", ".", "sub", "(", "''", ",", "host", ",", "1", ")" ]
return request-host .
train
false
23,773
def get_cache_key_counter(bound_method, *args, **kwargs): model = bound_method.im_self ormcache = bound_method.clear_cache.im_self (cache, key0, counter) = ormcache.lru(model) key = (key0 + ormcache.key(model, *args, **kwargs)) return (cache, key, counter)
[ "def", "get_cache_key_counter", "(", "bound_method", ",", "*", "args", ",", "**", "kwargs", ")", ":", "model", "=", "bound_method", ".", "im_self", "ormcache", "=", "bound_method", ".", "clear_cache", ".", "im_self", "(", "cache", ",", "key0", ",", "counter", ")", "=", "ormcache", ".", "lru", "(", "model", ")", "key", "=", "(", "key0", "+", "ormcache", ".", "key", "(", "model", ",", "*", "args", ",", "**", "kwargs", ")", ")", "return", "(", "cache", ",", "key", ",", "counter", ")" ]
return the cache .
train
false
23,774
def sep2tabs(fname, in_place=True, patt='\\s+'): regexp = re.compile(patt) (fd, temp_name) = tempfile.mkstemp() fp = os.fdopen(fd, 'wt') i = None for (i, line) in enumerate(open(fname)): line = line.rstrip('\r\n') elems = regexp.split(line) fp.write(('%s\n' % ' DCTB '.join(elems))) fp.close() if (i is None): i = 0 else: i += 1 if in_place: shutil.move(temp_name, fname) return (i, None) else: return (i, temp_name)
[ "def", "sep2tabs", "(", "fname", ",", "in_place", "=", "True", ",", "patt", "=", "'\\\\s+'", ")", ":", "regexp", "=", "re", ".", "compile", "(", "patt", ")", "(", "fd", ",", "temp_name", ")", "=", "tempfile", ".", "mkstemp", "(", ")", "fp", "=", "os", ".", "fdopen", "(", "fd", ",", "'wt'", ")", "i", "=", "None", "for", "(", "i", ",", "line", ")", "in", "enumerate", "(", "open", "(", "fname", ")", ")", ":", "line", "=", "line", ".", "rstrip", "(", "'\\r\\n'", ")", "elems", "=", "regexp", ".", "split", "(", "line", ")", "fp", ".", "write", "(", "(", "'%s\\n'", "%", "' DCTB '", ".", "join", "(", "elems", ")", ")", ")", "fp", ".", "close", "(", ")", "if", "(", "i", "is", "None", ")", ":", "i", "=", "0", "else", ":", "i", "+=", "1", "if", "in_place", ":", "shutil", ".", "move", "(", "temp_name", ",", "fname", ")", "return", "(", "i", ",", "None", ")", "else", ":", "return", "(", "i", ",", "temp_name", ")" ]
transforms in place a sep separated file to a tab separated one .
train
false
23,775
def _get_conv_weights_bc01(layer): assert isinstance(layer, (MaxoutConvC01B, ConvElemwise)) weights = layer.get_params()[0].get_value() if isinstance(layer, MaxoutConvC01B): c01b = Conv2DSpace(shape=weights.shape[1:3], num_channels=weights.shape[0], axes=('c', 0, 1, 'b')) bc01 = Conv2DSpace(shape=c01b.shape, num_channels=c01b.num_channels, axes=('b', 'c', 0, 1)) weights = c01b.np_format_as(weights, bc01) elif isinstance(layer, ConvElemwise): weights = weights[:, :, ::(-1), ::(-1)] return weights
[ "def", "_get_conv_weights_bc01", "(", "layer", ")", ":", "assert", "isinstance", "(", "layer", ",", "(", "MaxoutConvC01B", ",", "ConvElemwise", ")", ")", "weights", "=", "layer", ".", "get_params", "(", ")", "[", "0", "]", ".", "get_value", "(", ")", "if", "isinstance", "(", "layer", ",", "MaxoutConvC01B", ")", ":", "c01b", "=", "Conv2DSpace", "(", "shape", "=", "weights", ".", "shape", "[", "1", ":", "3", "]", ",", "num_channels", "=", "weights", ".", "shape", "[", "0", "]", ",", "axes", "=", "(", "'c'", ",", "0", ",", "1", ",", "'b'", ")", ")", "bc01", "=", "Conv2DSpace", "(", "shape", "=", "c01b", ".", "shape", ",", "num_channels", "=", "c01b", ".", "num_channels", ",", "axes", "=", "(", "'b'", ",", "'c'", ",", "0", ",", "1", ")", ")", "weights", "=", "c01b", ".", "np_format_as", "(", "weights", ",", "bc01", ")", "elif", "isinstance", "(", "layer", ",", "ConvElemwise", ")", ":", "weights", "=", "weights", "[", ":", ",", ":", ",", ":", ":", "(", "-", "1", ")", ",", ":", ":", "(", "-", "1", ")", "]", "return", "weights" ]
returns a conv .
train
false
23,776
def clean_expired_tokens(opts): serializer = salt.payload.Serial(opts) for (dirpath, dirnames, filenames) in os.walk(opts['token_dir']): for token in filenames: token_path = os.path.join(dirpath, token) with salt.utils.fopen(token_path) as token_file: try: token_data = serializer.loads(token_file.read()) except msgpack.UnpackValueError: os.remove(token_path) return if (('expire' not in token_data) or (token_data.get('expire', 0) < time.time())): try: os.remove(token_path) except (IOError, OSError): pass
[ "def", "clean_expired_tokens", "(", "opts", ")", ":", "serializer", "=", "salt", ".", "payload", ".", "Serial", "(", "opts", ")", "for", "(", "dirpath", ",", "dirnames", ",", "filenames", ")", "in", "os", ".", "walk", "(", "opts", "[", "'token_dir'", "]", ")", ":", "for", "token", "in", "filenames", ":", "token_path", "=", "os", ".", "path", ".", "join", "(", "dirpath", ",", "token", ")", "with", "salt", ".", "utils", ".", "fopen", "(", "token_path", ")", "as", "token_file", ":", "try", ":", "token_data", "=", "serializer", ".", "loads", "(", "token_file", ".", "read", "(", ")", ")", "except", "msgpack", ".", "UnpackValueError", ":", "os", ".", "remove", "(", "token_path", ")", "return", "if", "(", "(", "'expire'", "not", "in", "token_data", ")", "or", "(", "token_data", ".", "get", "(", "'expire'", ",", "0", ")", "<", "time", ".", "time", "(", ")", ")", ")", ":", "try", ":", "os", ".", "remove", "(", "token_path", ")", "except", "(", "IOError", ",", "OSError", ")", ":", "pass" ]
clean expired tokens from the master .
train
false
23,777
def S_ISSOCK(mode): return (S_IFMT(mode) == S_IFSOCK)
[ "def", "S_ISSOCK", "(", "mode", ")", ":", "return", "(", "S_IFMT", "(", "mode", ")", "==", "S_IFSOCK", ")" ]
return true if mode is from a socket .
train
false
23,778
def implicit_application(result, local_dict, global_dict): for step in (_group_parentheses(implicit_application), _apply_functions, _implicit_application): result = step(result, local_dict, global_dict) result = _flatten(result) return result
[ "def", "implicit_application", "(", "result", ",", "local_dict", ",", "global_dict", ")", ":", "for", "step", "in", "(", "_group_parentheses", "(", "implicit_application", ")", ",", "_apply_functions", ",", "_implicit_application", ")", ":", "result", "=", "step", "(", "result", ",", "local_dict", ",", "global_dict", ")", "result", "=", "_flatten", "(", "result", ")", "return", "result" ]
makes parentheses optional in some cases for function calls .
train
false
23,780
def image_list(id=None, profile=None, name=None): g_client = _auth(profile) ret = [] for image in g_client.images.list(): if ((id is None) and (name is None)): _add_image(ret, image) else: if ((id is not None) and (id == image.id)): _add_image(ret, image) return ret if (name == image.name): if ((name in ret) and (CUR_VER < BORON)): return {'result': False, 'comment': 'More than one image with name "{0}"'.format(name)} _add_image(ret, image) log.debug('Returning images: {0}'.format(ret)) return ret
[ "def", "image_list", "(", "id", "=", "None", ",", "profile", "=", "None", ",", "name", "=", "None", ")", ":", "g_client", "=", "_auth", "(", "profile", ")", "ret", "=", "[", "]", "for", "image", "in", "g_client", ".", "images", ".", "list", "(", ")", ":", "if", "(", "(", "id", "is", "None", ")", "and", "(", "name", "is", "None", ")", ")", ":", "_add_image", "(", "ret", ",", "image", ")", "else", ":", "if", "(", "(", "id", "is", "not", "None", ")", "and", "(", "id", "==", "image", ".", "id", ")", ")", ":", "_add_image", "(", "ret", ",", "image", ")", "return", "ret", "if", "(", "name", "==", "image", ".", "name", ")", ":", "if", "(", "(", "name", "in", "ret", ")", "and", "(", "CUR_VER", "<", "BORON", ")", ")", ":", "return", "{", "'result'", ":", "False", ",", "'comment'", ":", "'More than one image with name \"{0}\"'", ".", "format", "(", "name", ")", "}", "_add_image", "(", "ret", ",", "image", ")", "log", ".", "debug", "(", "'Returning images: {0}'", ".", "format", "(", "ret", ")", ")", "return", "ret" ]
return a list of available images if a name is provided .
train
true
23,781
def _convert_nnn_fr(val): word = '' (mod, rem) = ((val % 100), (val // 100)) if (rem > 0): word = (to_19_fr[rem] + ' Cent') if (mod > 0): word += ' ' if (mod > 0): word += _convert_nn_fr(mod) return word
[ "def", "_convert_nnn_fr", "(", "val", ")", ":", "word", "=", "''", "(", "mod", ",", "rem", ")", "=", "(", "(", "val", "%", "100", ")", ",", "(", "val", "//", "100", ")", ")", "if", "(", "rem", ">", "0", ")", ":", "word", "=", "(", "to_19_fr", "[", "rem", "]", "+", "' Cent'", ")", "if", "(", "mod", ">", "0", ")", ":", "word", "+=", "' '", "if", "(", "mod", ">", "0", ")", ":", "word", "+=", "_convert_nn_fr", "(", "mod", ")", "return", "word" ]
convert a value < 1000 to french special cased because it is the level that kicks off the < 100 special case .
train
false
23,783
def promoted(name): ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} if (('@' in name) or ('#' in name)): ret['result'] = False ret['comment'] = 'invalid filesystem or volume name: {0}'.format(name) if ret['result']: if (name in __salt__['zfs.list'](name)): origin = '-' if (not __opts__['test']): origin = __salt__['zfs.get'](name, **{'properties': 'origin', 'fields': 'value'})[name]['origin']['value'] if (origin == '-'): ret['comment'] = '{0} already promoted'.format(name) else: result = {name: 'promoted'} if (not __opts__['test']): result = __salt__['zfs.promote'](name) ret['result'] = ((name in result) and (result[name] == 'promoted')) ret['changes'] = (result if ret['result'] else {}) if ret['result']: ret['comment'] = '{0} was promoted'.format(name) else: ret['comment'] = 'failed to promote {0}'.format(name) if (name in result): ret['comment'] = result[name] else: ret['result'] = False ret['comment'] = 'dataset {0} does not exist'.format(name) return ret
[ "def", "promoted", "(", "name", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", "}", "if", "(", "(", "'@'", "in", "name", ")", "or", "(", "'#'", "in", "name", ")", ")", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'invalid filesystem or volume name: {0}'", ".", "format", "(", "name", ")", "if", "ret", "[", "'result'", "]", ":", "if", "(", "name", "in", "__salt__", "[", "'zfs.list'", "]", "(", "name", ")", ")", ":", "origin", "=", "'-'", "if", "(", "not", "__opts__", "[", "'test'", "]", ")", ":", "origin", "=", "__salt__", "[", "'zfs.get'", "]", "(", "name", ",", "**", "{", "'properties'", ":", "'origin'", ",", "'fields'", ":", "'value'", "}", ")", "[", "name", "]", "[", "'origin'", "]", "[", "'value'", "]", "if", "(", "origin", "==", "'-'", ")", ":", "ret", "[", "'comment'", "]", "=", "'{0} already promoted'", ".", "format", "(", "name", ")", "else", ":", "result", "=", "{", "name", ":", "'promoted'", "}", "if", "(", "not", "__opts__", "[", "'test'", "]", ")", ":", "result", "=", "__salt__", "[", "'zfs.promote'", "]", "(", "name", ")", "ret", "[", "'result'", "]", "=", "(", "(", "name", "in", "result", ")", "and", "(", "result", "[", "name", "]", "==", "'promoted'", ")", ")", "ret", "[", "'changes'", "]", "=", "(", "result", "if", "ret", "[", "'result'", "]", "else", "{", "}", ")", "if", "ret", "[", "'result'", "]", ":", "ret", "[", "'comment'", "]", "=", "'{0} was promoted'", ".", "format", "(", "name", ")", "else", ":", "ret", "[", "'comment'", "]", "=", "'failed to promote {0}'", ".", "format", "(", "name", ")", "if", "(", "name", "in", "result", ")", ":", "ret", "[", "'comment'", "]", "=", "result", "[", "name", "]", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'dataset {0} does not exist'", ".", "format", "(", "name", ")", "return", "ret" ]
ensure a dataset is not a clone name : string name of fileset or volume .
train
false
23,785
def unregister_actions(action_ids): _populate_defaults() for action_id in action_ids: try: action = _all_actions[action_id] except KeyError: raise KeyError((u'%s does not correspond to a registered review request action' % action_id)) action.unregister()
[ "def", "unregister_actions", "(", "action_ids", ")", ":", "_populate_defaults", "(", ")", "for", "action_id", "in", "action_ids", ":", "try", ":", "action", "=", "_all_actions", "[", "action_id", "]", "except", "KeyError", ":", "raise", "KeyError", "(", "(", "u'%s does not correspond to a registered review request action'", "%", "action_id", ")", ")", "action", ".", "unregister", "(", ")" ]
unregister each of the actions corresponding to the given ids .
train
false
23,788
def get_custom_objects(): return _GLOBAL_CUSTOM_OBJECTS
[ "def", "get_custom_objects", "(", ")", ":", "return", "_GLOBAL_CUSTOM_OBJECTS" ]
retrieves a live reference to the global dictionary of custom objects .
train
false
23,790
@require_admin_context def cluster_destroy(context, id): query = _cluster_query(context, id=id) query = query.filter((models.Cluster.num_hosts == 0)) result = query.update(models.Cluster.delete_values(), synchronize_session=False) if (not result): cluster_get(context, id=id) raise exception.ClusterHasHosts(id=id)
[ "@", "require_admin_context", "def", "cluster_destroy", "(", "context", ",", "id", ")", ":", "query", "=", "_cluster_query", "(", "context", ",", "id", "=", "id", ")", "query", "=", "query", ".", "filter", "(", "(", "models", ".", "Cluster", ".", "num_hosts", "==", "0", ")", ")", "result", "=", "query", ".", "update", "(", "models", ".", "Cluster", ".", "delete_values", "(", ")", ",", "synchronize_session", "=", "False", ")", "if", "(", "not", "result", ")", ":", "cluster_get", "(", "context", ",", "id", "=", "id", ")", "raise", "exception", ".", "ClusterHasHosts", "(", "id", "=", "id", ")" ]
destroy the cluster or raise if it does not exist or has hosts .
train
false
23,793
def read_style_table(xml_source): table = {} xmlns = 'http://schemas.openxmlformats.org/spreadsheetml/2006/main' root = fromstring(xml_source) custom_num_formats = parse_custom_num_formats(root, xmlns) builtin_formats = NumberFormat._BUILTIN_FORMATS cell_xfs = root.find(QName(xmlns, 'cellXfs').text) cell_xfs_nodes = cell_xfs.findall(QName(xmlns, 'xf').text) for (index, cell_xfs_node) in enumerate(cell_xfs_nodes): new_style = Style() number_format_id = int(cell_xfs_node.get('numFmtId')) if (number_format_id < 164): new_style.number_format.format_code = builtin_formats.get(number_format_id, 'General') elif (number_format_id in custom_num_formats): new_style.number_format.format_code = custom_num_formats[number_format_id] else: raise MissingNumberFormat(('%s' % number_format_id)) table[index] = new_style return table
[ "def", "read_style_table", "(", "xml_source", ")", ":", "table", "=", "{", "}", "xmlns", "=", "'http://schemas.openxmlformats.org/spreadsheetml/2006/main'", "root", "=", "fromstring", "(", "xml_source", ")", "custom_num_formats", "=", "parse_custom_num_formats", "(", "root", ",", "xmlns", ")", "builtin_formats", "=", "NumberFormat", ".", "_BUILTIN_FORMATS", "cell_xfs", "=", "root", ".", "find", "(", "QName", "(", "xmlns", ",", "'cellXfs'", ")", ".", "text", ")", "cell_xfs_nodes", "=", "cell_xfs", ".", "findall", "(", "QName", "(", "xmlns", ",", "'xf'", ")", ".", "text", ")", "for", "(", "index", ",", "cell_xfs_node", ")", "in", "enumerate", "(", "cell_xfs_nodes", ")", ":", "new_style", "=", "Style", "(", ")", "number_format_id", "=", "int", "(", "cell_xfs_node", ".", "get", "(", "'numFmtId'", ")", ")", "if", "(", "number_format_id", "<", "164", ")", ":", "new_style", ".", "number_format", ".", "format_code", "=", "builtin_formats", ".", "get", "(", "number_format_id", ",", "'General'", ")", "elif", "(", "number_format_id", "in", "custom_num_formats", ")", ":", "new_style", ".", "number_format", ".", "format_code", "=", "custom_num_formats", "[", "number_format_id", "]", "else", ":", "raise", "MissingNumberFormat", "(", "(", "'%s'", "%", "number_format_id", ")", ")", "table", "[", "index", "]", "=", "new_style", "return", "table" ]
read styles from the shared style table .
train
false
23,794
def set_auth_traps_enabled(status=True): vname = 'EnableAuthenticationTraps' current_status = get_auth_traps_enabled() if (bool(status) == current_status): _LOG.debug('%s already contains the provided value.', vname) return True vdata = int(status) __salt__['reg.set_value'](_HKEY, _SNMP_KEY, vname, vdata, 'REG_DWORD') new_status = get_auth_traps_enabled() if (status == new_status): _LOG.debug('Setting %s configured successfully: %s', vname, vdata) return True _LOG.error('Unable to configure %s with value: %s', vname, vdata) return False
[ "def", "set_auth_traps_enabled", "(", "status", "=", "True", ")", ":", "vname", "=", "'EnableAuthenticationTraps'", "current_status", "=", "get_auth_traps_enabled", "(", ")", "if", "(", "bool", "(", "status", ")", "==", "current_status", ")", ":", "_LOG", ".", "debug", "(", "'%s already contains the provided value.'", ",", "vname", ")", "return", "True", "vdata", "=", "int", "(", "status", ")", "__salt__", "[", "'reg.set_value'", "]", "(", "_HKEY", ",", "_SNMP_KEY", ",", "vname", ",", "vdata", ",", "'REG_DWORD'", ")", "new_status", "=", "get_auth_traps_enabled", "(", ")", "if", "(", "status", "==", "new_status", ")", ":", "_LOG", ".", "debug", "(", "'Setting %s configured successfully: %s'", ",", "vname", ",", "vdata", ")", "return", "True", "_LOG", ".", "error", "(", "'Unable to configure %s with value: %s'", ",", "vname", ",", "vdata", ")", "return", "False" ]
manage the sending of authentication traps .
train
true
23,795
def _split_nonparallel_tests(test): ptests = [] stests = [] def is_parallelizable_test_case(test): method_name = test._testMethodName method = getattr(test, method_name) if ((method.__name__ != method_name) and (method.__name__ == 'testFailure')): return False return getattr(test, '_numba_parallel_test_', True) if isinstance(test, unittest.TestSuite): for t in test: (p, s) = _split_nonparallel_tests(t) ptests.extend(p) stests.extend(s) elif is_parallelizable_test_case(test): ptests = [test] else: stests = _flatten_suite(test) return (ptests, stests)
[ "def", "_split_nonparallel_tests", "(", "test", ")", ":", "ptests", "=", "[", "]", "stests", "=", "[", "]", "def", "is_parallelizable_test_case", "(", "test", ")", ":", "method_name", "=", "test", ".", "_testMethodName", "method", "=", "getattr", "(", "test", ",", "method_name", ")", "if", "(", "(", "method", ".", "__name__", "!=", "method_name", ")", "and", "(", "method", ".", "__name__", "==", "'testFailure'", ")", ")", ":", "return", "False", "return", "getattr", "(", "test", ",", "'_numba_parallel_test_'", ",", "True", ")", "if", "isinstance", "(", "test", ",", "unittest", ".", "TestSuite", ")", ":", "for", "t", "in", "test", ":", "(", "p", ",", "s", ")", "=", "_split_nonparallel_tests", "(", "t", ")", "ptests", ".", "extend", "(", "p", ")", "stests", ".", "extend", "(", "s", ")", "elif", "is_parallelizable_test_case", "(", "test", ")", ":", "ptests", "=", "[", "test", "]", "else", ":", "stests", "=", "_flatten_suite", "(", "test", ")", "return", "(", "ptests", ",", "stests", ")" ]
split test suite into parallel and serial tests .
train
false
23,796
def list_meta_fields(): ret = {} (status, result) = _query(action='meta', command='fields') root = ET.fromstring(result) fields = root.getchildren() for field in fields: field_id = None field_ret = {'name': field.text} for item in field.items(): field_ret[item[0]] = item[1] if (item[0] == 'id'): field_id = item[1] ret[field_id] = field_ret return ret
[ "def", "list_meta_fields", "(", ")", ":", "ret", "=", "{", "}", "(", "status", ",", "result", ")", "=", "_query", "(", "action", "=", "'meta'", ",", "command", "=", "'fields'", ")", "root", "=", "ET", ".", "fromstring", "(", "result", ")", "fields", "=", "root", ".", "getchildren", "(", ")", "for", "field", "in", "fields", ":", "field_id", "=", "None", "field_ret", "=", "{", "'name'", ":", "field", ".", "text", "}", "for", "item", "in", "field", ".", "items", "(", ")", ":", "field_ret", "[", "item", "[", "0", "]", "]", "=", "item", "[", "1", "]", "if", "(", "item", "[", "0", "]", "==", "'id'", ")", ":", "field_id", "=", "item", "[", "1", "]", "ret", "[", "field_id", "]", "=", "field_ret", "return", "ret" ]
show all meta data fields for this company .
train
true
23,797
def get_descriptor(dev, desc_size, desc_type, desc_index, wIndex=0): wValue = (desc_index | (desc_type << 8)) bmRequestType = util.build_request_type(util.CTRL_IN, util.CTRL_TYPE_STANDARD, util.CTRL_RECIPIENT_DEVICE) return dev.ctrl_transfer(bmRequestType=bmRequestType, bRequest=6, wValue=wValue, wIndex=wIndex, data_or_wLength=desc_size)
[ "def", "get_descriptor", "(", "dev", ",", "desc_size", ",", "desc_type", ",", "desc_index", ",", "wIndex", "=", "0", ")", ":", "wValue", "=", "(", "desc_index", "|", "(", "desc_type", "<<", "8", ")", ")", "bmRequestType", "=", "util", ".", "build_request_type", "(", "util", ".", "CTRL_IN", ",", "util", ".", "CTRL_TYPE_STANDARD", ",", "util", ".", "CTRL_RECIPIENT_DEVICE", ")", "return", "dev", ".", "ctrl_transfer", "(", "bmRequestType", "=", "bmRequestType", ",", "bRequest", "=", "6", ",", "wValue", "=", "wValue", ",", "wIndex", "=", "wIndex", ",", "data_or_wLength", "=", "desc_size", ")" ]
return the specified descriptor .
train
true
23,798
def make_errordocument(app, global_conf, **kw): map = {} for (status, redir_loc) in kw.items(): try: status = int(status) except ValueError: raise ValueError(('Bad status code: %r' % status)) map[status] = redir_loc forwarder = forward(app, map) return forwarder
[ "def", "make_errordocument", "(", "app", ",", "global_conf", ",", "**", "kw", ")", ":", "map", "=", "{", "}", "for", "(", "status", ",", "redir_loc", ")", "in", "kw", ".", "items", "(", ")", ":", "try", ":", "status", "=", "int", "(", "status", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "(", "'Bad status code: %r'", "%", "status", ")", ")", "map", "[", "status", "]", "=", "redir_loc", "forwarder", "=", "forward", "(", "app", ",", "map", ")", "return", "forwarder" ]
paste deploy entry point to create a error document wrapper .
train
false
23,799
def init(mpstate): return SerialModule(mpstate)
[ "def", "init", "(", "mpstate", ")", ":", "return", "SerialModule", "(", "mpstate", ")" ]
initialise module .
train
false
23,800
def _on_raw(func_name): def wrapped(self, *args, **kwargs): args = list(args) try: string = args.pop(0) if hasattr(string, '_raw_string'): args.insert(0, string.raw()) else: args.insert(0, string) except IndexError: pass result = getattr(self._raw_string, func_name)(*args, **kwargs) if isinstance(result, basestring): return ANSIString(result, decoded=True) return result return wrapped
[ "def", "_on_raw", "(", "func_name", ")", ":", "def", "wrapped", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", ":", "args", "=", "list", "(", "args", ")", "try", ":", "string", "=", "args", ".", "pop", "(", "0", ")", "if", "hasattr", "(", "string", ",", "'_raw_string'", ")", ":", "args", ".", "insert", "(", "0", ",", "string", ".", "raw", "(", ")", ")", "else", ":", "args", ".", "insert", "(", "0", ",", "string", ")", "except", "IndexError", ":", "pass", "result", "=", "getattr", "(", "self", ".", "_raw_string", ",", "func_name", ")", "(", "*", "args", ",", "**", "kwargs", ")", "if", "isinstance", "(", "result", ",", "basestring", ")", ":", "return", "ANSIString", "(", "result", ",", "decoded", "=", "True", ")", "return", "result", "return", "wrapped" ]
like query_super .
train
false
23,802
@cache_permission def can_edit_project(user, project): return check_permission(user, project, 'trans.change_project')
[ "@", "cache_permission", "def", "can_edit_project", "(", "user", ",", "project", ")", ":", "return", "check_permission", "(", "user", ",", "project", ",", "'trans.change_project'", ")" ]
checks whether user can edit given project .
train
false
23,804
@utils.arg('snapshot_id', metavar='<snapshot-id>', help='ID of the snapshot to delete.') @utils.service_type('monitor') def do_snapshot_delete(cs, args): snapshot = _find_monitor_snapshot(cs, args.snapshot_id) snapshot.delete()
[ "@", "utils", ".", "arg", "(", "'snapshot_id'", ",", "metavar", "=", "'<snapshot-id>'", ",", "help", "=", "'ID of the snapshot to delete.'", ")", "@", "utils", ".", "service_type", "(", "'monitor'", ")", "def", "do_snapshot_delete", "(", "cs", ",", "args", ")", ":", "snapshot", "=", "_find_monitor_snapshot", "(", "cs", ",", "args", ".", "snapshot_id", ")", "snapshot", ".", "delete", "(", ")" ]
remove a snapshot .
train
false
23,805
def message_url_path(course_key, access_point): return RestrictedCourse.message_url_path(course_key, access_point)
[ "def", "message_url_path", "(", "course_key", ",", "access_point", ")", ":", "return", "RestrictedCourse", ".", "message_url_path", "(", "course_key", ",", "access_point", ")" ]
determine the url path for the message explaining why the user was blocked .
train
false
23,806
def test_one_image_peak(): image = np.ones((5, 5)) image[(2, 2)] = 2 mask = (np.ones((5, 5)) * 3) assert_close(reconstruction(image, mask), 2)
[ "def", "test_one_image_peak", "(", ")", ":", "image", "=", "np", ".", "ones", "(", "(", "5", ",", "5", ")", ")", "image", "[", "(", "2", ",", "2", ")", "]", "=", "2", "mask", "=", "(", "np", ".", "ones", "(", "(", "5", ",", "5", ")", ")", "*", "3", ")", "assert_close", "(", "reconstruction", "(", "image", ",", "mask", ")", ",", "2", ")" ]
test reconstruction with one peak pixel .
train
false
23,809
def copyFile(srcFile, destFile): try: shutil.copyfile(srcFile, destFile) shutil.copymode(srcFile, destFile) except OSError as e: raise
[ "def", "copyFile", "(", "srcFile", ",", "destFile", ")", ":", "try", ":", "shutil", ".", "copyfile", "(", "srcFile", ",", "destFile", ")", "shutil", ".", "copymode", "(", "srcFile", ",", "destFile", ")", "except", "OSError", "as", "e", ":", "raise" ]
copy a file from source to destination .
train
false
23,811
def _sizeof_fmt(num): for unit in ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB']: if (num < 1024.0): return '{0:3.1f} {1}'.format(num, unit) num /= 1024.0
[ "def", "_sizeof_fmt", "(", "num", ")", ":", "for", "unit", "in", "[", "'bytes'", ",", "'KB'", ",", "'MB'", ",", "'GB'", ",", "'TB'", ",", "'PB'", "]", ":", "if", "(", "num", "<", "1024.0", ")", ":", "return", "'{0:3.1f} {1}'", ".", "format", "(", "num", ",", "unit", ")", "num", "/=", "1024.0" ]
return disk format size data .
train
false
23,812
def _db_documents_for(locale, topics=None, products=None): qs = Document.objects.filter(locale=locale, is_archived=False, current_revision__isnull=False, category__in=settings.IA_DEFAULT_CATEGORIES) for topic in (topics or []): qs = qs.filter(topics=topic) for product in (products or []): qs = qs.filter(products=product) doc_dicts = [] for d in qs.distinct(): doc_dicts.append(dict(id=d.id, document_title=d.title, url=d.get_absolute_url(), document_parent_id=d.parent_id, document_summary=d.current_revision.summary)) return doc_dicts
[ "def", "_db_documents_for", "(", "locale", ",", "topics", "=", "None", ",", "products", "=", "None", ")", ":", "qs", "=", "Document", ".", "objects", ".", "filter", "(", "locale", "=", "locale", ",", "is_archived", "=", "False", ",", "current_revision__isnull", "=", "False", ",", "category__in", "=", "settings", ".", "IA_DEFAULT_CATEGORIES", ")", "for", "topic", "in", "(", "topics", "or", "[", "]", ")", ":", "qs", "=", "qs", ".", "filter", "(", "topics", "=", "topic", ")", "for", "product", "in", "(", "products", "or", "[", "]", ")", ":", "qs", "=", "qs", ".", "filter", "(", "products", "=", "product", ")", "doc_dicts", "=", "[", "]", "for", "d", "in", "qs", ".", "distinct", "(", ")", ":", "doc_dicts", ".", "append", "(", "dict", "(", "id", "=", "d", ".", "id", ",", "document_title", "=", "d", ".", "title", ",", "url", "=", "d", ".", "get_absolute_url", "(", ")", ",", "document_parent_id", "=", "d", ".", "parent_id", ",", "document_summary", "=", "d", ".", "current_revision", ".", "summary", ")", ")", "return", "doc_dicts" ]
db implementation of documents_for .
train
false
23,814
def cleanedUpGenericNetwork(original_network):
    """Perform a few cleanup routines on a generic network.

    Returns a new NetParameter: Input layers are removed, and data /
    inner-product layers are sanity-checked with assertions.
    """
    network = caffe_pb2.NetParameter()
    network.CopyFrom(original_network)
    # Iterate indices in reverse so deleting an Input layer does not
    # shift later elements past the cursor (the original deleted while
    # iterating forward with enumerate, skipping the next layer).
    for i in reversed(range(len(network.layer))):
        layer = network.layer[i]
        if ('Data' in layer.type):
            assert (layer.type in ['Data']), ('Unsupported data layer type %s' % layer.type)
        elif (layer.type == 'Input'):
            del network.layer[i]
        elif (layer.type == 'InnerProduct'):
            assert layer.inner_product_param.HasField('num_output'), ("Don't leave inner_product_param.num_output unset for generic networks (layer %s)" % layer.name)
    return network
[ "def", "cleanedUpGenericNetwork", "(", "original_network", ")", ":", "network", "=", "caffe_pb2", ".", "NetParameter", "(", ")", "network", ".", "CopyFrom", "(", "original_network", ")", "for", "(", "i", ",", "layer", ")", "in", "enumerate", "(", "network", ".", "layer", ")", ":", "if", "(", "'Data'", "in", "layer", ".", "type", ")", ":", "assert", "(", "layer", ".", "type", "in", "[", "'Data'", "]", ")", ",", "(", "'Unsupported data layer type %s'", "%", "layer", ".", "type", ")", "elif", "(", "layer", ".", "type", "==", "'Input'", ")", ":", "del", "network", ".", "layer", "[", "i", "]", "elif", "(", "layer", ".", "type", "==", "'InnerProduct'", ")", ":", "assert", "layer", ".", "inner_product_param", ".", "HasField", "(", "'num_output'", ")", ",", "(", "\"Don't leave inner_product_param.num_output unset for generic networks (layer %s)\"", "%", "layer", ".", "name", ")", "return", "network" ]
perform a few cleanup routines on a generic network returns a new netparameter .
train
false
23,815
def serialize_query_with_map_builtin_function(test, serial, fcn):
    """Serialize a /compute query mapping *fcn* over the species column.

    Posts the serialized expression tree to the test server and returns
    (expected_result, server_result) for the caller to compare.
    """
    table = symbol('t', discover(iris))
    expr = table.species.map(fcn, 'int')
    payload = serial.dumps({'expr': to_tree(expr)})
    response = test.post('/compute', data=payload, headers=mimetype(serial))
    assert ('OK' in response.status)
    body = serial.loads(response.data)
    server_result = serial.data_loads(body['data'])
    expected = compute(expr, {table: iris}, return_type=list)
    return (expected, server_result)
[ "def", "serialize_query_with_map_builtin_function", "(", "test", ",", "serial", ",", "fcn", ")", ":", "t", "=", "symbol", "(", "'t'", ",", "discover", "(", "iris", ")", ")", "expr", "=", "t", ".", "species", ".", "map", "(", "fcn", ",", "'int'", ")", "query", "=", "{", "'expr'", ":", "to_tree", "(", "expr", ")", "}", "response", "=", "test", ".", "post", "(", "'/compute'", ",", "data", "=", "serial", ".", "dumps", "(", "query", ")", ",", "headers", "=", "mimetype", "(", "serial", ")", ")", "assert", "(", "'OK'", "in", "response", ".", "status", ")", "respdata", "=", "serial", ".", "loads", "(", "response", ".", "data", ")", "result", "=", "serial", ".", "data_loads", "(", "respdata", "[", "'data'", "]", ")", "exp_res", "=", "compute", "(", "expr", ",", "{", "t", ":", "iris", "}", ",", "return_type", "=", "list", ")", "return", "(", "exp_res", ",", "result", ")" ]
serialize a query that invokes the map operation using a builtin function return the result of the post operation along with expected result .
train
false
23,817
def checks_list():
    """List the URLs checked by uptime.

    Queries the uptime application's REST API and returns the ``url``
    field of every configured check.
    """
    base_url = _get_application_url()
    log.debug('[uptime] get checks')
    response = requests.get('{0}/api/checks'.format(base_url))
    return [check['url'] for check in response.json()]
[ "def", "checks_list", "(", ")", ":", "application_url", "=", "_get_application_url", "(", ")", "log", ".", "debug", "(", "'[uptime] get checks'", ")", "jcontent", "=", "requests", ".", "get", "(", "'{0}/api/checks'", ".", "format", "(", "application_url", ")", ")", ".", "json", "(", ")", "return", "[", "x", "[", "'url'", "]", "for", "x", "in", "jcontent", "]" ]
list url checked by uptime cli example: .
train
true
23,818
def _categories_level(keys):
    """Return the ordered distinct values seen at each level of the keys.

    Uses an OrderedDict as a simple ordered set so the first-seen order
    of each category value is preserved.
    """
    levels = []
    for level_values in zip(*keys):
        ordered_unique = OrderedDict((value, None) for value in _tuplify(level_values))
        levels.append(list(ordered_unique))
    return levels
[ "def", "_categories_level", "(", "keys", ")", ":", "res", "=", "[", "]", "for", "i", "in", "zip", "(", "*", "keys", ")", ":", "tuplefied", "=", "_tuplify", "(", "i", ")", "res", ".", "append", "(", "list", "(", "OrderedDict", "(", "[", "(", "j", ",", "None", ")", "for", "j", "in", "tuplefied", "]", ")", ")", ")", "return", "res" ]
use the ordered dict to implement a simple ordered set return each level of each category [[key_1_level_1 .
train
false
23,819
def _get_changed_files():
    """Return the set of files changed for this pull request / push.

    Returns None when the ci_diff_helper module or its configuration is
    unavailable (e.g. when running outside CI).
    """
    if not ci_diff_helper:
        return None
    try:
        config = ci_diff_helper.get_config()
    except OSError:
        return None
    changed = ci_diff_helper.get_changed_files('HEAD', config.base)
    # Normalize to the './relative/path' form used by callers.
    return {'./{}'.format(path) for path in changed}
[ "def", "_get_changed_files", "(", ")", ":", "if", "(", "not", "ci_diff_helper", ")", ":", "return", "None", "try", ":", "config", "=", "ci_diff_helper", ".", "get_config", "(", ")", "except", "OSError", ":", "return", "None", "changed_files", "=", "ci_diff_helper", ".", "get_changed_files", "(", "'HEAD'", ",", "config", ".", "base", ")", "changed_files", "=", "set", "(", "[", "'./{}'", ".", "format", "(", "filename", ")", "for", "filename", "in", "changed_files", "]", ")", "return", "changed_files" ]
returns a list of files changed for this pull request / push .
train
false
23,820
def send_file(name, data):
    """Send *data* under *name* over a netlog file channel.

    The channel is always closed, even when the send raises (the
    original leaked the socket on error).
    """
    nf = NetlogFile(name)
    try:
        nf.sock.sendall(data)
    finally:
        nf.close()
[ "def", "send_file", "(", "name", ",", "data", ")", ":", "nf", "=", "NetlogFile", "(", "name", ")", "nf", ".", "sock", ".", "sendall", "(", "data", ")", "nf", ".", "close", "(", ")" ]
send a file-like object to the request output .
train
false
23,821
def MERGE_SQUARES(writer, segments):
    """Alternative merge policy similar to Lucene's.

    Buckets segments by decimal order of magnitude of their document
    count and, whenever a bucket collects 10 or more segments, merges
    them into the writer and removes them from ``segments``.
    """
    from whoosh.filedb.filereading import SegmentReader
    # Pair each segment with its total document count.
    sizedsegs = [(s.doc_count_all(), s) for s in segments]
    tomerge = []
    for size in (10, 100, 1000, 10000, 100000):
        # Segments within one order of magnitude below `size`.
        # NOTE(review): the `size - 1` upper bound excludes counts of
        # exactly size-1 — confirm whether that is intentional.
        smaller = [seg for (segsize, seg) in sizedsegs if ((segsize < (size - 1)) and (segsize >= (size // 10)))]
        if (len(smaller) >= 10):
            tomerge.extend(smaller)
            for seg in smaller:
                segments.remove(seg)
    # Feed every selected segment's contents back into the writer.
    for seg in tomerge:
        reader = SegmentReader(writer.storage, writer.schema, seg)
        writer.add_reader(reader)
        reader.close()
    return segments
[ "def", "MERGE_SQUARES", "(", "writer", ",", "segments", ")", ":", "from", "whoosh", ".", "filedb", ".", "filereading", "import", "SegmentReader", "sizedsegs", "=", "[", "(", "s", ".", "doc_count_all", "(", ")", ",", "s", ")", "for", "s", "in", "segments", "]", "tomerge", "=", "[", "]", "for", "size", "in", "(", "10", ",", "100", ",", "1000", ",", "10000", ",", "100000", ")", ":", "smaller", "=", "[", "seg", "for", "(", "segsize", ",", "seg", ")", "in", "sizedsegs", "if", "(", "(", "segsize", "<", "(", "size", "-", "1", ")", ")", "and", "(", "segsize", ">=", "(", "size", "//", "10", ")", ")", ")", "]", "if", "(", "len", "(", "smaller", ")", ">=", "10", ")", ":", "tomerge", ".", "extend", "(", "smaller", ")", "for", "seg", "in", "smaller", ":", "segments", ".", "remove", "(", "seg", ")", "for", "seg", "in", "tomerge", ":", "reader", "=", "SegmentReader", "(", "writer", ".", "storage", ",", "writer", ".", "schema", ",", "seg", ")", "writer", ".", "add_reader", "(", "reader", ")", "reader", ".", "close", "(", ")", "return", "segments" ]
this is an alternative merge policy similar to lucenes .
train
false
23,822
def FITSFactory(image_ext):
    """Load an image extension from a FITS file and return a NumPy array.

    Parameters
    ----------
    image_ext : tuple
        (filename, extension_number) identifying the HDU to load.

    Raises
    ------
    TypeError / ValueError for malformed arguments; RuntimeError when
    the extension carries no data.
    """
    if (not isinstance(image_ext, tuple)):
        raise TypeError('Expected a tuple')
    if (len(image_ext) != 2):
        raise ValueError('Expected a tuple of length 2')
    filename, extnum = image_ext
    # isinstance instead of `type(...) is`: also accepts subclasses.
    if ((not isinstance(filename, str)) or (not isinstance(extnum, int))):
        raise ValueError('Expected a (filename, extension) tuple')
    hdulist = pyfits.open(filename)
    try:
        # Access the data before closing; try/finally guarantees the
        # file is closed even if the extension index is invalid (the
        # original leaked the handle in that case).
        data = hdulist[extnum].data
    finally:
        hdulist.close()
    if (data is None):
        raise RuntimeError(('Extension %d of %s has no data' % (extnum, filename)))
    return data
[ "def", "FITSFactory", "(", "image_ext", ")", ":", "if", "(", "not", "isinstance", "(", "image_ext", ",", "tuple", ")", ")", ":", "raise", "TypeError", "(", "'Expected a tuple'", ")", "if", "(", "len", "(", "image_ext", ")", "!=", "2", ")", ":", "raise", "ValueError", "(", "'Expected a tuple of length 2'", ")", "filename", "=", "image_ext", "[", "0", "]", "extnum", "=", "image_ext", "[", "1", "]", "if", "(", "(", "type", "(", "filename", ")", "is", "not", "str", ")", "or", "(", "type", "(", "extnum", ")", "is", "not", "int", ")", ")", ":", "raise", "ValueError", "(", "'Expected a (filename, extension) tuple'", ")", "hdulist", "=", "pyfits", ".", "open", "(", "filename", ")", "data", "=", "hdulist", "[", "extnum", "]", ".", "data", "hdulist", ".", "close", "(", ")", "if", "(", "data", "is", "None", ")", ":", "raise", "RuntimeError", "(", "(", "'Extension %d of %s has no data'", "%", "(", "extnum", ",", "filename", ")", ")", ")", "return", "data" ]
load an image extension from a fits file and return a numpy array parameters image_ext : tuple fits extension to load .
train
false
23,823
def relevent_issue(issue, after):
    """Return a truthy value iff this issue should appear in the changelog.

    Preserves short-circuit semantics: the first falsy check result (or
    the final section value) is returned unchanged.
    """
    result = closed_issue(issue, after)
    if result:
        result = issue_completed(issue)
    if result:
        result = issue_section(issue)
    return result
[ "def", "relevent_issue", "(", "issue", ",", "after", ")", ":", "return", "(", "closed_issue", "(", "issue", ",", "after", ")", "and", "issue_completed", "(", "issue", ")", "and", "issue_section", "(", "issue", ")", ")" ]
returns true iff this issue is something we should show in the changelog .
train
true
23,824
def parse_blob_info(field_storage):
    """Parse a BlobInfo record from a file-upload ``field_storage``.

    Returns None when no field is given; raises BlobInfoParseError when
    a required header is missing or malformed.
    """
    if (field_storage is None):
        return None
    field_name = field_storage.name

    def get_value(dct, name):
        # Fetch a required entry, raising a parse error naming the field.
        value = dct.get(name, None)
        if (value is None):
            raise BlobInfoParseError(('Field %s has no %s.' % (field_name, name)))
        return value
    filename = get_value(field_storage.disposition_options, 'filename')
    blob_key_str = get_value(field_storage.type_options, 'blob-key')
    # NOTE(review): blob_key is constructed but only the string form is
    # used below — presumably the construction validates the key; confirm.
    blob_key = BlobKey(blob_key_str)
    # The upload body is itself a MIME message carrying blob metadata.
    upload_content = email.message_from_file(field_storage.file)
    content_type = get_value(upload_content, 'content-type')
    size = get_value(upload_content, 'content-length')
    creation_string = get_value(upload_content, UPLOAD_INFO_CREATION_HEADER)
    md5_hash_encoded = get_value(upload_content, 'content-md5')
    md5_hash = base64.urlsafe_b64decode(md5_hash_encoded)
    try:
        size = int(size)
    except (TypeError, ValueError):
        raise BlobInfoParseError(('%s is not a valid value for %s size.' % (size, field_name)))
    try:
        creation = blobstore._parse_creation(creation_string, field_name)
    except blobstore._CreationFormatError as err:
        raise BlobInfoParseError(str(err))
    return BlobInfo(id=blob_key_str, content_type=content_type, creation=creation, filename=filename, size=size, md5_hash=md5_hash)
[ "def", "parse_blob_info", "(", "field_storage", ")", ":", "if", "(", "field_storage", "is", "None", ")", ":", "return", "None", "field_name", "=", "field_storage", ".", "name", "def", "get_value", "(", "dct", ",", "name", ")", ":", "value", "=", "dct", ".", "get", "(", "name", ",", "None", ")", "if", "(", "value", "is", "None", ")", ":", "raise", "BlobInfoParseError", "(", "(", "'Field %s has no %s.'", "%", "(", "field_name", ",", "name", ")", ")", ")", "return", "value", "filename", "=", "get_value", "(", "field_storage", ".", "disposition_options", ",", "'filename'", ")", "blob_key_str", "=", "get_value", "(", "field_storage", ".", "type_options", ",", "'blob-key'", ")", "blob_key", "=", "BlobKey", "(", "blob_key_str", ")", "upload_content", "=", "email", ".", "message_from_file", "(", "field_storage", ".", "file", ")", "content_type", "=", "get_value", "(", "upload_content", ",", "'content-type'", ")", "size", "=", "get_value", "(", "upload_content", ",", "'content-length'", ")", "creation_string", "=", "get_value", "(", "upload_content", ",", "UPLOAD_INFO_CREATION_HEADER", ")", "md5_hash_encoded", "=", "get_value", "(", "upload_content", ",", "'content-md5'", ")", "md5_hash", "=", "base64", ".", "urlsafe_b64decode", "(", "md5_hash_encoded", ")", "try", ":", "size", "=", "int", "(", "size", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "BlobInfoParseError", "(", "(", "'%s is not a valid value for %s size.'", "%", "(", "size", ",", "field_name", ")", ")", ")", "try", ":", "creation", "=", "blobstore", ".", "_parse_creation", "(", "creation_string", ",", "field_name", ")", "except", "blobstore", ".", "_CreationFormatError", "as", "err", ":", "raise", "BlobInfoParseError", "(", "str", "(", "err", ")", ")", "return", "BlobInfo", "(", "id", "=", "blob_key_str", ",", "content_type", "=", "content_type", ",", "creation", "=", "creation", ",", "filename", "=", "filename", ",", "size", "=", "size", ",", "md5_hash", "=", "md5_hash", ")" ]
parse a blobinfo record from file upload field_storage .
train
true
23,825
def _view_on_get(request):
    """Return whether the user can view this page via a plain GET."""
    if request.method != 'GET':
        return False
    return acl.action_allowed(request, 'ReviewerTools', 'View')
[ "def", "_view_on_get", "(", "request", ")", ":", "return", "(", "(", "request", ".", "method", "==", "'GET'", ")", "and", "acl", ".", "action_allowed", "(", "request", ",", "'ReviewerTools'", ",", "'View'", ")", ")" ]
return true if the user can access this page .
train
false
23,827
def unregister_account_page_class(page_cls):
    """Unregister a previously registered account page class.

    Deprecated shim: emits a DeprecationWarning and delegates to the
    AccountPage registry.
    """
    message = (u'unregister_account_page_class is deprecated in Review Board '
               u'3.0 and will be removed; use AccountPage.registry.unregister '
               u'instead.')
    warn(message, DeprecationWarning)
    AccountPage.registry.unregister(page_cls)
[ "def", "unregister_account_page_class", "(", "page_cls", ")", ":", "warn", "(", "u'unregister_account_page_class is deprecated in Review Board 3.0 and will be removed; use AccountPage.registry.unregister instead.'", ",", "DeprecationWarning", ")", "AccountPage", ".", "registry", ".", "unregister", "(", "page_cls", ")" ]
unregister a previously registered account page class .
train
false
23,828
def get_dpi(raise_error=True):
    """Get screen DPI from the OS (macOS, via Quartz).

    Parameters
    ----------
    raise_error : bool
        Unused here; presumably kept for API compatibility with other
        platform implementations — confirm against siblings.

    Returns the mean of horizontal and vertical pixels-per-mm on the
    main display, converted to dots per inch (25.4 mm per inch).
    """
    display = quartz.CGMainDisplayID()
    # Physical size in millimetres and bounds in pixels of the display.
    mm = quartz.CGDisplayScreenSize(display)
    px = quartz.CGDisplayBounds(display).size
    return ((((px.width / mm.width) + (px.height / mm.height)) * 0.5) * 25.4)
[ "def", "get_dpi", "(", "raise_error", "=", "True", ")", ":", "display", "=", "quartz", ".", "CGMainDisplayID", "(", ")", "mm", "=", "quartz", ".", "CGDisplayScreenSize", "(", "display", ")", "px", "=", "quartz", ".", "CGDisplayBounds", "(", "display", ")", ".", "size", "return", "(", "(", "(", "(", "px", ".", "width", "/", "mm", ".", "width", ")", "+", "(", "px", ".", "height", "/", "mm", ".", "height", ")", ")", "*", "0.5", ")", "*", "25.4", ")" ]
get screen dpi from the os parameters raise_error : bool if true .
train
true
23,829
def get_random_sequence(length, exclude=None):
    """Generate a random nucleotide sequence of *length* characters.

    The sequence is built so that no (K-1)-mer — or its reverse
    complement — repeats, and none collides with windows of the
    optional *exclude* sequence. Relies on module-level K and revcomp().
    """
    seen = set()

    def add_seen(kmer):
        # Track both strands so reverse-complement collisions are avoided.
        seen.add(kmer)
        seen.add(revcomp(kmer))
    if (exclude is not None):
        # NOTE(review): the slices are (K-1)-mers but the range stops at
        # len(exclude) - K, so the final window(s) are skipped — confirm
        # whether that off-by-one is intentional.
        for pos in range(0, (len(exclude) - K)):
            add_seen(exclude[pos:((pos + K) - 1)])
    # Seed with a random (K-1)-mer.
    seq = [random.choice('ACGT') for _ in range((K - 1))]
    add_seen(''.join(seq))
    while (len(seq) < length):
        next_base = random.choice('ACGT')
        next_kmer = ''.join((seq[((- K) + 2):] + [next_base]))
        assert (len(next_kmer) == (K - 1))
        # Retry with a different random base on collision; may spin if
        # all four extensions have been seen.
        if (next_kmer not in seen):
            seq.append(next_base)
            add_seen(next_kmer)
        else:
            continue
    return ''.join(seq)
[ "def", "get_random_sequence", "(", "length", ",", "exclude", "=", "None", ")", ":", "seen", "=", "set", "(", ")", "def", "add_seen", "(", "kmer", ")", ":", "seen", ".", "add", "(", "kmer", ")", "seen", ".", "add", "(", "revcomp", "(", "kmer", ")", ")", "if", "(", "exclude", "is", "not", "None", ")", ":", "for", "pos", "in", "range", "(", "0", ",", "(", "len", "(", "exclude", ")", "-", "K", ")", ")", ":", "add_seen", "(", "exclude", "[", "pos", ":", "(", "(", "pos", "+", "K", ")", "-", "1", ")", "]", ")", "seq", "=", "[", "random", ".", "choice", "(", "'ACGT'", ")", "for", "_", "in", "range", "(", "(", "K", "-", "1", ")", ")", "]", "add_seen", "(", "''", ".", "join", "(", "seq", ")", ")", "while", "(", "len", "(", "seq", ")", "<", "length", ")", ":", "next_base", "=", "random", ".", "choice", "(", "'ACGT'", ")", "next_kmer", "=", "''", ".", "join", "(", "(", "seq", "[", "(", "(", "-", "K", ")", "+", "2", ")", ":", "]", "+", "[", "next_base", "]", ")", ")", "assert", "(", "len", "(", "next_kmer", ")", "==", "(", "K", "-", "1", ")", ")", "if", "(", "next_kmer", "not", "in", "seen", ")", ":", "seq", ".", "append", "(", "next_base", ")", "add_seen", "(", "next_kmer", ")", "else", ":", "continue", "return", "''", ".", "join", "(", "seq", ")" ]
generate a random nucleotide sequence .
train
false
23,830
def get_net(t):
    """Return a sinusoidal signal value in [0, 100] for time *t*.

    (Despite the name, this computes a sine wave, not a network.)
    """
    phase = (0.7 * np.pi) * (t - 0.1)
    return 100 * (0.5 + 0.5 * np.sin(phase))
[ "def", "get_net", "(", "t", ")", ":", "return", "(", "100", "*", "(", "0.5", "+", "(", "0.5", "*", "np", ".", "sin", "(", "(", "(", "0.7", "*", "np", ".", "pi", ")", "*", "(", "t", "-", "0.1", ")", ")", ")", ")", ")", ")" ]
returns a sinusoidal signal value in the range [0 , 100] for time t .
train
false
23,831
def yield_fixture(scope='function', params=None, autouse=False, ids=None, name=None):
    """Decorator to mark a yield-fixture factory function.

    Supports both bare use (``@yield_fixture`` directly on a function,
    in which case *scope* is the function) and parametrized use
    (``@yield_fixture(scope=...)``).
    """
    bare_decoration = callable(scope) and params is None and not autouse
    if bare_decoration:
        # `scope` is actually the decorated function here.
        marker = FixtureFunctionMarker('function', params, autouse, ids=ids, name=name)
        return marker(scope)
    return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
[ "def", "yield_fixture", "(", "scope", "=", "'function'", ",", "params", "=", "None", ",", "autouse", "=", "False", ",", "ids", "=", "None", ",", "name", "=", "None", ")", ":", "if", "(", "callable", "(", "scope", ")", "and", "(", "params", "is", "None", ")", "and", "(", "not", "autouse", ")", ")", ":", "return", "FixtureFunctionMarker", "(", "'function'", ",", "params", ",", "autouse", ",", "ids", "=", "ids", ",", "name", "=", "name", ")", "(", "scope", ")", "else", ":", "return", "FixtureFunctionMarker", "(", "scope", ",", "params", ",", "autouse", ",", "ids", "=", "ids", ",", "name", "=", "name", ")" ]
decorator to mark a yield-fixture factory function .
train
false
23,832
def get_group_info_for_cohort(cohort, use_cached=False):
    """Get the (group_id, partition_id) tuple this cohort is linked to.

    Returns (None, None) when the cohort has no partition-group link.
    Results are memoized in the per-request cache; pass use_cached=True
    to accept a previously cached value instead of re-querying.
    """
    request_cache = RequestCache.get_request_cache()
    cache_key = u'cohorts.get_group_info_for_cohort.{}'.format(cohort.id)
    if (use_cached and (cache_key in request_cache.data)):
        return request_cache.data[cache_key]
    # Drop any stale entry before recomputing.
    request_cache.data.pop(cache_key, None)
    try:
        partition_group = CourseUserGroupPartitionGroup.objects.get(course_user_group=cohort)
        return request_cache.data.setdefault(cache_key, (partition_group.group_id, partition_group.partition_id))
    except CourseUserGroupPartitionGroup.DoesNotExist:
        pass
    return request_cache.data.setdefault(cache_key, (None, None))
[ "def", "get_group_info_for_cohort", "(", "cohort", ",", "use_cached", "=", "False", ")", ":", "request_cache", "=", "RequestCache", ".", "get_request_cache", "(", ")", "cache_key", "=", "u'cohorts.get_group_info_for_cohort.{}'", ".", "format", "(", "cohort", ".", "id", ")", "if", "(", "use_cached", "and", "(", "cache_key", "in", "request_cache", ".", "data", ")", ")", ":", "return", "request_cache", ".", "data", "[", "cache_key", "]", "request_cache", ".", "data", ".", "pop", "(", "cache_key", ",", "None", ")", "try", ":", "partition_group", "=", "CourseUserGroupPartitionGroup", ".", "objects", ".", "get", "(", "course_user_group", "=", "cohort", ")", "return", "request_cache", ".", "data", ".", "setdefault", "(", "cache_key", ",", "(", "partition_group", ".", "group_id", ",", "partition_group", ".", "partition_id", ")", ")", "except", "CourseUserGroupPartitionGroup", ".", "DoesNotExist", ":", "pass", "return", "request_cache", ".", "data", ".", "setdefault", "(", "cache_key", ",", "(", "None", ",", "None", ")", ")" ]
get the ids of the group and partition to which this cohort has been linked as a tuple of .
train
false
23,835
def new(rsa_key):
    """Return a PKCS#1 v1.5 signature scheme object wrapping *rsa_key*."""
    scheme = PKCS115_SigScheme(rsa_key)
    return scheme
[ "def", "new", "(", "rsa_key", ")", ":", "return", "PKCS115_SigScheme", "(", "rsa_key", ")" ]
returns a pkcs115 signature scheme object for the given rsa key .
train
false
23,836
def writeDataRow(output_file, session_info, session_uservar_names, event_data):
    """Save one row of event data to the output file.

    Columns: session fields (minus the last), session user variables,
    event fields, then the session user variables again.
    NOTE(review): the user-variable columns appear twice and the join
    separator literal ' DCTB ' looks like a mangled tab character —
    confirm both against the expected output format before changing.
    """
    session_data = [str(i) for i in session_info[:(-1)]]
    session_user_data = [session_info.user_variables[sud_name] for sud_name in session_uservar_names]
    all_data = (((session_data + session_user_data) + [str(e) for e in event_data]) + session_user_data)
    output_file.write(' DCTB '.join(all_data))
    output_file.write('\n')
[ "def", "writeDataRow", "(", "output_file", ",", "session_info", ",", "session_uservar_names", ",", "event_data", ")", ":", "session_data", "=", "[", "str", "(", "i", ")", "for", "i", "in", "session_info", "[", ":", "(", "-", "1", ")", "]", "]", "session_user_data", "=", "[", "session_info", ".", "user_variables", "[", "sud_name", "]", "for", "sud_name", "in", "session_uservar_names", "]", "all_data", "=", "(", "(", "(", "session_data", "+", "session_user_data", ")", "+", "[", "str", "(", "e", ")", "for", "e", "in", "event_data", "]", ")", "+", "session_user_data", ")", "output_file", ".", "write", "(", "' DCTB '", ".", "join", "(", "all_data", ")", ")", "output_file", ".", "write", "(", "'\\n'", ")" ]
save a row of data to the output file .
train
false
23,837
def data_sharing_consent_requirement_at_login(request):
    """Return 'required', 'optional', or None for consent at login.

    None is returned when the enterprise feature is disabled or no
    consent was requested.
    """
    if not enterprise_enabled():
        return None
    if data_sharing_consent_required_at_login(request):
        return 'required'
    return 'optional' if data_sharing_consent_requested(request) else None
[ "def", "data_sharing_consent_requirement_at_login", "(", "request", ")", ":", "if", "(", "not", "enterprise_enabled", "(", ")", ")", ":", "return", "None", "if", "data_sharing_consent_required_at_login", "(", "request", ")", ":", "return", "'required'", "if", "data_sharing_consent_requested", "(", "request", ")", ":", "return", "'optional'", "return", "None" ]
returns either optional or required based on where we are .
train
false
23,838
def not26(func):
    """Decorator for methods unavailable on Python 2.6 and earlier.

    On interpreters older than 2.7 (hexversion 0x02070000) the wrapped
    function raises NotImplementedError; otherwise the function is
    returned unchanged.
    """
    @wraps(func)
    def errfunc(*args, **kwargs):
        raise NotImplementedError
    return errfunc if hexversion < 34013184 else func
[ "def", "not26", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "errfunc", "(", "*", "args", ",", "**", "kwargs", ")", ":", "raise", "NotImplementedError", "if", "(", "hexversion", "<", "34013184", ")", ":", "return", "errfunc", "else", ":", "return", "func" ]
function decorator for methods not implemented in python 2 .
train
true
23,840
def _run_checker_for_package(checker, package_name, extra_ignore=None):
    """Run *checker* across every Python module in *package_name*.

    Yields each warning with the package path rewritten to the package
    name; warnings matching IGNORE_ERRORS (plus *extra_ignore*) are
    suppressed. Skips local_settings.py, non-.py files and migrations.
    """
    # Copy before extending: the original `ignore_strings += extra_ignore`
    # mutated the module-level IGNORE_ERRORS list in place, so ignores
    # accumulated across calls.
    ignore_strings = list(IGNORE_ERRORS)
    if extra_ignore:
        ignore_strings.extend(extra_ignore)
    package_path = path_for_import(package_name)
    for (root, dirs, files) in os.walk(str(package_path)):
        for f in files:
            if ((f == u'local_settings.py') or (not f.endswith(u'.py')) or (root.split(os.sep)[(-1)] in [u'migrations'])):
                continue
            for warning in checker(os.path.join(root, f)):
                for ignore in ignore_strings:
                    if (ignore in warning):
                        break
                else:
                    (yield warning.replace(package_path, package_name, 1))
[ "def", "_run_checker_for_package", "(", "checker", ",", "package_name", ",", "extra_ignore", "=", "None", ")", ":", "ignore_strings", "=", "IGNORE_ERRORS", "if", "extra_ignore", ":", "ignore_strings", "+=", "extra_ignore", "package_path", "=", "path_for_import", "(", "package_name", ")", "for", "(", "root", ",", "dirs", ",", "files", ")", "in", "os", ".", "walk", "(", "str", "(", "package_path", ")", ")", ":", "for", "f", "in", "files", ":", "if", "(", "(", "f", "==", "u'local_settings.py'", ")", "or", "(", "not", "f", ".", "endswith", "(", "u'.py'", ")", ")", "or", "(", "root", ".", "split", "(", "os", ".", "sep", ")", "[", "(", "-", "1", ")", "]", "in", "[", "u'migrations'", "]", ")", ")", ":", "continue", "for", "warning", "in", "checker", "(", "os", ".", "path", ".", "join", "(", "root", ",", "f", ")", ")", ":", "for", "ignore", "in", "ignore_strings", ":", "if", "(", "ignore", "in", "warning", ")", ":", "break", "else", ":", "(", "yield", "warning", ".", "replace", "(", "package_path", ",", "package_name", ",", "1", ")", ")" ]
runs the checker function across every python module in the given package .
train
false
23,841
def _custom_getter(resource, resource_id):
    """Helper to retrieve resources not served by any plugin.

    Only quota resources are handled; anything else yields None.
    """
    if resource != quotasv2.RESOURCE_NAME:
        return None
    return quota.get_tenant_quotas(resource_id)[quotasv2.RESOURCE_NAME]
[ "def", "_custom_getter", "(", "resource", ",", "resource_id", ")", ":", "if", "(", "resource", "==", "quotasv2", ".", "RESOURCE_NAME", ")", ":", "return", "quota", ".", "get_tenant_quotas", "(", "resource_id", ")", "[", "quotasv2", ".", "RESOURCE_NAME", "]" ]
helper function to retrieve resources not served by any plugin .
train
false
23,842
def _validate_mutable_mappings(a, b):
    """Ensure both arguments are MutableMappings.

    :raises AnsibleError: if one of the arguments is not a MutableMapping
    """
    if (not (isinstance(a, MutableMapping) and isinstance(b, MutableMapping))):
        myvars = []
        for x in [a, b]:
            try:
                myvars.append(dumps(x))
            except Exception:
                # Narrowed from a bare `except:` so SystemExit and
                # KeyboardInterrupt still propagate.
                myvars.append(to_native(x))
        raise AnsibleError("failed to combine variables, expected dicts but got a '{0}' and a '{1}': \n{2}\n{3}".format(a.__class__.__name__, b.__class__.__name__, myvars[0], myvars[1]))
[ "def", "_validate_mutable_mappings", "(", "a", ",", "b", ")", ":", "if", "(", "not", "(", "isinstance", "(", "a", ",", "MutableMapping", ")", "and", "isinstance", "(", "b", ",", "MutableMapping", ")", ")", ")", ":", "myvars", "=", "[", "]", "for", "x", "in", "[", "a", ",", "b", "]", ":", "try", ":", "myvars", ".", "append", "(", "dumps", "(", "x", ")", ")", "except", ":", "myvars", ".", "append", "(", "to_native", "(", "x", ")", ")", "raise", "AnsibleError", "(", "\"failed to combine variables, expected dicts but got a '{0}' and a '{1}': \\n{2}\\n{3}\"", ".", "format", "(", "a", ".", "__class__", ".", "__name__", ",", "b", ".", "__class__", ".", "__name__", ",", "myvars", "[", "0", "]", ",", "myvars", "[", "1", "]", ")", ")" ]
internal convenience function to ensure arguments are mutablemappings this checks that all arguments are mutablemappings or raises an error :raises ansibleerror: if one of the arguments is not a mutablemapping .
train
false
23,844
def _format_error(error):
    """Return a string to log/print explaining the given error.

    Falls back to pretty-printed JSON when the structured formatter
    fails for any recoverable reason.
    """
    try:
        return _format_error_helper(error)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        return json.dumps(error, indent=2, sort_keys=True)
[ "def", "_format_error", "(", "error", ")", ":", "try", ":", "return", "_format_error_helper", "(", "error", ")", "except", ":", "return", "json", ".", "dumps", "(", "error", ",", "indent", "=", "2", ",", "sort_keys", "=", "True", ")" ]
return string to log/print explaining the given error .
train
false
23,845
def distance_indicators(x, epsilon=None, distance=1.5):
    """Calculate all pairwise threshold-distance indicators for a series.

    Parameters
    ----------
    x : 1d array-like
        Observations of the time series.
    epsilon : scalar, optional
        Explicit threshold distance; must be positive if specified.
    distance : scalar
        Multiplier of the sample standard deviation used to derive
        epsilon when it is not supplied.

    Returns
    -------
    ndarray of bool, shape (n, n)
        Entry (i, j) is True when ``|x[i] - x[j]| < epsilon``.

    Raises
    ------
    ValueError
        If epsilon or distance is not positive.
    """
    x = np.asarray(x)
    # (removed an unused `nobs = len(x)` local)
    if (epsilon is not None) and (epsilon <= 0):
        raise ValueError(('Threshold distance must be positive if specified. Got epsilon of %f' % epsilon))
    if distance <= 0:
        raise ValueError(('Threshold distance must be positive. Got distance multiplier %f' % distance))
    if epsilon is None:
        epsilon = distance * x.std(ddof=1)
    # Broadcasting builds the full n x n absolute-difference matrix.
    return np.abs(x[:, None] - x) < epsilon
[ "def", "distance_indicators", "(", "x", ",", "epsilon", "=", "None", ",", "distance", "=", "1.5", ")", ":", "x", "=", "np", ".", "asarray", "(", "x", ")", "nobs", "=", "len", "(", "x", ")", "if", "(", "(", "epsilon", "is", "not", "None", ")", "and", "(", "epsilon", "<=", "0", ")", ")", ":", "raise", "ValueError", "(", "(", "'Threshold distance must be positive if specified. Got epsilon of %f'", "%", "epsilon", ")", ")", "if", "(", "distance", "<=", "0", ")", ":", "raise", "ValueError", "(", "(", "'Threshold distance must be positive. Got distance multiplier %f'", "%", "distance", ")", ")", "if", "(", "epsilon", "is", "None", ")", ":", "epsilon", "=", "(", "distance", "*", "x", ".", "std", "(", "ddof", "=", "1", ")", ")", "return", "(", "np", ".", "abs", "(", "(", "x", "[", ":", ",", "None", "]", "-", "x", ")", ")", "<", "epsilon", ")" ]
calculate all pairwise threshold distance indicators for a time series parameters x : 1d array observations of time series for which heaviside distance indicators are calculated epsilon : scalar .
train
false
23,848
def lock_exists(name):
    """Return True if a lock of the given name exists on disk."""
    lock_path = get_lock_path(name)
    return os.path.exists(lock_path)
[ "def", "lock_exists", "(", "name", ")", ":", "return", "os", ".", "path", ".", "exists", "(", "get_lock_path", "(", "name", ")", ")" ]
returns true if lock of the given name exists .
train
false
23,849
def getpath(environ):
    """Build the full request path from SCRIPT_NAME and PATH_INFO.

    Both components are URL-quoted before concatenation; missing keys
    default to the empty string.
    """
    script = quote(environ.get('SCRIPT_NAME', ''))
    path_info = quote(environ.get('PATH_INFO', ''))
    return script + path_info
[ "def", "getpath", "(", "environ", ")", ":", "return", "''", ".", "join", "(", "[", "quote", "(", "environ", ".", "get", "(", "'SCRIPT_NAME'", ",", "''", ")", ")", ",", "quote", "(", "environ", ".", "get", "(", "'PATH_INFO'", ",", "''", ")", ")", "]", ")" ]
builds the url-quoted request path from script_name and path_info .
train
false
23,850
def degree_mixing_matrix(G, x='out', y='in', weight=None, nodes=None, normalized=True):
    """Return the degree mixing matrix for graph G.

    Entry (i, j) counts (or weight-sums, when `weight` is given) edges
    joining a node of degree i to a node of degree j; when `normalized`
    is True the entries are joint probabilities summing to 1.
    """
    d = degree_mixing_dict(G, x=x, y=y, nodes=nodes, weight=weight)
    # Collect every degree value appearing as a row or column key.
    s = set(d.keys())
    for (k, v) in d.items():
        s.update(v.keys())
    m = max(s)
    # Identity mapping 0..m so the matrix index equals the degree.
    mapping = dict(zip(range((m + 1)), range((m + 1))))
    a = dict_to_numpy_array(d, mapping=mapping)
    if normalized:
        a = (a / a.sum())
    return a
[ "def", "degree_mixing_matrix", "(", "G", ",", "x", "=", "'out'", ",", "y", "=", "'in'", ",", "weight", "=", "None", ",", "nodes", "=", "None", ",", "normalized", "=", "True", ")", ":", "d", "=", "degree_mixing_dict", "(", "G", ",", "x", "=", "x", ",", "y", "=", "y", ",", "nodes", "=", "nodes", ",", "weight", "=", "weight", ")", "s", "=", "set", "(", "d", ".", "keys", "(", ")", ")", "for", "(", "k", ",", "v", ")", "in", "d", ".", "items", "(", ")", ":", "s", ".", "update", "(", "v", ".", "keys", "(", ")", ")", "m", "=", "max", "(", "s", ")", "mapping", "=", "dict", "(", "zip", "(", "range", "(", "(", "m", "+", "1", ")", ")", ",", "range", "(", "(", "m", "+", "1", ")", ")", ")", ")", "a", "=", "dict_to_numpy_array", "(", "d", ",", "mapping", "=", "mapping", ")", "if", "normalized", ":", "a", "=", "(", "a", "/", "a", ".", "sum", "(", ")", ")", "return", "a" ]
return mixing matrix for attribute .
train
false
23,851
def is_in(obj, l):
    """Check whether *obj* is — by identity, not equality — an item of *l*."""
    return any(item is obj for item in l)
[ "def", "is_in", "(", "obj", ",", "l", ")", ":", "for", "item", "in", "l", ":", "if", "(", "item", "is", "obj", ")", ":", "return", "True", "return", "False" ]
checks whether an object is one of the item in the list .
train
false
23,852
def recipr(X):
    """Return the elementwise reciprocal of an array.

    Non-positive entries map to 0 instead of producing a division
    warning: the denominator trick adds 1 exactly where the clipped
    value is 0.
    """
    arr = np.asarray(X).astype(np.float64)
    clipped = np.maximum(arr, 0)
    nonpositive = np.less_equal(clipped, 0.0)
    return np.greater(clipped, 0.0) / (clipped + nonpositive)
[ "def", "recipr", "(", "X", ")", ":", "x", "=", "np", ".", "maximum", "(", "np", ".", "asarray", "(", "X", ")", ".", "astype", "(", "np", ".", "float64", ")", ",", "0", ")", "return", "(", "np", ".", "greater", "(", "x", ",", "0.0", ")", "/", "(", "x", "+", "np", ".", "less_equal", "(", "x", ",", "0.0", ")", ")", ")" ]
return the reciprocal of an array .
train
false
23,853
def dup_neg(f, K):
    """Negate a polynomial in K[x] given as a dense coefficient list.

    The domain argument *K* is accepted for API symmetry but unused.
    """
    return list(map(operator.neg, f))
[ "def", "dup_neg", "(", "f", ",", "K", ")", ":", "return", "[", "(", "-", "coeff", ")", "for", "coeff", "in", "f", "]" ]
negate a polynomial in k[x] .
train
false
23,854
def db_encrypt_parameters_and_properties(ctxt, encryption_key, batch_size=50, verbose=False):
    """Encrypt parameters and properties for all templates in the DB.

    Runs both encryption passes and returns the combined list of
    exceptions they reported.
    """
    exceptions = []
    for encrypt_fn in (_db_encrypt_or_decrypt_template_params,
                       _db_encrypt_or_decrypt_resource_prop_data):
        exceptions.extend(encrypt_fn(ctxt, encryption_key, True, batch_size, verbose))
    return exceptions
[ "def", "db_encrypt_parameters_and_properties", "(", "ctxt", ",", "encryption_key", ",", "batch_size", "=", "50", ",", "verbose", "=", "False", ")", ":", "excs", "=", "[", "]", "excs", ".", "extend", "(", "_db_encrypt_or_decrypt_template_params", "(", "ctxt", ",", "encryption_key", ",", "True", ",", "batch_size", ",", "verbose", ")", ")", "excs", ".", "extend", "(", "_db_encrypt_or_decrypt_resource_prop_data", "(", "ctxt", ",", "encryption_key", ",", "True", ",", "batch_size", ",", "verbose", ")", ")", "return", "excs" ]
encrypt parameters and properties for all templates in db .
train
false
23,856
def initial_state(layer, dimensions=None): if (dimensions is None): return (layer.initial_hidden_state if has_hidden(layer) else None) else: return (matrixify(layer.initial_hidden_state, dimensions) if has_hidden(layer) else None)
[ "def", "initial_state", "(", "layer", ",", "dimensions", "=", "None", ")", ":", "if", "(", "dimensions", "is", "None", ")", ":", "return", "(", "layer", ".", "initial_hidden_state", "if", "has_hidden", "(", "layer", ")", "else", "None", ")", "else", ":", "return", "(", "matrixify", "(", "layer", ".", "initial_hidden_state", ",", "dimensions", ")", "if", "has_hidden", "(", "layer", ")", "else", "None", ")" ]
initializes the recurrence relation with an initial hidden state if needed .
train
false
23,857
def drop_user(name): _run_as_pg(('psql -c "DROP USER %(name)s;"' % locals()))
[ "def", "drop_user", "(", "name", ")", ":", "_run_as_pg", "(", "(", "'psql -c \"DROP USER %(name)s;\"'", "%", "locals", "(", ")", ")", ")" ]
drop a postgresql user .
train
false
23,860
def flush_cache(**kwargs): def class_hierarchy(clslist): 'Recursively yield a class hierarchy' for cls in clslist: subclass_list = cls.__subclasses__() if subclass_list: for subcls in class_hierarchy(subclass_list): (yield subcls) else: (yield cls) for cls in class_hierarchy([SharedMemoryModel]): cls.flush_instance_cache() return gc.collect()
[ "def", "flush_cache", "(", "**", "kwargs", ")", ":", "def", "class_hierarchy", "(", "clslist", ")", ":", "for", "cls", "in", "clslist", ":", "subclass_list", "=", "cls", ".", "__subclasses__", "(", ")", "if", "subclass_list", ":", "for", "subcls", "in", "class_hierarchy", "(", "subclass_list", ")", ":", "(", "yield", "subcls", ")", "else", ":", "(", "yield", "cls", ")", "for", "cls", "in", "class_hierarchy", "(", "[", "SharedMemoryModel", "]", ")", ":", "cls", ".", "flush_instance_cache", "(", ")", "return", "gc", ".", "collect", "(", ")" ]
flush idmapper cache .
train
false
23,861
def ADOPT_module_key_flags(module, flag_values=FLAGS): if isinstance(module, str): raise FlagsError(('Received module name %s; expected a module object.' % module)) _InternalDeclareKeyFlags([f.name for f in flag_values._GetKeyFlagsForModule(module.__name__)], flag_values=flag_values) if (module == _GetThisModuleObjectAndName()[0]): _InternalDeclareKeyFlags([f.name for f in _SPECIAL_FLAGS.FlagDict().values()], flag_values=_SPECIAL_FLAGS, key_flag_values=flag_values)
[ "def", "ADOPT_module_key_flags", "(", "module", ",", "flag_values", "=", "FLAGS", ")", ":", "if", "isinstance", "(", "module", ",", "str", ")", ":", "raise", "FlagsError", "(", "(", "'Received module name %s; expected a module object.'", "%", "module", ")", ")", "_InternalDeclareKeyFlags", "(", "[", "f", ".", "name", "for", "f", "in", "flag_values", ".", "_GetKeyFlagsForModule", "(", "module", ".", "__name__", ")", "]", ",", "flag_values", "=", "flag_values", ")", "if", "(", "module", "==", "_GetThisModuleObjectAndName", "(", ")", "[", "0", "]", ")", ":", "_InternalDeclareKeyFlags", "(", "[", "f", ".", "name", "for", "f", "in", "_SPECIAL_FLAGS", ".", "FlagDict", "(", ")", ".", "values", "(", ")", "]", ",", "flag_values", "=", "_SPECIAL_FLAGS", ",", "key_flag_values", "=", "flag_values", ")" ]
declares that all flags key to a module are key to the current module .
train
false
23,862
def get_new_device(event, config, device): device_id = slugify(event.device.id_string.lower()) if (device_id in RFX_DEVICES): return if (not config[ATTR_AUTOMATIC_ADD]): return pkt_id = ''.join(('{0:02x}'.format(x) for x in event.data)) _LOGGER.info('Automatic add %s rfxtrx device (Class: %s Sub: %s Packet_id: %s)', device_id, event.device.__class__.__name__, event.device.subtype, pkt_id) datas = {ATTR_STATE: False, ATTR_FIREEVENT: False} signal_repetitions = config[CONF_SIGNAL_REPETITIONS] new_device = device(pkt_id, event, datas, signal_repetitions) RFX_DEVICES[device_id] = new_device return new_device
[ "def", "get_new_device", "(", "event", ",", "config", ",", "device", ")", ":", "device_id", "=", "slugify", "(", "event", ".", "device", ".", "id_string", ".", "lower", "(", ")", ")", "if", "(", "device_id", "in", "RFX_DEVICES", ")", ":", "return", "if", "(", "not", "config", "[", "ATTR_AUTOMATIC_ADD", "]", ")", ":", "return", "pkt_id", "=", "''", ".", "join", "(", "(", "'{0:02x}'", ".", "format", "(", "x", ")", "for", "x", "in", "event", ".", "data", ")", ")", "_LOGGER", ".", "info", "(", "'Automatic add %s rfxtrx device (Class: %s Sub: %s Packet_id: %s)'", ",", "device_id", ",", "event", ".", "device", ".", "__class__", ".", "__name__", ",", "event", ".", "device", ".", "subtype", ",", "pkt_id", ")", "datas", "=", "{", "ATTR_STATE", ":", "False", ",", "ATTR_FIREEVENT", ":", "False", "}", "signal_repetitions", "=", "config", "[", "CONF_SIGNAL_REPETITIONS", "]", "new_device", "=", "device", "(", "pkt_id", ",", "event", ",", "datas", ",", "signal_repetitions", ")", "RFX_DEVICES", "[", "device_id", "]", "=", "new_device", "return", "new_device" ]
add entity if it does not exist and automatic_add is true .
train
false
23,863
def bytes_to_num(bval): num = 0 num += ord((bval[0] << 24)) num += ord((bval[1] << 16)) num += ord((bval[2] << 8)) num += ord(bval[3]) return num
[ "def", "bytes_to_num", "(", "bval", ")", ":", "num", "=", "0", "num", "+=", "ord", "(", "(", "bval", "[", "0", "]", "<<", "24", ")", ")", "num", "+=", "ord", "(", "(", "bval", "[", "1", "]", "<<", "16", ")", ")", "num", "+=", "ord", "(", "(", "bval", "[", "2", "]", "<<", "8", ")", ")", "num", "+=", "ord", "(", "bval", "[", "3", "]", ")", "return", "num" ]
convert a four byte sequence to an integer .
train
false
23,864
def recommend_for_brands(brands): return []
[ "def", "recommend_for_brands", "(", "brands", ")", ":", "return", "[", "]" ]
return top five recommended brands when given brands to recommend for .
train
false
23,865
def show_interface(call=None, kwargs=None): global netconn if (not netconn): netconn = get_conn(NetworkManagementClient) if (kwargs is None): kwargs = {} if kwargs.get('group'): kwargs['resource_group'] = kwargs['group'] if (kwargs.get('resource_group') is None): kwargs['resource_group'] = config.get_cloud_config_value('resource_group', {}, __opts__, search_global=True) iface_name = kwargs.get('iface_name', kwargs.get('name')) iface = netconn.network_interfaces.get(kwargs['resource_group'], iface_name) data = object_to_dict(iface) data['resource_group'] = kwargs['resource_group'] data['ip_configurations'] = {} for ip_ in iface.ip_configurations: data['ip_configurations'][ip_.name] = make_safe(ip_) try: pubip = netconn.public_ip_addresses.get(kwargs['resource_group'], ip_.name) data['ip_configurations'][ip_.name]['public_ip_address']['ip_address'] = pubip.ip_address except Exception as exc: log.warning('There was a cloud error: {0}'.format(exc)) log.warning('{0}'.format(type(exc))) continue return data
[ "def", "show_interface", "(", "call", "=", "None", ",", "kwargs", "=", "None", ")", ":", "global", "netconn", "if", "(", "not", "netconn", ")", ":", "netconn", "=", "get_conn", "(", "NetworkManagementClient", ")", "if", "(", "kwargs", "is", "None", ")", ":", "kwargs", "=", "{", "}", "if", "kwargs", ".", "get", "(", "'group'", ")", ":", "kwargs", "[", "'resource_group'", "]", "=", "kwargs", "[", "'group'", "]", "if", "(", "kwargs", ".", "get", "(", "'resource_group'", ")", "is", "None", ")", ":", "kwargs", "[", "'resource_group'", "]", "=", "config", ".", "get_cloud_config_value", "(", "'resource_group'", ",", "{", "}", ",", "__opts__", ",", "search_global", "=", "True", ")", "iface_name", "=", "kwargs", ".", "get", "(", "'iface_name'", ",", "kwargs", ".", "get", "(", "'name'", ")", ")", "iface", "=", "netconn", ".", "network_interfaces", ".", "get", "(", "kwargs", "[", "'resource_group'", "]", ",", "iface_name", ")", "data", "=", "object_to_dict", "(", "iface", ")", "data", "[", "'resource_group'", "]", "=", "kwargs", "[", "'resource_group'", "]", "data", "[", "'ip_configurations'", "]", "=", "{", "}", "for", "ip_", "in", "iface", ".", "ip_configurations", ":", "data", "[", "'ip_configurations'", "]", "[", "ip_", ".", "name", "]", "=", "make_safe", "(", "ip_", ")", "try", ":", "pubip", "=", "netconn", ".", "public_ip_addresses", ".", "get", "(", "kwargs", "[", "'resource_group'", "]", ",", "ip_", ".", "name", ")", "data", "[", "'ip_configurations'", "]", "[", "ip_", ".", "name", "]", "[", "'public_ip_address'", "]", "[", "'ip_address'", "]", "=", "pubip", ".", "ip_address", "except", "Exception", "as", "exc", ":", "log", ".", "warning", "(", "'There was a cloud error: {0}'", ".", "format", "(", "exc", ")", ")", "log", ".", "warning", "(", "'{0}'", ".", "format", "(", "type", "(", "exc", ")", ")", ")", "continue", "return", "data" ]
create a network interface .
train
false
23,866
def make_display_brightness_message(brightness): raise ((0 <= brightness <= 255) or AssertionError) return make_message(8, to_7L1M(brightness))
[ "def", "make_display_brightness_message", "(", "brightness", ")", ":", "raise", "(", "(", "0", "<=", "brightness", "<=", "255", ")", "or", "AssertionError", ")", "return", "make_message", "(", "8", ",", "to_7L1M", "(", "brightness", ")", ")" ]
the display brightness is influenced by various parameters like absolute maximum backlight led current .
train
false
23,868
def PlistValueToPlainValue(plist): if isinstance(plist, dict): ret_value = dict() for (key, value) in plist.items(): ret_value[key] = PlistValueToPlainValue(value) return ret_value elif isinstance(plist, list): return [PlistValueToPlainValue(value) for value in plist] elif isinstance(plist, binplist.RawValue): return plist.value elif (isinstance(plist, binplist.CorruptReference) or isinstance(plist, binplist.UnknownObject)): return None elif isinstance(plist, datetime.datetime): return ((calendar.timegm(plist.utctimetuple()) * 1000000) + plist.microsecond) return plist
[ "def", "PlistValueToPlainValue", "(", "plist", ")", ":", "if", "isinstance", "(", "plist", ",", "dict", ")", ":", "ret_value", "=", "dict", "(", ")", "for", "(", "key", ",", "value", ")", "in", "plist", ".", "items", "(", ")", ":", "ret_value", "[", "key", "]", "=", "PlistValueToPlainValue", "(", "value", ")", "return", "ret_value", "elif", "isinstance", "(", "plist", ",", "list", ")", ":", "return", "[", "PlistValueToPlainValue", "(", "value", ")", "for", "value", "in", "plist", "]", "elif", "isinstance", "(", "plist", ",", "binplist", ".", "RawValue", ")", ":", "return", "plist", ".", "value", "elif", "(", "isinstance", "(", "plist", ",", "binplist", ".", "CorruptReference", ")", "or", "isinstance", "(", "plist", ",", "binplist", ".", "UnknownObject", ")", ")", ":", "return", "None", "elif", "isinstance", "(", "plist", ",", "datetime", ".", "datetime", ")", ":", "return", "(", "(", "calendar", ".", "timegm", "(", "plist", ".", "utctimetuple", "(", ")", ")", "*", "1000000", ")", "+", "plist", ".", "microsecond", ")", "return", "plist" ]
takes the plist contents generated by binplist and returns a plain dict .
train
false
23,869
def f_oneway(*args): return _f_oneway(*args)[0]
[ "def", "f_oneway", "(", "*", "args", ")", ":", "return", "_f_oneway", "(", "*", "args", ")", "[", "0", "]" ]
performs a 1-way anova .
train
false
23,871
def reindex_questions_answers(sender, instance, **kw): if instance.id: answer_ids = instance.answers.all().values_list('id', flat=True) index_task.delay(AnswerMetricsMappingType, list(answer_ids))
[ "def", "reindex_questions_answers", "(", "sender", ",", "instance", ",", "**", "kw", ")", ":", "if", "instance", ".", "id", ":", "answer_ids", "=", "instance", ".", "answers", ".", "all", "(", ")", ".", "values_list", "(", "'id'", ",", "flat", "=", "True", ")", "index_task", ".", "delay", "(", "AnswerMetricsMappingType", ",", "list", "(", "answer_ids", ")", ")" ]
when a question is saved .
train
false
23,873
def getDocBlockRegion(view, point): start = end = point while ((start > 0) and (view.scope_name((start - 1)).find('comment.block') > (-1))): start = (start - 1) while ((end < view.size()) and (view.scope_name(end).find('comment.block') > (-1))): end = (end + 1) return sublime.Region(start, end)
[ "def", "getDocBlockRegion", "(", "view", ",", "point", ")", ":", "start", "=", "end", "=", "point", "while", "(", "(", "start", ">", "0", ")", "and", "(", "view", ".", "scope_name", "(", "(", "start", "-", "1", ")", ")", ".", "find", "(", "'comment.block'", ")", ">", "(", "-", "1", ")", ")", ")", ":", "start", "=", "(", "start", "-", "1", ")", "while", "(", "(", "end", "<", "view", ".", "size", "(", ")", ")", "and", "(", "view", ".", "scope_name", "(", "end", ")", ".", "find", "(", "'comment.block'", ")", ">", "(", "-", "1", ")", ")", ")", ":", "end", "=", "(", "end", "+", "1", ")", "return", "sublime", ".", "Region", "(", "start", ",", "end", ")" ]
given a starting point inside a docblock .
train
false
23,875
def mu_law_decode(output, quantization_channels): with tf.name_scope('decode'): mu = (quantization_channels - 1) casted = tf.cast(output, tf.float32) signal = ((2 * (casted / mu)) - 1) magnitude = ((1 / mu) * (((1 + mu) ** abs(signal)) - 1)) return (tf.sign(signal) * magnitude)
[ "def", "mu_law_decode", "(", "output", ",", "quantization_channels", ")", ":", "with", "tf", ".", "name_scope", "(", "'decode'", ")", ":", "mu", "=", "(", "quantization_channels", "-", "1", ")", "casted", "=", "tf", ".", "cast", "(", "output", ",", "tf", ".", "float32", ")", "signal", "=", "(", "(", "2", "*", "(", "casted", "/", "mu", ")", ")", "-", "1", ")", "magnitude", "=", "(", "(", "1", "/", "mu", ")", "*", "(", "(", "(", "1", "+", "mu", ")", "**", "abs", "(", "signal", ")", ")", "-", "1", ")", ")", "return", "(", "tf", ".", "sign", "(", "signal", ")", "*", "magnitude", ")" ]
recovers waveform from quantized values .
train
false
23,876
def identityResponse(): a = TpPd(pd=3) b = MessageType(mesType=22) c = MobileId() packet = ((a / b) / c) return packet
[ "def", "identityResponse", "(", ")", ":", "a", "=", "TpPd", "(", "pd", "=", "3", ")", "b", "=", "MessageType", "(", "mesType", "=", "22", ")", "c", "=", "MobileId", "(", ")", "packet", "=", "(", "(", "a", "/", "b", ")", "/", "c", ")", "return", "packet" ]
identity response section 9 .
train
true
23,878
def _format_comments(comments): ret = '. '.join(comments) if (len(comments) > 1): ret += '.' return ret
[ "def", "_format_comments", "(", "comments", ")", ":", "ret", "=", "'. '", ".", "join", "(", "comments", ")", "if", "(", "len", "(", "comments", ")", ">", "1", ")", ":", "ret", "+=", "'.'", "return", "ret" ]
return a joined list .
train
false
23,879
def CheckLocation(): for url in (config_lib.CONFIG['Client.server_urls'] + config_lib.CONFIG['Client.control_urls']): if (('staging' in url) or ('localhost' in url)): return logging.error('Poolclient should only be run against test or staging.') exit()
[ "def", "CheckLocation", "(", ")", ":", "for", "url", "in", "(", "config_lib", ".", "CONFIG", "[", "'Client.server_urls'", "]", "+", "config_lib", ".", "CONFIG", "[", "'Client.control_urls'", "]", ")", ":", "if", "(", "(", "'staging'", "in", "url", ")", "or", "(", "'localhost'", "in", "url", ")", ")", ":", "return", "logging", ".", "error", "(", "'Poolclient should only be run against test or staging.'", ")", "exit", "(", ")" ]
checks that the poolclient is not accidentally run against production .
train
false
23,881
def map_int_to_tour(num_nodes, i, start_node): nodes_remaining = (range(0, start_node) + range((start_node + 1), num_nodes)) tour = [] while (len(nodes_remaining) > 0): num_nodes = len(nodes_remaining) next_step = nodes_remaining[(i % num_nodes)] nodes_remaining.remove(next_step) tour.append(next_step) i = (i / num_nodes) tour = (([start_node] + tour) + [start_node]) return tour
[ "def", "map_int_to_tour", "(", "num_nodes", ",", "i", ",", "start_node", ")", ":", "nodes_remaining", "=", "(", "range", "(", "0", ",", "start_node", ")", "+", "range", "(", "(", "start_node", "+", "1", ")", ",", "num_nodes", ")", ")", "tour", "=", "[", "]", "while", "(", "len", "(", "nodes_remaining", ")", ">", "0", ")", ":", "num_nodes", "=", "len", "(", "nodes_remaining", ")", "next_step", "=", "nodes_remaining", "[", "(", "i", "%", "num_nodes", ")", "]", "nodes_remaining", ".", "remove", "(", "next_step", ")", "tour", ".", "append", "(", "next_step", ")", "i", "=", "(", "i", "/", "num_nodes", ")", "tour", "=", "(", "(", "[", "start_node", "]", "+", "tour", ")", "+", "[", "start_node", "]", ")", "return", "tour" ]
gets a unique tour through a graph given an integer and starting node .
train
false
23,882
def getNewRepository(): return ExportRepository()
[ "def", "getNewRepository", "(", ")", ":", "return", "ExportRepository", "(", ")" ]
get the repository constructor .
train
false
23,883
def csv_mapping_parser(path, template): mapping_blocks = defaultdict(dict) with open(path, 'r') as handle: reader = csv.reader(handle) reader.next() for row in reader: mapping = dict(zip(template, row)) fid = mapping.pop('function') aid = int(mapping['address']) mapping_blocks[aid] = mapping return mapping_blocks
[ "def", "csv_mapping_parser", "(", "path", ",", "template", ")", ":", "mapping_blocks", "=", "defaultdict", "(", "dict", ")", "with", "open", "(", "path", ",", "'r'", ")", "as", "handle", ":", "reader", "=", "csv", ".", "reader", "(", "handle", ")", "reader", ".", "next", "(", ")", "for", "row", "in", "reader", ":", "mapping", "=", "dict", "(", "zip", "(", "template", ",", "row", ")", ")", "fid", "=", "mapping", ".", "pop", "(", "'function'", ")", "aid", "=", "int", "(", "mapping", "[", "'address'", "]", ")", "mapping_blocks", "[", "aid", "]", "=", "mapping", "return", "mapping_blocks" ]
given a csv file of the mapping data for a modbus device .
train
false
23,884
def get_plugins(phase=None, interface=None, category=None, name=None, min_api=None): def matches(plugin): if ((phase is not None) and (phase not in phase_methods)): raise ValueError((u'Unknown phase %s' % phase)) if (phase and (phase not in plugin.phase_handlers)): return False if (interface and (interface not in plugin.interfaces)): return False if (category and (not (category == plugin.category))): return False if ((name is not None) and (name != plugin.name)): return False if ((min_api is not None) and (plugin.api_ver < min_api)): return False return True return filter(matches, iter(plugins.values()))
[ "def", "get_plugins", "(", "phase", "=", "None", ",", "interface", "=", "None", ",", "category", "=", "None", ",", "name", "=", "None", ",", "min_api", "=", "None", ")", ":", "def", "matches", "(", "plugin", ")", ":", "if", "(", "(", "phase", "is", "not", "None", ")", "and", "(", "phase", "not", "in", "phase_methods", ")", ")", ":", "raise", "ValueError", "(", "(", "u'Unknown phase %s'", "%", "phase", ")", ")", "if", "(", "phase", "and", "(", "phase", "not", "in", "plugin", ".", "phase_handlers", ")", ")", ":", "return", "False", "if", "(", "interface", "and", "(", "interface", "not", "in", "plugin", ".", "interfaces", ")", ")", ":", "return", "False", "if", "(", "category", "and", "(", "not", "(", "category", "==", "plugin", ".", "category", ")", ")", ")", ":", "return", "False", "if", "(", "(", "name", "is", "not", "None", ")", "and", "(", "name", "!=", "plugin", ".", "name", ")", ")", ":", "return", "False", "if", "(", "(", "min_api", "is", "not", "None", ")", "and", "(", "plugin", ".", "api_ver", "<", "min_api", ")", ")", ":", "return", "False", "return", "True", "return", "filter", "(", "matches", ",", "iter", "(", "plugins", ".", "values", "(", ")", ")", ")" ]
helper to get a direct interface to _plugins .
train
false
23,885
def align_block(raw, multiple=4, pad='\x00'): extra = (len(raw) % multiple) if (extra == 0): return raw return (raw + (pad * (multiple - extra)))
[ "def", "align_block", "(", "raw", ",", "multiple", "=", "4", ",", "pad", "=", "'\\x00'", ")", ":", "extra", "=", "(", "len", "(", "raw", ")", "%", "multiple", ")", "if", "(", "extra", "==", "0", ")", ":", "return", "raw", "return", "(", "raw", "+", "(", "pad", "*", "(", "multiple", "-", "extra", ")", ")", ")" ]
return raw with enough pad bytes appended to ensure its length is a multiple of 4 .
train
false
23,886
def check_freezing_date(posting_date, adv_adj=False): if (not adv_adj): acc_frozen_upto = frappe.db.get_value(u'Accounts Settings', None, u'acc_frozen_upto') if acc_frozen_upto: frozen_accounts_modifier = frappe.db.get_value(u'Accounts Settings', None, u'frozen_accounts_modifier') if ((getdate(posting_date) <= getdate(acc_frozen_upto)) and (not (frozen_accounts_modifier in frappe.get_roles()))): frappe.throw(_(u'You are not authorized to add or update entries before {0}').format(formatdate(acc_frozen_upto)))
[ "def", "check_freezing_date", "(", "posting_date", ",", "adv_adj", "=", "False", ")", ":", "if", "(", "not", "adv_adj", ")", ":", "acc_frozen_upto", "=", "frappe", ".", "db", ".", "get_value", "(", "u'Accounts Settings'", ",", "None", ",", "u'acc_frozen_upto'", ")", "if", "acc_frozen_upto", ":", "frozen_accounts_modifier", "=", "frappe", ".", "db", ".", "get_value", "(", "u'Accounts Settings'", ",", "None", ",", "u'frozen_accounts_modifier'", ")", "if", "(", "(", "getdate", "(", "posting_date", ")", "<=", "getdate", "(", "acc_frozen_upto", ")", ")", "and", "(", "not", "(", "frozen_accounts_modifier", "in", "frappe", ".", "get_roles", "(", ")", ")", ")", ")", ":", "frappe", ".", "throw", "(", "_", "(", "u'You are not authorized to add or update entries before {0}'", ")", ".", "format", "(", "formatdate", "(", "acc_frozen_upto", ")", ")", ")" ]
nobody can do gl entries where posting date is before freezing date except authorized person .
train
false
23,888
def hazard(): return s3_rest_controller()
[ "def", "hazard", "(", ")", ":", "return", "s3_rest_controller", "(", ")" ]
rest controller .
train
false
23,890
def test_random_report_log_xdist(testdir): pytest.importorskip('xdist') testdir.makepyfile("\n import pytest, time\n @pytest.mark.parametrize('i', list(range(30)))\n def test_x(i):\n assert i != 22\n ") (_, dom) = runandparse(testdir, '-n2') suite_node = dom.find_first_by_tag('testsuite') failed = [] for case_node in suite_node.find_by_tag('testcase'): if case_node.find_first_by_tag('failure'): failed.append(case_node['name']) assert (failed == ['test_x[22]'])
[ "def", "test_random_report_log_xdist", "(", "testdir", ")", ":", "pytest", ".", "importorskip", "(", "'xdist'", ")", "testdir", ".", "makepyfile", "(", "\"\\n import pytest, time\\n @pytest.mark.parametrize('i', list(range(30)))\\n def test_x(i):\\n assert i != 22\\n \"", ")", "(", "_", ",", "dom", ")", "=", "runandparse", "(", "testdir", ",", "'-n2'", ")", "suite_node", "=", "dom", ".", "find_first_by_tag", "(", "'testsuite'", ")", "failed", "=", "[", "]", "for", "case_node", "in", "suite_node", ".", "find_by_tag", "(", "'testcase'", ")", ":", "if", "case_node", ".", "find_first_by_tag", "(", "'failure'", ")", ":", "failed", ".", "append", "(", "case_node", "[", "'name'", "]", ")", "assert", "(", "failed", "==", "[", "'test_x[22]'", "]", ")" ]
xdist calls pytest_runtest_logreport as they are executed by the slaves .
train
false
23,891
def conforms_partial_ordering(tuples, sorted_elements): deps = defaultdict(set) for (parent, child) in tuples: deps[parent].add(child) for (i, node) in enumerate(sorted_elements): for n in sorted_elements[i:]: if (node in deps[n]): return False else: return True
[ "def", "conforms_partial_ordering", "(", "tuples", ",", "sorted_elements", ")", ":", "deps", "=", "defaultdict", "(", "set", ")", "for", "(", "parent", ",", "child", ")", "in", "tuples", ":", "deps", "[", "parent", "]", ".", "add", "(", "child", ")", "for", "(", "i", ",", "node", ")", "in", "enumerate", "(", "sorted_elements", ")", ":", "for", "n", "in", "sorted_elements", "[", "i", ":", "]", ":", "if", "(", "node", "in", "deps", "[", "n", "]", ")", ":", "return", "False", "else", ":", "return", "True" ]
true if the given sorting conforms to the given partial ordering .
train
false
23,892
@should_profile_cpu def stop_cpu_profiling(): cancel_thread(SAVE_THREAD_PTR) dump_data()
[ "@", "should_profile_cpu", "def", "stop_cpu_profiling", "(", ")", ":", "cancel_thread", "(", "SAVE_THREAD_PTR", ")", "dump_data", "(", ")" ]
save profiling information .
train
false
23,893
def generate_signing_key(args): if os.path.exists(args.keyfile): raise esptool.FatalError(('ERROR: Key file %s already exists' % args.keyfile)) sk = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p) with open(args.keyfile, 'wb') as f: f.write(sk.to_pem()) print ('ECDSA NIST256p private key in PEM format written to %s' % args.keyfile)
[ "def", "generate_signing_key", "(", "args", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "args", ".", "keyfile", ")", ":", "raise", "esptool", ".", "FatalError", "(", "(", "'ERROR: Key file %s already exists'", "%", "args", ".", "keyfile", ")", ")", "sk", "=", "ecdsa", ".", "SigningKey", ".", "generate", "(", "curve", "=", "ecdsa", ".", "NIST256p", ")", "with", "open", "(", "args", ".", "keyfile", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "sk", ".", "to_pem", "(", ")", ")", "print", "(", "'ECDSA NIST256p private key in PEM format written to %s'", "%", "args", ".", "keyfile", ")" ]
generate an ecdsa signing key for signing secure boot images .
train
true
23,894
def get_cohorted_discussions(course, course_settings): cohorted_course_wide_discussions = [] cohorted_inline_discussions = [] course_wide_discussions = [topic['id'] for (__, topic) in course.discussion_topics.items()] all_discussions = get_discussion_categories_ids(course, None, include_all=True) for cohorted_discussion_id in course_settings.cohorted_discussions: if (cohorted_discussion_id in course_wide_discussions): cohorted_course_wide_discussions.append(cohorted_discussion_id) elif (cohorted_discussion_id in all_discussions): cohorted_inline_discussions.append(cohorted_discussion_id) return (cohorted_course_wide_discussions, cohorted_inline_discussions)
[ "def", "get_cohorted_discussions", "(", "course", ",", "course_settings", ")", ":", "cohorted_course_wide_discussions", "=", "[", "]", "cohorted_inline_discussions", "=", "[", "]", "course_wide_discussions", "=", "[", "topic", "[", "'id'", "]", "for", "(", "__", ",", "topic", ")", "in", "course", ".", "discussion_topics", ".", "items", "(", ")", "]", "all_discussions", "=", "get_discussion_categories_ids", "(", "course", ",", "None", ",", "include_all", "=", "True", ")", "for", "cohorted_discussion_id", "in", "course_settings", ".", "cohorted_discussions", ":", "if", "(", "cohorted_discussion_id", "in", "course_wide_discussions", ")", ":", "cohorted_course_wide_discussions", ".", "append", "(", "cohorted_discussion_id", ")", "elif", "(", "cohorted_discussion_id", "in", "all_discussions", ")", ":", "cohorted_inline_discussions", ".", "append", "(", "cohorted_discussion_id", ")", "return", "(", "cohorted_course_wide_discussions", ",", "cohorted_inline_discussions", ")" ]
returns the course-wide and inline cohorted discussion ids separately .
train
false
23,895
def test_biweight_location_axis(): with NumpyRNGContext(12345): ny = 100 nx = 200 data = normal(5, 2, (ny, nx)) bw = funcs.biweight_location(data, axis=0) bwi = [] for i in range(nx): bwi.append(funcs.biweight_location(data[:, i])) bwi = np.array(bwi) assert_allclose(bw, bwi) bw = funcs.biweight_location(data, axis=1) bwi = [] for i in range(ny): bwi.append(funcs.biweight_location(data[i, :])) bwi = np.array(bwi) assert_allclose(bw, bwi)
[ "def", "test_biweight_location_axis", "(", ")", ":", "with", "NumpyRNGContext", "(", "12345", ")", ":", "ny", "=", "100", "nx", "=", "200", "data", "=", "normal", "(", "5", ",", "2", ",", "(", "ny", ",", "nx", ")", ")", "bw", "=", "funcs", ".", "biweight_location", "(", "data", ",", "axis", "=", "0", ")", "bwi", "=", "[", "]", "for", "i", "in", "range", "(", "nx", ")", ":", "bwi", ".", "append", "(", "funcs", ".", "biweight_location", "(", "data", "[", ":", ",", "i", "]", ")", ")", "bwi", "=", "np", ".", "array", "(", "bwi", ")", "assert_allclose", "(", "bw", ",", "bwi", ")", "bw", "=", "funcs", ".", "biweight_location", "(", "data", ",", "axis", "=", "1", ")", "bwi", "=", "[", "]", "for", "i", "in", "range", "(", "ny", ")", ":", "bwi", ".", "append", "(", "funcs", ".", "biweight_location", "(", "data", "[", "i", ",", ":", "]", ")", ")", "bwi", "=", "np", ".", "array", "(", "bwi", ")", "assert_allclose", "(", "bw", ",", "bwi", ")" ]
test a 2d array with the axis keyword .
train
false
23,896
def skew(a, axis=0, bias=True, nan_policy='propagate'): (a, axis) = _chk_asarray(a, axis) n = a.shape[axis] (contains_nan, nan_policy) = _contains_nan(a, nan_policy) if (contains_nan and (nan_policy == 'omit')): a = ma.masked_invalid(a) return mstats_basic.skew(a, axis, bias) m2 = moment(a, 2, axis) m3 = moment(a, 3, axis) zero = (m2 == 0) vals = _lazywhere((~ zero), (m2, m3), (lambda m2, m3: (m3 / (m2 ** 1.5))), 0.0) if (not bias): can_correct = ((n > 2) & (m2 > 0)) if can_correct.any(): m2 = np.extract(can_correct, m2) m3 = np.extract(can_correct, m3) nval = (((np.sqrt(((n - 1.0) * n)) / (n - 2.0)) * m3) / (m2 ** 1.5)) np.place(vals, can_correct, nval) if (vals.ndim == 0): return vals.item() return vals
[ "def", "skew", "(", "a", ",", "axis", "=", "0", ",", "bias", "=", "True", ",", "nan_policy", "=", "'propagate'", ")", ":", "(", "a", ",", "axis", ")", "=", "_chk_asarray", "(", "a", ",", "axis", ")", "n", "=", "a", ".", "shape", "[", "axis", "]", "(", "contains_nan", ",", "nan_policy", ")", "=", "_contains_nan", "(", "a", ",", "nan_policy", ")", "if", "(", "contains_nan", "and", "(", "nan_policy", "==", "'omit'", ")", ")", ":", "a", "=", "ma", ".", "masked_invalid", "(", "a", ")", "return", "mstats_basic", ".", "skew", "(", "a", ",", "axis", ",", "bias", ")", "m2", "=", "moment", "(", "a", ",", "2", ",", "axis", ")", "m3", "=", "moment", "(", "a", ",", "3", ",", "axis", ")", "zero", "=", "(", "m2", "==", "0", ")", "vals", "=", "_lazywhere", "(", "(", "~", "zero", ")", ",", "(", "m2", ",", "m3", ")", ",", "(", "lambda", "m2", ",", "m3", ":", "(", "m3", "/", "(", "m2", "**", "1.5", ")", ")", ")", ",", "0.0", ")", "if", "(", "not", "bias", ")", ":", "can_correct", "=", "(", "(", "n", ">", "2", ")", "&", "(", "m2", ">", "0", ")", ")", "if", "can_correct", ".", "any", "(", ")", ":", "m2", "=", "np", ".", "extract", "(", "can_correct", ",", "m2", ")", "m3", "=", "np", ".", "extract", "(", "can_correct", ",", "m3", ")", "nval", "=", "(", "(", "(", "np", ".", "sqrt", "(", "(", "(", "n", "-", "1.0", ")", "*", "n", ")", ")", "/", "(", "n", "-", "2.0", ")", ")", "*", "m3", ")", "/", "(", "m2", "**", "1.5", ")", ")", "np", ".", "place", "(", "vals", ",", "can_correct", ",", "nval", ")", "if", "(", "vals", ".", "ndim", "==", "0", ")", ":", "return", "vals", ".", "item", "(", ")", "return", "vals" ]
skew matrix a such that a x v = av for any v .
train
false
23,897
def display_colorbar(): (y, x) = np.mgrid[(-4):2:200j, (-4):2:200j] z = (10 * np.cos(((x ** 2) + (y ** 2)))) cmap = plt.cm.copper ls = LightSource(315, 45) rgb = ls.shade(z, cmap) (fig, ax) = plt.subplots() ax.imshow(rgb, interpolation='bilinear') im = ax.imshow(z, cmap=cmap) im.remove() fig.colorbar(im) ax.set_title('Using a colorbar with a shaded plot', size='x-large')
[ "def", "display_colorbar", "(", ")", ":", "(", "y", ",", "x", ")", "=", "np", ".", "mgrid", "[", "(", "-", "4", ")", ":", "2", ":", "200j", ",", "(", "-", "4", ")", ":", "2", ":", "200j", "]", "z", "=", "(", "10", "*", "np", ".", "cos", "(", "(", "(", "x", "**", "2", ")", "+", "(", "y", "**", "2", ")", ")", ")", ")", "cmap", "=", "plt", ".", "cm", ".", "copper", "ls", "=", "LightSource", "(", "315", ",", "45", ")", "rgb", "=", "ls", ".", "shade", "(", "z", ",", "cmap", ")", "(", "fig", ",", "ax", ")", "=", "plt", ".", "subplots", "(", ")", "ax", ".", "imshow", "(", "rgb", ",", "interpolation", "=", "'bilinear'", ")", "im", "=", "ax", ".", "imshow", "(", "z", ",", "cmap", "=", "cmap", ")", "im", ".", "remove", "(", ")", "fig", ".", "colorbar", "(", "im", ")", "ax", ".", "set_title", "(", "'Using a colorbar with a shaded plot'", ",", "size", "=", "'x-large'", ")" ]
display a correct numeric colorbar for a shaded plot .
train
false
23,898
def create_gs_key_async(filename, rpc=None): if (not isinstance(filename, basestring)): raise TypeError(('filename must be str: %s' % filename)) if (not filename.startswith(GS_PREFIX)): raise ValueError(('filename must start with "/gs/": %s' % filename)) if (not ('/' in filename[4:])): raise ValueError(('filename must have the format "/gs/bucket_name/object_name": %s' % filename)) request = blobstore_service_pb.CreateEncodedGoogleStorageKeyRequest() response = blobstore_service_pb.CreateEncodedGoogleStorageKeyResponse() request.set_filename(filename) return _make_async_call(rpc, 'CreateEncodedGoogleStorageKey', request, response, _get_result_hook, (lambda rpc: rpc.response.blob_key()))
[ "def", "create_gs_key_async", "(", "filename", ",", "rpc", "=", "None", ")", ":", "if", "(", "not", "isinstance", "(", "filename", ",", "basestring", ")", ")", ":", "raise", "TypeError", "(", "(", "'filename must be str: %s'", "%", "filename", ")", ")", "if", "(", "not", "filename", ".", "startswith", "(", "GS_PREFIX", ")", ")", ":", "raise", "ValueError", "(", "(", "'filename must start with \"/gs/\": %s'", "%", "filename", ")", ")", "if", "(", "not", "(", "'/'", "in", "filename", "[", "4", ":", "]", ")", ")", ":", "raise", "ValueError", "(", "(", "'filename must have the format \"/gs/bucket_name/object_name\": %s'", "%", "filename", ")", ")", "request", "=", "blobstore_service_pb", ".", "CreateEncodedGoogleStorageKeyRequest", "(", ")", "response", "=", "blobstore_service_pb", ".", "CreateEncodedGoogleStorageKeyResponse", "(", ")", "request", ".", "set_filename", "(", "filename", ")", "return", "_make_async_call", "(", "rpc", ",", "'CreateEncodedGoogleStorageKey'", ",", "request", ",", "response", ",", "_get_result_hook", ",", "(", "lambda", "rpc", ":", "rpc", ".", "response", ".", "blob_key", "(", ")", ")", ")" ]
create an encoded key for a google storage file - async version .
train
false
23,899
def serve(request, path, document_root=None, insecure=False, **kwargs): if ((not settings.DEBUG) and (not insecure)): raise ImproperlyConfigured("The staticfiles view can only be used in debug mode or if the the --insecure option of 'runserver' is used") normalized_path = posixpath.normpath(urllib.unquote(path)).lstrip('/') absolute_path = finders.find(normalized_path) if (not absolute_path): if (path.endswith('/') or (path == '')): raise Http404('Directory indexes are not allowed here.') raise Http404(("'%s' could not be found" % path)) (document_root, path) = os.path.split(absolute_path) return static.serve(request, path, document_root=document_root, **kwargs)
[ "def", "serve", "(", "request", ",", "path", ",", "document_root", "=", "None", ",", "insecure", "=", "False", ",", "**", "kwargs", ")", ":", "if", "(", "(", "not", "settings", ".", "DEBUG", ")", "and", "(", "not", "insecure", ")", ")", ":", "raise", "ImproperlyConfigured", "(", "\"The staticfiles view can only be used in debug mode or if the the --insecure option of 'runserver' is used\"", ")", "normalized_path", "=", "posixpath", ".", "normpath", "(", "urllib", ".", "unquote", "(", "path", ")", ")", ".", "lstrip", "(", "'/'", ")", "absolute_path", "=", "finders", ".", "find", "(", "normalized_path", ")", "if", "(", "not", "absolute_path", ")", ":", "if", "(", "path", ".", "endswith", "(", "'/'", ")", "or", "(", "path", "==", "''", ")", ")", ":", "raise", "Http404", "(", "'Directory indexes are not allowed here.'", ")", "raise", "Http404", "(", "(", "\"'%s' could not be found\"", "%", "path", ")", ")", "(", "document_root", ",", "path", ")", "=", "os", ".", "path", ".", "split", "(", "absolute_path", ")", "return", "static", ".", "serve", "(", "request", ",", "path", ",", "document_root", "=", "document_root", ",", "**", "kwargs", ")" ]
start development web server .
train
false
23,900
def get_default_gid(uid): if isinstance(uid, int): return pwd.getpwuid(uid).pw_gid return pwd.getpwnam(uid).pw_gid
[ "def", "get_default_gid", "(", "uid", ")", ":", "if", "isinstance", "(", "uid", ",", "int", ")", ":", "return", "pwd", ".", "getpwuid", "(", "uid", ")", ".", "pw_gid", "return", "pwd", ".", "getpwnam", "(", "uid", ")", ".", "pw_gid" ]
return the default group of a specific user .
train
false
23,901
def xmliter(obj, nodename): nodename_patt = re.escape(nodename) HEADER_START_RE = re.compile(('^(.*?)<\\s*%s(?:\\s|>)' % nodename_patt), re.S) HEADER_END_RE = re.compile(('<\\s*/%s\\s*>' % nodename_patt), re.S) text = _body_or_str(obj) header_start = re.search(HEADER_START_RE, text) header_start = (header_start.group(1).strip() if header_start else '') header_end = re_rsearch(HEADER_END_RE, text) header_end = (text[header_end[1]:].strip() if header_end else '') r = re.compile(('<%(np)s[\\s>].*?</%(np)s>' % {'np': nodename_patt}), re.DOTALL) for match in r.finditer(text): nodetext = ((header_start + match.group()) + header_end) (yield Selector(text=nodetext, type='xml').xpath(('//' + nodename))[0])
[ "def", "xmliter", "(", "obj", ",", "nodename", ")", ":", "nodename_patt", "=", "re", ".", "escape", "(", "nodename", ")", "HEADER_START_RE", "=", "re", ".", "compile", "(", "(", "'^(.*?)<\\\\s*%s(?:\\\\s|>)'", "%", "nodename_patt", ")", ",", "re", ".", "S", ")", "HEADER_END_RE", "=", "re", ".", "compile", "(", "(", "'<\\\\s*/%s\\\\s*>'", "%", "nodename_patt", ")", ",", "re", ".", "S", ")", "text", "=", "_body_or_str", "(", "obj", ")", "header_start", "=", "re", ".", "search", "(", "HEADER_START_RE", ",", "text", ")", "header_start", "=", "(", "header_start", ".", "group", "(", "1", ")", ".", "strip", "(", ")", "if", "header_start", "else", "''", ")", "header_end", "=", "re_rsearch", "(", "HEADER_END_RE", ",", "text", ")", "header_end", "=", "(", "text", "[", "header_end", "[", "1", "]", ":", "]", ".", "strip", "(", ")", "if", "header_end", "else", "''", ")", "r", "=", "re", ".", "compile", "(", "(", "'<%(np)s[\\\\s>].*?</%(np)s>'", "%", "{", "'np'", ":", "nodename_patt", "}", ")", ",", "re", ".", "DOTALL", ")", "for", "match", "in", "r", ".", "finditer", "(", "text", ")", ":", "nodetext", "=", "(", "(", "header_start", "+", "match", ".", "group", "(", ")", ")", "+", "header_end", ")", "(", "yield", "Selector", "(", "text", "=", "nodetext", ",", "type", "=", "'xml'", ")", ".", "xpath", "(", "(", "'//'", "+", "nodename", ")", ")", "[", "0", "]", ")" ]
return a iterator of selectors over all nodes of a xml document .
train
false
23,902
def move_task_by_id(task_id, dest, **kwargs): return move_by_idmap({task_id: dest}, **kwargs)
[ "def", "move_task_by_id", "(", "task_id", ",", "dest", ",", "**", "kwargs", ")", ":", "return", "move_by_idmap", "(", "{", "task_id", ":", "dest", "}", ",", "**", "kwargs", ")" ]
find a task by id and move it to another queue .
train
false