id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
43,622
def preload_module(*modules): for m in modules: s = ('import %s as x; x.' % m) Script(s, 1, len(s), None).completions()
[ "def", "preload_module", "(", "*", "modules", ")", ":", "for", "m", "in", "modules", ":", "s", "=", "(", "'import %s as x; x.'", "%", "m", ")", "Script", "(", "s", ",", "1", ",", "len", "(", "s", ")", ",", "None", ")", ".", "completions", "(", ")" ]
preloading modules tells jedi to load a module now .
train
false
43,625
@requires_mne def test_compensation_raw_mne(): tempdir = _TempDir() def compensate_mne(fname, grad): tmp_fname = op.join(tempdir, 'mne_ctf_test_raw.fif') cmd = ['mne_process_raw', '--raw', fname, '--save', tmp_fname, '--grad', str(grad), '--projoff', '--filteroff'] run_subprocess(cmd) return read_raw_fif(tmp_fname, preload=True) for grad in [0, 2, 3]: raw_py = read_raw_fif(ctf_comp_fname, preload=True) raw_py.apply_gradient_compensation(grad) raw_c = compensate_mne(ctf_comp_fname, grad) assert_allclose(raw_py._data, raw_c._data, rtol=1e-06, atol=1e-17) assert_equal(raw_py.info['nchan'], raw_c.info['nchan']) for (ch_py, ch_c) in zip(raw_py.info['chs'], raw_c.info['chs']): for key in ('ch_name', 'coil_type', 'scanno', 'logno', 'unit', 'coord_frame', 'kind'): assert_equal(ch_py[key], ch_c[key]) for key in ('loc', 'unit_mul', 'range', 'cal'): assert_allclose(ch_py[key], ch_c[key])
[ "@", "requires_mne", "def", "test_compensation_raw_mne", "(", ")", ":", "tempdir", "=", "_TempDir", "(", ")", "def", "compensate_mne", "(", "fname", ",", "grad", ")", ":", "tmp_fname", "=", "op", ".", "join", "(", "tempdir", ",", "'mne_ctf_test_raw.fif'", ")", "cmd", "=", "[", "'mne_process_raw'", ",", "'--raw'", ",", "fname", ",", "'--save'", ",", "tmp_fname", ",", "'--grad'", ",", "str", "(", "grad", ")", ",", "'--projoff'", ",", "'--filteroff'", "]", "run_subprocess", "(", "cmd", ")", "return", "read_raw_fif", "(", "tmp_fname", ",", "preload", "=", "True", ")", "for", "grad", "in", "[", "0", ",", "2", ",", "3", "]", ":", "raw_py", "=", "read_raw_fif", "(", "ctf_comp_fname", ",", "preload", "=", "True", ")", "raw_py", ".", "apply_gradient_compensation", "(", "grad", ")", "raw_c", "=", "compensate_mne", "(", "ctf_comp_fname", ",", "grad", ")", "assert_allclose", "(", "raw_py", ".", "_data", ",", "raw_c", ".", "_data", ",", "rtol", "=", "1e-06", ",", "atol", "=", "1e-17", ")", "assert_equal", "(", "raw_py", ".", "info", "[", "'nchan'", "]", ",", "raw_c", ".", "info", "[", "'nchan'", "]", ")", "for", "(", "ch_py", ",", "ch_c", ")", "in", "zip", "(", "raw_py", ".", "info", "[", "'chs'", "]", ",", "raw_c", ".", "info", "[", "'chs'", "]", ")", ":", "for", "key", "in", "(", "'ch_name'", ",", "'coil_type'", ",", "'scanno'", ",", "'logno'", ",", "'unit'", ",", "'coord_frame'", ",", "'kind'", ")", ":", "assert_equal", "(", "ch_py", "[", "key", "]", ",", "ch_c", "[", "key", "]", ")", "for", "key", "in", "(", "'loc'", ",", "'unit_mul'", ",", "'range'", ",", "'cal'", ")", ":", "assert_allclose", "(", "ch_py", "[", "key", "]", ",", "ch_c", "[", "key", "]", ")" ]
test raw compensation by comparing with mne-c .
train
false
43,626
@cache_return def may_be_null_is_nullable(): repo = GIRepository() repo.require('GLib', '2.0', 0) info = repo.find_by_name('GLib', 'spawn_sync') return (not info.get_arg(8).may_be_null)
[ "@", "cache_return", "def", "may_be_null_is_nullable", "(", ")", ":", "repo", "=", "GIRepository", "(", ")", "repo", ".", "require", "(", "'GLib'", ",", "'2.0'", ",", "0", ")", "info", "=", "repo", ".", "find_by_name", "(", "'GLib'", ",", "'spawn_sync'", ")", "return", "(", "not", "info", ".", "get_arg", "(", "8", ")", ".", "may_be_null", ")" ]
if may_be_null returns nullable or if null can be passed in .
train
true
43,628
def get_keystone_session(**config): return Session(auth=_openstack_auth_from_config(**config), verify=_openstack_verify_from_config(**config))
[ "def", "get_keystone_session", "(", "**", "config", ")", ":", "return", "Session", "(", "auth", "=", "_openstack_auth_from_config", "(", "**", "config", ")", ",", "verify", "=", "_openstack_verify_from_config", "(", "**", "config", ")", ")" ]
create a keystone session from a configuration stanza .
train
false
43,629
def setup_container(image, container_dir): img = _DiskImage(image=image, mount_dir=container_dir) dev = img.mount() if (dev is None): LOG.error(_LE("Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s"), {'image': img, 'target': container_dir, 'errors': img.errors}) raise exception.NovaException(img.errors) return dev
[ "def", "setup_container", "(", "image", ",", "container_dir", ")", ":", "img", "=", "_DiskImage", "(", "image", "=", "image", ",", "mount_dir", "=", "container_dir", ")", "dev", "=", "img", ".", "mount", "(", ")", "if", "(", "dev", "is", "None", ")", ":", "LOG", ".", "error", "(", "_LE", "(", "\"Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s\"", ")", ",", "{", "'image'", ":", "img", ",", "'target'", ":", "container_dir", ",", "'errors'", ":", "img", ".", "errors", "}", ")", "raise", "exception", ".", "NovaException", "(", "img", ".", "errors", ")", "return", "dev" ]
setup the lxc container .
train
false
43,631
def printOneTrainingVector(x): print ''.join((('1' if (k != 0) else '.') for k in x))
[ "def", "printOneTrainingVector", "(", "x", ")", ":", "print", "''", ".", "join", "(", "(", "(", "'1'", "if", "(", "k", "!=", "0", ")", "else", "'.'", ")", "for", "k", "in", "x", ")", ")" ]
print a single vector succinctly .
train
false
43,632
def quicksort(a): n = len(a) if (n <= 1): return a else: from random import randrange pivot = a.pop(randrange(n)) lesser = quicksort([x for x in a if (x < pivot)]) greater = quicksort([x for x in a if (x >= pivot)]) return ((lesser + [pivot]) + greater)
[ "def", "quicksort", "(", "a", ")", ":", "n", "=", "len", "(", "a", ")", "if", "(", "n", "<=", "1", ")", ":", "return", "a", "else", ":", "from", "random", "import", "randrange", "pivot", "=", "a", ".", "pop", "(", "randrange", "(", "n", ")", ")", "lesser", "=", "quicksort", "(", "[", "x", "for", "x", "in", "a", "if", "(", "x", "<", "pivot", ")", "]", ")", "greater", "=", "quicksort", "(", "[", "x", "for", "x", "in", "a", "if", "(", "x", ">=", "pivot", ")", "]", ")", "return", "(", "(", "lesser", "+", "[", "pivot", "]", ")", "+", "greater", ")" ]
quicksort implementation in python note: this algo uses o(n) extra space to compute quicksort .
train
false
43,633
def kmap(registry, xml_parent, data): kmap = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.KmapJenkinsBuilder') kmap.set('plugin', 'kmap-jenkins') publish = data.get('publish-optional', False) mapping = [('username', 'username', None), ('password', 'password', None), ('url', 'kmapClient', None), ('categories', 'categories', None), ('file-path', 'filePath', None), ('app-name', 'appName', None), ('bundle', 'bundle', ''), ('version', 'version', None), ('description', 'description', ''), ('icon-path', 'iconPath', '')] convert_mapping_to_xml(kmap, data, mapping, fail_required=True) if (publish is True): publish_optional = XML.SubElement(kmap, 'publishOptional') publish_mapping = [('groups', 'teams', ''), ('users', 'users', ''), ('notify-users', 'sendNotifications', False)] convert_mapping_to_xml(publish_optional, data, publish_mapping, fail_required=True)
[ "def", "kmap", "(", "registry", ",", "xml_parent", ",", "data", ")", ":", "kmap", "=", "XML", ".", "SubElement", "(", "xml_parent", ",", "'org.jenkinsci.plugins.KmapJenkinsBuilder'", ")", "kmap", ".", "set", "(", "'plugin'", ",", "'kmap-jenkins'", ")", "publish", "=", "data", ".", "get", "(", "'publish-optional'", ",", "False", ")", "mapping", "=", "[", "(", "'username'", ",", "'username'", ",", "None", ")", ",", "(", "'password'", ",", "'password'", ",", "None", ")", ",", "(", "'url'", ",", "'kmapClient'", ",", "None", ")", ",", "(", "'categories'", ",", "'categories'", ",", "None", ")", ",", "(", "'file-path'", ",", "'filePath'", ",", "None", ")", ",", "(", "'app-name'", ",", "'appName'", ",", "None", ")", ",", "(", "'bundle'", ",", "'bundle'", ",", "''", ")", ",", "(", "'version'", ",", "'version'", ",", "None", ")", ",", "(", "'description'", ",", "'description'", ",", "''", ")", ",", "(", "'icon-path'", ",", "'iconPath'", ",", "''", ")", "]", "convert_mapping_to_xml", "(", "kmap", ",", "data", ",", "mapping", ",", "fail_required", "=", "True", ")", "if", "(", "publish", "is", "True", ")", ":", "publish_optional", "=", "XML", ".", "SubElement", "(", "kmap", ",", "'publishOptional'", ")", "publish_mapping", "=", "[", "(", "'groups'", ",", "'teams'", ",", "''", ")", ",", "(", "'users'", ",", "'users'", ",", "''", ")", ",", "(", "'notify-users'", ",", "'sendNotifications'", ",", "False", ")", "]", "convert_mapping_to_xml", "(", "publish_optional", ",", "data", ",", "publish_mapping", ",", "fail_required", "=", "True", ")" ]
yaml: kmap publish mobile applications to your keivox kmap private mobile app store .
train
false
43,635
def extract_link_from_link_header(http_response, header_name, header_value): re_match = LINK_HEADER_RE.search(header_value) if re_match: try: url_str = re_match.group(1) except IndexError: raise StopIteration if (not url_str): raise StopIteration try: (yield http_response.get_url().url_join(url_str)) except ValueError: msg = 'The application sent a "%s" header that w3af failed to correctly parse as an URL, the header value was: "%s"' om.out.debug((msg % (header_name, header_value)))
[ "def", "extract_link_from_link_header", "(", "http_response", ",", "header_name", ",", "header_value", ")", ":", "re_match", "=", "LINK_HEADER_RE", ".", "search", "(", "header_value", ")", "if", "re_match", ":", "try", ":", "url_str", "=", "re_match", ".", "group", "(", "1", ")", "except", "IndexError", ":", "raise", "StopIteration", "if", "(", "not", "url_str", ")", ":", "raise", "StopIteration", "try", ":", "(", "yield", "http_response", ".", "get_url", "(", ")", ".", "url_join", "(", "url_str", ")", ")", "except", "ValueError", ":", "msg", "=", "'The application sent a \"%s\" header that w3af failed to correctly parse as an URL, the header value was: \"%s\"'", "om", ".", "out", ".", "debug", "(", "(", "msg", "%", "(", "header_name", ",", "header_value", ")", ")", ")" ]
extract links from http response headers which have the header value set to a "wordpress link" example headers we can parse: link: <URL rel=shortlink .
train
false
43,637
def hash_opensubtitles(video_path): bytesize = struct.calcsize('<q') with open(video_path, 'rb') as f: filesize = os.path.getsize(video_path) filehash = filesize if (filesize < (65536 * 2)): return for _ in range((65536 // bytesize)): filebuffer = f.read(bytesize) (l_value,) = struct.unpack('<q', filebuffer) filehash += l_value filehash &= 18446744073709551615L f.seek(max(0, (filesize - 65536)), 0) for _ in range((65536 // bytesize)): filebuffer = f.read(bytesize) (l_value,) = struct.unpack('<q', filebuffer) filehash += l_value filehash &= 18446744073709551615L returnedhash = ('%016x' % filehash) return returnedhash
[ "def", "hash_opensubtitles", "(", "video_path", ")", ":", "bytesize", "=", "struct", ".", "calcsize", "(", "'<q'", ")", "with", "open", "(", "video_path", ",", "'rb'", ")", "as", "f", ":", "filesize", "=", "os", ".", "path", ".", "getsize", "(", "video_path", ")", "filehash", "=", "filesize", "if", "(", "filesize", "<", "(", "65536", "*", "2", ")", ")", ":", "return", "for", "_", "in", "range", "(", "(", "65536", "//", "bytesize", ")", ")", ":", "filebuffer", "=", "f", ".", "read", "(", "bytesize", ")", "(", "l_value", ",", ")", "=", "struct", ".", "unpack", "(", "'<q'", ",", "filebuffer", ")", "filehash", "+=", "l_value", "filehash", "&=", "18446744073709551615", "L", "f", ".", "seek", "(", "max", "(", "0", ",", "(", "filesize", "-", "65536", ")", ")", ",", "0", ")", "for", "_", "in", "range", "(", "(", "65536", "//", "bytesize", ")", ")", ":", "filebuffer", "=", "f", ".", "read", "(", "bytesize", ")", "(", "l_value", ",", ")", "=", "struct", ".", "unpack", "(", "'<q'", ",", "filebuffer", ")", "filehash", "+=", "l_value", "filehash", "&=", "18446744073709551615", "L", "returnedhash", "=", "(", "'%016x'", "%", "filehash", ")", "return", "returnedhash" ]
compute a hash using opensubtitles algorithm .
train
true
43,638
def encode_reply(version, rep, rsv, address_type, bind_address, bind_port): data = struct.pack('BBBB', version, rep, rsv, address_type) data += __encode_address(address_type, bind_address) data += struct.pack('!H', bind_port) return data
[ "def", "encode_reply", "(", "version", ",", "rep", ",", "rsv", ",", "address_type", ",", "bind_address", ",", "bind_port", ")", ":", "data", "=", "struct", ".", "pack", "(", "'BBBB'", ",", "version", ",", "rep", ",", "rsv", ",", "address_type", ")", "data", "+=", "__encode_address", "(", "address_type", ",", "bind_address", ")", "data", "+=", "struct", ".", "pack", "(", "'!H'", ",", "bind_port", ")", "return", "data" ]
encode a reply .
train
false
43,639
def get_main_version(version=None): version = get_complete_version(version) parts = (2 if (version[2] == 0) else 3) return '.'.join((str(x) for x in version[:parts]))
[ "def", "get_main_version", "(", "version", "=", "None", ")", ":", "version", "=", "get_complete_version", "(", "version", ")", "parts", "=", "(", "2", "if", "(", "version", "[", "2", "]", "==", "0", ")", "else", "3", ")", "return", "'.'", ".", "join", "(", "(", "str", "(", "x", ")", "for", "x", "in", "version", "[", ":", "parts", "]", ")", ")" ]
returns main version from version .
train
true
43,640
def test_roles_as_tuples(): @roles('r1') def command(): pass eq_hosts(command, ['a', 'b'], env={'roledefs': tuple_roles}) eq_effective_roles(command, ['r1'], env={'roledefs': fake_roles})
[ "def", "test_roles_as_tuples", "(", ")", ":", "@", "roles", "(", "'r1'", ")", "def", "command", "(", ")", ":", "pass", "eq_hosts", "(", "command", ",", "[", "'a'", ",", "'b'", "]", ",", "env", "=", "{", "'roledefs'", ":", "tuple_roles", "}", ")", "eq_effective_roles", "(", "command", ",", "[", "'r1'", "]", ",", "env", "=", "{", "'roledefs'", ":", "fake_roles", "}", ")" ]
test that a list of roles as a tuple succeeds .
train
false
43,642
@pytest.mark.parametrize('fast_writer', [True, False]) @pytest.mark.parametrize('fmt', ['%0.1f', '.1f', '0.1f', '{0:0.1f}']) def test_write_format(fast_writer, fmt): data = ascii.read('#c1\n # c2 DCTB \na,b,c\n# c3\n1.11,2.22,3.33') out = StringIO() expected = ['# c1', '# c2', '# c3', 'a b c', '1.1 2.22 3.33'] data['a'].format = fmt ascii.write(data, out, format='basic', fast_writer=fast_writer) assert (out.getvalue().splitlines() == expected)
[ "@", "pytest", ".", "mark", ".", "parametrize", "(", "'fast_writer'", ",", "[", "True", ",", "False", "]", ")", "@", "pytest", ".", "mark", ".", "parametrize", "(", "'fmt'", ",", "[", "'%0.1f'", ",", "'.1f'", ",", "'0.1f'", ",", "'{0:0.1f}'", "]", ")", "def", "test_write_format", "(", "fast_writer", ",", "fmt", ")", ":", "data", "=", "ascii", ".", "read", "(", "'#c1\\n # c2 DCTB \\na,b,c\\n# c3\\n1.11,2.22,3.33'", ")", "out", "=", "StringIO", "(", ")", "expected", "=", "[", "'# c1'", ",", "'# c2'", ",", "'# c3'", ",", "'a b c'", ",", "'1.1 2.22 3.33'", "]", "data", "[", "'a'", "]", ".", "format", "=", "fmt", "ascii", ".", "write", "(", "data", ",", "out", ",", "format", "=", "'basic'", ",", "fast_writer", "=", "fast_writer", ")", "assert", "(", "out", ".", "getvalue", "(", ")", ".", "splitlines", "(", ")", "==", "expected", ")" ]
check different formats for a column .
train
false
43,643
def req_skill(): s3.filter = (FS('req_id$is_template') == False) def prep(r): if (r.interactive or (r.representation == 'aadata')): list_fields = s3db.get_config('req_req_skill', 'list_fields') list_fields.insert(1, 'req_id$site_id') list_fields.insert(1, 'req_id$site_id$location_id$L4') list_fields.insert(1, 'req_id$site_id$location_id$L3') s3db.configure('req_req_skill', insertable=False, list_fields=list_fields) if ((r.method != 'update') and (r.method != 'read')): s3db.req_hide_quantities(r.table) return True s3.prep = prep def postp(r, output): if r.interactive: s3.actions = [dict(url=URL(c='req', f='req', args=['req_skill', '[id]']), _class='action-btn', label=str(READ))] return output s3.postp = postp return s3_rest_controller('req', 'req_skill')
[ "def", "req_skill", "(", ")", ":", "s3", ".", "filter", "=", "(", "FS", "(", "'req_id$is_template'", ")", "==", "False", ")", "def", "prep", "(", "r", ")", ":", "if", "(", "r", ".", "interactive", "or", "(", "r", ".", "representation", "==", "'aadata'", ")", ")", ":", "list_fields", "=", "s3db", ".", "get_config", "(", "'req_req_skill'", ",", "'list_fields'", ")", "list_fields", ".", "insert", "(", "1", ",", "'req_id$site_id'", ")", "list_fields", ".", "insert", "(", "1", ",", "'req_id$site_id$location_id$L4'", ")", "list_fields", ".", "insert", "(", "1", ",", "'req_id$site_id$location_id$L3'", ")", "s3db", ".", "configure", "(", "'req_req_skill'", ",", "insertable", "=", "False", ",", "list_fields", "=", "list_fields", ")", "if", "(", "(", "r", ".", "method", "!=", "'update'", ")", "and", "(", "r", ".", "method", "!=", "'read'", ")", ")", ":", "s3db", ".", "req_hide_quantities", "(", "r", ".", "table", ")", "return", "True", "s3", ".", "prep", "=", "prep", "def", "postp", "(", "r", ",", "output", ")", ":", "if", "r", ".", "interactive", ":", "s3", ".", "actions", "=", "[", "dict", "(", "url", "=", "URL", "(", "c", "=", "'req'", ",", "f", "=", "'req'", ",", "args", "=", "[", "'req_skill'", ",", "'[id]'", "]", ")", ",", "_class", "=", "'action-btn'", ",", "label", "=", "str", "(", "READ", ")", ")", "]", "return", "output", "s3", ".", "postp", "=", "postp", "return", "s3_rest_controller", "(", "'req'", ",", "'req_skill'", ")" ]
rest controller @todo: filter out fulfilled skills? .
train
false
43,644
def p_direct_declarator_2(t): pass
[ "def", "p_direct_declarator_2", "(", "t", ")", ":", "pass" ]
direct_declarator : lparen declarator rparen .
train
false
43,645
def test_issue617(): try: vocab = Vocab.load(u'/tmp/vocab') except IOError: pass
[ "def", "test_issue617", "(", ")", ":", "try", ":", "vocab", "=", "Vocab", ".", "load", "(", "u'/tmp/vocab'", ")", "except", "IOError", ":", "pass" ]
test loading vocab with string .
train
false
43,646
def test_optimizer(): nan_detected = [False] def detect_nan(i, node, fn): for output in fn.outputs: if numpy.isnan(output[0]).any(): print('*** NaN detected ***') theano.printing.debugprint(node) print(('Inputs : %s' % [input[0] for input in fn.inputs])) print(('Outputs: %s' % [output[0] for output in fn.outputs])) nan_detected[0] = True break x = theano.tensor.dscalar('x') mode = theano.compile.MonitorMode(post_func=detect_nan) mode = mode.excluding('fusion') f = theano.function([x], [(theano.tensor.log(x) * x)], mode=mode) assert (len(f.maker.fgraph.apply_nodes) == 2) f(0) assert nan_detected[0]
[ "def", "test_optimizer", "(", ")", ":", "nan_detected", "=", "[", "False", "]", "def", "detect_nan", "(", "i", ",", "node", ",", "fn", ")", ":", "for", "output", "in", "fn", ".", "outputs", ":", "if", "numpy", ".", "isnan", "(", "output", "[", "0", "]", ")", ".", "any", "(", ")", ":", "print", "(", "'*** NaN detected ***'", ")", "theano", ".", "printing", ".", "debugprint", "(", "node", ")", "print", "(", "(", "'Inputs : %s'", "%", "[", "input", "[", "0", "]", "for", "input", "in", "fn", ".", "inputs", "]", ")", ")", "print", "(", "(", "'Outputs: %s'", "%", "[", "output", "[", "0", "]", "for", "output", "in", "fn", ".", "outputs", "]", ")", ")", "nan_detected", "[", "0", "]", "=", "True", "break", "x", "=", "theano", ".", "tensor", ".", "dscalar", "(", "'x'", ")", "mode", "=", "theano", ".", "compile", ".", "MonitorMode", "(", "post_func", "=", "detect_nan", ")", "mode", "=", "mode", ".", "excluding", "(", "'fusion'", ")", "f", "=", "theano", ".", "function", "(", "[", "x", "]", ",", "[", "(", "theano", ".", "tensor", ".", "log", "(", "x", ")", "*", "x", ")", "]", ",", "mode", "=", "mode", ")", "assert", "(", "len", "(", "f", ".", "maker", ".", "fgraph", ".", "apply_nodes", ")", "==", "2", ")", "f", "(", "0", ")", "assert", "nan_detected", "[", "0", "]" ]
test that we can remove optimizer .
train
false
43,647
def chpgrp(path, group): if (group is None): raise SaltInvocationError("The group value was specified as None and is invalid. If you mean the built-in None group, specify the group in lowercase, e.g. 'none'.") err = '' try: (groupSID, domainName, objectType) = win32security.LookupAccountName(None, group) except pywinerror: err += 'Group does not exist\n' if (not os.path.exists(path)): err += 'File not found\n' if err: return err privilege_enabled = False try: privilege_enabled = _enable_privilege(win32security.SE_RESTORE_NAME) win32security.SetNamedSecurityInfo(path, win32security.SE_FILE_OBJECT, win32security.GROUP_SECURITY_INFORMATION, None, groupSID, None, None) finally: if privilege_enabled: _disable_privilege(win32security.SE_RESTORE_NAME) return None
[ "def", "chpgrp", "(", "path", ",", "group", ")", ":", "if", "(", "group", "is", "None", ")", ":", "raise", "SaltInvocationError", "(", "\"The group value was specified as None and is invalid. If you mean the built-in None group, specify the group in lowercase, e.g. 'none'.\"", ")", "err", "=", "''", "try", ":", "(", "groupSID", ",", "domainName", ",", "objectType", ")", "=", "win32security", ".", "LookupAccountName", "(", "None", ",", "group", ")", "except", "pywinerror", ":", "err", "+=", "'Group does not exist\\n'", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ")", ":", "err", "+=", "'File not found\\n'", "if", "err", ":", "return", "err", "privilege_enabled", "=", "False", "try", ":", "privilege_enabled", "=", "_enable_privilege", "(", "win32security", ".", "SE_RESTORE_NAME", ")", "win32security", ".", "SetNamedSecurityInfo", "(", "path", ",", "win32security", ".", "SE_FILE_OBJECT", ",", "win32security", ".", "GROUP_SECURITY_INFORMATION", ",", "None", ",", "groupSID", ",", "None", ",", "None", ")", "finally", ":", "if", "privilege_enabled", ":", "_disable_privilege", "(", "win32security", ".", "SE_RESTORE_NAME", ")", "return", "None" ]
change the group of a file under windows .
train
false
43,649
def EvalHypergeomPmf(k, N, K, n): return stats.hypergeom.pmf(k, N, K, n)
[ "def", "EvalHypergeomPmf", "(", "k", ",", "N", ",", "K", ",", "n", ")", ":", "return", "stats", ".", "hypergeom", ".", "pmf", "(", "k", ",", "N", ",", "K", ",", "n", ")" ]
evaluates the hypergeometric pmf .
train
false
43,650
def get_ovf_descriptor(ovf_path): if path.exists(ovf_path): with open(ovf_path, 'r') as f: try: ovfd = f.read() f.close() return ovfd except: print ('Could not read file: %s' % ovf_path) exit(1)
[ "def", "get_ovf_descriptor", "(", "ovf_path", ")", ":", "if", "path", ".", "exists", "(", "ovf_path", ")", ":", "with", "open", "(", "ovf_path", ",", "'r'", ")", "as", "f", ":", "try", ":", "ovfd", "=", "f", ".", "read", "(", ")", "f", ".", "close", "(", ")", "return", "ovfd", "except", ":", "print", "(", "'Could not read file: %s'", "%", "ovf_path", ")", "exit", "(", "1", ")" ]
read in the ovf descriptor .
train
false
43,652
def names_to_indices(names, ordered_names): indices = [] names_list = list(names) for ordered_name in ordered_names: if (ordered_name in names_list): indices.append(names_list.index(ordered_name)) return np.array(indices)
[ "def", "names_to_indices", "(", "names", ",", "ordered_names", ")", ":", "indices", "=", "[", "]", "names_list", "=", "list", "(", "names", ")", "for", "ordered_name", "in", "ordered_names", ":", "if", "(", "ordered_name", "in", "names_list", ")", ":", "indices", ".", "append", "(", "names_list", ".", "index", "(", "ordered_name", ")", ")", "return", "np", ".", "array", "(", "indices", ")" ]
returns the indices that would sort names like ordered_names .
train
false
43,653
def format_argspec_init(method, grouped=True): if (method is object.__init__): args = ((grouped and '(self)') or 'self') else: try: return format_argspec_plus(method, grouped=grouped) except TypeError: args = ((grouped and '(self, *args, **kwargs)') or 'self, *args, **kwargs') return dict(self_arg='self', args=args, apply_pos=args, apply_kw=args)
[ "def", "format_argspec_init", "(", "method", ",", "grouped", "=", "True", ")", ":", "if", "(", "method", "is", "object", ".", "__init__", ")", ":", "args", "=", "(", "(", "grouped", "and", "'(self)'", ")", "or", "'self'", ")", "else", ":", "try", ":", "return", "format_argspec_plus", "(", "method", ",", "grouped", "=", "grouped", ")", "except", "TypeError", ":", "args", "=", "(", "(", "grouped", "and", "'(self, *args, **kwargs)'", ")", "or", "'self, *args, **kwargs'", ")", "return", "dict", "(", "self_arg", "=", "'self'", ",", "args", "=", "args", ",", "apply_pos", "=", "args", ",", "apply_kw", "=", "args", ")" ]
format_argspec_plus with considerations for typical __init__ methods wraps format_argspec_plus with error handling strategies for typical __init__ cases:: object .
train
false
43,654
@api_versions.wraps('2.8') @utils.arg('server', metavar='<server>', help=_('Name or ID of server.')) def do_get_mks_console(cs, args): server = _find_server(cs, args.server) data = server.get_mks_console() print_console(cs, data)
[ "@", "api_versions", ".", "wraps", "(", "'2.8'", ")", "@", "utils", ".", "arg", "(", "'server'", ",", "metavar", "=", "'<server>'", ",", "help", "=", "_", "(", "'Name or ID of server.'", ")", ")", "def", "do_get_mks_console", "(", "cs", ",", "args", ")", ":", "server", "=", "_find_server", "(", "cs", ",", "args", ".", "server", ")", "data", "=", "server", ".", "get_mks_console", "(", ")", "print_console", "(", "cs", ",", "data", ")" ]
get an mks console to a server .
train
false
43,655
def register_common_cli_options(): cfg.CONF.register_cli_opt(cfg.BoolOpt('verbose', short='v', default=False))
[ "def", "register_common_cli_options", "(", ")", ":", "cfg", ".", "CONF", ".", "register_cli_opt", "(", "cfg", ".", "BoolOpt", "(", "'verbose'", ",", "short", "=", "'v'", ",", "default", "=", "False", ")", ")" ]
register common cli options .
train
false
43,656
def calculate_children(evaluator, children): iterator = iter(children) types = evaluator.eval_element(next(iterator)) for operator in iterator: right = next(iterator) if tree.is_node(operator, 'comp_op'): operator = ' '.join((str(c.value) for c in operator.children)) if (operator in ('and', 'or')): left_bools = set([left.py__bool__() for left in types]) if (left_bools == set([True])): if (operator == 'and'): types = evaluator.eval_element(right) elif (left_bools == set([False])): if (operator != 'and'): types = evaluator.eval_element(right) else: types = calculate(evaluator, types, operator, evaluator.eval_element(right)) debug.dbg('calculate_children types %s', types) return types
[ "def", "calculate_children", "(", "evaluator", ",", "children", ")", ":", "iterator", "=", "iter", "(", "children", ")", "types", "=", "evaluator", ".", "eval_element", "(", "next", "(", "iterator", ")", ")", "for", "operator", "in", "iterator", ":", "right", "=", "next", "(", "iterator", ")", "if", "tree", ".", "is_node", "(", "operator", ",", "'comp_op'", ")", ":", "operator", "=", "' '", ".", "join", "(", "(", "str", "(", "c", ".", "value", ")", "for", "c", "in", "operator", ".", "children", ")", ")", "if", "(", "operator", "in", "(", "'and'", ",", "'or'", ")", ")", ":", "left_bools", "=", "set", "(", "[", "left", ".", "py__bool__", "(", ")", "for", "left", "in", "types", "]", ")", "if", "(", "left_bools", "==", "set", "(", "[", "True", "]", ")", ")", ":", "if", "(", "operator", "==", "'and'", ")", ":", "types", "=", "evaluator", ".", "eval_element", "(", "right", ")", "elif", "(", "left_bools", "==", "set", "(", "[", "False", "]", ")", ")", ":", "if", "(", "operator", "!=", "'and'", ")", ":", "types", "=", "evaluator", ".", "eval_element", "(", "right", ")", "else", ":", "types", "=", "calculate", "(", "evaluator", ",", "types", ",", "operator", ",", "evaluator", ".", "eval_element", "(", "right", ")", ")", "debug", ".", "dbg", "(", "'calculate_children types %s'", ",", "types", ")", "return", "types" ]
calculate a list of children with operators .
train
false
43,658
@pytest.fixture def key_config_stub(stubs): stub = stubs.KeyConfigStub() objreg.register('key-config', stub) (yield stub) objreg.delete('key-config')
[ "@", "pytest", ".", "fixture", "def", "key_config_stub", "(", "stubs", ")", ":", "stub", "=", "stubs", ".", "KeyConfigStub", "(", ")", "objreg", ".", "register", "(", "'key-config'", ",", "stub", ")", "(", "yield", "stub", ")", "objreg", ".", "delete", "(", "'key-config'", ")" ]
fixture which provides a fake key config object .
train
false
43,659
def test_hsl_to_rgb_part_14(): assert (hsl_to_rgb(60, 100, 0) == (0, 0, 0)) assert (hsl_to_rgb(60, 100, 10) == (51, 51, 0)) assert (hsl_to_rgb(60, 100, 20) == (102, 102, 0)) assert (hsl_to_rgb(60, 100, 30) == (153, 153, 0)) assert (hsl_to_rgb(60, 100, 40) == (204, 204, 0)) assert (hsl_to_rgb(60, 100, 50) == (255, 255, 0)) assert (hsl_to_rgb(60, 100, 60) == (255, 255, 51)) assert (hsl_to_rgb(60, 100, 70) == (255, 255, 102)) assert (hsl_to_rgb(60, 100, 80) == (255, 255, 153)) assert (hsl_to_rgb(60, 100, 90) == (255, 255, 204)) assert (hsl_to_rgb(60, 100, 100) == (255, 255, 255))
[ "def", "test_hsl_to_rgb_part_14", "(", ")", ":", "assert", "(", "hsl_to_rgb", "(", "60", ",", "100", ",", "0", ")", "==", "(", "0", ",", "0", ",", "0", ")", ")", "assert", "(", "hsl_to_rgb", "(", "60", ",", "100", ",", "10", ")", "==", "(", "51", ",", "51", ",", "0", ")", ")", "assert", "(", "hsl_to_rgb", "(", "60", ",", "100", ",", "20", ")", "==", "(", "102", ",", "102", ",", "0", ")", ")", "assert", "(", "hsl_to_rgb", "(", "60", ",", "100", ",", "30", ")", "==", "(", "153", ",", "153", ",", "0", ")", ")", "assert", "(", "hsl_to_rgb", "(", "60", ",", "100", ",", "40", ")", "==", "(", "204", ",", "204", ",", "0", ")", ")", "assert", "(", "hsl_to_rgb", "(", "60", ",", "100", ",", "50", ")", "==", "(", "255", ",", "255", ",", "0", ")", ")", "assert", "(", "hsl_to_rgb", "(", "60", ",", "100", ",", "60", ")", "==", "(", "255", ",", "255", ",", "51", ")", ")", "assert", "(", "hsl_to_rgb", "(", "60", ",", "100", ",", "70", ")", "==", "(", "255", ",", "255", ",", "102", ")", ")", "assert", "(", "hsl_to_rgb", "(", "60", ",", "100", ",", "80", ")", "==", "(", "255", ",", "255", ",", "153", ")", ")", "assert", "(", "hsl_to_rgb", "(", "60", ",", "100", ",", "90", ")", "==", "(", "255", ",", "255", ",", "204", ")", ")", "assert", "(", "hsl_to_rgb", "(", "60", ",", "100", ",", "100", ")", "==", "(", "255", ",", "255", ",", "255", ")", ")" ]
test hsl to rgb color function .
train
false
43,661
def test_no_stdlib_collections3(): import collections matplotlib = import_module('matplotlib', __import__kwargs={'fromlist': ['cm', 'collections']}, min_module_version='1.1.0') if matplotlib: assert (collections != matplotlib.collections)
[ "def", "test_no_stdlib_collections3", "(", ")", ":", "import", "collections", "matplotlib", "=", "import_module", "(", "'matplotlib'", ",", "__import__kwargs", "=", "{", "'fromlist'", ":", "[", "'cm'", ",", "'collections'", "]", "}", ",", "min_module_version", "=", "'1.1.0'", ")", "if", "matplotlib", ":", "assert", "(", "collections", "!=", "matplotlib", ".", "collections", ")" ]
make sure we get the right collections with no catch .
train
false
43,662
def migrate_tags_on_taxes(cr, registry): env = api.Environment(cr, SUPERUSER_ID, {}) xml_records = env['ir.model.data'].search([('model', '=', 'account.tax.template'), ('module', 'like', 'l10n_%')]) tax_template_ids = [x['res_id'] for x in xml_records.sudo().read(['res_id'])] for tax_template in env['account.tax.template'].browse(tax_template_ids): tax_id = env['account.tax'].search([('name', '=', tax_template.name), ('type_tax_use', '=', tax_template.type_tax_use), ('description', '=', tax_template.description)]) if (len(tax_id.ids) == 1): tax_id.sudo().write({'tag_ids': [(6, 0, tax_template.tag_ids.ids)]})
[ "def", "migrate_tags_on_taxes", "(", "cr", ",", "registry", ")", ":", "env", "=", "api", ".", "Environment", "(", "cr", ",", "SUPERUSER_ID", ",", "{", "}", ")", "xml_records", "=", "env", "[", "'ir.model.data'", "]", ".", "search", "(", "[", "(", "'model'", ",", "'='", ",", "'account.tax.template'", ")", ",", "(", "'module'", ",", "'like'", ",", "'l10n_%'", ")", "]", ")", "tax_template_ids", "=", "[", "x", "[", "'res_id'", "]", "for", "x", "in", "xml_records", ".", "sudo", "(", ")", ".", "read", "(", "[", "'res_id'", "]", ")", "]", "for", "tax_template", "in", "env", "[", "'account.tax.template'", "]", ".", "browse", "(", "tax_template_ids", ")", ":", "tax_id", "=", "env", "[", "'account.tax'", "]", ".", "search", "(", "[", "(", "'name'", ",", "'='", ",", "tax_template", ".", "name", ")", ",", "(", "'type_tax_use'", ",", "'='", ",", "tax_template", ".", "type_tax_use", ")", ",", "(", "'description'", ",", "'='", ",", "tax_template", ".", "description", ")", "]", ")", "if", "(", "len", "(", "tax_id", ".", "ids", ")", "==", "1", ")", ":", "tax_id", ".", "sudo", "(", ")", ".", "write", "(", "{", "'tag_ids'", ":", "[", "(", "6", ",", "0", ",", "tax_template", ".", "tag_ids", ".", "ids", ")", "]", "}", ")" ]
this is a utiliy function to help migrate the tags of taxes when the localization has been modified on stable version .
train
false
43,665
def adv_index_broadcastable_pattern(a, idx): def replace_slice(v): if isinstance(v, gof.Apply): if (len(v.outputs) != 1): raise ValueError('It is ambiguous which output of a multi-output Op has to be fetched.', v) else: v = v.outputs[0] if NoneConst.equals(v): return None if isinstance(v.type, SliceType): return slice(None, None) return numpy.zeros(((2,) * v.ndim), int) newidx = tuple(map(replace_slice, idx)) fakeshape = [(2 - bc) for bc in a.broadcastable] retshape = numpy.empty(fakeshape)[newidx].shape return tuple([(dim == 1) for dim in retshape])
[ "def", "adv_index_broadcastable_pattern", "(", "a", ",", "idx", ")", ":", "def", "replace_slice", "(", "v", ")", ":", "if", "isinstance", "(", "v", ",", "gof", ".", "Apply", ")", ":", "if", "(", "len", "(", "v", ".", "outputs", ")", "!=", "1", ")", ":", "raise", "ValueError", "(", "'It is ambiguous which output of a multi-output Op has to be fetched.'", ",", "v", ")", "else", ":", "v", "=", "v", ".", "outputs", "[", "0", "]", "if", "NoneConst", ".", "equals", "(", "v", ")", ":", "return", "None", "if", "isinstance", "(", "v", ".", "type", ",", "SliceType", ")", ":", "return", "slice", "(", "None", ",", "None", ")", "return", "numpy", ".", "zeros", "(", "(", "(", "2", ",", ")", "*", "v", ".", "ndim", ")", ",", "int", ")", "newidx", "=", "tuple", "(", "map", "(", "replace_slice", ",", "idx", ")", ")", "fakeshape", "=", "[", "(", "2", "-", "bc", ")", "for", "bc", "in", "a", ".", "broadcastable", "]", "retshape", "=", "numpy", ".", "empty", "(", "fakeshape", ")", "[", "newidx", "]", ".", "shape", "return", "tuple", "(", "[", "(", "dim", "==", "1", ")", "for", "dim", "in", "retshape", "]", ")" ]
this function is only used to determine the broadcast pattern for advancedsubtensor output variable .
train
false
43,667
def get_status_code_from_code_response(code): last_valid_line_from_code = [line for line in code.split(u'\n') if line][(-1)] status_code_from_last_line = int(last_valid_line_from_code.split()[0]) status_code_from_first_digits = int(code[:3]) if (status_code_from_last_line != status_code_from_first_digits): log.warning(u'FTP response status code seems to be inconsistent.\nCode received: %s, extracted: %s and %s', code, status_code_from_last_line, status_code_from_first_digits) return status_code_from_last_line
[ "def", "get_status_code_from_code_response", "(", "code", ")", ":", "last_valid_line_from_code", "=", "[", "line", "for", "line", "in", "code", ".", "split", "(", "u'\\n'", ")", "if", "line", "]", "[", "(", "-", "1", ")", "]", "status_code_from_last_line", "=", "int", "(", "last_valid_line_from_code", ".", "split", "(", ")", "[", "0", "]", ")", "status_code_from_first_digits", "=", "int", "(", "code", "[", ":", "3", "]", ")", "if", "(", "status_code_from_last_line", "!=", "status_code_from_first_digits", ")", ":", "log", ".", "warning", "(", "u'FTP response status code seems to be inconsistent.\\nCode received: %s, extracted: %s and %s'", ",", "code", ",", "status_code_from_last_line", ",", "status_code_from_first_digits", ")", "return", "status_code_from_last_line" ]
the idea is to handle complicated code response .
train
false
43,668
def run_pylint(filename, options): ARGS = ['--rcfile=./.pylintrc'] if (not options.show_all): ARGS.append('-E') pylint_output = WritableObject() from pylint import lint from pylint.reporters.text import TextReporter lint.Run(([filename] + ARGS), reporter=TextReporter(pylint_output), exit=False) return pylint_output.read()
[ "def", "run_pylint", "(", "filename", ",", "options", ")", ":", "ARGS", "=", "[", "'--rcfile=./.pylintrc'", "]", "if", "(", "not", "options", ".", "show_all", ")", ":", "ARGS", ".", "append", "(", "'-E'", ")", "pylint_output", "=", "WritableObject", "(", ")", "from", "pylint", "import", "lint", "from", "pylint", ".", "reporters", ".", "text", "import", "TextReporter", "lint", ".", "Run", "(", "(", "[", "filename", "]", "+", "ARGS", ")", ",", "reporter", "=", "TextReporter", "(", "pylint_output", ")", ",", "exit", "=", "False", ")", "return", "pylint_output", ".", "read", "(", ")" ]
run pylint on the given file .
train
false
43,669
def fix_switch_cases(switch_node): assert isinstance(switch_node, c_ast.Switch) if (not isinstance(switch_node.stmt, c_ast.Compound)): return switch_node new_compound = c_ast.Compound([], switch_node.stmt.coord) last_case = None for child in switch_node.stmt.block_items: if isinstance(child, (c_ast.Case, c_ast.Default)): new_compound.block_items.append(child) _extract_nested_case(child, new_compound.block_items) last_case = new_compound.block_items[(-1)] elif (last_case is None): new_compound.block_items.append(child) else: last_case.stmts.append(child) switch_node.stmt = new_compound return switch_node
[ "def", "fix_switch_cases", "(", "switch_node", ")", ":", "assert", "isinstance", "(", "switch_node", ",", "c_ast", ".", "Switch", ")", "if", "(", "not", "isinstance", "(", "switch_node", ".", "stmt", ",", "c_ast", ".", "Compound", ")", ")", ":", "return", "switch_node", "new_compound", "=", "c_ast", ".", "Compound", "(", "[", "]", ",", "switch_node", ".", "stmt", ".", "coord", ")", "last_case", "=", "None", "for", "child", "in", "switch_node", ".", "stmt", ".", "block_items", ":", "if", "isinstance", "(", "child", ",", "(", "c_ast", ".", "Case", ",", "c_ast", ".", "Default", ")", ")", ":", "new_compound", ".", "block_items", ".", "append", "(", "child", ")", "_extract_nested_case", "(", "child", ",", "new_compound", ".", "block_items", ")", "last_case", "=", "new_compound", ".", "block_items", "[", "(", "-", "1", ")", "]", "elif", "(", "last_case", "is", "None", ")", ":", "new_compound", ".", "block_items", ".", "append", "(", "child", ")", "else", ":", "last_case", ".", "stmts", ".", "append", "(", "child", ")", "switch_node", ".", "stmt", "=", "new_compound", "return", "switch_node" ]
the case statements in a switch come out of parsing with one child node .
train
false
43,670
def types_of_fields(fields, expr): if isinstance(expr.dshape.measure, Record): return get(fields, expr.dshape.measure) else: if isinstance(fields, (tuple, list, set)): assert (len(fields) == 1) (fields,) = fields assert (fields == expr._name) return expr.dshape.measure
[ "def", "types_of_fields", "(", "fields", ",", "expr", ")", ":", "if", "isinstance", "(", "expr", ".", "dshape", ".", "measure", ",", "Record", ")", ":", "return", "get", "(", "fields", ",", "expr", ".", "dshape", ".", "measure", ")", "else", ":", "if", "isinstance", "(", "fields", ",", "(", "tuple", ",", "list", ",", "set", ")", ")", ":", "assert", "(", "len", "(", "fields", ")", "==", "1", ")", "(", "fields", ",", ")", "=", "fields", "assert", "(", "fields", "==", "expr", ".", "_name", ")", "return", "expr", ".", "dshape", ".", "measure" ]
get the types of fields in an expression examples .
train
false
43,671
def ResampleRowsWeighted(df, column='finalwgt'): weights = df[column] cdf = Cdf(dict(weights)) indices = cdf.Sample(len(weights)) sample = df.loc[indices] return sample
[ "def", "ResampleRowsWeighted", "(", "df", ",", "column", "=", "'finalwgt'", ")", ":", "weights", "=", "df", "[", "column", "]", "cdf", "=", "Cdf", "(", "dict", "(", "weights", ")", ")", "indices", "=", "cdf", ".", "Sample", "(", "len", "(", "weights", ")", ")", "sample", "=", "df", ".", "loc", "[", "indices", "]", "return", "sample" ]
resamples a dataframe using probabilities proportional to given column .
train
false
43,672
def uplinkBusy(): name = 'Uplink Busy' a = TpPd(pd=6) b = MessageType(mesType=42) packet = (a / b) return packet
[ "def", "uplinkBusy", "(", ")", ":", "name", "=", "'Uplink Busy'", "a", "=", "TpPd", "(", "pd", "=", "6", ")", "b", "=", "MessageType", "(", "mesType", "=", "42", ")", "packet", "=", "(", "a", "/", "b", ")", "return", "packet" ]
uplink busy section 9 .
train
true
43,673
def load_config_file(filename, log_printer, silent=False): filename = os.path.abspath(filename) try: return ConfParser().parse(filename) except FileNotFoundError: if (not silent): if (os.path.basename(filename) == Constants.default_coafile): log_printer.warn('The default coafile {0!r} was not found. You can generate a configuration file with your current options by adding the `--save` flag or suppress any use of config files with `-I`.'.format(Constants.default_coafile)) else: log_printer.err('The requested coafile {0!r} does not exist. You can generate it with your current options by adding the `--save` flag or suppress any use of config files with `-I`.'.format(filename)) sys.exit(2) return {'default': Section('default')}
[ "def", "load_config_file", "(", "filename", ",", "log_printer", ",", "silent", "=", "False", ")", ":", "filename", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", "try", ":", "return", "ConfParser", "(", ")", ".", "parse", "(", "filename", ")", "except", "FileNotFoundError", ":", "if", "(", "not", "silent", ")", ":", "if", "(", "os", ".", "path", ".", "basename", "(", "filename", ")", "==", "Constants", ".", "default_coafile", ")", ":", "log_printer", ".", "warn", "(", "'The default coafile {0!r} was not found. You can generate a configuration file with your current options by adding the `--save` flag or suppress any use of config files with `-I`.'", ".", "format", "(", "Constants", ".", "default_coafile", ")", ")", "else", ":", "log_printer", ".", "err", "(", "'The requested coafile {0!r} does not exist. You can generate it with your current options by adding the `--save` flag or suppress any use of config files with `-I`.'", ".", "format", "(", "filename", ")", ")", "sys", ".", "exit", "(", "2", ")", "return", "{", "'default'", ":", "Section", "(", "'default'", ")", "}" ]
loads sections from a config file .
train
false
43,674
def limit_length(s, length): if (s is None): return None ELLIPSES = u'...' if (len(s) > length): return (s[:(length - len(ELLIPSES))] + ELLIPSES) return s
[ "def", "limit_length", "(", "s", ",", "length", ")", ":", "if", "(", "s", "is", "None", ")", ":", "return", "None", "ELLIPSES", "=", "u'...'", "if", "(", "len", "(", "s", ")", ">", "length", ")", ":", "return", "(", "s", "[", ":", "(", "length", "-", "len", "(", "ELLIPSES", ")", ")", "]", "+", "ELLIPSES", ")", "return", "s" ]
add ellipses to overly long strings .
train
false
43,675
def add_svc_avail_path(path): if os.path.exists(path): if (path not in AVAIL_SVR_DIRS): AVAIL_SVR_DIRS.append(path) return True return False
[ "def", "add_svc_avail_path", "(", "path", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "if", "(", "path", "not", "in", "AVAIL_SVR_DIRS", ")", ":", "AVAIL_SVR_DIRS", ".", "append", "(", "path", ")", "return", "True", "return", "False" ]
add a path that may contain available services .
train
true
43,676
def to_series(knowledge_dates, earning_dates): return pd.Series(index=pd.to_datetime(knowledge_dates), data=pd.to_datetime(earning_dates))
[ "def", "to_series", "(", "knowledge_dates", ",", "earning_dates", ")", ":", "return", "pd", ".", "Series", "(", "index", "=", "pd", ".", "to_datetime", "(", "knowledge_dates", ")", ",", "data", "=", "pd", ".", "to_datetime", "(", "earning_dates", ")", ")" ]
helper for converting a dict of strings to a series of datetimes .
train
false
43,677
def immediateAssignment(ChannelDescription_presence=0, PacketChannelDescription_presence=0, StartingTime_presence=0): a = L2PseudoLength() b = TpPd(pd=6) c = MessageType(mesType=63) d = PageModeAndDedicatedModeOrTBF() packet = (((a / b) / c) / d) if (ChannelDescription_presence is 1): f = ChannelDescription() packet = (packet / f) if (PacketChannelDescription_presence is 1): g = PacketChannelDescription() packet = (packet / g) h = RequestReference() i = TimingAdvance() j = MobileAllocation() packet = (((packet / h) / i) / j) if (StartingTime_presence is 1): k = StartingTimeHdr(ieiST=124, eightBitST=0) packet = (packet / k) l = IaRestOctets() packet = (packet / l) return packet
[ "def", "immediateAssignment", "(", "ChannelDescription_presence", "=", "0", ",", "PacketChannelDescription_presence", "=", "0", ",", "StartingTime_presence", "=", "0", ")", ":", "a", "=", "L2PseudoLength", "(", ")", "b", "=", "TpPd", "(", "pd", "=", "6", ")", "c", "=", "MessageType", "(", "mesType", "=", "63", ")", "d", "=", "PageModeAndDedicatedModeOrTBF", "(", ")", "packet", "=", "(", "(", "(", "a", "/", "b", ")", "/", "c", ")", "/", "d", ")", "if", "(", "ChannelDescription_presence", "is", "1", ")", ":", "f", "=", "ChannelDescription", "(", ")", "packet", "=", "(", "packet", "/", "f", ")", "if", "(", "PacketChannelDescription_presence", "is", "1", ")", ":", "g", "=", "PacketChannelDescription", "(", ")", "packet", "=", "(", "packet", "/", "g", ")", "h", "=", "RequestReference", "(", ")", "i", "=", "TimingAdvance", "(", ")", "j", "=", "MobileAllocation", "(", ")", "packet", "=", "(", "(", "(", "packet", "/", "h", ")", "/", "i", ")", "/", "j", ")", "if", "(", "StartingTime_presence", "is", "1", ")", ":", "k", "=", "StartingTimeHdr", "(", "ieiST", "=", "124", ",", "eightBitST", "=", "0", ")", "packet", "=", "(", "packet", "/", "k", ")", "l", "=", "IaRestOctets", "(", ")", "packet", "=", "(", "packet", "/", "l", ")", "return", "packet" ]
immediate assignment section 9 .
train
true
43,678
def find_entry_points(): ep = ['ipython%s = IPython:start_ipython', 'iptest%s = IPython.testing.iptestcontroller:main'] suffix = str(sys.version_info[0]) return ([(e % '') for e in ep] + [(e % suffix) for e in ep])
[ "def", "find_entry_points", "(", ")", ":", "ep", "=", "[", "'ipython%s = IPython:start_ipython'", ",", "'iptest%s = IPython.testing.iptestcontroller:main'", "]", "suffix", "=", "str", "(", "sys", ".", "version_info", "[", "0", "]", ")", "return", "(", "[", "(", "e", "%", "''", ")", "for", "e", "in", "ep", "]", "+", "[", "(", "e", "%", "suffix", ")", "for", "e", "in", "ep", "]", ")" ]
defines the command line entry points for ipython this always uses setuptools-style entry points .
train
false
43,679
def _scryptROMix(blocks, n): x = [blocks[i:(i + 64)] for i in xrange(0, len(blocks), 64)] len_x = len(x) v = ([None] * n) load_le_uint32 = _raw_salsa20_lib.load_le_uint32 for i in xrange(n): v[i] = x x = _scryptBlockMix(x, len_x) for i in xrange(n): j = (load_le_uint32(x[(-1)]) & (n - 1)) t = [strxor(x[idx], v[j][idx]) for idx in xrange(len_x)] x = _scryptBlockMix(t, len_x) return b('').join([get_raw_buffer(y) for y in x])
[ "def", "_scryptROMix", "(", "blocks", ",", "n", ")", ":", "x", "=", "[", "blocks", "[", "i", ":", "(", "i", "+", "64", ")", "]", "for", "i", "in", "xrange", "(", "0", ",", "len", "(", "blocks", ")", ",", "64", ")", "]", "len_x", "=", "len", "(", "x", ")", "v", "=", "(", "[", "None", "]", "*", "n", ")", "load_le_uint32", "=", "_raw_salsa20_lib", ".", "load_le_uint32", "for", "i", "in", "xrange", "(", "n", ")", ":", "v", "[", "i", "]", "=", "x", "x", "=", "_scryptBlockMix", "(", "x", ",", "len_x", ")", "for", "i", "in", "xrange", "(", "n", ")", ":", "j", "=", "(", "load_le_uint32", "(", "x", "[", "(", "-", "1", ")", "]", ")", "&", "(", "n", "-", "1", ")", ")", "t", "=", "[", "strxor", "(", "x", "[", "idx", "]", ",", "v", "[", "j", "]", "[", "idx", "]", ")", "for", "idx", "in", "xrange", "(", "len_x", ")", "]", "x", "=", "_scryptBlockMix", "(", "t", ",", "len_x", ")", "return", "b", "(", "''", ")", ".", "join", "(", "[", "get_raw_buffer", "(", "y", ")", "for", "y", "in", "x", "]", ")" ]
sequential memory-hard function for scrypt .
train
false
43,680
def source_model(view): if isinstance(view.model(), QSortFilterProxyModel): return view.model().sourceModel() else: return view.model()
[ "def", "source_model", "(", "view", ")", ":", "if", "isinstance", "(", "view", ".", "model", "(", ")", ",", "QSortFilterProxyModel", ")", ":", "return", "view", ".", "model", "(", ")", ".", "sourceModel", "(", ")", "else", ":", "return", "view", ".", "model", "(", ")" ]
return the source model for the qt item view if it uses the qsortfilterproxymodel .
train
false
43,681
def release(): return uname()[2]
[ "def", "release", "(", ")", ":", "return", "uname", "(", ")", "[", "2", "]" ]
release app .
train
false
43,682
def test_fill_hole(): seed = np.array([0, 8, 8, 8, 8, 8, 8, 8, 8, 0]) mask = np.array([0, 3, 6, 2, 1, 1, 1, 4, 2, 0]) result = reconstruction(seed, mask, method='erosion') assert_close(result, np.array([0, 3, 6, 4, 4, 4, 4, 4, 2, 0]))
[ "def", "test_fill_hole", "(", ")", ":", "seed", "=", "np", ".", "array", "(", "[", "0", ",", "8", ",", "8", ",", "8", ",", "8", ",", "8", ",", "8", ",", "8", ",", "8", ",", "0", "]", ")", "mask", "=", "np", ".", "array", "(", "[", "0", ",", "3", ",", "6", ",", "2", ",", "1", ",", "1", ",", "1", ",", "4", ",", "2", ",", "0", "]", ")", "result", "=", "reconstruction", "(", "seed", ",", "mask", ",", "method", "=", "'erosion'", ")", "assert_close", "(", "result", ",", "np", ".", "array", "(", "[", "0", ",", "3", ",", "6", ",", "4", ",", "4", ",", "4", ",", "4", ",", "4", ",", "2", ",", "0", "]", ")", ")" ]
test reconstruction by erosion .
train
false
43,684
def test_path_info(monkeypatch): patches = {'config': (lambda : 'CONFIG PATH'), 'data': (lambda : 'DATA PATH'), 'system_data': (lambda : 'SYSTEM DATA PATH'), 'cache': (lambda : 'CACHE PATH'), 'download': (lambda : 'DOWNLOAD PATH'), 'runtime': (lambda : 'RUNTIME PATH')} for (attr, val) in patches.items(): monkeypatch.setattr(('qutebrowser.utils.standarddir.' + attr), val) pathinfo = version._path_info() assert (pathinfo['config'] == 'CONFIG PATH') assert (pathinfo['data'] == 'DATA PATH') assert (pathinfo['system_data'] == 'SYSTEM DATA PATH') assert (pathinfo['cache'] == 'CACHE PATH') assert (pathinfo['download'] == 'DOWNLOAD PATH') assert (pathinfo['runtime'] == 'RUNTIME PATH')
[ "def", "test_path_info", "(", "monkeypatch", ")", ":", "patches", "=", "{", "'config'", ":", "(", "lambda", ":", "'CONFIG PATH'", ")", ",", "'data'", ":", "(", "lambda", ":", "'DATA PATH'", ")", ",", "'system_data'", ":", "(", "lambda", ":", "'SYSTEM DATA PATH'", ")", ",", "'cache'", ":", "(", "lambda", ":", "'CACHE PATH'", ")", ",", "'download'", ":", "(", "lambda", ":", "'DOWNLOAD PATH'", ")", ",", "'runtime'", ":", "(", "lambda", ":", "'RUNTIME PATH'", ")", "}", "for", "(", "attr", ",", "val", ")", "in", "patches", ".", "items", "(", ")", ":", "monkeypatch", ".", "setattr", "(", "(", "'qutebrowser.utils.standarddir.'", "+", "attr", ")", ",", "val", ")", "pathinfo", "=", "version", ".", "_path_info", "(", ")", "assert", "(", "pathinfo", "[", "'config'", "]", "==", "'CONFIG PATH'", ")", "assert", "(", "pathinfo", "[", "'data'", "]", "==", "'DATA PATH'", ")", "assert", "(", "pathinfo", "[", "'system_data'", "]", "==", "'SYSTEM DATA PATH'", ")", "assert", "(", "pathinfo", "[", "'cache'", "]", "==", "'CACHE PATH'", ")", "assert", "(", "pathinfo", "[", "'download'", "]", "==", "'DOWNLOAD PATH'", ")", "assert", "(", "pathinfo", "[", "'runtime'", "]", "==", "'RUNTIME PATH'", ")" ]
test _path_info() .
train
false
43,687
def GenerateYamlHandlersListForDevAppServer(app_engine_web_xml, web_xml, static_urls): appinfo.MAX_URL_MAPS = 10000 static_handler_generator = StaticHandlerGeneratorForDevAppServer(app_engine_web_xml, web_xml, static_urls) dynamic_handler_generator = DynamicHandlerGenerator(app_engine_web_xml, web_xml) return ((['handlers:'] + static_handler_generator.GetHandlerYaml()) + dynamic_handler_generator.GetHandlerYaml())
[ "def", "GenerateYamlHandlersListForDevAppServer", "(", "app_engine_web_xml", ",", "web_xml", ",", "static_urls", ")", ":", "appinfo", ".", "MAX_URL_MAPS", "=", "10000", "static_handler_generator", "=", "StaticHandlerGeneratorForDevAppServer", "(", "app_engine_web_xml", ",", "web_xml", ",", "static_urls", ")", "dynamic_handler_generator", "=", "DynamicHandlerGenerator", "(", "app_engine_web_xml", ",", "web_xml", ")", "return", "(", "(", "[", "'handlers:'", "]", "+", "static_handler_generator", ".", "GetHandlerYaml", "(", ")", ")", "+", "dynamic_handler_generator", ".", "GetHandlerYaml", "(", ")", ")" ]
produces a list of yaml strings for dynamic and static handlers .
train
false
43,688
def delete_network_segment(context, segment_id): with context.session.begin(subtransactions=True): context.session.query(segments_model.NetworkSegment).filter_by(id=segment_id).delete()
[ "def", "delete_network_segment", "(", "context", ",", "segment_id", ")", ":", "with", "context", ".", "session", ".", "begin", "(", "subtransactions", "=", "True", ")", ":", "context", ".", "session", ".", "query", "(", "segments_model", ".", "NetworkSegment", ")", ".", "filter_by", "(", "id", "=", "segment_id", ")", ".", "delete", "(", ")" ]
release a dynamic segment for the params provided if one exists .
train
false
43,690
def mutating_method(func): def wrapper(self, *__args, **__kwargs): old_mutable = self._mutable self._mutable = True try: return func(self, *__args, **__kwargs) finally: self._mutable = old_mutable return wrapper
[ "def", "mutating_method", "(", "func", ")", ":", "def", "wrapper", "(", "self", ",", "*", "__args", ",", "**", "__kwargs", ")", ":", "old_mutable", "=", "self", ".", "_mutable", "self", ".", "_mutable", "=", "True", "try", ":", "return", "func", "(", "self", ",", "*", "__args", ",", "**", "__kwargs", ")", "finally", ":", "self", ".", "_mutable", "=", "old_mutable", "return", "wrapper" ]
decorator for methods that are allowed to modify immutable objects .
train
true
43,693
@task(base=PersistOnFailureTask, default_retry_delay=30, routing_key=settings.RECALCULATE_GRADES_ROUTING_KEY) def recalculate_subsection_grade_v3(**kwargs): _recalculate_subsection_grade(recalculate_subsection_grade_v3, **kwargs)
[ "@", "task", "(", "base", "=", "PersistOnFailureTask", ",", "default_retry_delay", "=", "30", ",", "routing_key", "=", "settings", ".", "RECALCULATE_GRADES_ROUTING_KEY", ")", "def", "recalculate_subsection_grade_v3", "(", "**", "kwargs", ")", ":", "_recalculate_subsection_grade", "(", "recalculate_subsection_grade_v3", ",", "**", "kwargs", ")" ]
latest version of the recalculate_subsection_grade task .
train
false
43,695
def _create_meg_coils(chs, acc, t=None, coilset=None, do_es=False): acc = (_accuracy_dict[acc] if isinstance(acc, string_types) else acc) coilset = (_read_coil_defs(verbose=False) if (coilset is None) else coilset) coils = [_create_meg_coil(coilset, ch, acc, do_es) for ch in chs] _transform_orig_meg_coils(coils, t, do_es=do_es) return coils
[ "def", "_create_meg_coils", "(", "chs", ",", "acc", ",", "t", "=", "None", ",", "coilset", "=", "None", ",", "do_es", "=", "False", ")", ":", "acc", "=", "(", "_accuracy_dict", "[", "acc", "]", "if", "isinstance", "(", "acc", ",", "string_types", ")", "else", "acc", ")", "coilset", "=", "(", "_read_coil_defs", "(", "verbose", "=", "False", ")", "if", "(", "coilset", "is", "None", ")", "else", "coilset", ")", "coils", "=", "[", "_create_meg_coil", "(", "coilset", ",", "ch", ",", "acc", ",", "do_es", ")", "for", "ch", "in", "chs", "]", "_transform_orig_meg_coils", "(", "coils", ",", "t", ",", "do_es", "=", "do_es", ")", "return", "coils" ]
create a set of meg coils in the head coordinate frame .
train
false
43,696
def get_sent_properties(request_func, propname_list): prop_hash = {} for item in propname_list: if isinstance(item, basestring): key = item value = request_func(item) elif isinstance(item, tuple): key = item[0] prop_func = item[1] if (len(item) <= 2): value = prop_func(request_func(key)) else: try: addl_keys = map(prop_hash.get, item[2:]) value = prop_func(*addl_keys) except: return None if value: prop_hash[key] = value return prop_hash
[ "def", "get_sent_properties", "(", "request_func", ",", "propname_list", ")", ":", "prop_hash", "=", "{", "}", "for", "item", "in", "propname_list", ":", "if", "isinstance", "(", "item", ",", "basestring", ")", ":", "key", "=", "item", "value", "=", "request_func", "(", "item", ")", "elif", "isinstance", "(", "item", ",", "tuple", ")", ":", "key", "=", "item", "[", "0", "]", "prop_func", "=", "item", "[", "1", "]", "if", "(", "len", "(", "item", ")", "<=", "2", ")", ":", "value", "=", "prop_func", "(", "request_func", "(", "key", ")", ")", "else", ":", "try", ":", "addl_keys", "=", "map", "(", "prop_hash", ".", "get", ",", "item", "[", "2", ":", "]", ")", "value", "=", "prop_func", "(", "*", "addl_keys", ")", "except", ":", "return", "None", "if", "value", ":", "prop_hash", "[", "key", "]", "=", "value", "return", "prop_hash" ]
this maps request strings to values in a hash .
train
false
43,698
def set_log_file_dir(directory): global _log_file_dir _log_file_dir = directory
[ "def", "set_log_file_dir", "(", "directory", ")", ":", "global", "_log_file_dir", "_log_file_dir", "=", "directory" ]
set the base directory for log files created by log_line() .
train
false
43,699
def overload_method(typ, attr): from .typing.templates import make_overload_method_template def decorate(overload_func): template = make_overload_method_template(typ, attr, overload_func) infer_getattr(template) return overload_func return decorate
[ "def", "overload_method", "(", "typ", ",", "attr", ")", ":", "from", ".", "typing", ".", "templates", "import", "make_overload_method_template", "def", "decorate", "(", "overload_func", ")", ":", "template", "=", "make_overload_method_template", "(", "typ", ",", "attr", ",", "overload_func", ")", "infer_getattr", "(", "template", ")", "return", "overload_func", "return", "decorate" ]
a decorator marking the decorated function as typing and implementing attribute *attr* for the given numba type in nopython mode .
train
false
43,700
def _parse_fields(parsed): out = [] tags = parsed.find_all(_tag_ptn) for tag in tags: helpers.lowercase_attr_names(tag) while tags: tag = tags.pop(0) try: field = _parse_field(tag, tags) except exceptions.InvalidNameError: continue if (field is not None): out.append(field) return out
[ "def", "_parse_fields", "(", "parsed", ")", ":", "out", "=", "[", "]", "tags", "=", "parsed", ".", "find_all", "(", "_tag_ptn", ")", "for", "tag", "in", "tags", ":", "helpers", ".", "lowercase_attr_names", "(", "tag", ")", "while", "tags", ":", "tag", "=", "tags", ".", "pop", "(", "0", ")", "try", ":", "field", "=", "_parse_field", "(", "tag", ",", "tags", ")", "except", "exceptions", ".", "InvalidNameError", ":", "continue", "if", "(", "field", "is", "not", "None", ")", ":", "out", ".", "append", "(", "field", ")", "return", "out" ]
parse form fields from html .
train
true
43,701
def merge_dict_from_to(d1, d2): for (k, v) in list(d1.items()): if (k in d2): if isinstance(v, type(d2[k])): if isinstance(v, dict): merge_dict_from_to(d1[k], d2[k]) elif isinstance(v, list): d2[k].extend(copy.deepcopy(v)) elif isinstance(v, (basestring, bool, int, float, type(None))): pass else: raise Exception((u'Unknown type: %s value: %s in dictionary' % (type(v), repr(v)))) elif (isinstance(v, (basestring, bool, int, float, type(None))) and isinstance(d2[k], (basestring, bool, int, float, type(None)))): pass else: raise MergeException((u'Merging key %s failed, conflicting datatypes %r vs. %r.' % (k, type(v).__name__, type(d2[k]).__name__))) else: d2[k] = copy.deepcopy(v)
[ "def", "merge_dict_from_to", "(", "d1", ",", "d2", ")", ":", "for", "(", "k", ",", "v", ")", "in", "list", "(", "d1", ".", "items", "(", ")", ")", ":", "if", "(", "k", "in", "d2", ")", ":", "if", "isinstance", "(", "v", ",", "type", "(", "d2", "[", "k", "]", ")", ")", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "merge_dict_from_to", "(", "d1", "[", "k", "]", ",", "d2", "[", "k", "]", ")", "elif", "isinstance", "(", "v", ",", "list", ")", ":", "d2", "[", "k", "]", ".", "extend", "(", "copy", ".", "deepcopy", "(", "v", ")", ")", "elif", "isinstance", "(", "v", ",", "(", "basestring", ",", "bool", ",", "int", ",", "float", ",", "type", "(", "None", ")", ")", ")", ":", "pass", "else", ":", "raise", "Exception", "(", "(", "u'Unknown type: %s value: %s in dictionary'", "%", "(", "type", "(", "v", ")", ",", "repr", "(", "v", ")", ")", ")", ")", "elif", "(", "isinstance", "(", "v", ",", "(", "basestring", ",", "bool", ",", "int", ",", "float", ",", "type", "(", "None", ")", ")", ")", "and", "isinstance", "(", "d2", "[", "k", "]", ",", "(", "basestring", ",", "bool", ",", "int", ",", "float", ",", "type", "(", "None", ")", ")", ")", ")", ":", "pass", "else", ":", "raise", "MergeException", "(", "(", "u'Merging key %s failed, conflicting datatypes %r vs. %r.'", "%", "(", "k", ",", "type", "(", "v", ")", ".", "__name__", ",", "type", "(", "d2", "[", "k", "]", ")", ".", "__name__", ")", ")", ")", "else", ":", "d2", "[", "k", "]", "=", "copy", ".", "deepcopy", "(", "v", ")" ]
merges dictionary d1 into dictionary d2 .
train
false
43,702
def dedupe(contains_dupes, threshold=70, scorer=fuzz.token_set_ratio): extractor = [] for item in contains_dupes: matches = extract(item, contains_dupes, limit=None, scorer=scorer) filtered = [x for x in matches if (x[1] > threshold)] if (len(filtered) == 1): extractor.append(filtered[0][0]) else: filtered = sorted(filtered, key=(lambda x: x[0])) filter_sort = sorted(filtered, key=(lambda x: len(x[0])), reverse=True) extractor.append(filter_sort[0][0]) keys = {} for e in extractor: keys[e] = 1 extractor = keys.keys() if (len(extractor) == len(contains_dupes)): return contains_dupes else: return extractor
[ "def", "dedupe", "(", "contains_dupes", ",", "threshold", "=", "70", ",", "scorer", "=", "fuzz", ".", "token_set_ratio", ")", ":", "extractor", "=", "[", "]", "for", "item", "in", "contains_dupes", ":", "matches", "=", "extract", "(", "item", ",", "contains_dupes", ",", "limit", "=", "None", ",", "scorer", "=", "scorer", ")", "filtered", "=", "[", "x", "for", "x", "in", "matches", "if", "(", "x", "[", "1", "]", ">", "threshold", ")", "]", "if", "(", "len", "(", "filtered", ")", "==", "1", ")", ":", "extractor", ".", "append", "(", "filtered", "[", "0", "]", "[", "0", "]", ")", "else", ":", "filtered", "=", "sorted", "(", "filtered", ",", "key", "=", "(", "lambda", "x", ":", "x", "[", "0", "]", ")", ")", "filter_sort", "=", "sorted", "(", "filtered", ",", "key", "=", "(", "lambda", "x", ":", "len", "(", "x", "[", "0", "]", ")", ")", ",", "reverse", "=", "True", ")", "extractor", ".", "append", "(", "filter_sort", "[", "0", "]", "[", "0", "]", ")", "keys", "=", "{", "}", "for", "e", "in", "extractor", ":", "keys", "[", "e", "]", "=", "1", "extractor", "=", "keys", ".", "keys", "(", ")", "if", "(", "len", "(", "extractor", ")", "==", "len", "(", "contains_dupes", ")", ")", ":", "return", "contains_dupes", "else", ":", "return", "extractor" ]
this convenience function takes a list of strings containing duplicates and uses fuzzy matching to identify and remove duplicates .
train
true
43,703
def _determine_device_class(): if (__salt__['grains.get']('kernel') == 'Linux'): return '/Server/Linux'
[ "def", "_determine_device_class", "(", ")", ":", "if", "(", "__salt__", "[", "'grains.get'", "]", "(", "'kernel'", ")", "==", "'Linux'", ")", ":", "return", "'/Server/Linux'" ]
if no device class is given when adding a device .
train
false
43,704
def import_pyqt5(): import sip from PyQt5 import QtCore, QtSvg, QtWidgets, QtGui QtCore.Signal = QtCore.pyqtSignal QtCore.Slot = QtCore.pyqtSlot QtGuiCompat = types.ModuleType('QtGuiCompat') QtGuiCompat.__dict__.update(QtGui.__dict__) QtGuiCompat.__dict__.update(QtWidgets.__dict__) api = QT_API_PYQT5 return (QtCore, QtGuiCompat, QtSvg, api)
[ "def", "import_pyqt5", "(", ")", ":", "import", "sip", "from", "PyQt5", "import", "QtCore", ",", "QtSvg", ",", "QtWidgets", ",", "QtGui", "QtCore", ".", "Signal", "=", "QtCore", ".", "pyqtSignal", "QtCore", ".", "Slot", "=", "QtCore", ".", "pyqtSlot", "QtGuiCompat", "=", "types", ".", "ModuleType", "(", "'QtGuiCompat'", ")", "QtGuiCompat", ".", "__dict__", ".", "update", "(", "QtGui", ".", "__dict__", ")", "QtGuiCompat", ".", "__dict__", ".", "update", "(", "QtWidgets", ".", "__dict__", ")", "api", "=", "QT_API_PYQT5", "return", "(", "QtCore", ",", "QtGuiCompat", ",", "QtSvg", ",", "api", ")" ]
import pyqt5 importerrors rasied within this function are non-recoverable .
train
false
43,705
def getSliceElements(xmlElement): gElements = xmlElement.getChildrenWithClassNameRecursively('g') sliceElements = [] for gElement in gElements: if ('id' in gElement.attributeDictionary): idValue = gElement.attributeDictionary['id'].strip() if idValue.startswith('z:'): sliceElements.append(gElement) return sliceElements
[ "def", "getSliceElements", "(", "xmlElement", ")", ":", "gElements", "=", "xmlElement", ".", "getChildrenWithClassNameRecursively", "(", "'g'", ")", "sliceElements", "=", "[", "]", "for", "gElement", "in", "gElements", ":", "if", "(", "'id'", "in", "gElement", ".", "attributeDictionary", ")", ":", "idValue", "=", "gElement", ".", "attributeDictionary", "[", "'id'", "]", ".", "strip", "(", ")", "if", "idValue", ".", "startswith", "(", "'z:'", ")", ":", "sliceElements", ".", "append", "(", "gElement", ")", "return", "sliceElements" ]
get the slice elements .
train
false
43,706
def show_session(session_id=None, url='default', app_path='/', session=None, browser=None, new='tab', controller=None): if (session is not None): server_url = server_url_for_websocket_url(session._connection.url) session_id = session.id else: coords = _SessionCoordinates(dict(session_id=session_id, url=url, app_path=app_path)) server_url = coords.server_url session_id = coords.session_id if (controller is None): from bokeh.util.browser import get_browser_controller controller = get_browser_controller(browser=browser) controller.open(((server_url + '?bokeh-session-id=') + _encode_query_param(session_id)), new=_new_param[new])
[ "def", "show_session", "(", "session_id", "=", "None", ",", "url", "=", "'default'", ",", "app_path", "=", "'/'", ",", "session", "=", "None", ",", "browser", "=", "None", ",", "new", "=", "'tab'", ",", "controller", "=", "None", ")", ":", "if", "(", "session", "is", "not", "None", ")", ":", "server_url", "=", "server_url_for_websocket_url", "(", "session", ".", "_connection", ".", "url", ")", "session_id", "=", "session", ".", "id", "else", ":", "coords", "=", "_SessionCoordinates", "(", "dict", "(", "session_id", "=", "session_id", ",", "url", "=", "url", ",", "app_path", "=", "app_path", ")", ")", "server_url", "=", "coords", ".", "server_url", "session_id", "=", "coords", ".", "session_id", "if", "(", "controller", "is", "None", ")", ":", "from", "bokeh", ".", "util", ".", "browser", "import", "get_browser_controller", "controller", "=", "get_browser_controller", "(", "browser", "=", "browser", ")", "controller", ".", "open", "(", "(", "(", "server_url", "+", "'?bokeh-session-id='", ")", "+", "_encode_query_param", "(", "session_id", ")", ")", ",", "new", "=", "_new_param", "[", "new", "]", ")" ]
open a browser displaying a session document .
train
false
43,707
def spinner(label=''): sys.stdout.write(('\r DCTB %s %s' % (label, _spinner.next()))) sys.stdout.flush()
[ "def", "spinner", "(", "label", "=", "''", ")", ":", "sys", ".", "stdout", ".", "write", "(", "(", "'\\r DCTB %s %s'", "%", "(", "label", ",", "_spinner", ".", "next", "(", ")", ")", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")" ]
prints label with a spinner .
train
false
43,708
def register_sync_strategies(command_table, session, **kwargs): register_sync_strategy(session, SizeOnlySync) register_sync_strategy(session, ExactTimestampsSync) register_sync_strategy(session, DeleteSync, 'file_not_at_src')
[ "def", "register_sync_strategies", "(", "command_table", ",", "session", ",", "**", "kwargs", ")", ":", "register_sync_strategy", "(", "session", ",", "SizeOnlySync", ")", "register_sync_strategy", "(", "session", ",", "ExactTimestampsSync", ")", "register_sync_strategy", "(", "session", ",", "DeleteSync", ",", "'file_not_at_src'", ")" ]
registers the different sync strategies .
train
false
43,709
def get_token_prefix(url): return urlparse(url).netloc
[ "def", "get_token_prefix", "(", "url", ")", ":", "return", "urlparse", "(", "url", ")", ".", "netloc" ]
returns a prefix for the token to store in the session so we can hold more than one single oauth providers access key in the session .
train
false
43,711
def checksum_ip(ipvx, length, payload): if (ipvx.version == 4): header = struct.pack(_IPV4_PSEUDO_HEADER_PACK_STR, addrconv.ipv4.text_to_bin(ipvx.src), addrconv.ipv4.text_to_bin(ipvx.dst), ipvx.proto, length) elif (ipvx.version == 6): header = struct.pack(_IPV6_PSEUDO_HEADER_PACK_STR, addrconv.ipv6.text_to_bin(ipvx.src), addrconv.ipv6.text_to_bin(ipvx.dst), length, ipvx.nxt) else: raise ValueError(('Unknown IP version %d' % ipvx.version)) buf = (header + payload) return checksum(buf)
[ "def", "checksum_ip", "(", "ipvx", ",", "length", ",", "payload", ")", ":", "if", "(", "ipvx", ".", "version", "==", "4", ")", ":", "header", "=", "struct", ".", "pack", "(", "_IPV4_PSEUDO_HEADER_PACK_STR", ",", "addrconv", ".", "ipv4", ".", "text_to_bin", "(", "ipvx", ".", "src", ")", ",", "addrconv", ".", "ipv4", ".", "text_to_bin", "(", "ipvx", ".", "dst", ")", ",", "ipvx", ".", "proto", ",", "length", ")", "elif", "(", "ipvx", ".", "version", "==", "6", ")", ":", "header", "=", "struct", ".", "pack", "(", "_IPV6_PSEUDO_HEADER_PACK_STR", ",", "addrconv", ".", "ipv6", ".", "text_to_bin", "(", "ipvx", ".", "src", ")", ",", "addrconv", ".", "ipv6", ".", "text_to_bin", "(", "ipvx", ".", "dst", ")", ",", "length", ",", "ipvx", ".", "nxt", ")", "else", ":", "raise", "ValueError", "(", "(", "'Unknown IP version %d'", "%", "ipvx", ".", "version", ")", ")", "buf", "=", "(", "header", "+", "payload", ")", "return", "checksum", "(", "buf", ")" ]
calculate checksum of ip pseudo header ipv4 pseudo header udp rfc768 tcp rfc793 3 .
train
true
43,712
def GetRequestSize(request, env_dict, outfile): if ('content-length' in request.headers): request_size = int(request.headers['content-length']) elif (env_dict and (env_dict.get('REQUEST_METHOD', '') == 'POST')): _WriteErrorToOutput(('%d Length required' % httplib.LENGTH_REQUIRED), 'POST requests require a Content-length header.', outfile) return None else: request_size = 0 if (request_size <= MAX_REQUEST_SIZE): return request_size else: msg = ('HTTP request was too large: %d. The limit is: %d.' % (request_size, MAX_REQUEST_SIZE)) _WriteErrorToOutput(('%d Request entity too large' % httplib.REQUEST_ENTITY_TOO_LARGE), msg, outfile) return None
[ "def", "GetRequestSize", "(", "request", ",", "env_dict", ",", "outfile", ")", ":", "if", "(", "'content-length'", "in", "request", ".", "headers", ")", ":", "request_size", "=", "int", "(", "request", ".", "headers", "[", "'content-length'", "]", ")", "elif", "(", "env_dict", "and", "(", "env_dict", ".", "get", "(", "'REQUEST_METHOD'", ",", "''", ")", "==", "'POST'", ")", ")", ":", "_WriteErrorToOutput", "(", "(", "'%d Length required'", "%", "httplib", ".", "LENGTH_REQUIRED", ")", ",", "'POST requests require a Content-length header.'", ",", "outfile", ")", "return", "None", "else", ":", "request_size", "=", "0", "if", "(", "request_size", "<=", "MAX_REQUEST_SIZE", ")", ":", "return", "request_size", "else", ":", "msg", "=", "(", "'HTTP request was too large: %d. The limit is: %d.'", "%", "(", "request_size", ",", "MAX_REQUEST_SIZE", ")", ")", "_WriteErrorToOutput", "(", "(", "'%d Request entity too large'", "%", "httplib", ".", "REQUEST_ENTITY_TOO_LARGE", ")", ",", "msg", ",", "outfile", ")", "return", "None" ]
gets the size of the given request .
train
false
43,713
def GetSi(): return _si
[ "def", "GetSi", "(", ")", ":", "return", "_si" ]
get the saved service instance .
train
false
43,714
def find_all_wcs(header, relax=True, keysel=None, fix=True, translate_units=u'', _do_set=True): if isinstance(header, (six.text_type, six.binary_type)): header_string = header elif isinstance(header, fits.Header): header_string = header.tostring() else: raise TypeError(u'header must be a string or astropy.io.fits.Header object') keysel_flags = _parse_keysel(keysel) if isinstance(header_string, six.text_type): header_bytes = header_string.encode(u'ascii') else: header_bytes = header_string wcsprms = _wcs.find_all_wcs(header_bytes, relax, keysel_flags) result = [] for wcsprm in wcsprms: subresult = WCS(fix=False, _do_set=False) subresult.wcs = wcsprm result.append(subresult) if fix: subresult.fix(translate_units) if _do_set: subresult.wcs.set() return result
[ "def", "find_all_wcs", "(", "header", ",", "relax", "=", "True", ",", "keysel", "=", "None", ",", "fix", "=", "True", ",", "translate_units", "=", "u''", ",", "_do_set", "=", "True", ")", ":", "if", "isinstance", "(", "header", ",", "(", "six", ".", "text_type", ",", "six", ".", "binary_type", ")", ")", ":", "header_string", "=", "header", "elif", "isinstance", "(", "header", ",", "fits", ".", "Header", ")", ":", "header_string", "=", "header", ".", "tostring", "(", ")", "else", ":", "raise", "TypeError", "(", "u'header must be a string or astropy.io.fits.Header object'", ")", "keysel_flags", "=", "_parse_keysel", "(", "keysel", ")", "if", "isinstance", "(", "header_string", ",", "six", ".", "text_type", ")", ":", "header_bytes", "=", "header_string", ".", "encode", "(", "u'ascii'", ")", "else", ":", "header_bytes", "=", "header_string", "wcsprms", "=", "_wcs", ".", "find_all_wcs", "(", "header_bytes", ",", "relax", ",", "keysel_flags", ")", "result", "=", "[", "]", "for", "wcsprm", "in", "wcsprms", ":", "subresult", "=", "WCS", "(", "fix", "=", "False", ",", "_do_set", "=", "False", ")", "subresult", ".", "wcs", "=", "wcsprm", "result", ".", "append", "(", "subresult", ")", "if", "fix", ":", "subresult", ".", "fix", "(", "translate_units", ")", "if", "_do_set", ":", "subresult", ".", "wcs", ".", "set", "(", ")", "return", "result" ]
find all the wcs transformations in the given header .
train
false
43,715
@should_dump_psutil def start_psutil_dump(): dump_data_every_thread(dump_psutil, DELAY_MINUTES, SAVE_PSUTIL_PTR)
[ "@", "should_dump_psutil", "def", "start_psutil_dump", "(", ")", ":", "dump_data_every_thread", "(", "dump_psutil", ",", "DELAY_MINUTES", ",", "SAVE_PSUTIL_PTR", ")" ]
if the environment variable w3af_psutils is set to 1 .
train
false
43,716
def usecase3(x, N): for k in range(N): print(x.f1[k], x.s1[k], x.f2[k])
[ "def", "usecase3", "(", "x", ",", "N", ")", ":", "for", "k", "in", "range", "(", "N", ")", ":", "print", "(", "x", ".", "f1", "[", "k", "]", ",", "x", ".", "s1", "[", "k", "]", ",", "x", ".", "f2", "[", "k", "]", ")" ]
base on test2 of URL .
train
false
43,717
def openAllU6(): returnDict = dict() for i in range(deviceCount(6)): d = U6(firstFound=False, devNumber=(i + 1)) returnDict[str(d.serialNumber)] = d return returnDict
[ "def", "openAllU6", "(", ")", ":", "returnDict", "=", "dict", "(", ")", "for", "i", "in", "range", "(", "deviceCount", "(", "6", ")", ")", ":", "d", "=", "U6", "(", "firstFound", "=", "False", ",", "devNumber", "=", "(", "i", "+", "1", ")", ")", "returnDict", "[", "str", "(", "d", ".", "serialNumber", ")", "]", "=", "d", "return", "returnDict" ]
a helpful function which will open all the connected u6s .
train
false
43,723
def _get_current_database(): if settings.USE_POSTGRES: return None try: return _get_current_client()[settings.DB_NAME] except ConnectionFailure: if settings.DEBUG_MODE: logger.warn('Cannot connect to database.') return None else: raise
[ "def", "_get_current_database", "(", ")", ":", "if", "settings", ".", "USE_POSTGRES", ":", "return", "None", "try", ":", "return", "_get_current_client", "(", ")", "[", "settings", ".", "DB_NAME", "]", "except", "ConnectionFailure", ":", "if", "settings", ".", "DEBUG_MODE", ":", "logger", ".", "warn", "(", "'Cannot connect to database.'", ")", "return", "None", "else", ":", "raise" ]
getter for database proxy .
train
false
43,724
def imread_collection(load_pattern, conserve_memory=True): intype = type(load_pattern) if ((intype is not list) and (intype is not str)): raise TypeError('Input must be a filename or list of filenames') if (intype is not list): load_pattern = [load_pattern] ext_list = [] for filename in load_pattern: hdulist = pyfits.open(filename) for (n, hdu) in zip(range(len(hdulist)), hdulist): if (isinstance(hdu, pyfits.ImageHDU) or isinstance(hdu, pyfits.PrimaryHDU)): try: data_size = hdu.size() except TypeError: data_size = hdu.size if (data_size > 0): ext_list.append((filename, n)) hdulist.close() return io.ImageCollection(ext_list, load_func=FITSFactory, conserve_memory=conserve_memory)
[ "def", "imread_collection", "(", "load_pattern", ",", "conserve_memory", "=", "True", ")", ":", "intype", "=", "type", "(", "load_pattern", ")", "if", "(", "(", "intype", "is", "not", "list", ")", "and", "(", "intype", "is", "not", "str", ")", ")", ":", "raise", "TypeError", "(", "'Input must be a filename or list of filenames'", ")", "if", "(", "intype", "is", "not", "list", ")", ":", "load_pattern", "=", "[", "load_pattern", "]", "ext_list", "=", "[", "]", "for", "filename", "in", "load_pattern", ":", "hdulist", "=", "pyfits", ".", "open", "(", "filename", ")", "for", "(", "n", ",", "hdu", ")", "in", "zip", "(", "range", "(", "len", "(", "hdulist", ")", ")", ",", "hdulist", ")", ":", "if", "(", "isinstance", "(", "hdu", ",", "pyfits", ".", "ImageHDU", ")", "or", "isinstance", "(", "hdu", ",", "pyfits", ".", "PrimaryHDU", ")", ")", ":", "try", ":", "data_size", "=", "hdu", ".", "size", "(", ")", "except", "TypeError", ":", "data_size", "=", "hdu", ".", "size", "if", "(", "data_size", ">", "0", ")", ":", "ext_list", ".", "append", "(", "(", "filename", ",", "n", ")", ")", "hdulist", ".", "close", "(", ")", "return", "io", ".", "ImageCollection", "(", "ext_list", ",", "load_func", "=", "FITSFactory", ",", "conserve_memory", "=", "conserve_memory", ")" ]
load a collection of images .
train
false
43,726
def is_rgb_like(image): return ((image.ndim == 3) and (image.shape[2] in (3, 4)))
[ "def", "is_rgb_like", "(", "image", ")", ":", "return", "(", "(", "image", ".", "ndim", "==", "3", ")", "and", "(", "image", ".", "shape", "[", "2", "]", "in", "(", "3", ",", "4", ")", ")", ")" ]
return true if the image *looks* like its rgb .
train
false
43,727
def MakeCdfFromPmf(pmf, label=None): if (label is None): label = pmf.label return Cdf(pmf, label=label)
[ "def", "MakeCdfFromPmf", "(", "pmf", ",", "label", "=", "None", ")", ":", "if", "(", "label", "is", "None", ")", ":", "label", "=", "pmf", ".", "label", "return", "Cdf", "(", "pmf", ",", "label", "=", "label", ")" ]
makes a cdf from a pmf object .
train
false
43,728
def path_for_import(name): return os.path.dirname(os.path.abspath(import_module(name).__file__))
[ "def", "path_for_import", "(", "name", ")", ":", "return", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "import_module", "(", "name", ")", ".", "__file__", ")", ")" ]
returns the directory path for the given package or module .
train
true
43,729
def plugin(): return SwapQuotes
[ "def", "plugin", "(", ")", ":", "return", "SwapQuotes" ]
make plugin available .
train
false
43,732
def subdir_findall(dir, subdir): strip_n = len(dir.split('/')) path = '/'.join((dir, subdir)) return ['/'.join(s.split('/')[strip_n:]) for s in setuptools.findall(path)]
[ "def", "subdir_findall", "(", "dir", ",", "subdir", ")", ":", "strip_n", "=", "len", "(", "dir", ".", "split", "(", "'/'", ")", ")", "path", "=", "'/'", ".", "join", "(", "(", "dir", ",", "subdir", ")", ")", "return", "[", "'/'", ".", "join", "(", "s", ".", "split", "(", "'/'", ")", "[", "strip_n", ":", "]", ")", "for", "s", "in", "setuptools", ".", "findall", "(", "path", ")", "]" ]
find all files in a subdirectory and return paths relative to dir this is similar to setuptools .
train
true
43,734
def stochasticTimeScale(stocEnv, timeScaling): if ((timeScaling.size % 2) != 0): raise ValueError('Time scaling array does not have an even size') L = stocEnv[:, 0].size outL = int(((L * timeScaling[(-1)]) / timeScaling[(-2)])) timeScalingEnv = interp1d((timeScaling[::2] / timeScaling[(-2)]), (timeScaling[1::2] / timeScaling[(-1)])) indexes = ((L - 1) * timeScalingEnv((np.arange(outL) / float(outL)))) ystocEnv = stocEnv[0, :] for l in indexes[1:]: ystocEnv = np.vstack((ystocEnv, stocEnv[round(l), :])) return ystocEnv
[ "def", "stochasticTimeScale", "(", "stocEnv", ",", "timeScaling", ")", ":", "if", "(", "(", "timeScaling", ".", "size", "%", "2", ")", "!=", "0", ")", ":", "raise", "ValueError", "(", "'Time scaling array does not have an even size'", ")", "L", "=", "stocEnv", "[", ":", ",", "0", "]", ".", "size", "outL", "=", "int", "(", "(", "(", "L", "*", "timeScaling", "[", "(", "-", "1", ")", "]", ")", "/", "timeScaling", "[", "(", "-", "2", ")", "]", ")", ")", "timeScalingEnv", "=", "interp1d", "(", "(", "timeScaling", "[", ":", ":", "2", "]", "/", "timeScaling", "[", "(", "-", "2", ")", "]", ")", ",", "(", "timeScaling", "[", "1", ":", ":", "2", "]", "/", "timeScaling", "[", "(", "-", "1", ")", "]", ")", ")", "indexes", "=", "(", "(", "L", "-", "1", ")", "*", "timeScalingEnv", "(", "(", "np", ".", "arange", "(", "outL", ")", "/", "float", "(", "outL", ")", ")", ")", ")", "ystocEnv", "=", "stocEnv", "[", "0", ",", ":", "]", "for", "l", "in", "indexes", "[", "1", ":", "]", ":", "ystocEnv", "=", "np", ".", "vstack", "(", "(", "ystocEnv", ",", "stocEnv", "[", "round", "(", "l", ")", ",", ":", "]", ")", ")", "return", "ystocEnv" ]
time scaling of the stochastic representation of a sound stocenv: stochastic envelope timescaling: scaling factors .
train
false
43,735
def ParseFloat(text): try: return float(text) except ValueError: if _FLOAT_INFINITY.match(text): if (text[0] == '-'): return float('-inf') else: return float('inf') elif _FLOAT_NAN.match(text): return float('nan') else: try: return float(text.rstrip('f')) except ValueError: raise ValueError(("Couldn't parse float: %s" % text))
[ "def", "ParseFloat", "(", "text", ")", ":", "try", ":", "return", "float", "(", "text", ")", "except", "ValueError", ":", "if", "_FLOAT_INFINITY", ".", "match", "(", "text", ")", ":", "if", "(", "text", "[", "0", "]", "==", "'-'", ")", ":", "return", "float", "(", "'-inf'", ")", "else", ":", "return", "float", "(", "'inf'", ")", "elif", "_FLOAT_NAN", ".", "match", "(", "text", ")", ":", "return", "float", "(", "'nan'", ")", "else", ":", "try", ":", "return", "float", "(", "text", ".", "rstrip", "(", "'f'", ")", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "(", "\"Couldn't parse float: %s\"", "%", "text", ")", ")" ]
parse a floating point number .
train
true
43,737
def test_post_delete(topic): post_middle = Post(content='Test Content Middle') post_middle.save(topic=topic, user=topic.user) post_last = Post(content='Test Content Last') post_last.save(topic=topic, user=topic.user) assert (topic.post_count == 3) assert (topic.forum.post_count == 3) assert (topic.user.post_count == 3) post_middle.delete() assert (topic.last_post == post_last) assert (topic.forum.last_post == post_last) post_last.delete() assert (topic.post_count == 1) assert (topic.forum.post_count == 1) assert (topic.user.post_count == 1) assert (topic.first_post_id == topic.last_post_id) assert (topic.forum.last_post_id == topic.last_post_id)
[ "def", "test_post_delete", "(", "topic", ")", ":", "post_middle", "=", "Post", "(", "content", "=", "'Test Content Middle'", ")", "post_middle", ".", "save", "(", "topic", "=", "topic", ",", "user", "=", "topic", ".", "user", ")", "post_last", "=", "Post", "(", "content", "=", "'Test Content Last'", ")", "post_last", ".", "save", "(", "topic", "=", "topic", ",", "user", "=", "topic", ".", "user", ")", "assert", "(", "topic", ".", "post_count", "==", "3", ")", "assert", "(", "topic", ".", "forum", ".", "post_count", "==", "3", ")", "assert", "(", "topic", ".", "user", ".", "post_count", "==", "3", ")", "post_middle", ".", "delete", "(", ")", "assert", "(", "topic", ".", "last_post", "==", "post_last", ")", "assert", "(", "topic", ".", "forum", ".", "last_post", "==", "post_last", ")", "post_last", ".", "delete", "(", ")", "assert", "(", "topic", ".", "post_count", "==", "1", ")", "assert", "(", "topic", ".", "forum", ".", "post_count", "==", "1", ")", "assert", "(", "topic", ".", "user", ".", "post_count", "==", "1", ")", "assert", "(", "topic", ".", "first_post_id", "==", "topic", ".", "last_post_id", ")", "assert", "(", "topic", ".", "forum", ".", "last_post_id", "==", "topic", ".", "last_post_id", ")" ]
tests the delete post method with three different post types .
train
false
43,738
def _create_pbuilders(env): home = os.path.expanduser('~') pbuilderrc = os.path.join(home, '.pbuilderrc') if (not os.path.isfile(pbuilderrc)): raise SaltInvocationError('pbuilderrc environment is incorrectly setup') env_overrides = _get_build_env(env) if (env_overrides and (not env_overrides.isspace())): with salt.utils.fopen(pbuilderrc, 'a') as fow: fow.write('{0}'.format(env_overrides))
[ "def", "_create_pbuilders", "(", "env", ")", ":", "home", "=", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", "pbuilderrc", "=", "os", ".", "path", ".", "join", "(", "home", ",", "'.pbuilderrc'", ")", "if", "(", "not", "os", ".", "path", ".", "isfile", "(", "pbuilderrc", ")", ")", ":", "raise", "SaltInvocationError", "(", "'pbuilderrc environment is incorrectly setup'", ")", "env_overrides", "=", "_get_build_env", "(", "env", ")", "if", "(", "env_overrides", "and", "(", "not", "env_overrides", ".", "isspace", "(", ")", ")", ")", ":", "with", "salt", ".", "utils", ".", "fopen", "(", "pbuilderrc", ",", "'a'", ")", "as", "fow", ":", "fow", ".", "write", "(", "'{0}'", ".", "format", "(", "env_overrides", ")", ")" ]
create the .
train
false
43,741
def imageToArray(img, copy=False, transpose=True): fmt = img.format() ptr = img.bits() if USE_PYSIDE: arr = np.frombuffer(ptr, dtype=np.ubyte) else: ptr.setsize(img.byteCount()) arr = np.asarray(ptr) if (img.byteCount() != (arr.size * arr.itemsize)): arr = np.frombuffer(ptr, np.ubyte, img.byteCount()) arr = arr.reshape(img.height(), img.width(), 4) if (fmt == img.Format_RGB32): arr[..., 3] = 255 if copy: arr = arr.copy() if transpose: return arr.transpose((1, 0, 2)) else: return arr
[ "def", "imageToArray", "(", "img", ",", "copy", "=", "False", ",", "transpose", "=", "True", ")", ":", "fmt", "=", "img", ".", "format", "(", ")", "ptr", "=", "img", ".", "bits", "(", ")", "if", "USE_PYSIDE", ":", "arr", "=", "np", ".", "frombuffer", "(", "ptr", ",", "dtype", "=", "np", ".", "ubyte", ")", "else", ":", "ptr", ".", "setsize", "(", "img", ".", "byteCount", "(", ")", ")", "arr", "=", "np", ".", "asarray", "(", "ptr", ")", "if", "(", "img", ".", "byteCount", "(", ")", "!=", "(", "arr", ".", "size", "*", "arr", ".", "itemsize", ")", ")", ":", "arr", "=", "np", ".", "frombuffer", "(", "ptr", ",", "np", ".", "ubyte", ",", "img", ".", "byteCount", "(", ")", ")", "arr", "=", "arr", ".", "reshape", "(", "img", ".", "height", "(", ")", ",", "img", ".", "width", "(", ")", ",", "4", ")", "if", "(", "fmt", "==", "img", ".", "Format_RGB32", ")", ":", "arr", "[", "...", ",", "3", "]", "=", "255", "if", "copy", ":", "arr", "=", "arr", ".", "copy", "(", ")", "if", "transpose", ":", "return", "arr", ".", "transpose", "(", "(", "1", ",", "0", ",", "2", ")", ")", "else", ":", "return", "arr" ]
convert a qimage into numpy array .
train
false
43,742
@commands(u'comments') def show_comments(bot, trigger): if (not ismeetingrunning(trigger.sender)): return if (not ischair(trigger.nick, trigger.sender)): bot.say(u'Only meeting head or chairs can do that') return comments = meetings_dict[trigger.sender][u'comments'] if comments: msg = u'The following comments were made:' bot.say(msg) logplain((u'<%s> %s' % (bot.nick, msg)), trigger.sender) for comment in comments: msg = (u'<%s> %s' % comment) bot.say(msg) logplain((u'<%s> %s' % (bot.nick, msg)), trigger.sender) meetings_dict[trigger.sender][u'comments'] = [] else: bot.say(u'No comments have been logged.')
[ "@", "commands", "(", "u'comments'", ")", "def", "show_comments", "(", "bot", ",", "trigger", ")", ":", "if", "(", "not", "ismeetingrunning", "(", "trigger", ".", "sender", ")", ")", ":", "return", "if", "(", "not", "ischair", "(", "trigger", ".", "nick", ",", "trigger", ".", "sender", ")", ")", ":", "bot", ".", "say", "(", "u'Only meeting head or chairs can do that'", ")", "return", "comments", "=", "meetings_dict", "[", "trigger", ".", "sender", "]", "[", "u'comments'", "]", "if", "comments", ":", "msg", "=", "u'The following comments were made:'", "bot", ".", "say", "(", "msg", ")", "logplain", "(", "(", "u'<%s> %s'", "%", "(", "bot", ".", "nick", ",", "msg", ")", ")", ",", "trigger", ".", "sender", ")", "for", "comment", "in", "comments", ":", "msg", "=", "(", "u'<%s> %s'", "%", "comment", ")", "bot", ".", "say", "(", "msg", ")", "logplain", "(", "(", "u'<%s> %s'", "%", "(", "bot", ".", "nick", ",", "msg", ")", ")", ",", "trigger", ".", "sender", ")", "meetings_dict", "[", "trigger", ".", "sender", "]", "[", "u'comments'", "]", "=", "[", "]", "else", ":", "bot", ".", "say", "(", "u'No comments have been logged.'", ")" ]
show the comments that have been logged for this meeting with .
train
false
43,743
def get_article_tabs(): return _article_tabs
[ "def", "get_article_tabs", "(", ")", ":", "return", "_article_tabs" ]
get all article tab dictionaries from plugins .
train
false
43,744
def oo_select_keys(data, keys): if (not isinstance(data, Mapping)): raise errors.AnsibleFilterError('|failed expects to filter on a dict or object') if (not isinstance(keys, list)): raise errors.AnsibleFilterError('|failed expects first param is a list') retval = [data[key] for key in keys if (key in data)] return retval
[ "def", "oo_select_keys", "(", "data", ",", "keys", ")", ":", "if", "(", "not", "isinstance", "(", "data", ",", "Mapping", ")", ")", ":", "raise", "errors", ".", "AnsibleFilterError", "(", "'|failed expects to filter on a dict or object'", ")", "if", "(", "not", "isinstance", "(", "keys", ",", "list", ")", ")", ":", "raise", "errors", ".", "AnsibleFilterError", "(", "'|failed expects first param is a list'", ")", "retval", "=", "[", "data", "[", "key", "]", "for", "key", "in", "keys", "if", "(", "key", "in", "data", ")", "]", "return", "retval" ]
this returns a list .
train
false
43,746
def _int0oo_1(g, x): from sympy import gamma, combsimp, unpolarify (eta, _) = _get_coeff_exp(g.argument, x) res = (1 / eta) for b in g.bm: res *= gamma((b + 1)) for a in g.an: res *= gamma(((1 - a) - 1)) for b in g.bother: res /= gamma(((1 - b) - 1)) for a in g.aother: res /= gamma((a + 1)) return combsimp(unpolarify(res))
[ "def", "_int0oo_1", "(", "g", ",", "x", ")", ":", "from", "sympy", "import", "gamma", ",", "combsimp", ",", "unpolarify", "(", "eta", ",", "_", ")", "=", "_get_coeff_exp", "(", "g", ".", "argument", ",", "x", ")", "res", "=", "(", "1", "/", "eta", ")", "for", "b", "in", "g", ".", "bm", ":", "res", "*=", "gamma", "(", "(", "b", "+", "1", ")", ")", "for", "a", "in", "g", ".", "an", ":", "res", "*=", "gamma", "(", "(", "(", "1", "-", "a", ")", "-", "1", ")", ")", "for", "b", "in", "g", ".", "bother", ":", "res", "/=", "gamma", "(", "(", "(", "1", "-", "b", ")", "-", "1", ")", ")", "for", "a", "in", "g", ".", "aother", ":", "res", "/=", "gamma", "(", "(", "a", "+", "1", ")", ")", "return", "combsimp", "(", "unpolarify", "(", "res", ")", ")" ]
evaluate int_0^infty g dx using g functions .
train
false
43,747
def compare_obj(test, obj, db_obj, subs=None, allow_missing=None, comparators=None): if (subs is None): subs = {} if (allow_missing is None): allow_missing = [] if (comparators is None): comparators = {} for key in obj.fields: if ((key in allow_missing) and (not obj.obj_attr_is_set(key))): continue obj_val = getattr(obj, key) db_key = subs.get(key, key) db_val = db_obj[db_key] if isinstance(obj_val, datetime.datetime): obj_val = obj_val.replace(tzinfo=None) if (key in comparators): comparator = comparators[key] comparator(db_val, obj_val) else: test.assertEqual(db_val, obj_val)
[ "def", "compare_obj", "(", "test", ",", "obj", ",", "db_obj", ",", "subs", "=", "None", ",", "allow_missing", "=", "None", ",", "comparators", "=", "None", ")", ":", "if", "(", "subs", "is", "None", ")", ":", "subs", "=", "{", "}", "if", "(", "allow_missing", "is", "None", ")", ":", "allow_missing", "=", "[", "]", "if", "(", "comparators", "is", "None", ")", ":", "comparators", "=", "{", "}", "for", "key", "in", "obj", ".", "fields", ":", "if", "(", "(", "key", "in", "allow_missing", ")", "and", "(", "not", "obj", ".", "obj_attr_is_set", "(", "key", ")", ")", ")", ":", "continue", "obj_val", "=", "getattr", "(", "obj", ",", "key", ")", "db_key", "=", "subs", ".", "get", "(", "key", ",", "key", ")", "db_val", "=", "db_obj", "[", "db_key", "]", "if", "isinstance", "(", "obj_val", ",", "datetime", ".", "datetime", ")", ":", "obj_val", "=", "obj_val", ".", "replace", "(", "tzinfo", "=", "None", ")", "if", "(", "key", "in", "comparators", ")", ":", "comparator", "=", "comparators", "[", "key", "]", "comparator", "(", "db_val", ",", "obj_val", ")", "else", ":", "test", ".", "assertEqual", "(", "db_val", ",", "obj_val", ")" ]
compare a NovaObject and a dict-like database object .
train
false
43,748
def decode_hex_number(raw, codec=u'utf-8'): (raw, consumed) = decode_string(raw, codec=codec) return (int(raw, 16), consumed)
[ "def", "decode_hex_number", "(", "raw", ",", "codec", "=", "u'utf-8'", ")", ":", "(", "raw", ",", "consumed", ")", "=", "decode_string", "(", "raw", ",", "codec", "=", "codec", ")", "return", "(", "int", "(", "raw", ",", "16", ")", ",", "consumed", ")" ]
return a variable length number encoded using hexadecimal encoding .
train
false
43,749
def mutFlipBit(individual, indpb): for i in xrange(len(individual)): if (random.random() < indpb): individual[i] = type(individual[i])((not individual[i])) return (individual,)
[ "def", "mutFlipBit", "(", "individual", ",", "indpb", ")", ":", "for", "i", "in", "xrange", "(", "len", "(", "individual", ")", ")", ":", "if", "(", "random", ".", "random", "(", ")", "<", "indpb", ")", ":", "individual", "[", "i", "]", "=", "type", "(", "individual", "[", "i", "]", ")", "(", "(", "not", "individual", "[", "i", "]", ")", ")", "return", "(", "individual", ",", ")" ]
flip the value of the attributes of the input individual and return the mutant .
train
false
43,750
def assignment(): return s3_rest_controller()
[ "def", "assignment", "(", ")", ":", "return", "s3_rest_controller", "(", ")" ]
job assignments - restful controller .
train
false
43,751
def evass_realm_entity(table, row): db = current.db s3db = current.s3db tablename = table._tablename realm_entity = None if (tablename in ('event_event', 'evr_case', 'cr_shelter', 'hrm_human_resource', 'org_facility', 'org_office')): otable = s3db.org_organisation organisation_id = row.organisation_id if organisation_id: org = db((otable.id == organisation_id)).select(otable.realm_entity, limitby=(0, 1)).first() realm_entity = org.realm_entity elif (tablename == 'event_incident'): etable = db.event_event try: incident_id = row.id query = ((table.id == incident_id) & (etable.id == table.event_id)) event = db(query).select(etable.realm_entity, limitby=(0, 1)).first() realm_entity = event.realm_entity except: return elif (tablename == 'pr_group'): user = current.auth.user if user: realm_entity = s3db.pr_get_pe_id('org_organisation', user.organisation_id) elif (tablename == 'org_organisation'): realm_entity = row.pe_id return realm_entity
[ "def", "evass_realm_entity", "(", "table", ",", "row", ")", ":", "db", "=", "current", ".", "db", "s3db", "=", "current", ".", "s3db", "tablename", "=", "table", ".", "_tablename", "realm_entity", "=", "None", "if", "(", "tablename", "in", "(", "'event_event'", ",", "'evr_case'", ",", "'cr_shelter'", ",", "'hrm_human_resource'", ",", "'org_facility'", ",", "'org_office'", ")", ")", ":", "otable", "=", "s3db", ".", "org_organisation", "organisation_id", "=", "row", ".", "organisation_id", "if", "organisation_id", ":", "org", "=", "db", "(", "(", "otable", ".", "id", "==", "organisation_id", ")", ")", ".", "select", "(", "otable", ".", "realm_entity", ",", "limitby", "=", "(", "0", ",", "1", ")", ")", ".", "first", "(", ")", "realm_entity", "=", "org", ".", "realm_entity", "elif", "(", "tablename", "==", "'event_incident'", ")", ":", "etable", "=", "db", ".", "event_event", "try", ":", "incident_id", "=", "row", ".", "id", "query", "=", "(", "(", "table", ".", "id", "==", "incident_id", ")", "&", "(", "etable", ".", "id", "==", "table", ".", "event_id", ")", ")", "event", "=", "db", "(", "query", ")", ".", "select", "(", "etable", ".", "realm_entity", ",", "limitby", "=", "(", "0", ",", "1", ")", ")", ".", "first", "(", ")", "realm_entity", "=", "event", ".", "realm_entity", "except", ":", "return", "elif", "(", "tablename", "==", "'pr_group'", ")", ":", "user", "=", "current", ".", "auth", ".", "user", "if", "user", ":", "realm_entity", "=", "s3db", ".", "pr_get_pe_id", "(", "'org_organisation'", ",", "user", ".", "organisation_id", ")", "elif", "(", "tablename", "==", "'org_organisation'", ")", ":", "realm_entity", "=", "row", ".", "pe_id", "return", "realm_entity" ]
assign a realm entity to records .
train
false
43,752
def tokenify(cmd, token=None): if (token is not None): cmd['token'] = token return cmd
[ "def", "tokenify", "(", "cmd", ",", "token", "=", "None", ")", ":", "if", "(", "token", "is", "not", "None", ")", ":", "cmd", "[", "'token'", "]", "=", "token", "return", "cmd" ]
if token is not none then assign token to token key of cmd dict and return cmd otherwise return cmd .
train
false
43,754
def _file_write(path, content): with salt.utils.fopen(path, 'w+') as fp_: fp_.write(content) fp_.close()
[ "def", "_file_write", "(", "path", ",", "content", ")", ":", "with", "salt", ".", "utils", ".", "fopen", "(", "path", ",", "'w+'", ")", "as", "fp_", ":", "fp_", ".", "write", "(", "content", ")", "fp_", ".", "close", "(", ")" ]
write content to a file .
train
false
43,755
def enable_monitor(channel=None): tmp = init_app('iwconfig', True) iface = None for line in tmp.split('\n'): if line.startswith('wlan'): try: iface = line.split(' ')[0] if (channel is None): tmp = getoutput('airmon-ng start {0}'.format(iface)) else: tmp = getoutput('airmon-ng start {0} {1}'.format(iface, channel)) debug(("started '%s' in monitor mode" % iface)) except Exception as j: Error(('Error enabling monitor mode: %s' % j)) break return get_monitor_adapter()
[ "def", "enable_monitor", "(", "channel", "=", "None", ")", ":", "tmp", "=", "init_app", "(", "'iwconfig'", ",", "True", ")", "iface", "=", "None", "for", "line", "in", "tmp", ".", "split", "(", "'\\n'", ")", ":", "if", "line", ".", "startswith", "(", "'wlan'", ")", ":", "try", ":", "iface", "=", "line", ".", "split", "(", "' '", ")", "[", "0", "]", "if", "(", "channel", "is", "None", ")", ":", "tmp", "=", "getoutput", "(", "'airmon-ng start {0}'", ".", "format", "(", "iface", ")", ")", "else", ":", "tmp", "=", "getoutput", "(", "'airmon-ng start {0} {1}'", ".", "format", "(", "iface", ",", "channel", ")", ")", "debug", "(", "(", "\"started '%s' in monitor mode\"", "%", "iface", ")", ")", "except", "Exception", "as", "j", ":", "Error", "(", "(", "'Error enabling monitor mode: %s'", "%", "j", ")", ")", "break", "return", "get_monitor_adapter", "(", ")" ]
enable monitor mode on the wireless adapter ; channel is the channel to monitor on .
train
false
43,756
def gff_attributes_to_str(attrs, gff_format): if (gff_format == 'GTF'): format_string = '%s "%s"' id_attr = None if ('group' in attrs): id_attr = 'group' elif ('ID' in attrs): id_attr = 'ID' elif ('Parent' in attrs): id_attr = 'Parent' if id_attr: attrs['transcript_id'] = attrs['gene_id'] = attrs[id_attr] elif (gff_format == 'GFF3'): format_string = '%s=%s' attrs_strs = [] for (name, value) in attrs.items(): attrs_strs.append((format_string % (name, value))) return ' ; '.join(attrs_strs)
[ "def", "gff_attributes_to_str", "(", "attrs", ",", "gff_format", ")", ":", "if", "(", "gff_format", "==", "'GTF'", ")", ":", "format_string", "=", "'%s \"%s\"'", "id_attr", "=", "None", "if", "(", "'group'", "in", "attrs", ")", ":", "id_attr", "=", "'group'", "elif", "(", "'ID'", "in", "attrs", ")", ":", "id_attr", "=", "'ID'", "elif", "(", "'Parent'", "in", "attrs", ")", ":", "id_attr", "=", "'Parent'", "if", "id_attr", ":", "attrs", "[", "'transcript_id'", "]", "=", "attrs", "[", "'gene_id'", "]", "=", "attrs", "[", "id_attr", "]", "elif", "(", "gff_format", "==", "'GFF3'", ")", ":", "format_string", "=", "'%s=%s'", "attrs_strs", "=", "[", "]", "for", "(", "name", ",", "value", ")", "in", "attrs", ".", "items", "(", ")", ":", "attrs_strs", ".", "append", "(", "(", "format_string", "%", "(", "name", ",", "value", ")", ")", ")", "return", "' ; '", ".", "join", "(", "attrs_strs", ")" ]
convert gff attributes to string .
train
false
43,757
def test_datetime_tzinfo(): class TZm6(datetime.tzinfo, ): def utcoffset(self, dt): return datetime.timedelta(hours=(-6)) d = datetime.datetime(2002, 1, 2, 10, 3, 4, tzinfo=TZm6()) t = Time(d) assert (t.value == datetime.datetime(2002, 1, 2, 16, 3, 4))
[ "def", "test_datetime_tzinfo", "(", ")", ":", "class", "TZm6", "(", "datetime", ".", "tzinfo", ",", ")", ":", "def", "utcoffset", "(", "self", ",", "dt", ")", ":", "return", "datetime", ".", "timedelta", "(", "hours", "=", "(", "-", "6", ")", ")", "d", "=", "datetime", ".", "datetime", "(", "2002", ",", "1", ",", "2", ",", "10", ",", "3", ",", "4", ",", "tzinfo", "=", "TZm6", "(", ")", ")", "t", "=", "Time", "(", "d", ")", "assert", "(", "t", ".", "value", "==", "datetime", ".", "datetime", "(", "2002", ",", "1", ",", "2", ",", "16", ",", "3", ",", "4", ")", ")" ]
test #3160 that time zone info in datetime objects is respected .
train
false
43,759
@memoize def git_version_str(): return git.version()[STDOUT].strip()
[ "@", "memoize", "def", "git_version_str", "(", ")", ":", "return", "git", ".", "version", "(", ")", "[", "STDOUT", "]", ".", "strip", "(", ")" ]
returns the current git version .
train
false