id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
23,500
def _datetime_to_millis(dtm): if (dtm.utcoffset() is not None): dtm = (dtm - dtm.utcoffset()) return int(((calendar.timegm(dtm.timetuple()) * 1000) + (dtm.microsecond / 1000)))
[ "def", "_datetime_to_millis", "(", "dtm", ")", ":", "if", "(", "dtm", ".", "utcoffset", "(", ")", "is", "not", "None", ")", ":", "dtm", "=", "(", "dtm", "-", "dtm", ".", "utcoffset", "(", ")", ")", "return", "int", "(", "(", "(", "calendar", ".", "timegm", "(", "dtm", ".", "timetuple", "(", ")", ")", "*", "1000", ")", "+", "(", "dtm", ".", "microsecond", "/", "1000", ")", ")", ")" ]
convert datetime to milliseconds since epoch utc .
train
true
23,501
def AnalyzeODex(filename, decompiler='dad', session=None): androconf.debug('AnalyzeODex') if (not session): session = CONF['SESSION'] with open(filename, 'r') as fd: data = fd.read() return session.addDEY(filename, data)
[ "def", "AnalyzeODex", "(", "filename", ",", "decompiler", "=", "'dad'", ",", "session", "=", "None", ")", ":", "androconf", ".", "debug", "(", "'AnalyzeODex'", ")", "if", "(", "not", "session", ")", ":", "session", "=", "CONF", "[", "'SESSION'", "]", "with", "open", "(", "filename", ",", "'r'", ")", "as", "fd", ":", "data", "=", "fd", ".", "read", "(", ")", "return", "session", ".", "addDEY", "(", "filename", ",", "data", ")" ]
analyze an android odex file and setup all stuff for a more quickly analysis ! .
train
false
23,502
def cinder_from_configuration(region, cluster_id, **config): def lazy_cinder_loader(): '\n Build the v1 or v2 ``ICinderVolumeManager`` wrapped for compatibility\n with the v1 API and wrapped to provide logging of API calls.\n This will be invoked by ``LazyLoadingProxy`` the first time an\n ``ICinderVolumeManager`` attribute is accessed.\n The reason for the lazy loading of the volume manager is so that the\n the cinder API version detection can delayed until the\n ``flocker-dataset-agent`` loop has started. And the reason for that is\n so that exceptions (e.g. keystone connection errors) that occur during\n the cinder API version detection, do not occur when the\n ``CinderBlockDeviceAPI`` is initialized and crash the process. This way\n errors will be caught by the loop and the cinder API version detection\n will be retried until it succeeds.\n\n :returns: The ``ICinderVolumeManager`` wrapper.\n ' session = get_keystone_session(**config) session.get_token() cinder_client = get_cinder_client(session=session, region=region) wrapped_cinder_volume_manager = _LoggingCinderVolumeManager(cinder_client.volumes) cinder_client_version = get_api_version(cinder_client.version) adapted_cinder_volume_manager = CINDER_V1_ADAPTERS[cinder_client_version.ver_major](wrapped_cinder_volume_manager) return adapted_cinder_volume_manager lazy_cinder_volume_manager_proxy = lazy_loading_proxy_for_interface(interface=ICinderVolumeManager, loader=lazy_cinder_loader) nova_client = get_nova_v2_client(session=get_keystone_session(**config), region=region) logging_nova_volume_manager = _LoggingNovaVolumeManager(_nova_volumes=nova_client.volumes) logging_nova_server_manager = _LoggingNovaServerManager(_nova_servers=nova_client.servers) return CinderBlockDeviceAPI(cinder_volume_manager=lazy_cinder_volume_manager_proxy, nova_volume_manager=logging_nova_volume_manager, nova_server_manager=logging_nova_server_manager, cluster_id=cluster_id)
[ "def", "cinder_from_configuration", "(", "region", ",", "cluster_id", ",", "**", "config", ")", ":", "def", "lazy_cinder_loader", "(", ")", ":", "session", "=", "get_keystone_session", "(", "**", "config", ")", "session", ".", "get_token", "(", ")", "cinder_client", "=", "get_cinder_client", "(", "session", "=", "session", ",", "region", "=", "region", ")", "wrapped_cinder_volume_manager", "=", "_LoggingCinderVolumeManager", "(", "cinder_client", ".", "volumes", ")", "cinder_client_version", "=", "get_api_version", "(", "cinder_client", ".", "version", ")", "adapted_cinder_volume_manager", "=", "CINDER_V1_ADAPTERS", "[", "cinder_client_version", ".", "ver_major", "]", "(", "wrapped_cinder_volume_manager", ")", "return", "adapted_cinder_volume_manager", "lazy_cinder_volume_manager_proxy", "=", "lazy_loading_proxy_for_interface", "(", "interface", "=", "ICinderVolumeManager", ",", "loader", "=", "lazy_cinder_loader", ")", "nova_client", "=", "get_nova_v2_client", "(", "session", "=", "get_keystone_session", "(", "**", "config", ")", ",", "region", "=", "region", ")", "logging_nova_volume_manager", "=", "_LoggingNovaVolumeManager", "(", "_nova_volumes", "=", "nova_client", ".", "volumes", ")", "logging_nova_server_manager", "=", "_LoggingNovaServerManager", "(", "_nova_servers", "=", "nova_client", ".", "servers", ")", "return", "CinderBlockDeviceAPI", "(", "cinder_volume_manager", "=", "lazy_cinder_volume_manager_proxy", ",", "nova_volume_manager", "=", "logging_nova_volume_manager", ",", "nova_server_manager", "=", "logging_nova_server_manager", ",", "cluster_id", "=", "cluster_id", ")" ]
build a cinderblockdeviceapi using configuration and credentials in config .
train
false
23,504
def Symmetric(n, name=None): var = SymmetricUpperTri(n, name) fill_mat = Constant(upper_tri_to_full(n)) return cvxtypes.reshape()((fill_mat * var), int(n), int(n))
[ "def", "Symmetric", "(", "n", ",", "name", "=", "None", ")", ":", "var", "=", "SymmetricUpperTri", "(", "n", ",", "name", ")", "fill_mat", "=", "Constant", "(", "upper_tri_to_full", "(", "n", ")", ")", "return", "cvxtypes", ".", "reshape", "(", ")", "(", "(", "fill_mat", "*", "var", ")", ",", "int", "(", "n", ")", ",", "int", "(", "n", ")", ")" ]
an expression representing a symmetric matrix .
train
false
23,506
def reg_context(name, ctx): if (name in _context_reg): raise ValueError(('context name %s is already defined' % (name,))) if (not isinstance(ctx, gpuarray.GpuContext)): raise TypeError('context is not GpuContext') _context_reg[name] = ctx
[ "def", "reg_context", "(", "name", ",", "ctx", ")", ":", "if", "(", "name", "in", "_context_reg", ")", ":", "raise", "ValueError", "(", "(", "'context name %s is already defined'", "%", "(", "name", ",", ")", ")", ")", "if", "(", "not", "isinstance", "(", "ctx", ",", "gpuarray", ".", "GpuContext", ")", ")", ":", "raise", "TypeError", "(", "'context is not GpuContext'", ")", "_context_reg", "[", "name", "]", "=", "ctx" ]
register a context by mapping it to a name .
train
false
23,509
def sniff_version(stream): doc = parse(stream) scheme_el = doc.getroot() version = scheme_el.attrib.get('version', None) if (scheme_el.find('widgets') is not None): version = '1.0' else: version = '2.0' return version
[ "def", "sniff_version", "(", "stream", ")", ":", "doc", "=", "parse", "(", "stream", ")", "scheme_el", "=", "doc", ".", "getroot", "(", ")", "version", "=", "scheme_el", ".", "attrib", ".", "get", "(", "'version'", ",", "None", ")", "if", "(", "scheme_el", ".", "find", "(", "'widgets'", ")", "is", "not", "None", ")", ":", "version", "=", "'1.0'", "else", ":", "version", "=", "'2.0'", "return", "version" ]
parse a scheme stream and return the schemes serialization version string .
train
false
23,510
def test_footprint_to_file(tmpdir): w = wcs.WCS({u'CTYPE1': u'RA---ZPN', u'CRUNIT1': u'deg', u'CRPIX1': (-334.95999), u'CRVAL1': 318.57907, u'CTYPE2': u'DEC--ZPN', u'CRUNIT2': u'deg', u'CRPIX2': 3045.3999, u'CRVAL2': 43.88538, u'PV2_1': 1.0, u'PV2_3': 220.0}) testfile = str(tmpdir.join(u'test.txt')) w.footprint_to_file(testfile) with open(testfile, u'r') as f: lines = f.readlines() assert (len(lines) == 4) assert (lines[2] == u'ICRS\n') assert (u'color=green' in lines[3]) w.footprint_to_file(testfile, coordsys=u'FK5', color=u'red') with open(testfile, u'r') as f: lines = f.readlines() assert (len(lines) == 4) assert (lines[2] == u'FK5\n') assert (u'color=red' in lines[3]) with pytest.raises(ValueError): w.footprint_to_file(testfile, coordsys=u'FOO')
[ "def", "test_footprint_to_file", "(", "tmpdir", ")", ":", "w", "=", "wcs", ".", "WCS", "(", "{", "u'CTYPE1'", ":", "u'RA---ZPN'", ",", "u'CRUNIT1'", ":", "u'deg'", ",", "u'CRPIX1'", ":", "(", "-", "334.95999", ")", ",", "u'CRVAL1'", ":", "318.57907", ",", "u'CTYPE2'", ":", "u'DEC--ZPN'", ",", "u'CRUNIT2'", ":", "u'deg'", ",", "u'CRPIX2'", ":", "3045.3999", ",", "u'CRVAL2'", ":", "43.88538", ",", "u'PV2_1'", ":", "1.0", ",", "u'PV2_3'", ":", "220.0", "}", ")", "testfile", "=", "str", "(", "tmpdir", ".", "join", "(", "u'test.txt'", ")", ")", "w", ".", "footprint_to_file", "(", "testfile", ")", "with", "open", "(", "testfile", ",", "u'r'", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "assert", "(", "len", "(", "lines", ")", "==", "4", ")", "assert", "(", "lines", "[", "2", "]", "==", "u'ICRS\\n'", ")", "assert", "(", "u'color=green'", "in", "lines", "[", "3", "]", ")", "w", ".", "footprint_to_file", "(", "testfile", ",", "coordsys", "=", "u'FK5'", ",", "color", "=", "u'red'", ")", "with", "open", "(", "testfile", ",", "u'r'", ")", "as", "f", ":", "lines", "=", "f", ".", "readlines", "(", ")", "assert", "(", "len", "(", "lines", ")", "==", "4", ")", "assert", "(", "lines", "[", "2", "]", "==", "u'FK5\\n'", ")", "assert", "(", "u'color=red'", "in", "lines", "[", "3", "]", ")", "with", "pytest", ".", "raises", "(", "ValueError", ")", ":", "w", ".", "footprint_to_file", "(", "testfile", ",", "coordsys", "=", "u'FOO'", ")" ]
from github issue #1912 .
train
false
23,511
@register.filter def richtext_filters(content): for filter_name in settings.RICHTEXT_FILTERS: filter_func = import_dotted_path(filter_name) content = filter_func(content) return content
[ "@", "register", ".", "filter", "def", "richtext_filters", "(", "content", ")", ":", "for", "filter_name", "in", "settings", ".", "RICHTEXT_FILTERS", ":", "filter_func", "=", "import_dotted_path", "(", "filter_name", ")", "content", "=", "filter_func", "(", "content", ")", "return", "content" ]
takes a value edited via the wysiwyg editor .
train
false
23,512
def cjwelborn(): print '\n\n...so much for "meaningful". -Cj\n\n' rawattrs = {a: v for (a, v) in globals().items()} funcs = {k: v for (k, v) in rawattrs.items() if callable(v)} offenders = [] for (funcname, func) in funcs.items(): try: docs = func.__doc__ except AttributeError: continue if (not docs): offenders.append(funcname) if offenders: print "\nThese people don't like to document their functions:" print ' {}'.format('\n '.join(sorted(offenders))) else: print '\nEveryone documented their functions!' return offenders
[ "def", "cjwelborn", "(", ")", ":", "print", "'\\n\\n...so much for \"meaningful\". -Cj\\n\\n'", "rawattrs", "=", "{", "a", ":", "v", "for", "(", "a", ",", "v", ")", "in", "globals", "(", ")", ".", "items", "(", ")", "}", "funcs", "=", "{", "k", ":", "v", "for", "(", "k", ",", "v", ")", "in", "rawattrs", ".", "items", "(", ")", "if", "callable", "(", "v", ")", "}", "offenders", "=", "[", "]", "for", "(", "funcname", ",", "func", ")", "in", "funcs", ".", "items", "(", ")", ":", "try", ":", "docs", "=", "func", ".", "__doc__", "except", "AttributeError", ":", "continue", "if", "(", "not", "docs", ")", ":", "offenders", ".", "append", "(", "funcname", ")", "if", "offenders", ":", "print", "\"\\nThese people don't like to document their functions:\"", "print", "' {}'", ".", "format", "(", "'\\n '", ".", "join", "(", "sorted", "(", "offenders", ")", ")", ")", "else", ":", "print", "'\\nEveryone documented their functions!'", "return", "offenders" ]
anyone believe in documentation? .
train
false
23,515
@dec.skipif((execution.profile is None)) def test_prun_quotes(): _ip.magic("prun -q x = '\\t'") nt.assert_equal(_ip.user_ns['x'], ' DCTB ')
[ "@", "dec", ".", "skipif", "(", "(", "execution", ".", "profile", "is", "None", ")", ")", "def", "test_prun_quotes", "(", ")", ":", "_ip", ".", "magic", "(", "\"prun -q x = '\\\\t'\"", ")", "nt", ".", "assert_equal", "(", "_ip", ".", "user_ns", "[", "'x'", "]", ",", "' DCTB '", ")" ]
test that prun does not clobber string escapes .
train
false
23,516
def echo_via_pager(text, color=None): color = resolve_color_default(color) if (not isinstance(text, string_types)): text = text_type(text) from ._termui_impl import pager return pager((text + '\n'), color)
[ "def", "echo_via_pager", "(", "text", ",", "color", "=", "None", ")", ":", "color", "=", "resolve_color_default", "(", "color", ")", "if", "(", "not", "isinstance", "(", "text", ",", "string_types", ")", ")", ":", "text", "=", "text_type", "(", "text", ")", "from", ".", "_termui_impl", "import", "pager", "return", "pager", "(", "(", "text", "+", "'\\n'", ")", ",", "color", ")" ]
this function takes a text and shows it via an environment specific pager on stdout .
train
true
23,518
def test_json_view(): response = json_view((lambda r: {'x': 1}))(mock.Mock()) assert isinstance(response, http.HttpResponse) eq_(response.content, '{"x": 1}') eq_(response['Content-Type'], 'application/json') eq_(response.status_code, 200)
[ "def", "test_json_view", "(", ")", ":", "response", "=", "json_view", "(", "(", "lambda", "r", ":", "{", "'x'", ":", "1", "}", ")", ")", "(", "mock", ".", "Mock", "(", ")", ")", "assert", "isinstance", "(", "response", ",", "http", ".", "HttpResponse", ")", "eq_", "(", "response", ".", "content", ",", "'{\"x\": 1}'", ")", "eq_", "(", "response", "[", "'Content-Type'", "]", ",", "'application/json'", ")", "eq_", "(", "response", ".", "status_code", ",", "200", ")" ]
turns a python object into a response .
train
false
23,520
def numpy_cupy_allclose(rtol=1e-07, atol=0, err_msg='', verbose=True, name='xp', type_check=True, accept_error=False): def check_func(cupy_result, numpy_result): array.assert_allclose(cupy_result, numpy_result, rtol, atol, err_msg, verbose) return _make_decorator(check_func, name, type_check, accept_error)
[ "def", "numpy_cupy_allclose", "(", "rtol", "=", "1e-07", ",", "atol", "=", "0", ",", "err_msg", "=", "''", ",", "verbose", "=", "True", ",", "name", "=", "'xp'", ",", "type_check", "=", "True", ",", "accept_error", "=", "False", ")", ":", "def", "check_func", "(", "cupy_result", ",", "numpy_result", ")", ":", "array", ".", "assert_allclose", "(", "cupy_result", ",", "numpy_result", ",", "rtol", ",", "atol", ",", "err_msg", ",", "verbose", ")", "return", "_make_decorator", "(", "check_func", ",", "name", ",", "type_check", ",", "accept_error", ")" ]
decorator that checks numpy results and cupy ones are close .
train
false
23,521
def require_completion(f): @functools.wraps(f) def wrapper(*args, **kwargs): if store.exists(store.image_mark_path(kwargs['image_id'])): return toolkit.api_error('Image is being uploaded, retry later') return f(*args, **kwargs) return wrapper
[ "def", "require_completion", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "**", "kwargs", ")", ":", "if", "store", ".", "exists", "(", "store", ".", "image_mark_path", "(", "kwargs", "[", "'image_id'", "]", ")", ")", ":", "return", "toolkit", ".", "api_error", "(", "'Image is being uploaded, retry later'", ")", "return", "f", "(", "*", "args", ",", "**", "kwargs", ")", "return", "wrapper" ]
this make sure that the image push correctly finished .
train
false
23,522
def _parse_empty_like_args(context, builder, sig, args): arytype = sig.args[0] if isinstance(arytype, types.Array): ary = make_array(arytype)(context, builder, value=args[0]) shapes = cgutils.unpack_tuple(builder, ary.shape, count=arytype.ndim) return (sig.return_type, shapes) else: return (sig.return_type, ())
[ "def", "_parse_empty_like_args", "(", "context", ",", "builder", ",", "sig", ",", "args", ")", ":", "arytype", "=", "sig", ".", "args", "[", "0", "]", "if", "isinstance", "(", "arytype", ",", "types", ".", "Array", ")", ":", "ary", "=", "make_array", "(", "arytype", ")", "(", "context", ",", "builder", ",", "value", "=", "args", "[", "0", "]", ")", "shapes", "=", "cgutils", ".", "unpack_tuple", "(", "builder", ",", "ary", ".", "shape", ",", "count", "=", "arytype", ".", "ndim", ")", "return", "(", "sig", ".", "return_type", ",", "shapes", ")", "else", ":", "return", "(", "sig", ".", "return_type", ",", "(", ")", ")" ]
parse the arguments of a np .
train
false
23,524
def get_avg_app_waiting_time(): cursor = connection.cursor() cursor.execute('\n SELECT AVG(DATEDIFF(reviewed, nomination)) FROM versions\n RIGHT JOIN addons ON versions.addon_id = addons.id\n WHERE status = %s AND reviewed >= DATE_SUB(NOW(), INTERVAL 30 DAY)\n ', (mkt.STATUS_PUBLIC,)) row = cursor.fetchone() days = 0 if row: try: days = math.ceil(float(row[0])) except TypeError: pass return days
[ "def", "get_avg_app_waiting_time", "(", ")", ":", "cursor", "=", "connection", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "'\\n SELECT AVG(DATEDIFF(reviewed, nomination)) FROM versions\\n RIGHT JOIN addons ON versions.addon_id = addons.id\\n WHERE status = %s AND reviewed >= DATE_SUB(NOW(), INTERVAL 30 DAY)\\n '", ",", "(", "mkt", ".", "STATUS_PUBLIC", ",", ")", ")", "row", "=", "cursor", ".", "fetchone", "(", ")", "days", "=", "0", "if", "row", ":", "try", ":", "days", "=", "math", ".", "ceil", "(", "float", "(", "row", "[", "0", "]", ")", ")", "except", "TypeError", ":", "pass", "return", "days" ]
returns the rolling average from the past 30 days of the time taken for a pending app to become public .
train
false
23,525
def sbt(registry, xml_parent, data): sbt = XML.SubElement(xml_parent, 'org.jvnet.hudson.plugins.SbtPluginBuilder') mappings = [('name', 'name', ''), ('jvm-flags', 'jvmFlags', ''), ('sbt-flags', 'sbtFlags', '-Dsbt.log.noformat=true'), ('actions', 'actions', ''), ('subdir-path', 'subdirPath', '')] convert_mapping_to_xml(sbt, data, mappings, fail_required=True)
[ "def", "sbt", "(", "registry", ",", "xml_parent", ",", "data", ")", ":", "sbt", "=", "XML", ".", "SubElement", "(", "xml_parent", ",", "'org.jvnet.hudson.plugins.SbtPluginBuilder'", ")", "mappings", "=", "[", "(", "'name'", ",", "'name'", ",", "''", ")", ",", "(", "'jvm-flags'", ",", "'jvmFlags'", ",", "''", ")", ",", "(", "'sbt-flags'", ",", "'sbtFlags'", ",", "'-Dsbt.log.noformat=true'", ")", ",", "(", "'actions'", ",", "'actions'", ",", "''", ")", ",", "(", "'subdir-path'", ",", "'subdirPath'", ",", "''", ")", "]", "convert_mapping_to_xml", "(", "sbt", ",", "data", ",", "mappings", ",", "fail_required", "=", "True", ")" ]
yaml: sbt execute a sbt build step .
train
false
23,527
@utils.arg('server', metavar='<server>', help=_('Name or ID of server.')) def do_unpause(cs, args): _find_server(cs, args.server).unpause()
[ "@", "utils", ".", "arg", "(", "'server'", ",", "metavar", "=", "'<server>'", ",", "help", "=", "_", "(", "'Name or ID of server.'", ")", ")", "def", "do_unpause", "(", "cs", ",", "args", ")", ":", "_find_server", "(", "cs", ",", "args", ".", "server", ")", ".", "unpause", "(", ")" ]
unpause a server .
train
false
23,529
def _convert_playlist_to_v2(): if os.path.isfile(g.PLFILE): return elif (not os.path.isfile(g.OLD_PLFILE)): return try: with open(g.OLD_PLFILE, 'rb') as plf: old_playlists = pickle.load(plf) except IOError: sys.exit("Couldn't open old playlist file") backup = (g.OLD_PLFILE + '_v1_backup') if os.path.isfile(backup): sys.exit('Error, backup exists but new playlist exists not!') os.rename(g.OLD_PLFILE, backup) for (plname, plitem) in old_playlists.items(): songs = [] for video in plitem.songs: v = Video(video['link'], video['title'], video['duration']) songs.append(v) g.userpl[plname] = Playlist(plname, songs) save()
[ "def", "_convert_playlist_to_v2", "(", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "g", ".", "PLFILE", ")", ":", "return", "elif", "(", "not", "os", ".", "path", ".", "isfile", "(", "g", ".", "OLD_PLFILE", ")", ")", ":", "return", "try", ":", "with", "open", "(", "g", ".", "OLD_PLFILE", ",", "'rb'", ")", "as", "plf", ":", "old_playlists", "=", "pickle", ".", "load", "(", "plf", ")", "except", "IOError", ":", "sys", ".", "exit", "(", "\"Couldn't open old playlist file\"", ")", "backup", "=", "(", "g", ".", "OLD_PLFILE", "+", "'_v1_backup'", ")", "if", "os", ".", "path", ".", "isfile", "(", "backup", ")", ":", "sys", ".", "exit", "(", "'Error, backup exists but new playlist exists not!'", ")", "os", ".", "rename", "(", "g", ".", "OLD_PLFILE", ",", "backup", ")", "for", "(", "plname", ",", "plitem", ")", "in", "old_playlists", ".", "items", "(", ")", ":", "songs", "=", "[", "]", "for", "video", "in", "plitem", ".", "songs", ":", "v", "=", "Video", "(", "video", "[", "'link'", "]", ",", "video", "[", "'title'", "]", ",", "video", "[", "'duration'", "]", ")", "songs", ".", "append", "(", "v", ")", "g", ".", "userpl", "[", "plname", "]", "=", "Playlist", "(", "plname", ",", "songs", ")", "save", "(", ")" ]
convert previous playlist file to v2 playlist .
train
false
23,530
def unsetenv(key): try: del os.environ[key] except: pass if hasattr(os, u'unsetenv'): os.unsetenv(key)
[ "def", "unsetenv", "(", "key", ")", ":", "try", ":", "del", "os", ".", "environ", "[", "key", "]", "except", ":", "pass", "if", "hasattr", "(", "os", ",", "u'unsetenv'", ")", ":", "os", ".", "unsetenv", "(", "key", ")" ]
compatibility wrapper for unsetting environment variables .
train
false
23,531
def test_alias_args_error(): _ip.alias_manager.define_alias('parts', 'echo first %s second %s') with capture_output() as cap: _ip.run_cell('parts 1') nt.assert_equal(cap.stderr.split(':')[0], 'UsageError')
[ "def", "test_alias_args_error", "(", ")", ":", "_ip", ".", "alias_manager", ".", "define_alias", "(", "'parts'", ",", "'echo first %s second %s'", ")", "with", "capture_output", "(", ")", "as", "cap", ":", "_ip", ".", "run_cell", "(", "'parts 1'", ")", "nt", ".", "assert_equal", "(", "cap", ".", "stderr", ".", "split", "(", "':'", ")", "[", "0", "]", ",", "'UsageError'", ")" ]
error expanding with wrong number of arguments .
train
false
23,532
def must_be(_type): def _must_be(func): @functools.wraps(func) def wrapped(self, *args, **kwargs): if (not (self.kind == _type)): raise ValueError(u'This instance is not a {}'.format(_type)) return func(self, *args, **kwargs) return wrapped return _must_be
[ "def", "must_be", "(", "_type", ")", ":", "def", "_must_be", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapped", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", ":", "if", "(", "not", "(", "self", ".", "kind", "==", "_type", ")", ")", ":", "raise", "ValueError", "(", "u'This instance is not a {}'", ".", "format", "(", "_type", ")", ")", "return", "func", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", "return", "wrapped", "return", "_must_be" ]
a small decorator factory for osfstoragefilenode .
train
false
23,533
def normalize_digits_only(number, keep_non_digits=False): number = unicod(number) number_length = len(number) normalized_digits = U_EMPTY_STRING for ii in range(number_length): d = unicode_digit(number[ii], (-1)) if (d != (-1)): normalized_digits += unicod(d) elif keep_non_digits: normalized_digits += number[ii] return normalized_digits
[ "def", "normalize_digits_only", "(", "number", ",", "keep_non_digits", "=", "False", ")", ":", "number", "=", "unicod", "(", "number", ")", "number_length", "=", "len", "(", "number", ")", "normalized_digits", "=", "U_EMPTY_STRING", "for", "ii", "in", "range", "(", "number_length", ")", ":", "d", "=", "unicode_digit", "(", "number", "[", "ii", "]", ",", "(", "-", "1", ")", ")", "if", "(", "d", "!=", "(", "-", "1", ")", ")", ":", "normalized_digits", "+=", "unicod", "(", "d", ")", "elif", "keep_non_digits", ":", "normalized_digits", "+=", "number", "[", "ii", "]", "return", "normalized_digits" ]
normalizes a string of characters representing a phone number .
train
true
23,534
def porttree_matches(name): matches = [] for category in _porttree().dbapi.categories: if _porttree().dbapi.cp_list(((category + '/') + name)): matches.append(((category + '/') + name)) return matches
[ "def", "porttree_matches", "(", "name", ")", ":", "matches", "=", "[", "]", "for", "category", "in", "_porttree", "(", ")", ".", "dbapi", ".", "categories", ":", "if", "_porttree", "(", ")", ".", "dbapi", ".", "cp_list", "(", "(", "(", "category", "+", "'/'", ")", "+", "name", ")", ")", ":", "matches", ".", "append", "(", "(", "(", "category", "+", "'/'", ")", "+", "name", ")", ")", "return", "matches" ]
returns a list containing the matches for a given package name from the portage tree .
train
true
23,535
def GetIndices(_app=None): req = api_base_pb.StringProto() req.set_value(datastore_types.ResolveAppId(_app)) resp = datastore_pb.CompositeIndices() resp = _Call('GetIndices', req, resp) return resp.index_list()
[ "def", "GetIndices", "(", "_app", "=", "None", ")", ":", "req", "=", "api_base_pb", ".", "StringProto", "(", ")", "req", ".", "set_value", "(", "datastore_types", ".", "ResolveAppId", "(", "_app", ")", ")", "resp", "=", "datastore_pb", ".", "CompositeIndices", "(", ")", "resp", "=", "_Call", "(", "'GetIndices'", ",", "req", ",", "resp", ")", "return", "resp", ".", "index_list", "(", ")" ]
fetches all composite indices in the datastore for this app .
train
false
23,537
def get_test_doclist(doctype, name=None): import os, frappe from frappe import conf from frappe.modules.utils import peval_doclist from frappe.modules import scrub doctype = scrub(doctype) doctype_path = os.path.join(os.path.dirname(os.path.abspath(conf.__file__)), conf.test_data_path, doctype) if name: with open(os.path.join(doctype_path, (scrub(name) + u'.json')), u'r') as txtfile: doclist = peval_doclist(txtfile.read()) return doclist else: all_doclists = [] for fname in filter((lambda n: n.endswith(u'.json')), os.listdir(doctype_path)): with open(os.path.join(doctype_path, scrub(fname)), u'r') as txtfile: all_doclists.append(peval_doclist(txtfile.read())) return all_doclists
[ "def", "get_test_doclist", "(", "doctype", ",", "name", "=", "None", ")", ":", "import", "os", ",", "frappe", "from", "frappe", "import", "conf", "from", "frappe", ".", "modules", ".", "utils", "import", "peval_doclist", "from", "frappe", ".", "modules", "import", "scrub", "doctype", "=", "scrub", "(", "doctype", ")", "doctype_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "conf", ".", "__file__", ")", ")", ",", "conf", ".", "test_data_path", ",", "doctype", ")", "if", "name", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "doctype_path", ",", "(", "scrub", "(", "name", ")", "+", "u'.json'", ")", ")", ",", "u'r'", ")", "as", "txtfile", ":", "doclist", "=", "peval_doclist", "(", "txtfile", ".", "read", "(", ")", ")", "return", "doclist", "else", ":", "all_doclists", "=", "[", "]", "for", "fname", "in", "filter", "(", "(", "lambda", "n", ":", "n", ".", "endswith", "(", "u'.json'", ")", ")", ",", "os", ".", "listdir", "(", "doctype_path", ")", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "doctype_path", ",", "scrub", "(", "fname", ")", ")", ",", "u'r'", ")", "as", "txtfile", ":", "all_doclists", ".", "append", "(", "peval_doclist", "(", "txtfile", ".", "read", "(", ")", ")", ")", "return", "all_doclists" ]
get test doclist .
train
false
23,540
def check_mime_content_type(content_type): ctrls = u''.join((chr(x) for x in range(0, 32))) token_regex = u'[^()<>@,;:\\"/[\\]?= {}\x7f]+'.format(ctrls) return (re.match(u'(?P<type>{})/(?P<subtype>{})$'.format(token_regex, token_regex), content_type) is not None)
[ "def", "check_mime_content_type", "(", "content_type", ")", ":", "ctrls", "=", "u''", ".", "join", "(", "(", "chr", "(", "x", ")", "for", "x", "in", "range", "(", "0", ",", "32", ")", ")", ")", "token_regex", "=", "u'[^()<>@,;:\\\\\"/[\\\\]?= {}\\x7f]+'", ".", "format", "(", "ctrls", ")", "return", "(", "re", ".", "match", "(", "u'(?P<type>{})/(?P<subtype>{})$'", ".", "format", "(", "token_regex", ",", "token_regex", ")", ",", "content_type", ")", "is", "not", "None", ")" ]
raises a ~astropy .
train
false
23,541
def md5_hash_file(path): hasher = hashlib.md5() with open(path, 'rb') as afile: buf = afile.read() hasher.update(buf) return hasher.hexdigest()
[ "def", "md5_hash_file", "(", "path", ")", ":", "hasher", "=", "hashlib", ".", "md5", "(", ")", "with", "open", "(", "path", ",", "'rb'", ")", "as", "afile", ":", "buf", "=", "afile", ".", "read", "(", ")", "hasher", ".", "update", "(", "buf", ")", "return", "hasher", ".", "hexdigest", "(", ")" ]
return a md5 hashdigest for a file .
train
false
23,542
def render_body(raw_body): rendered = markdown.markdown(raw_body) rendered = _sanitize_html(rendered) rendered = _remove_unpaired_tags(rendered) return rendered
[ "def", "render_body", "(", "raw_body", ")", ":", "rendered", "=", "markdown", ".", "markdown", "(", "raw_body", ")", "rendered", "=", "_sanitize_html", "(", "rendered", ")", "rendered", "=", "_remove_unpaired_tags", "(", "rendered", ")", "return", "rendered" ]
render raw_body to html .
train
false
23,543
@celery_app.task(base=ArchiverTask, ignore_result=False) @logged('stat_addon') def stat_addon(addon_short_name, job_pk): addon_name = addon_short_name version = None if ('dataverse' in addon_short_name): addon_name = 'dataverse' version = ('latest' if (addon_short_name.split('-')[(-1)] == 'draft') else 'latest-published') create_app_context() job = ArchiveJob.load(job_pk) (src, dst, user) = job.info() src_addon = src.get_addon(addon_name) try: file_tree = src_addon._get_file_tree(user=user, version=version) except HTTPError as e: dst.archive_job.update_target(addon_short_name, ARCHIVER_NETWORK_ERROR, errors=[e.data['error']]) raise result = AggregateStatResult(src_addon._id, addon_short_name, targets=[utils.aggregate_file_tree_metadata(addon_short_name, file_tree, user)]) return result
[ "@", "celery_app", ".", "task", "(", "base", "=", "ArchiverTask", ",", "ignore_result", "=", "False", ")", "@", "logged", "(", "'stat_addon'", ")", "def", "stat_addon", "(", "addon_short_name", ",", "job_pk", ")", ":", "addon_name", "=", "addon_short_name", "version", "=", "None", "if", "(", "'dataverse'", "in", "addon_short_name", ")", ":", "addon_name", "=", "'dataverse'", "version", "=", "(", "'latest'", "if", "(", "addon_short_name", ".", "split", "(", "'-'", ")", "[", "(", "-", "1", ")", "]", "==", "'draft'", ")", "else", "'latest-published'", ")", "create_app_context", "(", ")", "job", "=", "ArchiveJob", ".", "load", "(", "job_pk", ")", "(", "src", ",", "dst", ",", "user", ")", "=", "job", ".", "info", "(", ")", "src_addon", "=", "src", ".", "get_addon", "(", "addon_name", ")", "try", ":", "file_tree", "=", "src_addon", ".", "_get_file_tree", "(", "user", "=", "user", ",", "version", "=", "version", ")", "except", "HTTPError", "as", "e", ":", "dst", ".", "archive_job", ".", "update_target", "(", "addon_short_name", ",", "ARCHIVER_NETWORK_ERROR", ",", "errors", "=", "[", "e", ".", "data", "[", "'error'", "]", "]", ")", "raise", "result", "=", "AggregateStatResult", "(", "src_addon", ".", "_id", ",", "addon_short_name", ",", "targets", "=", "[", "utils", ".", "aggregate_file_tree_metadata", "(", "addon_short_name", ",", "file_tree", ",", "user", ")", "]", ")", "return", "result" ]
collect metadata about the file tree of a given addon .
train
false
23,544
def _dict_compat(d): return dict(((_maybe_box_datetimelike(key), value) for (key, value) in iteritems(d)))
[ "def", "_dict_compat", "(", "d", ")", ":", "return", "dict", "(", "(", "(", "_maybe_box_datetimelike", "(", "key", ")", ",", "value", ")", "for", "(", "key", ",", "value", ")", "in", "iteritems", "(", "d", ")", ")", ")" ]
helper function to convert datetimelike-keyed dicts to timestamp-keyed dict parameters d: dict like object returns dict .
train
false
23,545
def _billed_ops_to_str(billed_ops): ops_as_strs = [] for op in billed_ops: op_name = datamodel_pb.BilledOpProto.BilledOp_Name(op.op()) ops_as_strs.append(('%s:%s' % (op_name, op.num_ops()))) return ', '.join(ops_as_strs)
[ "def", "_billed_ops_to_str", "(", "billed_ops", ")", ":", "ops_as_strs", "=", "[", "]", "for", "op", "in", "billed_ops", ":", "op_name", "=", "datamodel_pb", ".", "BilledOpProto", ".", "BilledOp_Name", "(", "op", ".", "op", "(", ")", ")", "ops_as_strs", ".", "append", "(", "(", "'%s:%s'", "%", "(", "op_name", ",", "op", ".", "num_ops", "(", ")", ")", ")", ")", "return", "', '", ".", "join", "(", "ops_as_strs", ")" ]
builds a string representation of a list of billedopproto .
train
false
23,546
def test_gaussian_vis_layer_make_state_conv(): n = None rows = 3 cols = 3 channels = 3 num_samples = 1000 tol = 0.042 beta = (1 / tol) axes = ['b', 0, 1, 'c'] random.shuffle(axes) axes = tuple(axes) print('axes:', axes) layer = GaussianVisLayer(rows=rows, cols=cols, channels=channels, init_beta=beta, axes=axes) rng = np.random.RandomState() mean = rng.uniform(1e-06, (1.0 - 1e-06), (rows, cols, channels)) z = mean layer.set_biases(z.astype(config.floatX)) init_state = layer.make_state(num_examples=num_samples, numpy_rng=rng) value = init_state.get_value() check_gaussian_samples(value, num_samples, n, rows, cols, channels, mean, tol)
[ "def", "test_gaussian_vis_layer_make_state_conv", "(", ")", ":", "n", "=", "None", "rows", "=", "3", "cols", "=", "3", "channels", "=", "3", "num_samples", "=", "1000", "tol", "=", "0.042", "beta", "=", "(", "1", "/", "tol", ")", "axes", "=", "[", "'b'", ",", "0", ",", "1", ",", "'c'", "]", "random", ".", "shuffle", "(", "axes", ")", "axes", "=", "tuple", "(", "axes", ")", "print", "(", "'axes:'", ",", "axes", ")", "layer", "=", "GaussianVisLayer", "(", "rows", "=", "rows", ",", "cols", "=", "cols", ",", "channels", "=", "channels", ",", "init_beta", "=", "beta", ",", "axes", "=", "axes", ")", "rng", "=", "np", ".", "random", ".", "RandomState", "(", ")", "mean", "=", "rng", ".", "uniform", "(", "1e-06", ",", "(", "1.0", "-", "1e-06", ")", ",", "(", "rows", ",", "cols", ",", "channels", ")", ")", "z", "=", "mean", "layer", ".", "set_biases", "(", "z", ".", "astype", "(", "config", ".", "floatX", ")", ")", "init_state", "=", "layer", ".", "make_state", "(", "num_examples", "=", "num_samples", ",", "numpy_rng", "=", "rng", ")", "value", "=", "init_state", ".", "get_value", "(", ")", "check_gaussian_samples", "(", "value", ",", "num_samples", ",", "n", ",", "rows", ",", "cols", ",", "channels", ",", "mean", ",", "tol", ")" ]
verifies that gaussianvislayer .
train
false
23,548
def logger(_modem, message_, type_): pass
[ "def", "logger", "(", "_modem", ",", "message_", ",", "type_", ")", ":", "pass" ]
configures and returns a logging instance .
train
false
23,549
def simple_split(s, keep=False, maxsplit=None): whitespace = '\n DCTB ' if (maxsplit == 0): if keep: return [s] else: return [s.strip(whitespace)] elif (maxsplit is None): maxsplit = 0 if keep: pattern = (('([' + whitespace) + '])') parts = re.split(pattern, s, maxsplit) return _combine_ws(parts, whitespace) else: pattern = (('[' + whitespace) + ']') parts = re.split(pattern, s, maxsplit) parts[(-1)] = parts[(-1)].rstrip() return [p for p in parts if p]
[ "def", "simple_split", "(", "s", ",", "keep", "=", "False", ",", "maxsplit", "=", "None", ")", ":", "whitespace", "=", "'\\n DCTB '", "if", "(", "maxsplit", "==", "0", ")", ":", "if", "keep", ":", "return", "[", "s", "]", "else", ":", "return", "[", "s", ".", "strip", "(", "whitespace", ")", "]", "elif", "(", "maxsplit", "is", "None", ")", ":", "maxsplit", "=", "0", "if", "keep", ":", "pattern", "=", "(", "(", "'(['", "+", "whitespace", ")", "+", "'])'", ")", "parts", "=", "re", ".", "split", "(", "pattern", ",", "s", ",", "maxsplit", ")", "return", "_combine_ws", "(", "parts", ",", "whitespace", ")", "else", ":", "pattern", "=", "(", "(", "'['", "+", "whitespace", ")", "+", "']'", ")", "parts", "=", "re", ".", "split", "(", "pattern", ",", "s", ",", "maxsplit", ")", "parts", "[", "(", "-", "1", ")", "]", "=", "parts", "[", "(", "-", "1", ")", "]", ".", "rstrip", "(", ")", "return", "[", "p", "for", "p", "in", "parts", "if", "p", "]" ]
split a string on whitespace .
train
false
23,550
def merge_attrs(list_of_attrs): result = DEFAULT_ATTRS for attr in list_of_attrs: result = Attrs(color=(attr.color or result.color), bgcolor=(attr.bgcolor or result.bgcolor), bold=(attr.bold or result.bold), underline=(attr.underline or result.underline), italic=(attr.italic or result.italic), blink=(attr.blink or result.blink), reverse=(attr.reverse or result.reverse)) return result
[ "def", "merge_attrs", "(", "list_of_attrs", ")", ":", "result", "=", "DEFAULT_ATTRS", "for", "attr", "in", "list_of_attrs", ":", "result", "=", "Attrs", "(", "color", "=", "(", "attr", ".", "color", "or", "result", ".", "color", ")", ",", "bgcolor", "=", "(", "attr", ".", "bgcolor", "or", "result", ".", "bgcolor", ")", ",", "bold", "=", "(", "attr", ".", "bold", "or", "result", ".", "bold", ")", ",", "underline", "=", "(", "attr", ".", "underline", "or", "result", ".", "underline", ")", ",", "italic", "=", "(", "attr", ".", "italic", "or", "result", ".", "italic", ")", ",", "blink", "=", "(", "attr", ".", "blink", "or", "result", ".", "blink", ")", ",", "reverse", "=", "(", "attr", ".", "reverse", "or", "result", ".", "reverse", ")", ")", "return", "result" ]
take a list of :class: .
train
true
23,551
def _roads_extract(resp): try: j = resp.json() except: if (resp.status_code != 200): raise googlemaps.exceptions.HTTPError(resp.status_code) raise googlemaps.exceptions.ApiError('UNKNOWN_ERROR', 'Received a malformed response.') if ('error' in j): error = j['error'] status = error['status'] if (status == 'RESOURCE_EXHAUSTED'): raise googlemaps.exceptions._RetriableRequest() if ('message' in error): raise googlemaps.exceptions.ApiError(status, error['message']) else: raise googlemaps.exceptions.ApiError(status) if (resp.status_code != 200): raise googlemaps.exceptions.HTTPError(resp.status_code) return j
[ "def", "_roads_extract", "(", "resp", ")", ":", "try", ":", "j", "=", "resp", ".", "json", "(", ")", "except", ":", "if", "(", "resp", ".", "status_code", "!=", "200", ")", ":", "raise", "googlemaps", ".", "exceptions", ".", "HTTPError", "(", "resp", ".", "status_code", ")", "raise", "googlemaps", ".", "exceptions", ".", "ApiError", "(", "'UNKNOWN_ERROR'", ",", "'Received a malformed response.'", ")", "if", "(", "'error'", "in", "j", ")", ":", "error", "=", "j", "[", "'error'", "]", "status", "=", "error", "[", "'status'", "]", "if", "(", "status", "==", "'RESOURCE_EXHAUSTED'", ")", ":", "raise", "googlemaps", ".", "exceptions", ".", "_RetriableRequest", "(", ")", "if", "(", "'message'", "in", "error", ")", ":", "raise", "googlemaps", ".", "exceptions", ".", "ApiError", "(", "status", ",", "error", "[", "'message'", "]", ")", "else", ":", "raise", "googlemaps", ".", "exceptions", ".", "ApiError", "(", "status", ")", "if", "(", "resp", ".", "status_code", "!=", "200", ")", ":", "raise", "googlemaps", ".", "exceptions", ".", "HTTPError", "(", "resp", ".", "status_code", ")", "return", "j" ]
extracts a result from a roads api http response .
train
false
23,552
def load_build_configuration_from_source(build_configuration, backends=None): backend_packages = OrderedSet(([u'pants.build_graph', u'pants.core_tasks'] + (backends or []))) for backend_package in backend_packages: load_backend(build_configuration, backend_package)
[ "def", "load_build_configuration_from_source", "(", "build_configuration", ",", "backends", "=", "None", ")", ":", "backend_packages", "=", "OrderedSet", "(", "(", "[", "u'pants.build_graph'", ",", "u'pants.core_tasks'", "]", "+", "(", "backends", "or", "[", "]", ")", ")", ")", "for", "backend_package", "in", "backend_packages", ":", "load_backend", "(", "build_configuration", ",", "backend_package", ")" ]
installs pants backend packages to provide build file symbols and cli goals .
train
false
23,553
def parse_yaml(path='/config/tool_destinations.yml', test=False, return_bool=False): try: if test: config = load(path) else: if (path == '/config/tool_destinations.yml'): config_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../..') opt_file = (config_directory + path) else: opt_file = path with open(opt_file, 'r') as stream: config = load(stream) try: if return_bool: valid_config = validate_config(config, return_bool) else: config = validate_config(config) except MalformedYMLException: if verbose: log.error(str(sys.exc_value)) raise except ScannerError: if verbose: log.error('Config is too malformed to fix!') raise if return_bool: return valid_config else: return config
[ "def", "parse_yaml", "(", "path", "=", "'/config/tool_destinations.yml'", ",", "test", "=", "False", ",", "return_bool", "=", "False", ")", ":", "try", ":", "if", "test", ":", "config", "=", "load", "(", "path", ")", "else", ":", "if", "(", "path", "==", "'/config/tool_destinations.yml'", ")", ":", "config_directory", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", ",", "'../../../..'", ")", "opt_file", "=", "(", "config_directory", "+", "path", ")", "else", ":", "opt_file", "=", "path", "with", "open", "(", "opt_file", ",", "'r'", ")", "as", "stream", ":", "config", "=", "load", "(", "stream", ")", "try", ":", "if", "return_bool", ":", "valid_config", "=", "validate_config", "(", "config", ",", "return_bool", ")", "else", ":", "config", "=", "validate_config", "(", "config", ")", "except", "MalformedYMLException", ":", "if", "verbose", ":", "log", ".", "error", "(", "str", "(", "sys", ".", "exc_value", ")", ")", "raise", "except", "ScannerError", ":", "if", "verbose", ":", "log", ".", "error", "(", "'Config is too malformed to fix!'", ")", "raise", "if", "return_bool", ":", "return", "valid_config", "else", ":", "return", "config" ]
get a yaml file from path and send it to validate_config for validation .
train
false
23,554
def with_patch_inspect(f): def wrapped(*args, **kwargs): save_findsource = inspect.findsource save_getargs = inspect.getargs inspect.findsource = findsource inspect.getargs = getargs try: return f(*args, **kwargs) finally: inspect.findsource = save_findsource inspect.getargs = save_getargs return wrapped
[ "def", "with_patch_inspect", "(", "f", ")", ":", "def", "wrapped", "(", "*", "args", ",", "**", "kwargs", ")", ":", "save_findsource", "=", "inspect", ".", "findsource", "save_getargs", "=", "inspect", ".", "getargs", "inspect", ".", "findsource", "=", "findsource", "inspect", ".", "getargs", "=", "getargs", "try", ":", "return", "f", "(", "*", "args", ",", "**", "kwargs", ")", "finally", ":", "inspect", ".", "findsource", "=", "save_findsource", "inspect", ".", "getargs", "=", "save_getargs", "return", "wrapped" ]
deprecated since ipython 6 .
train
false
23,555
def _notAfterBefore(cert_path, method): with open(cert_path) as f: x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, f.read()) timestamp = method(x509) reformatted_timestamp = [timestamp[0:4], '-', timestamp[4:6], '-', timestamp[6:8], 'T', timestamp[8:10], ':', timestamp[10:12], ':', timestamp[12:]] timestamp_str = ''.join(reformatted_timestamp) if six.PY3: timestamp_str = timestamp_str.decode('ascii') return pyrfc3339.parse(timestamp_str)
[ "def", "_notAfterBefore", "(", "cert_path", ",", "method", ")", ":", "with", "open", "(", "cert_path", ")", "as", "f", ":", "x509", "=", "OpenSSL", ".", "crypto", ".", "load_certificate", "(", "OpenSSL", ".", "crypto", ".", "FILETYPE_PEM", ",", "f", ".", "read", "(", ")", ")", "timestamp", "=", "method", "(", "x509", ")", "reformatted_timestamp", "=", "[", "timestamp", "[", "0", ":", "4", "]", ",", "'-'", ",", "timestamp", "[", "4", ":", "6", "]", ",", "'-'", ",", "timestamp", "[", "6", ":", "8", "]", ",", "'T'", ",", "timestamp", "[", "8", ":", "10", "]", ",", "':'", ",", "timestamp", "[", "10", ":", "12", "]", ",", "':'", ",", "timestamp", "[", "12", ":", "]", "]", "timestamp_str", "=", "''", ".", "join", "(", "reformatted_timestamp", ")", "if", "six", ".", "PY3", ":", "timestamp_str", "=", "timestamp_str", ".", "decode", "(", "'ascii'", ")", "return", "pyrfc3339", ".", "parse", "(", "timestamp_str", ")" ]
internal helper function for finding notbefore/notafter .
train
false
23,556
def GetProperties(obj, propList): bRetList = 1 if (type(propList) not in [TupleType, ListType]): bRetList = 0 propList = (propList,) realPropList = [] rc = [] for prop in propList: if (type(prop) != IntType): props = ((mapi.PS_PUBLIC_STRINGS, prop),) propIds = obj.GetIDsFromNames(props, 0) prop = mapitags.PROP_TAG(mapitags.PT_UNSPECIFIED, mapitags.PROP_ID(propIds[0])) realPropList.append(prop) (hr, data) = obj.GetProps(realPropList, 0) if (hr != 0): data = None return None if bRetList: return [v[1] for v in data] else: return data[0][1]
[ "def", "GetProperties", "(", "obj", ",", "propList", ")", ":", "bRetList", "=", "1", "if", "(", "type", "(", "propList", ")", "not", "in", "[", "TupleType", ",", "ListType", "]", ")", ":", "bRetList", "=", "0", "propList", "=", "(", "propList", ",", ")", "realPropList", "=", "[", "]", "rc", "=", "[", "]", "for", "prop", "in", "propList", ":", "if", "(", "type", "(", "prop", ")", "!=", "IntType", ")", ":", "props", "=", "(", "(", "mapi", ".", "PS_PUBLIC_STRINGS", ",", "prop", ")", ",", ")", "propIds", "=", "obj", ".", "GetIDsFromNames", "(", "props", ",", "0", ")", "prop", "=", "mapitags", ".", "PROP_TAG", "(", "mapitags", ".", "PT_UNSPECIFIED", ",", "mapitags", ".", "PROP_ID", "(", "propIds", "[", "0", "]", ")", ")", "realPropList", ".", "append", "(", "prop", ")", "(", "hr", ",", "data", ")", "=", "obj", ".", "GetProps", "(", "realPropList", ",", "0", ")", "if", "(", "hr", "!=", "0", ")", ":", "data", "=", "None", "return", "None", "if", "bRetList", ":", "return", "[", "v", "[", "1", "]", "for", "v", "in", "data", "]", "else", ":", "return", "data", "[", "0", "]", "[", "1", "]" ]
given a mapi object and a list of properties .
train
false
23,557
def bin_qual_scores(qual_scores): qual_bins = [] qual_lens = [] for l in qual_scores.values(): qual_lens.append(len(l)) max_seq_size = max(qual_lens) for base_position in range(max_seq_size): qual_bins.append([]) for scores in qual_scores.values(): try: qual_bins[base_position].append(scores[base_position]) except IndexError: continue return qual_bins
[ "def", "bin_qual_scores", "(", "qual_scores", ")", ":", "qual_bins", "=", "[", "]", "qual_lens", "=", "[", "]", "for", "l", "in", "qual_scores", ".", "values", "(", ")", ":", "qual_lens", ".", "append", "(", "len", "(", "l", ")", ")", "max_seq_size", "=", "max", "(", "qual_lens", ")", "for", "base_position", "in", "range", "(", "max_seq_size", ")", ":", "qual_bins", ".", "append", "(", "[", "]", ")", "for", "scores", "in", "qual_scores", ".", "values", "(", ")", ":", "try", ":", "qual_bins", "[", "base_position", "]", ".", "append", "(", "scores", "[", "base_position", "]", ")", "except", "IndexError", ":", "continue", "return", "qual_bins" ]
bins qual score according to nucleotide position qual_scores: dict of label: numpy array of base scores .
train
false
23,558
def kinda_order_mod(x, m): return order_mod(x, largest_factor_relatively_prime(m, x))
[ "def", "kinda_order_mod", "(", "x", ",", "m", ")", ":", "return", "order_mod", "(", "x", ",", "largest_factor_relatively_prime", "(", "m", ",", "x", ")", ")" ]
return the order of x in the multiplicative group mod m .
train
false
23,559
def _get_namespace(m): if isinstance(m, str): _import(m) return MODULES[m][0] elif isinstance(m, dict): return m elif hasattr(m, '__dict__'): return m.__dict__ else: raise TypeError(('Argument must be either a string, dict or module but it is: %s' % m))
[ "def", "_get_namespace", "(", "m", ")", ":", "if", "isinstance", "(", "m", ",", "str", ")", ":", "_import", "(", "m", ")", "return", "MODULES", "[", "m", "]", "[", "0", "]", "elif", "isinstance", "(", "m", ",", "dict", ")", ":", "return", "m", "elif", "hasattr", "(", "m", ",", "'__dict__'", ")", ":", "return", "m", ".", "__dict__", "else", ":", "raise", "TypeError", "(", "(", "'Argument must be either a string, dict or module but it is: %s'", "%", "m", ")", ")" ]
this is used by _lambdify to parse its arguments .
train
false
23,561
def user_requested_access(user): user = CourseCreator.objects.get(user=user) if (user.state != CourseCreator.GRANTED): user.state = CourseCreator.PENDING user.save()
[ "def", "user_requested_access", "(", "user", ")", ":", "user", "=", "CourseCreator", ".", "objects", ".", "get", "(", "user", "=", "user", ")", "if", "(", "user", ".", "state", "!=", "CourseCreator", ".", "GRANTED", ")", ":", "user", ".", "state", "=", "CourseCreator", ".", "PENDING", "user", ".", "save", "(", ")" ]
user has requested course creator access .
train
false
23,562
def do_multi_request(*args, **kwargs): results = [] clusters = kwargs['clusters'] for x in clusters: LOG.debug(_('Issuing request to cluster: %s'), x.name) rv = x.api_client.request(*args) results.append(rv) return results
[ "def", "do_multi_request", "(", "*", "args", ",", "**", "kwargs", ")", ":", "results", "=", "[", "]", "clusters", "=", "kwargs", "[", "'clusters'", "]", "for", "x", "in", "clusters", ":", "LOG", ".", "debug", "(", "_", "(", "'Issuing request to cluster: %s'", ")", ",", "x", ".", "name", ")", "rv", "=", "x", ".", "api_client", ".", "request", "(", "*", "args", ")", "results", ".", "append", "(", "rv", ")", "return", "results" ]
issue a request to all clusters .
train
false
23,563
def validate_readable(option, value): if (value is None): return value value = validate_string(option, value) open(value, 'r').close() return value
[ "def", "validate_readable", "(", "option", ",", "value", ")", ":", "if", "(", "value", "is", "None", ")", ":", "return", "value", "value", "=", "validate_string", "(", "option", ",", "value", ")", "open", "(", "value", ",", "'r'", ")", ".", "close", "(", ")", "return", "value" ]
validates that value is file-like and readable .
train
true
23,564
def _get_user_country_from_profile(user): cache_key = u'user.{user_id}.profile.country'.format(user_id=user.id) profile_country = cache.get(cache_key) if (profile_country is None): profile = getattr(user, 'profile', None) if ((profile is not None) and (profile.country.code is not None)): profile_country = profile.country.code.upper() else: profile_country = '' cache.set(cache_key, profile_country) return profile_country
[ "def", "_get_user_country_from_profile", "(", "user", ")", ":", "cache_key", "=", "u'user.{user_id}.profile.country'", ".", "format", "(", "user_id", "=", "user", ".", "id", ")", "profile_country", "=", "cache", ".", "get", "(", "cache_key", ")", "if", "(", "profile_country", "is", "None", ")", ":", "profile", "=", "getattr", "(", "user", ",", "'profile'", ",", "None", ")", "if", "(", "(", "profile", "is", "not", "None", ")", "and", "(", "profile", ".", "country", ".", "code", "is", "not", "None", ")", ")", ":", "profile_country", "=", "profile", ".", "country", ".", "code", ".", "upper", "(", ")", "else", ":", "profile_country", "=", "''", "cache", ".", "set", "(", "cache_key", ",", "profile_country", ")", "return", "profile_country" ]
check whether the user is embargoed based on the country code in the users profile .
train
false
23,565
@hug.get() def hello(request): return 'Hello World!'
[ "@", "hug", ".", "get", "(", ")", "def", "hello", "(", "request", ")", ":", "return", "'Hello World!'" ]
request mingle sync-data .
train
false
23,566
def decodestring(s): return binascii.a2b_base64(s)
[ "def", "decodestring", "(", "s", ")", ":", "return", "binascii", ".", "a2b_base64", "(", "s", ")" ]
decode a string .
train
false
23,567
def _listOfOnTimesInVec(vector): durations = [] numOnTimes = 0 totalOnTime = 0 nonzeros = numpy.array(vector).nonzero()[0] if (len(nonzeros) == 0): return (0, 0, []) if (len(nonzeros) == 1): return (1, 1, [1]) prev = nonzeros[0] onTime = 1 endIdx = nonzeros[(-1)] for idx in nonzeros[1:]: if (idx != (prev + 1)): totalOnTime += onTime numOnTimes += 1 durations.append(onTime) onTime = 1 else: onTime += 1 prev = idx totalOnTime += onTime numOnTimes += 1 durations.append(onTime) return (totalOnTime, numOnTimes, durations)
[ "def", "_listOfOnTimesInVec", "(", "vector", ")", ":", "durations", "=", "[", "]", "numOnTimes", "=", "0", "totalOnTime", "=", "0", "nonzeros", "=", "numpy", ".", "array", "(", "vector", ")", ".", "nonzero", "(", ")", "[", "0", "]", "if", "(", "len", "(", "nonzeros", ")", "==", "0", ")", ":", "return", "(", "0", ",", "0", ",", "[", "]", ")", "if", "(", "len", "(", "nonzeros", ")", "==", "1", ")", ":", "return", "(", "1", ",", "1", ",", "[", "1", "]", ")", "prev", "=", "nonzeros", "[", "0", "]", "onTime", "=", "1", "endIdx", "=", "nonzeros", "[", "(", "-", "1", ")", "]", "for", "idx", "in", "nonzeros", "[", "1", ":", "]", ":", "if", "(", "idx", "!=", "(", "prev", "+", "1", ")", ")", ":", "totalOnTime", "+=", "onTime", "numOnTimes", "+=", "1", "durations", ".", "append", "(", "onTime", ")", "onTime", "=", "1", "else", ":", "onTime", "+=", "1", "prev", "=", "idx", "totalOnTime", "+=", "onTime", "numOnTimes", "+=", "1", "durations", ".", "append", "(", "onTime", ")", "return", "(", "totalOnTime", ",", "numOnTimes", ",", "durations", ")" ]
returns 3 things for a vector: * the total on time * the number of runs * a list of the durations of each run .
train
true
23,568
def string_result(result, func, arguments): if result: s = bytes_to_str(ctypes.string_at(result)) libvlc_free(result) return s return None
[ "def", "string_result", "(", "result", ",", "func", ",", "arguments", ")", ":", "if", "result", ":", "s", "=", "bytes_to_str", "(", "ctypes", ".", "string_at", "(", "result", ")", ")", "libvlc_free", "(", "result", ")", "return", "s", "return", "None" ]
errcheck function .
train
true
23,569
def tpstats(): return _nodetool('tpstats')
[ "def", "tpstats", "(", ")", ":", "return", "_nodetool", "(", "'tpstats'", ")" ]
return tpstats info cli example: .
train
false
23,570
def _fwd_bem_homog_solution(solids, nps): return _fwd_bem_multi_solution(solids, None, nps)
[ "def", "_fwd_bem_homog_solution", "(", "solids", ",", "nps", ")", ":", "return", "_fwd_bem_multi_solution", "(", "solids", ",", "None", ",", "nps", ")" ]
helper to make a homogeneous solution .
train
false
23,571
def decide_if_cors(sender, request, **kwargs): host = urlparse(request.META['HTTP_ORIGIN']).netloc.split(':')[0] valid_url = False for url in WHITELIST_URLS: if request.path_info.startswith(url): valid_url = True if valid_url: project_slug = request.GET.get('project', None) try: project = Project.objects.get(slug=project_slug) except Project.DoesNotExist: log.warning('Invalid project passed to domain. [{project}:{domain}'.format(project=project_slug, domain=host)) return False domain = Domain.objects.filter(domain__icontains=host, project=project) if domain.exists(): return True return False
[ "def", "decide_if_cors", "(", "sender", ",", "request", ",", "**", "kwargs", ")", ":", "host", "=", "urlparse", "(", "request", ".", "META", "[", "'HTTP_ORIGIN'", "]", ")", ".", "netloc", ".", "split", "(", "':'", ")", "[", "0", "]", "valid_url", "=", "False", "for", "url", "in", "WHITELIST_URLS", ":", "if", "request", ".", "path_info", ".", "startswith", "(", "url", ")", ":", "valid_url", "=", "True", "if", "valid_url", ":", "project_slug", "=", "request", ".", "GET", ".", "get", "(", "'project'", ",", "None", ")", "try", ":", "project", "=", "Project", ".", "objects", ".", "get", "(", "slug", "=", "project_slug", ")", "except", "Project", ".", "DoesNotExist", ":", "log", ".", "warning", "(", "'Invalid project passed to domain. [{project}:{domain}'", ".", "format", "(", "project", "=", "project_slug", ",", "domain", "=", "host", ")", ")", "return", "False", "domain", "=", "Domain", ".", "objects", ".", "filter", "(", "domain__icontains", "=", "host", ",", "project", "=", "project", ")", "if", "domain", ".", "exists", "(", ")", ":", "return", "True", "return", "False" ]
decide whether a request should be given cors access .
train
false
23,572
def script_retcode(source, args=None, cwd=None, stdin=None, runas=None, shell=DEFAULT_SHELL, python_shell=None, env=None, template='jinja', umask=None, timeout=None, reset_system_locale=True, saltenv='base', output_loglevel='debug', log_callback=None, use_vt=False, password=None, **kwargs): if ('__env__' in kwargs): salt.utils.warn_until('Oxygen', "Parameter '__env__' has been detected in the argument list. This parameter is no longer used and has been replaced by 'saltenv' as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.") kwargs.pop('__env__') return script(source=source, args=args, cwd=cwd, stdin=stdin, runas=runas, shell=shell, python_shell=python_shell, env=env, template=template, umask=umask, timeout=timeout, reset_system_locale=reset_system_locale, saltenv=saltenv, output_loglevel=output_loglevel, log_callback=log_callback, use_vt=use_vt, password=password, **kwargs)['retcode']
[ "def", "script_retcode", "(", "source", ",", "args", "=", "None", ",", "cwd", "=", "None", ",", "stdin", "=", "None", ",", "runas", "=", "None", ",", "shell", "=", "DEFAULT_SHELL", ",", "python_shell", "=", "None", ",", "env", "=", "None", ",", "template", "=", "'jinja'", ",", "umask", "=", "None", ",", "timeout", "=", "None", ",", "reset_system_locale", "=", "True", ",", "saltenv", "=", "'base'", ",", "output_loglevel", "=", "'debug'", ",", "log_callback", "=", "None", ",", "use_vt", "=", "False", ",", "password", "=", "None", ",", "**", "kwargs", ")", ":", "if", "(", "'__env__'", "in", "kwargs", ")", ":", "salt", ".", "utils", ".", "warn_until", "(", "'Oxygen'", ",", "\"Parameter '__env__' has been detected in the argument list. This parameter is no longer used and has been replaced by 'saltenv' as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.\"", ")", "kwargs", ".", "pop", "(", "'__env__'", ")", "return", "script", "(", "source", "=", "source", ",", "args", "=", "args", ",", "cwd", "=", "cwd", ",", "stdin", "=", "stdin", ",", "runas", "=", "runas", ",", "shell", "=", "shell", ",", "python_shell", "=", "python_shell", ",", "env", "=", "env", ",", "template", "=", "template", ",", "umask", "=", "umask", ",", "timeout", "=", "timeout", ",", "reset_system_locale", "=", "reset_system_locale", ",", "saltenv", "=", "saltenv", ",", "output_loglevel", "=", "output_loglevel", ",", "log_callback", "=", "log_callback", ",", "use_vt", "=", "use_vt", ",", "password", "=", "password", ",", "**", "kwargs", ")", "[", "'retcode'", "]" ]
run :py:func:cmd .
train
false
23,573
def send_import_mail(task, result): ij = ImportJob.query.filter_by(task=task).first() if (not ij): return send_email_after_import(email=ij.user.email, result=result)
[ "def", "send_import_mail", "(", "task", ",", "result", ")", ":", "ij", "=", "ImportJob", ".", "query", ".", "filter_by", "(", "task", "=", "task", ")", ".", "first", "(", ")", "if", "(", "not", "ij", ")", ":", "return", "send_email_after_import", "(", "email", "=", "ij", ".", "user", ".", "email", ",", "result", "=", "result", ")" ]
send email after import .
train
false
23,574
@verbose def _merge_meg_eeg_fwds(megfwd, eegfwd, verbose=None): if ((megfwd is not None) and (eegfwd is not None)): if ((megfwd['sol']['data'].shape[1] != eegfwd['sol']['data'].shape[1]) or (megfwd['source_ori'] != eegfwd['source_ori']) or (megfwd['nsource'] != eegfwd['nsource']) or (megfwd['coord_frame'] != eegfwd['coord_frame'])): raise ValueError('The MEG and EEG forward solutions do not match') fwd = megfwd fwd['sol']['data'] = np.r_[(fwd['sol']['data'], eegfwd['sol']['data'])] fwd['_orig_sol'] = np.r_[(fwd['_orig_sol'], eegfwd['_orig_sol'])] fwd['sol']['nrow'] = (fwd['sol']['nrow'] + eegfwd['sol']['nrow']) fwd['sol']['row_names'] = (fwd['sol']['row_names'] + eegfwd['sol']['row_names']) if (fwd['sol_grad'] is not None): fwd['sol_grad']['data'] = np.r_[(fwd['sol_grad']['data'], eegfwd['sol_grad']['data'])] fwd['_orig_sol_grad'] = np.r_[(fwd['_orig_sol_grad'], eegfwd['_orig_sol_grad'])] fwd['sol_grad']['nrow'] = (fwd['sol_grad']['nrow'] + eegfwd['sol_grad']['nrow']) fwd['sol_grad']['row_names'] = (fwd['sol_grad']['row_names'] + eegfwd['sol_grad']['row_names']) fwd['nchan'] = (fwd['nchan'] + eegfwd['nchan']) logger.info(' MEG and EEG forward solutions combined') elif (megfwd is not None): fwd = megfwd else: fwd = eegfwd return fwd
[ "@", "verbose", "def", "_merge_meg_eeg_fwds", "(", "megfwd", ",", "eegfwd", ",", "verbose", "=", "None", ")", ":", "if", "(", "(", "megfwd", "is", "not", "None", ")", "and", "(", "eegfwd", "is", "not", "None", ")", ")", ":", "if", "(", "(", "megfwd", "[", "'sol'", "]", "[", "'data'", "]", ".", "shape", "[", "1", "]", "!=", "eegfwd", "[", "'sol'", "]", "[", "'data'", "]", ".", "shape", "[", "1", "]", ")", "or", "(", "megfwd", "[", "'source_ori'", "]", "!=", "eegfwd", "[", "'source_ori'", "]", ")", "or", "(", "megfwd", "[", "'nsource'", "]", "!=", "eegfwd", "[", "'nsource'", "]", ")", "or", "(", "megfwd", "[", "'coord_frame'", "]", "!=", "eegfwd", "[", "'coord_frame'", "]", ")", ")", ":", "raise", "ValueError", "(", "'The MEG and EEG forward solutions do not match'", ")", "fwd", "=", "megfwd", "fwd", "[", "'sol'", "]", "[", "'data'", "]", "=", "np", ".", "r_", "[", "(", "fwd", "[", "'sol'", "]", "[", "'data'", "]", ",", "eegfwd", "[", "'sol'", "]", "[", "'data'", "]", ")", "]", "fwd", "[", "'_orig_sol'", "]", "=", "np", ".", "r_", "[", "(", "fwd", "[", "'_orig_sol'", "]", ",", "eegfwd", "[", "'_orig_sol'", "]", ")", "]", "fwd", "[", "'sol'", "]", "[", "'nrow'", "]", "=", "(", "fwd", "[", "'sol'", "]", "[", "'nrow'", "]", "+", "eegfwd", "[", "'sol'", "]", "[", "'nrow'", "]", ")", "fwd", "[", "'sol'", "]", "[", "'row_names'", "]", "=", "(", "fwd", "[", "'sol'", "]", "[", "'row_names'", "]", "+", "eegfwd", "[", "'sol'", "]", "[", "'row_names'", "]", ")", "if", "(", "fwd", "[", "'sol_grad'", "]", "is", "not", "None", ")", ":", "fwd", "[", "'sol_grad'", "]", "[", "'data'", "]", "=", "np", ".", "r_", "[", "(", "fwd", "[", "'sol_grad'", "]", "[", "'data'", "]", ",", "eegfwd", "[", "'sol_grad'", "]", "[", "'data'", "]", ")", "]", "fwd", "[", "'_orig_sol_grad'", "]", "=", "np", ".", "r_", "[", "(", "fwd", "[", "'_orig_sol_grad'", "]", ",", "eegfwd", "[", "'_orig_sol_grad'", "]", ")", "]", "fwd", "[", "'sol_grad'", "]", "[", "'nrow'", "]", "=", "(", "fwd", "[", "'sol_grad'", 
"]", "[", "'nrow'", "]", "+", "eegfwd", "[", "'sol_grad'", "]", "[", "'nrow'", "]", ")", "fwd", "[", "'sol_grad'", "]", "[", "'row_names'", "]", "=", "(", "fwd", "[", "'sol_grad'", "]", "[", "'row_names'", "]", "+", "eegfwd", "[", "'sol_grad'", "]", "[", "'row_names'", "]", ")", "fwd", "[", "'nchan'", "]", "=", "(", "fwd", "[", "'nchan'", "]", "+", "eegfwd", "[", "'nchan'", "]", ")", "logger", ".", "info", "(", "' MEG and EEG forward solutions combined'", ")", "elif", "(", "megfwd", "is", "not", "None", ")", ":", "fwd", "=", "megfwd", "else", ":", "fwd", "=", "eegfwd", "return", "fwd" ]
merge loaded meg and eeg forward dicts into one dict .
train
false
23,575
def next_weekday(d, weekday): days_ahead = (weekday - d.weekday()) if (days_ahead <= 0): days_ahead += 7 return (d + datetime.timedelta(days_ahead))
[ "def", "next_weekday", "(", "d", ",", "weekday", ")", ":", "days_ahead", "=", "(", "weekday", "-", "d", ".", "weekday", "(", ")", ")", "if", "(", "days_ahead", "<=", "0", ")", ":", "days_ahead", "+=", "7", "return", "(", "d", "+", "datetime", ".", "timedelta", "(", "days_ahead", ")", ")" ]
return the next week day starting from d .
train
false
23,578
def toBitList(inbyte): return [getBit(inbyte, b) for b in range(8)]
[ "def", "toBitList", "(", "inbyte", ")", ":", "return", "[", "getBit", "(", "inbyte", ",", "b", ")", "for", "b", "in", "range", "(", "8", ")", "]" ]
name: tobitlist args: a byte desc: converts a byte into list for access to individual bits .
train
false
23,579
def LINEARREG(ds, count, timeperiod=(- (2 ** 31))): return call_talib_with_ds(ds, count, talib.LINEARREG, timeperiod)
[ "def", "LINEARREG", "(", "ds", ",", "count", ",", "timeperiod", "=", "(", "-", "(", "2", "**", "31", ")", ")", ")", ":", "return", "call_talib_with_ds", "(", "ds", ",", "count", ",", "talib", ".", "LINEARREG", ",", "timeperiod", ")" ]
linear regression .
train
false
23,580
def bisect_left_nodes_start(a, x, lo=0, hi=None): if (hi is None): hi = len(a) while (lo < hi): mid = ((lo + hi) // 2) if (a[mid].start is None): return mid if (a[mid].start[:2] == x): return mid if (a[mid].start[:2] < x): lo = (mid + 1) else: hi = mid return lo
[ "def", "bisect_left_nodes_start", "(", "a", ",", "x", ",", "lo", "=", "0", ",", "hi", "=", "None", ")", ":", "if", "(", "hi", "is", "None", ")", ":", "hi", "=", "len", "(", "a", ")", "while", "(", "lo", "<", "hi", ")", ":", "mid", "=", "(", "(", "lo", "+", "hi", ")", "//", "2", ")", "if", "(", "a", "[", "mid", "]", ".", "start", "is", "None", ")", ":", "return", "mid", "if", "(", "a", "[", "mid", "]", ".", "start", "[", ":", "2", "]", "==", "x", ")", ":", "return", "mid", "if", "(", "a", "[", "mid", "]", ".", "start", "[", ":", "2", "]", "<", "x", ")", ":", "lo", "=", "(", "mid", "+", "1", ")", "else", ":", "hi", "=", "mid", "return", "lo" ]
a version of bisect .
train
false
23,582
def archiver_for_path(path_name): if path_name.endswith(u'.tar.gz'): return TGZ elif path_name.endswith(u'.tar.bz2'): return TBZ2 else: (_, ext) = os.path.splitext(path_name) if ext: ext = ext[1:] if (not ext): raise ValueError(u'Could not determine archive type of path {}'.format(path_name)) return archiver(ext)
[ "def", "archiver_for_path", "(", "path_name", ")", ":", "if", "path_name", ".", "endswith", "(", "u'.tar.gz'", ")", ":", "return", "TGZ", "elif", "path_name", ".", "endswith", "(", "u'.tar.bz2'", ")", ":", "return", "TBZ2", "else", ":", "(", "_", ",", "ext", ")", "=", "os", ".", "path", ".", "splitext", "(", "path_name", ")", "if", "ext", ":", "ext", "=", "ext", "[", "1", ":", "]", "if", "(", "not", "ext", ")", ":", "raise", "ValueError", "(", "u'Could not determine archive type of path {}'", ".", "format", "(", "path_name", ")", ")", "return", "archiver", "(", "ext", ")" ]
returns an archiver for the given path name .
train
false
23,583
def _execute_cmd(plugin, args='', run_type='cmd.retcode'): data = {} all_plugins = list_plugins() if (plugin in all_plugins): data = __salt__[run_type]('{0}{1} {2}'.format(PLUGINDIR, plugin, args), python_shell=False) return data
[ "def", "_execute_cmd", "(", "plugin", ",", "args", "=", "''", ",", "run_type", "=", "'cmd.retcode'", ")", ":", "data", "=", "{", "}", "all_plugins", "=", "list_plugins", "(", ")", "if", "(", "plugin", "in", "all_plugins", ")", ":", "data", "=", "__salt__", "[", "run_type", "]", "(", "'{0}{1} {2}'", ".", "format", "(", "PLUGINDIR", ",", "plugin", ",", "args", ")", ",", "python_shell", "=", "False", ")", "return", "data" ]
execute nagios plugin if its in the directory with salt command specified in run_type .
train
true
23,584
def divisors(n, generator=False): n = as_int(abs(n)) if isprime(n): return [1, n] if (n == 1): return [1] if (n == 0): return [] rv = _divisors(n) if (not generator): return sorted(rv) return rv
[ "def", "divisors", "(", "n", ",", "generator", "=", "False", ")", ":", "n", "=", "as_int", "(", "abs", "(", "n", ")", ")", "if", "isprime", "(", "n", ")", ":", "return", "[", "1", ",", "n", "]", "if", "(", "n", "==", "1", ")", ":", "return", "[", "1", "]", "if", "(", "n", "==", "0", ")", ":", "return", "[", "]", "rv", "=", "_divisors", "(", "n", ")", "if", "(", "not", "generator", ")", ":", "return", "sorted", "(", "rv", ")", "return", "rv" ]
return all divisors of n sorted from 1 .
train
false
23,585
@cache_permission def can_edit_priority(user, project): return check_permission(user, project, 'trans.edit_priority')
[ "@", "cache_permission", "def", "can_edit_priority", "(", "user", ",", "project", ")", ":", "return", "check_permission", "(", "user", ",", "project", ",", "'trans.edit_priority'", ")" ]
checks whether user can edit translation priority .
train
false
23,589
def adaptAgent(agent_klass): def inner(*args, **kwargs): return RlglueAgentAdapter(agent_klass, *args, **kwargs) return inner
[ "def", "adaptAgent", "(", "agent_klass", ")", ":", "def", "inner", "(", "*", "args", ",", "**", "kwargs", ")", ":", "return", "RlglueAgentAdapter", "(", "agent_klass", ",", "*", "args", ",", "**", "kwargs", ")", "return", "inner" ]
return a factory function that instantiates a pybrain agent and adapts it to the rlglue framework interface .
train
false
23,590
def test_marshmallow_schema(): class UserSchema(Schema, ): name = fields.Str() schema_type = hug.types.MarshmallowSchema(UserSchema()) assert (schema_type({'name': 'test'}) == {'name': 'test'}) assert (schema_type('{"name": "test"}') == {'name': 'test'}) assert (schema_type.__doc__ == 'UserSchema') with pytest.raises(InvalidTypeData): schema_type({'name': 1})
[ "def", "test_marshmallow_schema", "(", ")", ":", "class", "UserSchema", "(", "Schema", ",", ")", ":", "name", "=", "fields", ".", "Str", "(", ")", "schema_type", "=", "hug", ".", "types", ".", "MarshmallowSchema", "(", "UserSchema", "(", ")", ")", "assert", "(", "schema_type", "(", "{", "'name'", ":", "'test'", "}", ")", "==", "{", "'name'", ":", "'test'", "}", ")", "assert", "(", "schema_type", "(", "'{\"name\": \"test\"}'", ")", "==", "{", "'name'", ":", "'test'", "}", ")", "assert", "(", "schema_type", ".", "__doc__", "==", "'UserSchema'", ")", "with", "pytest", ".", "raises", "(", "InvalidTypeData", ")", ":", "schema_type", "(", "{", "'name'", ":", "1", "}", ")" ]
test hugs marshmallow schema support .
train
false
23,592
@register.tag def mkrange(parser, token): tokens = token.split_contents() fnctl = tokens.pop(0) def error(): raise TemplateSyntaxError, (('%s accepts the syntax: {%% %s [start,] ' + "stop[, step] as context_name %%}, where 'start', 'stop' ") + ("and 'step' must all be integers." % (fnctl, fnctl))) range_args = [] while True: if (len(tokens) < 2): error() token = tokens.pop(0) if (token == 'as'): break range_args.append(token) if (len(tokens) != 1): error() context_name = tokens.pop() return RangeNode(parser, range_args, context_name)
[ "@", "register", ".", "tag", "def", "mkrange", "(", "parser", ",", "token", ")", ":", "tokens", "=", "token", ".", "split_contents", "(", ")", "fnctl", "=", "tokens", ".", "pop", "(", "0", ")", "def", "error", "(", ")", ":", "raise", "TemplateSyntaxError", ",", "(", "(", "'%s accepts the syntax: {%% %s [start,] '", "+", "\"stop[, step] as context_name %%}, where 'start', 'stop' \"", ")", "+", "(", "\"and 'step' must all be integers.\"", "%", "(", "fnctl", ",", "fnctl", ")", ")", ")", "range_args", "=", "[", "]", "while", "True", ":", "if", "(", "len", "(", "tokens", ")", "<", "2", ")", ":", "error", "(", ")", "token", "=", "tokens", ".", "pop", "(", "0", ")", "if", "(", "token", "==", "'as'", ")", ":", "break", "range_args", ".", "append", "(", "token", ")", "if", "(", "len", "(", "tokens", ")", "!=", "1", ")", ":", "error", "(", ")", "context_name", "=", "tokens", ".", "pop", "(", ")", "return", "RangeNode", "(", "parser", ",", "range_args", ",", "context_name", ")" ]
accepts the same arguments as the range builtin and creates a list containing the result of range .
train
false
23,594
def EmbeddingLookupFeatures(params, sparse_features, allow_weights): if (not isinstance(params, list)): params = [params] sparse_features = tf.convert_to_tensor(sparse_features) (indices, ids, weights) = gen_parser_ops.unpack_sparse_features(sparse_features) embeddings = tf.nn.embedding_lookup(params, ids) if allow_weights: broadcast_weights_shape = tf.concat(0, [tf.shape(weights), [1]]) embeddings *= tf.reshape(weights, broadcast_weights_shape) return tf.unsorted_segment_sum(embeddings, indices, tf.size(sparse_features))
[ "def", "EmbeddingLookupFeatures", "(", "params", ",", "sparse_features", ",", "allow_weights", ")", ":", "if", "(", "not", "isinstance", "(", "params", ",", "list", ")", ")", ":", "params", "=", "[", "params", "]", "sparse_features", "=", "tf", ".", "convert_to_tensor", "(", "sparse_features", ")", "(", "indices", ",", "ids", ",", "weights", ")", "=", "gen_parser_ops", ".", "unpack_sparse_features", "(", "sparse_features", ")", "embeddings", "=", "tf", ".", "nn", ".", "embedding_lookup", "(", "params", ",", "ids", ")", "if", "allow_weights", ":", "broadcast_weights_shape", "=", "tf", ".", "concat", "(", "0", ",", "[", "tf", ".", "shape", "(", "weights", ")", ",", "[", "1", "]", "]", ")", "embeddings", "*=", "tf", ".", "reshape", "(", "weights", ",", "broadcast_weights_shape", ")", "return", "tf", ".", "unsorted_segment_sum", "(", "embeddings", ",", "indices", ",", "tf", ".", "size", "(", "sparse_features", ")", ")" ]
computes embeddings for each entry of sparse features sparse_features .
train
false
23,595
def print_change_diffs(change_diffs): diff_strings = [] for (file_path, change_diff) in change_diffs.items(): if (not (change_diff[0] or change_diff[1] or change_diff[2] or change_diff[3])): continue diff_strings.append(('--- %s' % get_temp_file_path(file_path))) diff_strings.append(('+++ %s' % file_path)) for iter_category in range(4): change_category = change_diff[iter_category] if ((iter_category == 0) and change_category): diff_strings.append('*++ Additional unexpected adds') elif ((iter_category == 1) and change_category): diff_strings.append('/++ Not present expected adds') elif ((iter_category == 2) and change_category): diff_strings.append('*-- Additional unexpected removes') elif ((iter_category == 3) and change_category): diff_strings.append('/-- Not present expected removes') for line_change in change_category: diff_strings.append(str(line_change).encode('string-escape')) return '\n'.join(diff_strings)
[ "def", "print_change_diffs", "(", "change_diffs", ")", ":", "diff_strings", "=", "[", "]", "for", "(", "file_path", ",", "change_diff", ")", "in", "change_diffs", ".", "items", "(", ")", ":", "if", "(", "not", "(", "change_diff", "[", "0", "]", "or", "change_diff", "[", "1", "]", "or", "change_diff", "[", "2", "]", "or", "change_diff", "[", "3", "]", ")", ")", ":", "continue", "diff_strings", ".", "append", "(", "(", "'--- %s'", "%", "get_temp_file_path", "(", "file_path", ")", ")", ")", "diff_strings", ".", "append", "(", "(", "'+++ %s'", "%", "file_path", ")", ")", "for", "iter_category", "in", "range", "(", "4", ")", ":", "change_category", "=", "change_diff", "[", "iter_category", "]", "if", "(", "(", "iter_category", "==", "0", ")", "and", "change_category", ")", ":", "diff_strings", ".", "append", "(", "'*++ Additional unexpected adds'", ")", "elif", "(", "(", "iter_category", "==", "1", ")", "and", "change_category", ")", ":", "diff_strings", ".", "append", "(", "'/++ Not present expected adds'", ")", "elif", "(", "(", "iter_category", "==", "2", ")", "and", "change_category", ")", ":", "diff_strings", ".", "append", "(", "'*-- Additional unexpected removes'", ")", "elif", "(", "(", "iter_category", "==", "3", ")", "and", "change_category", ")", ":", "diff_strings", ".", "append", "(", "'/-- Not present expected removes'", ")", "for", "line_change", "in", "change_category", ":", "diff_strings", ".", "append", "(", "str", "(", "line_change", ")", ".", "encode", "(", "'string-escape'", ")", ")", "return", "'\\n'", ".", "join", "(", "diff_strings", ")" ]
pretty prints the output of the evaluate_config_changes function .
train
false
23,597
def header_property(wsgi_name): def fget(self): try: return (self.env[wsgi_name] or None) except KeyError: return None return property(fget)
[ "def", "header_property", "(", "wsgi_name", ")", ":", "def", "fget", "(", "self", ")", ":", "try", ":", "return", "(", "self", ".", "env", "[", "wsgi_name", "]", "or", "None", ")", "except", "KeyError", ":", "return", "None", "return", "property", "(", "fget", ")" ]
creates a read-only header property .
train
false
23,598
def getEvaluatedLinkValue(elementNode, word): if (word == ''): return '' if getStartsWithCurlyEqualRoundSquare(word): return getEvaluatedExpressionValue(elementNode, word) return word
[ "def", "getEvaluatedLinkValue", "(", "elementNode", ",", "word", ")", ":", "if", "(", "word", "==", "''", ")", ":", "return", "''", "if", "getStartsWithCurlyEqualRoundSquare", "(", "word", ")", ":", "return", "getEvaluatedExpressionValue", "(", "elementNode", ",", "word", ")", "return", "word" ]
get the evaluated link value .
train
false
23,599
@public def cache_value(duration=None): def decorator(fx): fx.__cached = None fx.__cached_at = 0 def wrapper(*args, **kwargs): dt = (time.time() - fx.__cached_at) if (((dt > duration) and (duration is not None)) or ((fx.__cached_at == 0) and (duration is None))): val = fx(*args, **kwargs) fx.__cached = val fx.__cached_at = time.time() else: val = fx.__cached return val wrapper.__doc__ = fx.__doc__ return wrapper return decorator
[ "@", "public", "def", "cache_value", "(", "duration", "=", "None", ")", ":", "def", "decorator", "(", "fx", ")", ":", "fx", ".", "__cached", "=", "None", "fx", ".", "__cached_at", "=", "0", "def", "wrapper", "(", "*", "args", ",", "**", "kwargs", ")", ":", "dt", "=", "(", "time", ".", "time", "(", ")", "-", "fx", ".", "__cached_at", ")", "if", "(", "(", "(", "dt", ">", "duration", ")", "and", "(", "duration", "is", "not", "None", ")", ")", "or", "(", "(", "fx", ".", "__cached_at", "==", "0", ")", "and", "(", "duration", "is", "None", ")", ")", ")", ":", "val", "=", "fx", "(", "*", "args", ",", "**", "kwargs", ")", "fx", ".", "__cached", "=", "val", "fx", ".", "__cached_at", "=", "time", ".", "time", "(", ")", "else", ":", "val", "=", "fx", ".", "__cached", "return", "val", "wrapper", ".", "__doc__", "=", "fx", ".", "__doc__", "return", "wrapper", "return", "decorator" ]
makes a function lazy .
train
false
23,600
@contextmanager def reversed(G): directed = G.is_directed() if directed: G.reverse(copy=False) try: (yield) finally: if directed: G.reverse(copy=False)
[ "@", "contextmanager", "def", "reversed", "(", "G", ")", ":", "directed", "=", "G", ".", "is_directed", "(", ")", "if", "directed", ":", "G", ".", "reverse", "(", "copy", "=", "False", ")", "try", ":", "(", "yield", ")", "finally", ":", "if", "directed", ":", "G", ".", "reverse", "(", "copy", "=", "False", ")" ]
a context manager for temporarily reversing a directed graph in place .
train
false
23,602
def get_holidays(employee, from_date, to_date): holiday_list = get_holiday_list_for_employee(employee) holidays = frappe.db.sql(u'select count(distinct holiday_date) from `tabHoliday` h1, `tabHoliday List` h2\n DCTB DCTB where h1.parent = h2.name and h1.holiday_date between %s and %s\n DCTB DCTB and h2.name = %s', (from_date, to_date, holiday_list))[0][0] return holidays
[ "def", "get_holidays", "(", "employee", ",", "from_date", ",", "to_date", ")", ":", "holiday_list", "=", "get_holiday_list_for_employee", "(", "employee", ")", "holidays", "=", "frappe", ".", "db", ".", "sql", "(", "u'select count(distinct holiday_date) from `tabHoliday` h1, `tabHoliday List` h2\\n DCTB DCTB where h1.parent = h2.name and h1.holiday_date between %s and %s\\n DCTB DCTB and h2.name = %s'", ",", "(", "from_date", ",", "to_date", ",", "holiday_list", ")", ")", "[", "0", "]", "[", "0", "]", "return", "holidays" ]
get holidays between two dates for the given employee .
train
false
23,603
def get_maker(autocommit=True, expire_on_commit=False): 'May assign __MAKER if not already assigned' global _MAKER, _ENGINE assert _ENGINE if (not _MAKER): _MAKER = sa_orm.sessionmaker(bind=_ENGINE, autocommit=autocommit, expire_on_commit=expire_on_commit) return _MAKER
[ "def", "get_maker", "(", "autocommit", "=", "True", ",", "expire_on_commit", "=", "False", ")", ":", "global", "_MAKER", ",", "_ENGINE", "assert", "_ENGINE", "if", "(", "not", "_MAKER", ")", ":", "_MAKER", "=", "sa_orm", ".", "sessionmaker", "(", "bind", "=", "_ENGINE", ",", "autocommit", "=", "autocommit", ",", "expire_on_commit", "=", "expire_on_commit", ")", "return", "_MAKER" ]
return a sqlalchemy sessionmaker using the given engine .
train
false
23,604
def notify_init_event(agent_type, agent): registry.notify(agent_type, events.AFTER_INIT, agent, agent=agent)
[ "def", "notify_init_event", "(", "agent_type", ",", "agent", ")", ":", "registry", ".", "notify", "(", "agent_type", ",", "events", ".", "AFTER_INIT", ",", "agent", ",", "agent", "=", "agent", ")" ]
notify init event for the specified agent .
train
false
23,605
def _image_mime_type(data): kind = imghdr.what(None, h=data) if (kind in ['gif', 'jpeg', 'png', 'tiff', 'bmp']): return 'image/{0}'.format(kind) elif (kind == 'pgm'): return 'image/x-portable-graymap' elif (kind == 'pbm'): return 'image/x-portable-bitmap' elif (kind == 'ppm'): return 'image/x-portable-pixmap' elif (kind == 'xbm'): return 'image/x-xbitmap' else: return 'image/x-{0}'.format(kind)
[ "def", "_image_mime_type", "(", "data", ")", ":", "kind", "=", "imghdr", ".", "what", "(", "None", ",", "h", "=", "data", ")", "if", "(", "kind", "in", "[", "'gif'", ",", "'jpeg'", ",", "'png'", ",", "'tiff'", ",", "'bmp'", "]", ")", ":", "return", "'image/{0}'", ".", "format", "(", "kind", ")", "elif", "(", "kind", "==", "'pgm'", ")", ":", "return", "'image/x-portable-graymap'", "elif", "(", "kind", "==", "'pbm'", ")", ":", "return", "'image/x-portable-bitmap'", "elif", "(", "kind", "==", "'ppm'", ")", ":", "return", "'image/x-portable-pixmap'", "elif", "(", "kind", "==", "'xbm'", ")", ":", "return", "'image/x-xbitmap'", "else", ":", "return", "'image/x-{0}'", ".", "format", "(", "kind", ")" ]
return the mime type of the image data .
train
false
23,606
def _connection_defaults(user=None, host=None, port=None, maintenance_db=None, password=None): if (not user): user = __salt__['config.option']('postgres.user') if (not host): host = __salt__['config.option']('postgres.host') if (not port): port = __salt__['config.option']('postgres.port') if (not maintenance_db): maintenance_db = __salt__['config.option']('postgres.maintenance_db') if (password is None): password = __salt__['config.option']('postgres.pass') return (user, host, port, maintenance_db, password)
[ "def", "_connection_defaults", "(", "user", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ",", "maintenance_db", "=", "None", ",", "password", "=", "None", ")", ":", "if", "(", "not", "user", ")", ":", "user", "=", "__salt__", "[", "'config.option'", "]", "(", "'postgres.user'", ")", "if", "(", "not", "host", ")", ":", "host", "=", "__salt__", "[", "'config.option'", "]", "(", "'postgres.host'", ")", "if", "(", "not", "port", ")", ":", "port", "=", "__salt__", "[", "'config.option'", "]", "(", "'postgres.port'", ")", "if", "(", "not", "maintenance_db", ")", ":", "maintenance_db", "=", "__salt__", "[", "'config.option'", "]", "(", "'postgres.maintenance_db'", ")", "if", "(", "password", "is", "None", ")", ":", "password", "=", "__salt__", "[", "'config.option'", "]", "(", "'postgres.pass'", ")", "return", "(", "user", ",", "host", ",", "port", ",", "maintenance_db", ",", "password", ")" ]
returns a tuple of with config .
train
true
23,607
def segs2flags(segStart, segEnd, segLabel, winSize): flags = [] classNames = list(set(segLabel)) curPos = (winSize / 2.0) while (curPos < segEnd[(-1)]): for i in range(len(segStart)): if ((curPos > segStart[i]) and (curPos <= segEnd[i])): break flags.append(classNames.index(segLabel[i])) curPos += winSize return (numpy.array(flags), classNames)
[ "def", "segs2flags", "(", "segStart", ",", "segEnd", ",", "segLabel", ",", "winSize", ")", ":", "flags", "=", "[", "]", "classNames", "=", "list", "(", "set", "(", "segLabel", ")", ")", "curPos", "=", "(", "winSize", "/", "2.0", ")", "while", "(", "curPos", "<", "segEnd", "[", "(", "-", "1", ")", "]", ")", ":", "for", "i", "in", "range", "(", "len", "(", "segStart", ")", ")", ":", "if", "(", "(", "curPos", ">", "segStart", "[", "i", "]", ")", "and", "(", "curPos", "<=", "segEnd", "[", "i", "]", ")", ")", ":", "break", "flags", ".", "append", "(", "classNames", ".", "index", "(", "segLabel", "[", "i", "]", ")", ")", "curPos", "+=", "winSize", "return", "(", "numpy", ".", "array", "(", "flags", ")", ",", "classNames", ")" ]
this function converts segment endpoints and respective segment labels to fix-sized class labels .
train
false
23,609
def part_lister(mpupload, part_number_marker=None): more_results = True part = None while more_results: parts = mpupload.get_all_parts(None, part_number_marker) for part in parts: (yield part) part_number_marker = mpupload.next_part_number_marker more_results = mpupload.is_truncated
[ "def", "part_lister", "(", "mpupload", ",", "part_number_marker", "=", "None", ")", ":", "more_results", "=", "True", "part", "=", "None", "while", "more_results", ":", "parts", "=", "mpupload", ".", "get_all_parts", "(", "None", ",", "part_number_marker", ")", "for", "part", "in", "parts", ":", "(", "yield", "part", ")", "part_number_marker", "=", "mpupload", ".", "next_part_number_marker", "more_results", "=", "mpupload", ".", "is_truncated" ]
a generator function for listing parts of a multipart upload .
train
true
23,610
def ldap_search(filter, base=None, attr=None): if (base is None): base = base_dn() msgid = uldap().lo.lo.search(base, ldap_module().SCOPE_SUBTREE, filterstr=filter, attrlist=attr) while True: (result_type, result_data) = uldap().lo.lo.result(msgid, all=0) if (not result_data): break if (result_type is ldap_module().RES_SEARCH_RESULT): break elif (result_type is ldap_module().RES_SEARCH_ENTRY): for res in result_data: (yield res) uldap().lo.lo.abandon(msgid)
[ "def", "ldap_search", "(", "filter", ",", "base", "=", "None", ",", "attr", "=", "None", ")", ":", "if", "(", "base", "is", "None", ")", ":", "base", "=", "base_dn", "(", ")", "msgid", "=", "uldap", "(", ")", ".", "lo", ".", "lo", ".", "search", "(", "base", ",", "ldap_module", "(", ")", ".", "SCOPE_SUBTREE", ",", "filterstr", "=", "filter", ",", "attrlist", "=", "attr", ")", "while", "True", ":", "(", "result_type", ",", "result_data", ")", "=", "uldap", "(", ")", ".", "lo", ".", "lo", ".", "result", "(", "msgid", ",", "all", "=", "0", ")", "if", "(", "not", "result_data", ")", ":", "break", "if", "(", "result_type", "is", "ldap_module", "(", ")", ".", "RES_SEARCH_RESULT", ")", ":", "break", "elif", "(", "result_type", "is", "ldap_module", "(", ")", ".", "RES_SEARCH_ENTRY", ")", ":", "for", "res", "in", "result_data", ":", "(", "yield", "res", ")", "uldap", "(", ")", ".", "lo", ".", "lo", ".", "abandon", "(", "msgid", ")" ]
replaces uldaps search and uses a generator .
train
false
23,611
def confirm_login(): session['_fresh'] = True session['_id'] = _create_identifier() user_login_confirmed.send(current_app._get_current_object())
[ "def", "confirm_login", "(", ")", ":", "session", "[", "'_fresh'", "]", "=", "True", "session", "[", "'_id'", "]", "=", "_create_identifier", "(", ")", "user_login_confirmed", ".", "send", "(", "current_app", ".", "_get_current_object", "(", ")", ")" ]
this sets the current session as fresh .
train
false
23,613
def read_head_pos(fname): _check_fname(fname, must_exist=True, overwrite=True) data = np.loadtxt(fname, skiprows=1) data.shape = ((-1), 10) if np.isnan(data).any(): raise RuntimeError(('positions could not be read properly from %s' % fname)) return data
[ "def", "read_head_pos", "(", "fname", ")", ":", "_check_fname", "(", "fname", ",", "must_exist", "=", "True", ",", "overwrite", "=", "True", ")", "data", "=", "np", ".", "loadtxt", "(", "fname", ",", "skiprows", "=", "1", ")", "data", ".", "shape", "=", "(", "(", "-", "1", ")", ",", "10", ")", "if", "np", ".", "isnan", "(", "data", ")", ".", "any", "(", ")", ":", "raise", "RuntimeError", "(", "(", "'positions could not be read properly from %s'", "%", "fname", ")", ")", "return", "data" ]
read maxfilter-formatted head position parameters .
train
false
23,615
def remove_abs_path(cmd): if (cmd and os.path.isabs(cmd[0])): cmd = list(cmd) cmd[0] = os.path.basename(cmd[0]) return cmd
[ "def", "remove_abs_path", "(", "cmd", ")", ":", "if", "(", "cmd", "and", "os", ".", "path", ".", "isabs", "(", "cmd", "[", "0", "]", ")", ")", ":", "cmd", "=", "list", "(", "cmd", ")", "cmd", "[", "0", "]", "=", "os", ".", "path", ".", "basename", "(", "cmd", "[", "0", "]", ")", "return", "cmd" ]
remove absolute path of executable in cmd note: new instance of list is returned .
train
false
23,616
def skel_load(skeleton, inventory): for (key, value) in skeleton.iteritems(): _parse_belongs_to(key, belongs_to=value['belongs_to'], inventory=inventory)
[ "def", "skel_load", "(", "skeleton", ",", "inventory", ")", ":", "for", "(", "key", ",", "value", ")", "in", "skeleton", ".", "iteritems", "(", ")", ":", "_parse_belongs_to", "(", "key", ",", "belongs_to", "=", "value", "[", "'belongs_to'", "]", ",", "inventory", "=", "inventory", ")" ]
build out data as provided from the defined skel dictionary .
train
false
23,617
def make_teacher_model(train_data, validation_data, nb_epoch=3): model = Sequential() model.add(Conv2D(64, 3, 3, input_shape=input_shape, border_mode='same', name='conv1')) model.add(MaxPooling2D(name='pool1')) model.add(Conv2D(64, 3, 3, border_mode='same', name='conv2')) model.add(MaxPooling2D(name='pool2')) model.add(Flatten(name='flatten')) model.add(Dense(64, activation='relu', name='fc1')) model.add(Dense(nb_class, activation='softmax', name='fc2')) model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.01, momentum=0.9), metrics=['accuracy']) (train_x, train_y) = train_data history = model.fit(train_x, train_y, nb_epoch=nb_epoch, validation_data=validation_data) return (model, history)
[ "def", "make_teacher_model", "(", "train_data", ",", "validation_data", ",", "nb_epoch", "=", "3", ")", ":", "model", "=", "Sequential", "(", ")", "model", ".", "add", "(", "Conv2D", "(", "64", ",", "3", ",", "3", ",", "input_shape", "=", "input_shape", ",", "border_mode", "=", "'same'", ",", "name", "=", "'conv1'", ")", ")", "model", ".", "add", "(", "MaxPooling2D", "(", "name", "=", "'pool1'", ")", ")", "model", ".", "add", "(", "Conv2D", "(", "64", ",", "3", ",", "3", ",", "border_mode", "=", "'same'", ",", "name", "=", "'conv2'", ")", ")", "model", ".", "add", "(", "MaxPooling2D", "(", "name", "=", "'pool2'", ")", ")", "model", ".", "add", "(", "Flatten", "(", "name", "=", "'flatten'", ")", ")", "model", ".", "add", "(", "Dense", "(", "64", ",", "activation", "=", "'relu'", ",", "name", "=", "'fc1'", ")", ")", "model", ".", "add", "(", "Dense", "(", "nb_class", ",", "activation", "=", "'softmax'", ",", "name", "=", "'fc2'", ")", ")", "model", ".", "compile", "(", "loss", "=", "'categorical_crossentropy'", ",", "optimizer", "=", "SGD", "(", "lr", "=", "0.01", ",", "momentum", "=", "0.9", ")", ",", "metrics", "=", "[", "'accuracy'", "]", ")", "(", "train_x", ",", "train_y", ")", "=", "train_data", "history", "=", "model", ".", "fit", "(", "train_x", ",", "train_y", ",", "nb_epoch", "=", "nb_epoch", ",", "validation_data", "=", "validation_data", ")", "return", "(", "model", ",", "history", ")" ]
train a simple cnn as teacher model .
train
false
23,619
def init(mpstate): return SerialModule(mpstate)
[ "def", "init", "(", "mpstate", ")", ":", "return", "SerialModule", "(", "mpstate", ")" ]
initialize the global qwebsettings .
train
false
23,620
def __forwardmethods(fromClass, toClass, toPart, exclude=()): _dict = {} __methodDict(toClass, _dict) for ex in _dict.keys(): if ((ex[:1] == '_') or (ex[(-1):] == '_')): del _dict[ex] for ex in exclude: if _dict.has_key(ex): del _dict[ex] for ex in __methods(fromClass): if _dict.has_key(ex): del _dict[ex] for (method, func) in _dict.items(): d = {'method': method, 'func': func} if (type(toPart) == types.StringType): execString = (__stringBody % {'method': method, 'attribute': toPart}) exec execString in d fromClass.__dict__[method] = d[method]
[ "def", "__forwardmethods", "(", "fromClass", ",", "toClass", ",", "toPart", ",", "exclude", "=", "(", ")", ")", ":", "_dict", "=", "{", "}", "__methodDict", "(", "toClass", ",", "_dict", ")", "for", "ex", "in", "_dict", ".", "keys", "(", ")", ":", "if", "(", "(", "ex", "[", ":", "1", "]", "==", "'_'", ")", "or", "(", "ex", "[", "(", "-", "1", ")", ":", "]", "==", "'_'", ")", ")", ":", "del", "_dict", "[", "ex", "]", "for", "ex", "in", "exclude", ":", "if", "_dict", ".", "has_key", "(", "ex", ")", ":", "del", "_dict", "[", "ex", "]", "for", "ex", "in", "__methods", "(", "fromClass", ")", ":", "if", "_dict", ".", "has_key", "(", "ex", ")", ":", "del", "_dict", "[", "ex", "]", "for", "(", "method", ",", "func", ")", "in", "_dict", ".", "items", "(", ")", ":", "d", "=", "{", "'method'", ":", "method", ",", "'func'", ":", "func", "}", "if", "(", "type", "(", "toPart", ")", "==", "types", ".", "StringType", ")", ":", "execString", "=", "(", "__stringBody", "%", "{", "'method'", ":", "method", ",", "'attribute'", ":", "toPart", "}", ")", "exec", "execString", "in", "d", "fromClass", ".", "__dict__", "[", "method", "]", "=", "d", "[", "method", "]" ]
helper functions for scrolled canvas .
train
false
23,621
@flake8ext def check_python3_xrange(logical_line): if re.search('\\bxrange\\s*\\(', logical_line): (yield (0, 'N325: Do not use xrange. Use range, or six.moves.range for large loops.'))
[ "@", "flake8ext", "def", "check_python3_xrange", "(", "logical_line", ")", ":", "if", "re", ".", "search", "(", "'\\\\bxrange\\\\s*\\\\('", ",", "logical_line", ")", ":", "(", "yield", "(", "0", ",", "'N325: Do not use xrange. Use range, or six.moves.range for large loops.'", ")", ")" ]
n325 - do not use xrange .
train
false
23,622
def translateVector3Paths(paths, translateVector3): for path in paths: translateVector3Path(path, translateVector3)
[ "def", "translateVector3Paths", "(", "paths", ",", "translateVector3", ")", ":", "for", "path", "in", "paths", ":", "translateVector3Path", "(", "path", ",", "translateVector3", ")" ]
translate the vector3 paths .
train
false
23,624
def get_notepad_pages_localdokuwiki(pagesdir='/var/lib/dokuwiki/data/pages'): ipaddr_page = re.compile('^\\d+\\.\\d+\\.\\d+\\.\\d+\\.txt$') return [page[:(-4)] for page in os.listdir(pagesdir) if ipaddr_page.match(page)]
[ "def", "get_notepad_pages_localdokuwiki", "(", "pagesdir", "=", "'/var/lib/dokuwiki/data/pages'", ")", ":", "ipaddr_page", "=", "re", ".", "compile", "(", "'^\\\\d+\\\\.\\\\d+\\\\.\\\\d+\\\\.\\\\d+\\\\.txt$'", ")", "return", "[", "page", "[", ":", "(", "-", "4", ")", "]", "for", "page", "in", "os", ".", "listdir", "(", "pagesdir", ")", "if", "ipaddr_page", ".", "match", "(", "page", ")", "]" ]
returns a list of the ip addresses for which a dokuwiki page exists .
train
false
23,627
def choose_promo(promo_list):
    """Pick which promo to show, weighted by the views each still needs today.

    Each promo occupies a slice of ``[0, total_views_needed]`` proportional
    to its remaining views; a uniform random draw selects the slice.
    Returns None when the draw lands outside every slice (e.g. an empty
    promo list).
    """
    slices = []
    cumulative = 0
    for promo in promo_list:
        needed = promo.views_needed_today()
        slices.append((cumulative, cumulative + needed, promo))
        cumulative += needed
    pick = random.randint(0, cumulative)
    for low, high, promo in slices:
        if low <= pick <= high:
            return promo
    return None
[ "def", "choose_promo", "(", "promo_list", ")", ":", "promo_range", "=", "[", "]", "total_views_needed", "=", "0", "for", "promo", "in", "promo_list", ":", "promo_range", ".", "append", "(", "[", "total_views_needed", ",", "(", "total_views_needed", "+", "promo", ".", "views_needed_today", "(", ")", ")", ",", "promo", "]", ")", "total_views_needed", "+=", "promo", ".", "views_needed_today", "(", ")", "choice", "=", "random", ".", "randint", "(", "0", ",", "total_views_needed", ")", "for", "range_list", "in", "promo_range", ":", "if", "(", "range_list", "[", "0", "]", "<=", "choice", "<=", "range_list", "[", "1", "]", ")", ":", "return", "range_list", "[", "2", "]", "return", "None" ]
this is the algorithm to pick which promo to show .
train
false
23,628
def addXMLFromObjects(depth, objects, output):
    """Ask each object in *objects* to append its XML at *depth* to *output*."""
    for xml_source in objects:
        xml_source.addXML(depth, output)
[ "def", "addXMLFromObjects", "(", "depth", ",", "objects", ",", "output", ")", ":", "for", "object", "in", "objects", ":", "object", ".", "addXML", "(", "depth", ",", "output", ")" ]
add xml from objects .
train
false
23,629
def get_flashed_messages(with_categories=False, category_filter=[]):
    """Pull all flashed messages from the session and return them.

    The first call during a request moves the messages from the session
    onto the request context, so repeated calls in the same request see
    the same messages.

    :param with_categories: when True, return ``(category, message)``
        tuples instead of bare message strings.
    :param category_filter: iterable of categories; when non-empty, only
        messages whose category is listed are returned.
    """
    ctx = _request_ctx_stack.top
    flashes = ctx.flashes
    if flashes is None:
        # First access this request: drain the session (if anything was
        # flashed) and cache the result on the request context.
        flashes = session.pop('_flashes') if '_flashes' in session else []
        ctx.flashes = flashes
    if category_filter:
        flashes = [f for f in flashes if f[0] in category_filter]
    if with_categories:
        return flashes
    return [message for _category, message in flashes]
[ "def", "get_flashed_messages", "(", "with_categories", "=", "False", ",", "category_filter", "=", "[", "]", ")", ":", "flashes", "=", "_request_ctx_stack", ".", "top", ".", "flashes", "if", "(", "flashes", "is", "None", ")", ":", "_request_ctx_stack", ".", "top", ".", "flashes", "=", "flashes", "=", "(", "session", ".", "pop", "(", "'_flashes'", ")", "if", "(", "'_flashes'", "in", "session", ")", "else", "[", "]", ")", "if", "category_filter", ":", "flashes", "=", "list", "(", "filter", "(", "(", "lambda", "f", ":", "(", "f", "[", "0", "]", "in", "category_filter", ")", ")", ",", "flashes", ")", ")", "if", "(", "not", "with_categories", ")", ":", "return", "[", "x", "[", "1", "]", "for", "x", "in", "flashes", "]", "return", "flashes" ]
pulls all flashed messages from the session and returns them .
train
true
23,630
def _get_external_ip():
    """Best-effort lookup of the IP address used to reach the outside world.

    First tries resolving the local hostname; if that yields a loopback
    address, opens UDP sockets toward well-known external addresses
    (TEST-NET-1, TEST-NET-3, then google.com) purely to learn which local
    interface the OS would route through -- UDP ``connect`` sends no
    traffic.  Falls back to 127.0.0.1 when everything fails.

    :returns: dotted-quad IP address string.
    """
    try:
        ipaddr = socket.gethostbyname(socket.gethostname())
    except Exception:
        # Hostname may not resolve at all; was a bare ``except:`` which
        # also swallowed KeyboardInterrupt/SystemExit.
        ipaddr = '127.0.0.1'
    if ipaddr.startswith('127.'):
        for addr in ('192.0.2.0', '198.51.100.0', 'google.com'):
            s = None
            try:
                s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                # connect() on a UDP socket only selects a route;
                # getsockname() then reveals the outgoing interface address.
                s.connect((addr, 0))
                ipaddr = s.getsockname()[0]
                if not ipaddr.startswith('127.'):
                    break
            except Exception:
                # Brief pause before trying the next candidate address.
                time.sleep(0.3)
            finally:
                # Original leaked one socket per attempt; always close.
                if s is not None:
                    s.close()
    return ipaddr
[ "def", "_get_external_ip", "(", ")", ":", "try", ":", "ipaddr", "=", "socket", ".", "gethostbyname", "(", "socket", ".", "gethostname", "(", ")", ")", "except", ":", "ipaddr", "=", "'127.0.0.1'", "if", "ipaddr", ".", "startswith", "(", "'127.'", ")", ":", "for", "addr", "in", "(", "'192.0.2.0'", ",", "'198.51.100.0'", ",", "'google.com'", ")", ":", "try", ":", "s", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_DGRAM", ")", "s", ".", "connect", "(", "(", "addr", ",", "0", ")", ")", "ipaddr", "=", "s", ".", "getsockname", "(", ")", "[", "0", "]", "if", "(", "not", "ipaddr", ".", "startswith", "(", "'127.'", ")", ")", ":", "break", "except", ":", "time", ".", "sleep", "(", "0.3", ")", "return", "ipaddr" ]
get ip address of interface used to connect to the outside world .
train
false
23,632
# PLY/yacc parser action for the grammar rule "command : READ varlist".
# NOTE(review): PLY reads the grammar rule from this function's docstring,
# so the rule string must live in a docstring, not a comment -- it appears
# to have been stripped from this copy; restore it before use.
def p_command_read(p):
    # p[1] is the READ keyword token; p[2] is the parsed varlist.
    p[0] = ('READ', p[2])
[ "def", "p_command_read", "(", "p", ")", ":", "p", "[", "0", "]", "=", "(", "'READ'", ",", "p", "[", "2", "]", ")" ]
command : read varlist .
train
false
23,633
def clean_series(start, stop, rollup, series):
    """Validate a timeseries and trim it to the ``[start, stop)`` window.

    Asserts that successive timestamps advance by exactly *rollup*
    seconds starting at *start*, and drops any points at or beyond
    *stop*.  Returns the retained ``(timestamp, value)`` pairs.
    """
    first = to_timestamp(start)
    cutoff = to_timestamp(stop)
    cleaned = []
    for index, (timestamp, value) in enumerate(series):
        # Timestamps must march in lockstep with the rollup interval.
        assert timestamp == first + rollup * index
        if timestamp >= cutoff:
            break
        cleaned.append((timestamp, value))
    return cleaned
[ "def", "clean_series", "(", "start", ",", "stop", ",", "rollup", ",", "series", ")", ":", "start_timestamp", "=", "to_timestamp", "(", "start", ")", "stop_timestamp", "=", "to_timestamp", "(", "stop", ")", "result", "=", "[", "]", "for", "(", "i", ",", "(", "timestamp", ",", "value", ")", ")", "in", "enumerate", "(", "series", ")", ":", "assert", "(", "timestamp", "==", "(", "start_timestamp", "+", "(", "rollup", "*", "i", ")", ")", ")", "if", "(", "timestamp", ">=", "stop_timestamp", ")", ":", "break", "result", ".", "append", "(", "(", "timestamp", ",", "value", ")", ")", "return", "result" ]
validate a series .
train
false