id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
44,173
@decorators.memoize def _check_xbps(): return salt.utils.which('xbps-install')
[ "@", "decorators", ".", "memoize", "def", "_check_xbps", "(", ")", ":", "return", "salt", ".", "utils", ".", "which", "(", "'xbps-install'", ")" ]
looks to see if xbps-install is present on the system .
train
false
44,174
def convert_search_torrent_to_json(torrent): if isinstance(torrent, dict): return convert_remote_torrent_to_json(torrent) return convert_db_torrent_to_json(torrent, include_rel_score=True)
[ "def", "convert_search_torrent_to_json", "(", "torrent", ")", ":", "if", "isinstance", "(", "torrent", ",", "dict", ")", ":", "return", "convert_remote_torrent_to_json", "(", "torrent", ")", "return", "convert_db_torrent_to_json", "(", "torrent", ",", "include_rel_score", "=", "True", ")" ]
converts a given torrent to a json dictionary .
train
false
44,177
def convert_to_native_paths(lst): newlst = [] for (i, rv) in enumerate(lst): rv = os.path.join(*rv.split('/')) if (sys.platform == 'win32'): pos = rv.find(':') if (pos != (-1)): if (rv[(pos + 1)] != '\\'): rv = ((rv[:(pos + 1)] + '\\') + rv[(pos + 1):]) newlst.append(sys_normcase(rv)) return newlst
[ "def", "convert_to_native_paths", "(", "lst", ")", ":", "newlst", "=", "[", "]", "for", "(", "i", ",", "rv", ")", "in", "enumerate", "(", "lst", ")", ":", "rv", "=", "os", ".", "path", ".", "join", "(", "*", "rv", ".", "split", "(", "'/'", ")", ")", "if", "(", "sys", ".", "platform", "==", "'win32'", ")", ":", "pos", "=", "rv", ".", "find", "(", "':'", ")", "if", "(", "pos", "!=", "(", "-", "1", ")", ")", ":", "if", "(", "rv", "[", "(", "pos", "+", "1", ")", "]", "!=", "'\\\\'", ")", ":", "rv", "=", "(", "(", "rv", "[", ":", "(", "pos", "+", "1", ")", "]", "+", "'\\\\'", ")", "+", "rv", "[", "(", "pos", "+", "1", ")", ":", "]", ")", "newlst", ".", "append", "(", "sys_normcase", "(", "rv", ")", ")", "return", "newlst" ]
converts a list of / separated paths into a list of native paths and converts to lowercase if the system is case insensitive .
train
false
44,180
def check_skip_with_microversion(test_min_version, test_max_version, cfg_min_version, cfg_max_version): min_version = api_version_request.APIVersionRequest(test_min_version) max_version = api_version_request.APIVersionRequest(test_max_version) config_min_version = api_version_request.APIVersionRequest(cfg_min_version) config_max_version = api_version_request.APIVersionRequest(cfg_max_version) if ((min_version > max_version) or (config_min_version > config_max_version)): msg = ('Test Class versions [%s - %s]. Configuration versions [%s - %s].' % (min_version.get_string(), max_version.get_string(), config_min_version.get_string(), config_max_version.get_string())) raise exceptions.InvalidAPIVersionRange(msg) if ((max_version < config_min_version) or (config_max_version < min_version)): msg = ('The microversion range[%s - %s] of this test is out of the configuration range[%s - %s].' % (min_version.get_string(), max_version.get_string(), config_min_version.get_string(), config_max_version.get_string())) raise testtools.TestCase.skipException(msg)
[ "def", "check_skip_with_microversion", "(", "test_min_version", ",", "test_max_version", ",", "cfg_min_version", ",", "cfg_max_version", ")", ":", "min_version", "=", "api_version_request", ".", "APIVersionRequest", "(", "test_min_version", ")", "max_version", "=", "api_version_request", ".", "APIVersionRequest", "(", "test_max_version", ")", "config_min_version", "=", "api_version_request", ".", "APIVersionRequest", "(", "cfg_min_version", ")", "config_max_version", "=", "api_version_request", ".", "APIVersionRequest", "(", "cfg_max_version", ")", "if", "(", "(", "min_version", ">", "max_version", ")", "or", "(", "config_min_version", ">", "config_max_version", ")", ")", ":", "msg", "=", "(", "'Test Class versions [%s - %s]. Configuration versions [%s - %s].'", "%", "(", "min_version", ".", "get_string", "(", ")", ",", "max_version", ".", "get_string", "(", ")", ",", "config_min_version", ".", "get_string", "(", ")", ",", "config_max_version", ".", "get_string", "(", ")", ")", ")", "raise", "exceptions", ".", "InvalidAPIVersionRange", "(", "msg", ")", "if", "(", "(", "max_version", "<", "config_min_version", ")", "or", "(", "config_max_version", "<", "min_version", ")", ")", ":", "msg", "=", "(", "'The microversion range[%s - %s] of this test is out of the configuration range[%s - %s].'", "%", "(", "min_version", ".", "get_string", "(", ")", ",", "max_version", ".", "get_string", "(", ")", ",", "config_min_version", ".", "get_string", "(", ")", ",", "config_max_version", ".", "get_string", "(", ")", ")", ")", "raise", "testtools", ".", "TestCase", ".", "skipException", "(", "msg", ")" ]
checks api microversions range and returns whether test needs to be skip compare the test and configured microversion range and returns whether test microversion range is out of configured one .
train
false
44,181
def update_translation_project(tp, initialize_from_templates, response_url): script_name = (u'/' if (settings.FORCE_SCRIPT_NAME is None) else force_unicode(settings.FORCE_SCRIPT_NAME)) set_script_prefix(script_name) try: with useable_connection(): if initialize_from_templates: tp.init_from_templates() else: tp.update_from_disk() except Exception as e: tp_init_failed_async.send(sender=tp.__class__, instance=tp) raise e tp_inited_async.send(sender=tp.__class__, instance=tp, response_url=response_url)
[ "def", "update_translation_project", "(", "tp", ",", "initialize_from_templates", ",", "response_url", ")", ":", "script_name", "=", "(", "u'/'", "if", "(", "settings", ".", "FORCE_SCRIPT_NAME", "is", "None", ")", "else", "force_unicode", "(", "settings", ".", "FORCE_SCRIPT_NAME", ")", ")", "set_script_prefix", "(", "script_name", ")", "try", ":", "with", "useable_connection", "(", ")", ":", "if", "initialize_from_templates", ":", "tp", ".", "init_from_templates", "(", ")", "else", ":", "tp", ".", "update_from_disk", "(", ")", "except", "Exception", "as", "e", ":", "tp_init_failed_async", ".", "send", "(", "sender", "=", "tp", ".", "__class__", ",", "instance", "=", "tp", ")", "raise", "e", "tp_inited_async", ".", "send", "(", "sender", "=", "tp", ".", "__class__", ",", "instance", "=", "tp", ",", "response_url", "=", "response_url", ")" ]
wraps translation project initializing to allow it to be running as rq job .
train
false
44,182
def select_autoescape(enabled_extensions=('html', 'htm', 'xml'), disabled_extensions=(), default_for_string=True, default=False): enabled_patterns = tuple((('.' + x.lstrip('.').lower()) for x in enabled_extensions)) disabled_patterns = tuple((('.' + x.lstrip('.').lower()) for x in disabled_extensions)) def autoescape(template_name): if (template_name is None): return default_for_string template_name = template_name.lower() if template_name.endswith(enabled_patterns): return True if template_name.endswith(disabled_patterns): return False return default return autoescape
[ "def", "select_autoescape", "(", "enabled_extensions", "=", "(", "'html'", ",", "'htm'", ",", "'xml'", ")", ",", "disabled_extensions", "=", "(", ")", ",", "default_for_string", "=", "True", ",", "default", "=", "False", ")", ":", "enabled_patterns", "=", "tuple", "(", "(", "(", "'.'", "+", "x", ".", "lstrip", "(", "'.'", ")", ".", "lower", "(", ")", ")", "for", "x", "in", "enabled_extensions", ")", ")", "disabled_patterns", "=", "tuple", "(", "(", "(", "'.'", "+", "x", ".", "lstrip", "(", "'.'", ")", ".", "lower", "(", ")", ")", "for", "x", "in", "disabled_extensions", ")", ")", "def", "autoescape", "(", "template_name", ")", ":", "if", "(", "template_name", "is", "None", ")", ":", "return", "default_for_string", "template_name", "=", "template_name", ".", "lower", "(", ")", "if", "template_name", ".", "endswith", "(", "enabled_patterns", ")", ":", "return", "True", "if", "template_name", ".", "endswith", "(", "disabled_patterns", ")", ":", "return", "False", "return", "default", "return", "autoescape" ]
intelligently sets the initial value of autoescaping based on the filename of the template .
train
true
44,183
def addsitedir(sys_path, sitedir, known_paths=None): if (known_paths is None): known_paths = _init_pathinfo(sys_path) reset = 1 else: reset = 0 (sitedir, sitedircase) = makepath(sitedir) if (not (sitedircase in known_paths)): sys_path.append(sitedir) known_paths.add(sitedircase) try: names = os.listdir(sitedir) except OSError: return names = [name for name in names if name.endswith('.pth')] for name in sorted(names): addpackage(sys_path, sitedir, name, known_paths) if reset: known_paths = None return known_paths
[ "def", "addsitedir", "(", "sys_path", ",", "sitedir", ",", "known_paths", "=", "None", ")", ":", "if", "(", "known_paths", "is", "None", ")", ":", "known_paths", "=", "_init_pathinfo", "(", "sys_path", ")", "reset", "=", "1", "else", ":", "reset", "=", "0", "(", "sitedir", ",", "sitedircase", ")", "=", "makepath", "(", "sitedir", ")", "if", "(", "not", "(", "sitedircase", "in", "known_paths", ")", ")", ":", "sys_path", ".", "append", "(", "sitedir", ")", "known_paths", ".", "add", "(", "sitedircase", ")", "try", ":", "names", "=", "os", ".", "listdir", "(", "sitedir", ")", "except", "OSError", ":", "return", "names", "=", "[", "name", "for", "name", "in", "names", "if", "name", ".", "endswith", "(", "'.pth'", ")", "]", "for", "name", "in", "sorted", "(", "names", ")", ":", "addpackage", "(", "sys_path", ",", "sitedir", ",", "name", ",", "known_paths", ")", "if", "reset", ":", "known_paths", "=", "None", "return", "known_paths" ]
add sitedir argument to sys_path if missing and handle .
train
false
44,184
def format_relationship_links(related_link=None, self_link=None, rel_meta=None, self_meta=None): ret = {'links': {}} if related_link: ret['links'].update({'related': {'href': (related_link or {}), 'meta': (rel_meta or {})}}) if self_link: ret['links'].update({'self': {'href': (self_link or {}), 'meta': (self_meta or {})}}) return ret
[ "def", "format_relationship_links", "(", "related_link", "=", "None", ",", "self_link", "=", "None", ",", "rel_meta", "=", "None", ",", "self_meta", "=", "None", ")", ":", "ret", "=", "{", "'links'", ":", "{", "}", "}", "if", "related_link", ":", "ret", "[", "'links'", "]", ".", "update", "(", "{", "'related'", ":", "{", "'href'", ":", "(", "related_link", "or", "{", "}", ")", ",", "'meta'", ":", "(", "rel_meta", "or", "{", "}", ")", "}", "}", ")", "if", "self_link", ":", "ret", "[", "'links'", "]", ".", "update", "(", "{", "'self'", ":", "{", "'href'", ":", "(", "self_link", "or", "{", "}", ")", ",", "'meta'", ":", "(", "self_meta", "or", "{", "}", ")", "}", "}", ")", "return", "ret" ]
properly handles formatting of self and related links according to json api .
train
false
44,186
def predict(sess, network, X, x, y_op): dp_dict = dict_to_one(network.all_drop) feed_dict = {x: X} feed_dict.update(dp_dict) return sess.run(y_op, feed_dict=feed_dict)
[ "def", "predict", "(", "sess", ",", "network", ",", "X", ",", "x", ",", "y_op", ")", ":", "dp_dict", "=", "dict_to_one", "(", "network", ".", "all_drop", ")", "feed_dict", "=", "{", "x", ":", "X", "}", "feed_dict", ".", "update", "(", "dp_dict", ")", "return", "sess", ".", "run", "(", "y_op", ",", "feed_dict", "=", "feed_dict", ")" ]
get the features for a batch of data using network inputs: in_data: data batch .
train
false
44,188
def format_counter_name(s): def splitCamels(s): ' Convert "fooBar" to "foo bar" ' return re.sub('[a-z][A-Z]', (lambda x: ((x.group(0)[0] + ' ') + x.group(0)[1].lower())), s) return string.capwords(re.sub('_', ' ', splitCamels(s)).lower())
[ "def", "format_counter_name", "(", "s", ")", ":", "def", "splitCamels", "(", "s", ")", ":", "return", "re", ".", "sub", "(", "'[a-z][A-Z]'", ",", "(", "lambda", "x", ":", "(", "(", "x", ".", "group", "(", "0", ")", "[", "0", "]", "+", "' '", ")", "+", "x", ".", "group", "(", "0", ")", "[", "1", "]", ".", "lower", "(", ")", ")", ")", ",", "s", ")", "return", "string", ".", "capwords", "(", "re", ".", "sub", "(", "'_'", ",", "' '", ",", "splitCamels", "(", "s", ")", ")", ".", "lower", "(", ")", ")" ]
makes counter/config names human readable: foobar_baz -> "foobar baz" foo_barbaz -> "foo bar baz" .
train
false
44,189
@home_routes.route('/logout/') @login_required def logout_view(): record_user_login_logout('user_logout', login.current_user) login.logout_user() return redirect(url_for('.index'))
[ "@", "home_routes", ".", "route", "(", "'/logout/'", ")", "@", "login_required", "def", "logout_view", "(", ")", ":", "record_user_login_logout", "(", "'user_logout'", ",", "login", ".", "current_user", ")", "login", ".", "logout_user", "(", ")", "return", "redirect", "(", "url_for", "(", "'.index'", ")", ")" ]
logout method which redirect to index .
train
false
44,190
@register.tag def language(parser, token): bits = token.split_contents() if (len(bits) != 2): raise TemplateSyntaxError(("'%s' takes one argument (language)" % bits[0])) language = parser.compile_filter(bits[1]) nodelist = parser.parse(('endlanguage',)) parser.delete_first_token() return LanguageNode(nodelist, language)
[ "@", "register", ".", "tag", "def", "language", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "if", "(", "len", "(", "bits", ")", "!=", "2", ")", ":", "raise", "TemplateSyntaxError", "(", "(", "\"'%s' takes one argument (language)\"", "%", "bits", "[", "0", "]", ")", ")", "language", "=", "parser", ".", "compile_filter", "(", "bits", "[", "1", "]", ")", "nodelist", "=", "parser", ".", "parse", "(", "(", "'endlanguage'", ",", ")", ")", "parser", ".", "delete_first_token", "(", ")", "return", "LanguageNode", "(", "nodelist", ",", "language", ")" ]
this will enable the given language just for this block .
train
false
44,191
@contextmanager def context(grpc_context): try: (yield) except KeyError as key_error: grpc_context.code(status.Code.NOT_FOUND) grpc_context.details('Unable to find the item keyed by {}'.format(key_error))
[ "@", "contextmanager", "def", "context", "(", "grpc_context", ")", ":", "try", ":", "(", "yield", ")", "except", "KeyError", "as", "key_error", ":", "grpc_context", ".", "code", "(", "status", ".", "Code", ".", "NOT_FOUND", ")", "grpc_context", ".", "details", "(", "'Unable to find the item keyed by {}'", ".", "format", "(", "key_error", ")", ")" ]
work context - restful controller .
train
false
44,192
def get_pelican_cls(settings): cls = settings['PELICAN_CLASS'] if isinstance(cls, six.string_types): (module, cls_name) = cls.rsplit('.', 1) module = __import__(module) cls = getattr(module, cls_name) return cls
[ "def", "get_pelican_cls", "(", "settings", ")", ":", "cls", "=", "settings", "[", "'PELICAN_CLASS'", "]", "if", "isinstance", "(", "cls", ",", "six", ".", "string_types", ")", ":", "(", "module", ",", "cls_name", ")", "=", "cls", ".", "rsplit", "(", "'.'", ",", "1", ")", "module", "=", "__import__", "(", "module", ")", "cls", "=", "getattr", "(", "module", ",", "cls_name", ")", "return", "cls" ]
get the pelican class requested in settings .
train
true
44,194
def _GetMSBuildToolSettings(msbuild_settings, tool): return msbuild_settings.setdefault(tool.msbuild_name, {})
[ "def", "_GetMSBuildToolSettings", "(", "msbuild_settings", ",", "tool", ")", ":", "return", "msbuild_settings", ".", "setdefault", "(", "tool", ".", "msbuild_name", ",", "{", "}", ")" ]
returns an msbuild tool dictionary .
train
false
44,196
def test_scenario_with_hash_within_double_quotes(): scenario = Scenario.from_string(INLINE_COMMENTS_IGNORED_WITHIN_DOUBLE_QUOTES) (step1, step2) = scenario.steps expect(step1.sentence).to.equal(u'Given I am logged in on twitter') expect(step2.sentence).to.equal(u'When I search for the hashtag "#hammer"')
[ "def", "test_scenario_with_hash_within_double_quotes", "(", ")", ":", "scenario", "=", "Scenario", ".", "from_string", "(", "INLINE_COMMENTS_IGNORED_WITHIN_DOUBLE_QUOTES", ")", "(", "step1", ",", "step2", ")", "=", "scenario", ".", "steps", "expect", "(", "step1", ".", "sentence", ")", ".", "to", ".", "equal", "(", "u'Given I am logged in on twitter'", ")", "expect", "(", "step2", ".", "sentence", ")", ".", "to", ".", "equal", "(", "u'When I search for the hashtag \"#hammer\"'", ")" ]
scenarios have hashes within double quotes and yet dont consider them as comments .
train
false
44,197
def _getRegisteredExecutable(exeName): registered = None if sys.platform.startswith('win'): if (os.path.splitext(exeName)[1].lower() != '.exe'): exeName += '.exe' import _winreg try: key = ('SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\' + exeName) value = _winreg.QueryValue(_winreg.HKEY_LOCAL_MACHINE, key) registered = (value, ('from HKLM\\' + key)) except _winreg.error: pass if (registered and (not os.path.exists(registered[0]))): registered = None return registered
[ "def", "_getRegisteredExecutable", "(", "exeName", ")", ":", "registered", "=", "None", "if", "sys", ".", "platform", ".", "startswith", "(", "'win'", ")", ":", "if", "(", "os", ".", "path", ".", "splitext", "(", "exeName", ")", "[", "1", "]", ".", "lower", "(", ")", "!=", "'.exe'", ")", ":", "exeName", "+=", "'.exe'", "import", "_winreg", "try", ":", "key", "=", "(", "'SOFTWARE\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\App Paths\\\\'", "+", "exeName", ")", "value", "=", "_winreg", ".", "QueryValue", "(", "_winreg", ".", "HKEY_LOCAL_MACHINE", ",", "key", ")", "registered", "=", "(", "value", ",", "(", "'from HKLM\\\\'", "+", "key", ")", ")", "except", "_winreg", ".", "error", ":", "pass", "if", "(", "registered", "and", "(", "not", "os", ".", "path", ".", "exists", "(", "registered", "[", "0", "]", ")", ")", ")", ":", "registered", "=", "None", "return", "registered" ]
windows allow application paths to be registered in the registry .
train
true
44,199
def nfa_to_dfa(old_machine, debug=None): new_machine = Machines.FastMachine() state_map = StateMap(new_machine) for (key, old_state) in old_machine.initial_states.items(): new_state = state_map.old_to_new(epsilon_closure(old_state)) new_machine.make_initial_state(key, new_state) for new_state in new_machine.states: transitions = TransitionMap() for old_state in state_map.new_to_old(new_state): for (event, old_target_states) in old_state.transitions.items(): if (event and old_target_states): transitions.add_set(event, set_epsilon_closure(old_target_states)) for (event, old_states) in transitions.items(): new_machine.add_transitions(new_state, event, state_map.old_to_new(old_states)) if debug: debug.write('\n===== State Mapping =====\n') state_map.dump(debug) return new_machine
[ "def", "nfa_to_dfa", "(", "old_machine", ",", "debug", "=", "None", ")", ":", "new_machine", "=", "Machines", ".", "FastMachine", "(", ")", "state_map", "=", "StateMap", "(", "new_machine", ")", "for", "(", "key", ",", "old_state", ")", "in", "old_machine", ".", "initial_states", ".", "items", "(", ")", ":", "new_state", "=", "state_map", ".", "old_to_new", "(", "epsilon_closure", "(", "old_state", ")", ")", "new_machine", ".", "make_initial_state", "(", "key", ",", "new_state", ")", "for", "new_state", "in", "new_machine", ".", "states", ":", "transitions", "=", "TransitionMap", "(", ")", "for", "old_state", "in", "state_map", ".", "new_to_old", "(", "new_state", ")", ":", "for", "(", "event", ",", "old_target_states", ")", "in", "old_state", ".", "transitions", ".", "items", "(", ")", ":", "if", "(", "event", "and", "old_target_states", ")", ":", "transitions", ".", "add_set", "(", "event", ",", "set_epsilon_closure", "(", "old_target_states", ")", ")", "for", "(", "event", ",", "old_states", ")", "in", "transitions", ".", "items", "(", ")", ":", "new_machine", ".", "add_transitions", "(", "new_state", ",", "event", ",", "state_map", ".", "old_to_new", "(", "old_states", ")", ")", "if", "debug", ":", "debug", ".", "write", "(", "'\\n===== State Mapping =====\\n'", ")", "state_map", ".", "dump", "(", "debug", ")", "return", "new_machine" ]
given a nondeterministic machine .
train
false
44,202
def check_postgres_user(): config = odoo.tools.config if (config['db_user'] == 'postgres'): sys.stderr.write("Using the database user 'postgres' is a security risk, aborting.") sys.exit(1)
[ "def", "check_postgres_user", "(", ")", ":", "config", "=", "odoo", ".", "tools", ".", "config", "if", "(", "config", "[", "'db_user'", "]", "==", "'postgres'", ")", ":", "sys", ".", "stderr", ".", "write", "(", "\"Using the database user 'postgres' is a security risk, aborting.\"", ")", "sys", ".", "exit", "(", "1", ")" ]
exit if the configured database user is postgres .
train
false
44,203
def make_str(value): if isinstance(value, bytes): try: return value.decode(get_filesystem_encoding()) except UnicodeError: return value.decode('utf-8', 'replace') return text_type(value)
[ "def", "make_str", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "bytes", ")", ":", "try", ":", "return", "value", ".", "decode", "(", "get_filesystem_encoding", "(", ")", ")", "except", "UnicodeError", ":", "return", "value", ".", "decode", "(", "'utf-8'", ",", "'replace'", ")", "return", "text_type", "(", "value", ")" ]
converts a value into a valid string .
train
true
44,205
@task() @timeit def maybe_award_badge(badge_template, year, user): badge = get_or_create_badge(badge_template, year) if badge.is_awarded_to(user): return from kitsune.questions.models import Answer qs = Answer.objects.filter(creator=user, created__gte=date(year, 1, 1), created__lt=date((year + 1), 1, 1)) if (qs.count() >= 30): badge.award_to(user) return True
[ "@", "task", "(", ")", "@", "timeit", "def", "maybe_award_badge", "(", "badge_template", ",", "year", ",", "user", ")", ":", "badge", "=", "get_or_create_badge", "(", "badge_template", ",", "year", ")", "if", "badge", ".", "is_awarded_to", "(", "user", ")", ":", "return", "from", "kitsune", ".", "questions", ".", "models", "import", "Answer", "qs", "=", "Answer", ".", "objects", ".", "filter", "(", "creator", "=", "user", ",", "created__gte", "=", "date", "(", "year", ",", "1", ",", "1", ")", ",", "created__lt", "=", "date", "(", "(", "year", "+", "1", ")", ",", "1", ",", "1", ")", ")", "if", "(", "qs", ".", "count", "(", ")", ">=", "30", ")", ":", "badge", ".", "award_to", "(", "user", ")", "return", "True" ]
award the specific badge to the user if theyve earned it .
train
false
44,208
def msu_encoding(t): full = (c.encode('hex_codec').zfill(4) for c in t.decode('utf8')) uppr = (x.upper() for x in full) return ('%U' + '%U'.join(uppr))
[ "def", "msu_encoding", "(", "t", ")", ":", "full", "=", "(", "c", ".", "encode", "(", "'hex_codec'", ")", ".", "zfill", "(", "4", ")", "for", "c", "in", "t", ".", "decode", "(", "'utf8'", ")", ")", "uppr", "=", "(", "x", ".", "upper", "(", ")", "for", "x", "in", "full", ")", "return", "(", "'%U'", "+", "'%U'", ".", "join", "(", "uppr", ")", ")" ]
microsoft %u encoding .
train
false
44,209
def queens_fitness(genome): fitness = 0 for check_queen_col in range(len(genome)): is_attacked = 0 for other_queen_col in range(len(genome)): if (check_queen_col != other_queen_col): check_queen_row = int(genome[check_queen_col]) other_queen_row = int(genome[other_queen_col]) if (check_queen_row == other_queen_row): is_attacked = 1 break elif (abs((check_queen_row - other_queen_row)) == abs((check_queen_col - other_queen_col))): is_attacked = 1 break if (not is_attacked): fitness += 1 return fitness
[ "def", "queens_fitness", "(", "genome", ")", ":", "fitness", "=", "0", "for", "check_queen_col", "in", "range", "(", "len", "(", "genome", ")", ")", ":", "is_attacked", "=", "0", "for", "other_queen_col", "in", "range", "(", "len", "(", "genome", ")", ")", ":", "if", "(", "check_queen_col", "!=", "other_queen_col", ")", ":", "check_queen_row", "=", "int", "(", "genome", "[", "check_queen_col", "]", ")", "other_queen_row", "=", "int", "(", "genome", "[", "other_queen_col", "]", ")", "if", "(", "check_queen_row", "==", "other_queen_row", ")", ":", "is_attacked", "=", "1", "break", "elif", "(", "abs", "(", "(", "check_queen_row", "-", "other_queen_row", ")", ")", "==", "abs", "(", "(", "check_queen_col", "-", "other_queen_col", ")", ")", ")", ":", "is_attacked", "=", "1", "break", "if", "(", "not", "is_attacked", ")", ":", "fitness", "+=", "1", "return", "fitness" ]
calculate the fitness of an organization of queens on the chessboard .
train
false
44,211
def parse_vs(vs): return V(vs).version
[ "def", "parse_vs", "(", "vs", ")", ":", "return", "V", "(", "vs", ")", ".", "version" ]
version string to list .
train
false
44,212
def intersect_trust_region(x, s, Delta): a = np.dot(s, s) if (a == 0): raise ValueError('`s` is zero.') b = np.dot(x, s) c = (np.dot(x, x) - (Delta ** 2)) if (c > 0): raise ValueError('`x` is not within the trust region.') d = np.sqrt(((b * b) - (a * c))) q = (- (b + copysign(d, b))) t1 = (q / a) t2 = (c / q) if (t1 < t2): return (t1, t2) else: return (t2, t1)
[ "def", "intersect_trust_region", "(", "x", ",", "s", ",", "Delta", ")", ":", "a", "=", "np", ".", "dot", "(", "s", ",", "s", ")", "if", "(", "a", "==", "0", ")", ":", "raise", "ValueError", "(", "'`s` is zero.'", ")", "b", "=", "np", ".", "dot", "(", "x", ",", "s", ")", "c", "=", "(", "np", ".", "dot", "(", "x", ",", "x", ")", "-", "(", "Delta", "**", "2", ")", ")", "if", "(", "c", ">", "0", ")", ":", "raise", "ValueError", "(", "'`x` is not within the trust region.'", ")", "d", "=", "np", ".", "sqrt", "(", "(", "(", "b", "*", "b", ")", "-", "(", "a", "*", "c", ")", ")", ")", "q", "=", "(", "-", "(", "b", "+", "copysign", "(", "d", ",", "b", ")", ")", ")", "t1", "=", "(", "q", "/", "a", ")", "t2", "=", "(", "c", "/", "q", ")", "if", "(", "t1", "<", "t2", ")", ":", "return", "(", "t1", ",", "t2", ")", "else", ":", "return", "(", "t2", ",", "t1", ")" ]
find the intersection of a line with the boundary of a trust region .
train
false
44,213
def split(line): if (not line.strip()): raise exceptions.MpdNoCommand(u'No command given') match = WORD_RE.match(line) if (not match): raise exceptions.MpdUnknownError(u'Invalid word character') (whitespace, command, remainder) = match.groups() if whitespace: raise exceptions.MpdUnknownError(u'Letter expected') result = [command] while remainder: match = PARAM_RE.match(remainder) if (not match): msg = _determine_error_message(remainder) raise exceptions.MpdArgError(msg, command=command) (unquoted, quoted, remainder) = match.groups() result.append((unquoted or UNESCAPE_RE.sub(u'\\g<1>', quoted))) return result
[ "def", "split", "(", "line", ")", ":", "if", "(", "not", "line", ".", "strip", "(", ")", ")", ":", "raise", "exceptions", ".", "MpdNoCommand", "(", "u'No command given'", ")", "match", "=", "WORD_RE", ".", "match", "(", "line", ")", "if", "(", "not", "match", ")", ":", "raise", "exceptions", ".", "MpdUnknownError", "(", "u'Invalid word character'", ")", "(", "whitespace", ",", "command", ",", "remainder", ")", "=", "match", ".", "groups", "(", ")", "if", "whitespace", ":", "raise", "exceptions", ".", "MpdUnknownError", "(", "u'Letter expected'", ")", "result", "=", "[", "command", "]", "while", "remainder", ":", "match", "=", "PARAM_RE", ".", "match", "(", "remainder", ")", "if", "(", "not", "match", ")", ":", "msg", "=", "_determine_error_message", "(", "remainder", ")", "raise", "exceptions", ".", "MpdArgError", "(", "msg", ",", "command", "=", "command", ")", "(", "unquoted", ",", "quoted", ",", "remainder", ")", "=", "match", ".", "groups", "(", ")", "result", ".", "append", "(", "(", "unquoted", "or", "UNESCAPE_RE", ".", "sub", "(", "u'\\\\g<1>'", ",", "quoted", ")", ")", ")", "return", "result" ]
split the source string by the occurrences of the pattern .
train
false
44,214
def parse_like_term(term): if term.startswith('^'): return '^{}'.format(re.escape(term[1:])) elif term.startswith('='): return '^{}$'.format(re.escape(term[1:])) return re.escape(term)
[ "def", "parse_like_term", "(", "term", ")", ":", "if", "term", ".", "startswith", "(", "'^'", ")", ":", "return", "'^{}'", ".", "format", "(", "re", ".", "escape", "(", "term", "[", "1", ":", "]", ")", ")", "elif", "term", ".", "startswith", "(", "'='", ")", ":", "return", "'^{}$'", ".", "format", "(", "re", ".", "escape", "(", "term", "[", "1", ":", "]", ")", ")", "return", "re", ".", "escape", "(", "term", ")" ]
parse search term into tuple .
train
false
44,215
def assess_rheader(r, tabs=[]): if (r.representation == 'html'): rheader_tabs = s3_rheader_tabs(r, tabs) assess = r.record if assess: table = db.assess_assess rheader = DIV(TABLE(TR(TH(('%s: ' % T('Date & Time'))), table.datetime.represent(assess.datetime), TH(('%s: ' % T('Location'))), table.location_id.represent(assess.location_id), TH(('%s: ' % T('Assessor'))), table.assessor_person_id.represent(assess.assessor_person_id))), rheader_tabs) return rheader return None
[ "def", "assess_rheader", "(", "r", ",", "tabs", "=", "[", "]", ")", ":", "if", "(", "r", ".", "representation", "==", "'html'", ")", ":", "rheader_tabs", "=", "s3_rheader_tabs", "(", "r", ",", "tabs", ")", "assess", "=", "r", ".", "record", "if", "assess", ":", "table", "=", "db", ".", "assess_assess", "rheader", "=", "DIV", "(", "TABLE", "(", "TR", "(", "TH", "(", "(", "'%s: '", "%", "T", "(", "'Date & Time'", ")", ")", ")", ",", "table", ".", "datetime", ".", "represent", "(", "assess", ".", "datetime", ")", ",", "TH", "(", "(", "'%s: '", "%", "T", "(", "'Location'", ")", ")", ")", ",", "table", ".", "location_id", ".", "represent", "(", "assess", ".", "location_id", ")", ",", "TH", "(", "(", "'%s: '", "%", "T", "(", "'Assessor'", ")", ")", ")", ",", "table", ".", "assessor_person_id", ".", "represent", "(", "assess", ".", "assessor_person_id", ")", ")", ")", ",", "rheader_tabs", ")", "return", "rheader", "return", "None" ]
resource headers for flexible impact assessments .
train
false
44,216
def proxied_site(server_name, enabled=True, **kwargs): site(server_name, template_contents=PROXIED_SITE_TEMPLATE, enabled=enabled, **kwargs)
[ "def", "proxied_site", "(", "server_name", ",", "enabled", "=", "True", ",", "**", "kwargs", ")", ":", "site", "(", "server_name", ",", "template_contents", "=", "PROXIED_SITE_TEMPLATE", ",", "enabled", "=", "enabled", ",", "**", "kwargs", ")" ]
require an nginx site for a proxied app .
train
false
44,217
def flavor_field_data(request, include_empty_option=False): flavors = flavor_list(request) if flavors: flavors_list = sort_flavor_list(request, flavors) if include_empty_option: return ([('', _('Select Flavor'))] + flavors_list) return flavors_list if include_empty_option: return [('', _('No flavors available'))] return []
[ "def", "flavor_field_data", "(", "request", ",", "include_empty_option", "=", "False", ")", ":", "flavors", "=", "flavor_list", "(", "request", ")", "if", "flavors", ":", "flavors_list", "=", "sort_flavor_list", "(", "request", ",", "flavors", ")", "if", "include_empty_option", ":", "return", "(", "[", "(", "''", ",", "_", "(", "'Select Flavor'", ")", ")", "]", "+", "flavors_list", ")", "return", "flavors_list", "if", "include_empty_option", ":", "return", "[", "(", "''", ",", "_", "(", "'No flavors available'", ")", ")", "]", "return", "[", "]" ]
returns a list of tuples of all image flavors .
train
true
44,219
def make_proxy(global_conf, address, allowed_request_methods='', suppress_http_headers=''): allowed_request_methods = aslist(allowed_request_methods) suppress_http_headers = aslist(suppress_http_headers) return Proxy(address, allowed_request_methods=allowed_request_methods, suppress_http_headers=suppress_http_headers)
[ "def", "make_proxy", "(", "global_conf", ",", "address", ",", "allowed_request_methods", "=", "''", ",", "suppress_http_headers", "=", "''", ")", ":", "allowed_request_methods", "=", "aslist", "(", "allowed_request_methods", ")", "suppress_http_headers", "=", "aslist", "(", "suppress_http_headers", ")", "return", "Proxy", "(", "address", ",", "allowed_request_methods", "=", "allowed_request_methods", ",", "suppress_http_headers", "=", "suppress_http_headers", ")" ]
make a wsgi application that proxies to another address: address the full url ending with a trailing / allowed_request_methods: a space seperated list of request methods suppress_http_headers a space seperated list of http headers that should not be passed on to target host .
train
false
44,220
def delete_containers(logger, conf): def _deleter(url, token, container): try: client.delete_container(url, token, container) except client.ClientException as e: if (e.http_status != HTTP_CONFLICT): logger.warn(("Unable to delete container '%s'. Got http status '%d'." % (container, e.http_status))) _func_on_containers(logger, conf, 'del_concurrency', _deleter)
[ "def", "delete_containers", "(", "logger", ",", "conf", ")", ":", "def", "_deleter", "(", "url", ",", "token", ",", "container", ")", ":", "try", ":", "client", ".", "delete_container", "(", "url", ",", "token", ",", "container", ")", "except", "client", ".", "ClientException", "as", "e", ":", "if", "(", "e", ".", "http_status", "!=", "HTTP_CONFLICT", ")", ":", "logger", ".", "warn", "(", "(", "\"Unable to delete container '%s'. Got http status '%d'.\"", "%", "(", "container", ",", "e", ".", "http_status", ")", ")", ")", "_func_on_containers", "(", "logger", ",", "conf", ",", "'del_concurrency'", ",", "_deleter", ")" ]
utility function to delete benchmark containers .
train
false
44,223
def ssl_protocols(): return _SSL_PROTOCOLS.keys()
[ "def", "ssl_protocols", "(", ")", ":", "return", "_SSL_PROTOCOLS", ".", "keys", "(", ")" ]
return acronyms for ssl protocols .
train
false
44,225
def organization_update(context, data_dict): return _group_or_org_update(context, data_dict, is_org=True)
[ "def", "organization_update", "(", "context", ",", "data_dict", ")", ":", "return", "_group_or_org_update", "(", "context", ",", "data_dict", ",", "is_org", "=", "True", ")" ]
update a organization .
train
false
44,226
def var_acf(coefs, sig_u, nlags=None): (p, k, _) = coefs.shape if (nlags is None): nlags = p result = np.zeros(((nlags + 1), k, k)) result[:p] = _var_acf(coefs, sig_u) for h in range(p, (nlags + 1)): for j in range(p): result[h] += np.dot(coefs[j], result[((h - j) - 1)]) return result
[ "def", "var_acf", "(", "coefs", ",", "sig_u", ",", "nlags", "=", "None", ")", ":", "(", "p", ",", "k", ",", "_", ")", "=", "coefs", ".", "shape", "if", "(", "nlags", "is", "None", ")", ":", "nlags", "=", "p", "result", "=", "np", ".", "zeros", "(", "(", "(", "nlags", "+", "1", ")", ",", "k", ",", "k", ")", ")", "result", "[", ":", "p", "]", "=", "_var_acf", "(", "coefs", ",", "sig_u", ")", "for", "h", "in", "range", "(", "p", ",", "(", "nlags", "+", "1", ")", ")", ":", "for", "j", "in", "range", "(", "p", ")", ":", "result", "[", "h", "]", "+=", "np", ".", "dot", "(", "coefs", "[", "j", "]", ",", "result", "[", "(", "(", "h", "-", "j", ")", "-", "1", ")", "]", ")", "return", "result" ]
compute autocovariance function acf_y(h) up to nlags of stable var(p) process parameters coefs : ndarray coefficient matrices a_i sig_u : ndarray covariance of white noise process u_t nlags : int .
train
false
44,227
def update_info_dict(oldInfoDict, newInfoDict): for (k, v) in newInfoDict.items(): if any((isinstance(v, t) for t in (tuple, list, dict))): pass elif ((oldInfoDict.get(k) is None) or (v not in (None, '', '0', 0))): oldInfoDict[k] = v
[ "def", "update_info_dict", "(", "oldInfoDict", ",", "newInfoDict", ")", ":", "for", "(", "k", ",", "v", ")", "in", "newInfoDict", ".", "items", "(", ")", ":", "if", "any", "(", "(", "isinstance", "(", "v", ",", "t", ")", "for", "t", "in", "(", "tuple", ",", "list", ",", "dict", ")", ")", ")", ":", "pass", "elif", "(", "(", "oldInfoDict", ".", "get", "(", "k", ")", "is", "None", ")", "or", "(", "v", "not", "in", "(", "None", ",", "''", ",", "'0'", ",", "0", ")", ")", ")", ":", "oldInfoDict", "[", "k", "]", "=", "v" ]
only normal values will be updated here .
train
false
44,228
def check_assumptions(expr, against=None, **assumptions): if (against is not None): assumptions = against.assumptions0 expr = sympify(expr) result = True for (key, expected) in assumptions.items(): if (expected is None): continue test = getattr(expr, ('is_' + key), None) if (test is expected): continue elif (test is not None): return False result = None return result
[ "def", "check_assumptions", "(", "expr", ",", "against", "=", "None", ",", "**", "assumptions", ")", ":", "if", "(", "against", "is", "not", "None", ")", ":", "assumptions", "=", "against", ".", "assumptions0", "expr", "=", "sympify", "(", "expr", ")", "result", "=", "True", "for", "(", "key", ",", "expected", ")", "in", "assumptions", ".", "items", "(", ")", ":", "if", "(", "expected", "is", "None", ")", ":", "continue", "test", "=", "getattr", "(", "expr", ",", "(", "'is_'", "+", "key", ")", ",", "None", ")", "if", "(", "test", "is", "expected", ")", ":", "continue", "elif", "(", "test", "is", "not", "None", ")", ":", "return", "False", "result", "=", "None", "return", "result" ]
checks whether expression expr satisfies all assumptions .
train
false
44,230
def install_python(name, version=None, install_args=None, override_args=False): return install(name, version=version, source='python', install_args=install_args, override_args=override_args)
[ "def", "install_python", "(", "name", ",", "version", "=", "None", ",", "install_args", "=", "None", ",", "override_args", "=", "False", ")", ":", "return", "install", "(", "name", ",", "version", "=", "version", ",", "source", "=", "'python'", ",", "install_args", "=", "install_args", ",", "override_args", "=", "override_args", ")" ]
instructs chocolatey to install a package via pythons easy_install .
train
true
44,231
def stSpectralRollOff(X, c, fs): totalEnergy = numpy.sum((X ** 2)) fftLength = len(X) Thres = (c * totalEnergy) CumSum = (numpy.cumsum((X ** 2)) + eps) [a] = numpy.nonzero((CumSum > Thres)) if (len(a) > 0): mC = (numpy.float64(a[0]) / float(fftLength)) else: mC = 0.0 return mC
[ "def", "stSpectralRollOff", "(", "X", ",", "c", ",", "fs", ")", ":", "totalEnergy", "=", "numpy", ".", "sum", "(", "(", "X", "**", "2", ")", ")", "fftLength", "=", "len", "(", "X", ")", "Thres", "=", "(", "c", "*", "totalEnergy", ")", "CumSum", "=", "(", "numpy", ".", "cumsum", "(", "(", "X", "**", "2", ")", ")", "+", "eps", ")", "[", "a", "]", "=", "numpy", ".", "nonzero", "(", "(", "CumSum", ">", "Thres", ")", ")", "if", "(", "len", "(", "a", ")", ">", "0", ")", ":", "mC", "=", "(", "numpy", ".", "float64", "(", "a", "[", "0", "]", ")", "/", "float", "(", "fftLength", ")", ")", "else", ":", "mC", "=", "0.0", "return", "mC" ]
computes spectral roll-off .
train
true
44,232
def _field_diff(field, old, new): oldval = old.get(field) newval = new.get(field) if (isinstance(oldval, float) and isinstance(newval, float) and (abs((oldval - newval)) < FLOAT_EPSILON)): return None elif (oldval == newval): return None oldstr = old.formatted().get(field, u'') newstr = new.formatted().get(field, u'') if isinstance(oldval, basestring): (oldstr, newstr) = colordiff(oldval, newstr) else: (oldstr, newstr) = (colorize('red', oldstr), colorize('red', newstr)) return u'{0} -> {1}'.format(oldstr, newstr)
[ "def", "_field_diff", "(", "field", ",", "old", ",", "new", ")", ":", "oldval", "=", "old", ".", "get", "(", "field", ")", "newval", "=", "new", ".", "get", "(", "field", ")", "if", "(", "isinstance", "(", "oldval", ",", "float", ")", "and", "isinstance", "(", "newval", ",", "float", ")", "and", "(", "abs", "(", "(", "oldval", "-", "newval", ")", ")", "<", "FLOAT_EPSILON", ")", ")", ":", "return", "None", "elif", "(", "oldval", "==", "newval", ")", ":", "return", "None", "oldstr", "=", "old", ".", "formatted", "(", ")", ".", "get", "(", "field", ",", "u''", ")", "newstr", "=", "new", ".", "formatted", "(", ")", ".", "get", "(", "field", ",", "u''", ")", "if", "isinstance", "(", "oldval", ",", "basestring", ")", ":", "(", "oldstr", ",", "newstr", ")", "=", "colordiff", "(", "oldval", ",", "newstr", ")", "else", ":", "(", "oldstr", ",", "newstr", ")", "=", "(", "colorize", "(", "'red'", ",", "oldstr", ")", ",", "colorize", "(", "'red'", ",", "newstr", ")", ")", "return", "u'{0} -> {1}'", ".", "format", "(", "oldstr", ",", "newstr", ")" ]
given two model objects .
train
false
44,234
def evaluateFalse(s): node = ast.parse(s) node = EvaluateFalseTransformer().visit(node) node = ast.Expression(node.body[0].value) return ast.fix_missing_locations(node)
[ "def", "evaluateFalse", "(", "s", ")", ":", "node", "=", "ast", ".", "parse", "(", "s", ")", "node", "=", "EvaluateFalseTransformer", "(", ")", ".", "visit", "(", "node", ")", "node", "=", "ast", ".", "Expression", "(", "node", ".", "body", "[", "0", "]", ".", "value", ")", "return", "ast", ".", "fix_missing_locations", "(", "node", ")" ]
replaces operators with the sympy equivalent and sets evaluate=false .
train
false
44,236
def db_null_instance_uuid_scan(delete=False): engine = get_engine() meta = sqlalchemy.MetaData(bind=engine) meta.reflect(engine) processed = {} for table in reversed(meta.sorted_tables): if (table.name not in ('fixed_ips', 'shadow_fixed_ips')): processed[table.name] = _process_null_records(table, 'instance_uuid', check_fkeys=True, delete=delete) for table_name in ('instances', 'shadow_instances'): table = db_utils.get_table(engine, table_name) processed[table.name] = _process_null_records(table, 'uuid', check_fkeys=False, delete=delete) return processed
[ "def", "db_null_instance_uuid_scan", "(", "delete", "=", "False", ")", ":", "engine", "=", "get_engine", "(", ")", "meta", "=", "sqlalchemy", ".", "MetaData", "(", "bind", "=", "engine", ")", "meta", ".", "reflect", "(", "engine", ")", "processed", "=", "{", "}", "for", "table", "in", "reversed", "(", "meta", ".", "sorted_tables", ")", ":", "if", "(", "table", ".", "name", "not", "in", "(", "'fixed_ips'", ",", "'shadow_fixed_ips'", ")", ")", ":", "processed", "[", "table", ".", "name", "]", "=", "_process_null_records", "(", "table", ",", "'instance_uuid'", ",", "check_fkeys", "=", "True", ",", "delete", "=", "delete", ")", "for", "table_name", "in", "(", "'instances'", ",", "'shadow_instances'", ")", ":", "table", "=", "db_utils", ".", "get_table", "(", "engine", ",", "table_name", ")", "processed", "[", "table", ".", "name", "]", "=", "_process_null_records", "(", "table", ",", "'uuid'", ",", "check_fkeys", "=", "False", ",", "delete", "=", "delete", ")", "return", "processed" ]
utility for scanning the database to look for null instance uuid rows .
train
false
44,237
def read_environ(): enc = sys.getfilesystemencoding() esc = 'surrogateescape' try: ''.encode('utf-8', esc) except LookupError: esc = 'replace' environ = {} for (k, v) in os.environ.items(): if _needs_transcode(k): if (sys.platform == 'win32'): software = os.environ.get('SERVER_SOFTWARE', '').lower() if software.startswith('microsoft-iis/'): v = v.encode('utf-8').decode('iso-8859-1') elif software.startswith('apache/'): pass elif (software.startswith('simplehttp/') and ('python/3' in software)): v = v.encode('utf-8').decode('iso-8859-1') else: v = v.encode(enc, 'replace').decode('iso-8859-1') else: v = v.encode(enc, esc).decode('iso-8859-1') environ[k] = v return environ
[ "def", "read_environ", "(", ")", ":", "enc", "=", "sys", ".", "getfilesystemencoding", "(", ")", "esc", "=", "'surrogateescape'", "try", ":", ".", "encode", "(", "'utf-8'", ",", "esc", ")", "except", "LookupError", ":", "esc", "=", "'replace'", "environ", "=", "{", "}", "for", "(", "k", ",", "v", ")", "in", "os", ".", "environ", ".", "items", "(", ")", ":", "if", "_needs_transcode", "(", "k", ")", ":", "if", "(", "sys", ".", "platform", "==", "'win32'", ")", ":", "software", "=", "os", ".", "environ", ".", "get", "(", "'SERVER_SOFTWARE'", ",", "''", ")", ".", "lower", "(", ")", "if", "software", ".", "startswith", "(", "'microsoft-iis/'", ")", ":", "v", "=", "v", ".", "encode", "(", "'utf-8'", ")", ".", "decode", "(", "'iso-8859-1'", ")", "elif", "software", ".", "startswith", "(", "'apache/'", ")", ":", "pass", "elif", "(", "software", ".", "startswith", "(", "'simplehttp/'", ")", "and", "(", "'python/3'", "in", "software", ")", ")", ":", "v", "=", "v", ".", "encode", "(", "'utf-8'", ")", ".", "decode", "(", "'iso-8859-1'", ")", "else", ":", "v", "=", "v", ".", "encode", "(", "enc", ",", "'replace'", ")", ".", "decode", "(", "'iso-8859-1'", ")", "else", ":", "v", "=", "v", ".", "encode", "(", "enc", ",", "esc", ")", ".", "decode", "(", "'iso-8859-1'", ")", "environ", "[", "k", "]", "=", "v", "return", "environ" ]
read environment .
train
false
44,239
def gf_lcm(f, g, p, K): if ((not f) or (not g)): return [] h = gf_quo(gf_mul(f, g, p, K), gf_gcd(f, g, p, K), p, K) return gf_monic(h, p, K)[1]
[ "def", "gf_lcm", "(", "f", ",", "g", ",", "p", ",", "K", ")", ":", "if", "(", "(", "not", "f", ")", "or", "(", "not", "g", ")", ")", ":", "return", "[", "]", "h", "=", "gf_quo", "(", "gf_mul", "(", "f", ",", "g", ",", "p", ",", "K", ")", ",", "gf_gcd", "(", "f", ",", "g", ",", "p", ",", "K", ")", ",", "p", ",", "K", ")", "return", "gf_monic", "(", "h", ",", "p", ",", "K", ")", "[", "1", "]" ]
compute polynomial lcm in gf(p)[x] .
train
false
44,240
def get_exe(base, target_name): return cx.Executable('qutebrowser/__main__.py', base=base, targetName=target_name, shortcutName='qutebrowser', shortcutDir='ProgramMenuFolder', icon=os.path.join(BASEDIR, 'icons', 'qutebrowser.ico'))
[ "def", "get_exe", "(", "base", ",", "target_name", ")", ":", "return", "cx", ".", "Executable", "(", "'qutebrowser/__main__.py'", ",", "base", "=", "base", ",", "targetName", "=", "target_name", ",", "shortcutName", "=", "'qutebrowser'", ",", "shortcutDir", "=", "'ProgramMenuFolder'", ",", "icon", "=", "os", ".", "path", ".", "join", "(", "BASEDIR", ",", "'icons'", ",", "'qutebrowser.ico'", ")", ")" ]
get the qutebrowser cx .
train
false
44,242
def replication_details(host=None, core_name=None): ret = _get_return_dict() if (_get_none_or_value(core_name) is None): success = True for name in __opts__['solr.cores']: resp = _replication_request('details', host=host, core_name=name) data = {name: {'data': resp['data']}} ret = _update_return_dict(ret, success, data, resp['errors'], resp['warnings']) else: resp = _replication_request('details', host=host, core_name=core_name) if resp['success']: ret = _update_return_dict(ret, resp['success'], resp['data'], resp['errors'], resp['warnings']) else: return resp return ret
[ "def", "replication_details", "(", "host", "=", "None", ",", "core_name", "=", "None", ")", ":", "ret", "=", "_get_return_dict", "(", ")", "if", "(", "_get_none_or_value", "(", "core_name", ")", "is", "None", ")", ":", "success", "=", "True", "for", "name", "in", "__opts__", "[", "'solr.cores'", "]", ":", "resp", "=", "_replication_request", "(", "'details'", ",", "host", "=", "host", ",", "core_name", "=", "name", ")", "data", "=", "{", "name", ":", "{", "'data'", ":", "resp", "[", "'data'", "]", "}", "}", "ret", "=", "_update_return_dict", "(", "ret", ",", "success", ",", "data", ",", "resp", "[", "'errors'", "]", ",", "resp", "[", "'warnings'", "]", ")", "else", ":", "resp", "=", "_replication_request", "(", "'details'", ",", "host", "=", "host", ",", "core_name", "=", "core_name", ")", "if", "resp", "[", "'success'", "]", ":", "ret", "=", "_update_return_dict", "(", "ret", ",", "resp", "[", "'success'", "]", ",", "resp", "[", "'data'", "]", ",", "resp", "[", "'errors'", "]", ",", "resp", "[", "'warnings'", "]", ")", "else", ":", "return", "resp", "return", "ret" ]
get the full replication details .
train
true
44,244
def test_only_one_value_intrp(Chart): chart = Chart(interpolate='cubic') chart.add('S', [1]) q = chart.render_pyquery() assert (len(q('.legend')) == 1)
[ "def", "test_only_one_value_intrp", "(", "Chart", ")", ":", "chart", "=", "Chart", "(", "interpolate", "=", "'cubic'", ")", "chart", ".", "add", "(", "'S'", ",", "[", "1", "]", ")", "q", "=", "chart", ".", "render_pyquery", "(", ")", "assert", "(", "len", "(", "q", "(", "'.legend'", ")", ")", "==", "1", ")" ]
test interpolated chart rendering with only one value .
train
false
44,245
def action_logging(f): @functools.wraps(f) def wrapper(*args, **kwargs): session = settings.Session() if (current_user and hasattr(current_user, 'username')): user = current_user.username else: user = 'anonymous' log = models.Log(event=f.__name__, task_instance=None, owner=user, extra=str(list(request.args.items())), task_id=request.args.get('task_id'), dag_id=request.args.get('dag_id')) if ('execution_date' in request.args): log.execution_date = dateparser.parse(request.args.get('execution_date')) session.add(log) session.commit() return f(*args, **kwargs) return wrapper
[ "def", "action_logging", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "**", "kwargs", ")", ":", "session", "=", "settings", ".", "Session", "(", ")", "if", "(", "current_user", "and", "hasattr", "(", "current_user", ",", "'username'", ")", ")", ":", "user", "=", "current_user", ".", "username", "else", ":", "user", "=", "'anonymous'", "log", "=", "models", ".", "Log", "(", "event", "=", "f", ".", "__name__", ",", "task_instance", "=", "None", ",", "owner", "=", "user", ",", "extra", "=", "str", "(", "list", "(", "request", ".", "args", ".", "items", "(", ")", ")", ")", ",", "task_id", "=", "request", ".", "args", ".", "get", "(", "'task_id'", ")", ",", "dag_id", "=", "request", ".", "args", ".", "get", "(", "'dag_id'", ")", ")", "if", "(", "'execution_date'", "in", "request", ".", "args", ")", ":", "log", ".", "execution_date", "=", "dateparser", ".", "parse", "(", "request", ".", "args", ".", "get", "(", "'execution_date'", ")", ")", "session", ".", "add", "(", "log", ")", "session", ".", "commit", "(", ")", "return", "f", "(", "*", "args", ",", "**", "kwargs", ")", "return", "wrapper" ]
decorator to log user actions .
train
false
44,246
def unique_lineage_name(path, filename, chmod=420, mode='w'): preferred_path = os.path.join(path, ('%s.conf' % filename)) try: return (safe_open(preferred_path, chmod=chmod), preferred_path) except OSError as err: if (err.errno != errno.EEXIST): raise return _unique_file(path, filename_pat=(lambda count: ('%s-%04d.conf' % (filename, count))), count=1, chmod=chmod, mode=mode)
[ "def", "unique_lineage_name", "(", "path", ",", "filename", ",", "chmod", "=", "420", ",", "mode", "=", "'w'", ")", ":", "preferred_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "(", "'%s.conf'", "%", "filename", ")", ")", "try", ":", "return", "(", "safe_open", "(", "preferred_path", ",", "chmod", "=", "chmod", ")", ",", "preferred_path", ")", "except", "OSError", "as", "err", ":", "if", "(", "err", ".", "errno", "!=", "errno", ".", "EEXIST", ")", ":", "raise", "return", "_unique_file", "(", "path", ",", "filename_pat", "=", "(", "lambda", "count", ":", "(", "'%s-%04d.conf'", "%", "(", "filename", ",", "count", ")", ")", ")", ",", "count", "=", "1", ",", "chmod", "=", "chmod", ",", "mode", "=", "mode", ")" ]
safely finds a unique file using lineage convention .
train
false
44,247
def modulestore(): def load_function(engine_path): '\n Load the given engine\n ' (module_path, _, name) = engine_path.rpartition('.') return getattr(importlib.import_module(module_path), name) if (ModuleStoreNoSettings.modulestore is None): class_ = load_function(ModuleStoreNoSettings.MODULESTORE['ENGINE']) options = {} options.update(ModuleStoreNoSettings.MODULESTORE['OPTIONS']) options['render_template'] = render_to_template_mock ModuleStoreNoSettings.modulestore = class_(None, ModuleStoreNoSettings.MODULESTORE['DOC_STORE_CONFIG'], branch_setting_func=(lambda : ModuleStoreEnum.Branch.draft_preferred), **options) return ModuleStoreNoSettings.modulestore
[ "def", "modulestore", "(", ")", ":", "def", "load_function", "(", "engine_path", ")", ":", "(", "module_path", ",", "_", ",", "name", ")", "=", "engine_path", ".", "rpartition", "(", "'.'", ")", "return", "getattr", "(", "importlib", ".", "import_module", "(", "module_path", ")", ",", "name", ")", "if", "(", "ModuleStoreNoSettings", ".", "modulestore", "is", "None", ")", ":", "class_", "=", "load_function", "(", "ModuleStoreNoSettings", ".", "MODULESTORE", "[", "'ENGINE'", "]", ")", "options", "=", "{", "}", "options", ".", "update", "(", "ModuleStoreNoSettings", ".", "MODULESTORE", "[", "'OPTIONS'", "]", ")", "options", "[", "'render_template'", "]", "=", "render_to_template_mock", "ModuleStoreNoSettings", ".", "modulestore", "=", "class_", "(", "None", ",", "ModuleStoreNoSettings", ".", "MODULESTORE", "[", "'DOC_STORE_CONFIG'", "]", ",", "branch_setting_func", "=", "(", "lambda", ":", "ModuleStoreEnum", ".", "Branch", ".", "draft_preferred", ")", ",", "**", "options", ")", "return", "ModuleStoreNoSettings", ".", "modulestore" ]
mock the django dependent global modulestore function to disentangle tests from django .
train
false
44,248
def unbroadcast(x, *axes): rval = Rebroadcast(*[(axis, False) for axis in axes])(x) return theano.tensor.opt.apply_rebroadcast_opt(rval)
[ "def", "unbroadcast", "(", "x", ",", "*", "axes", ")", ":", "rval", "=", "Rebroadcast", "(", "*", "[", "(", "axis", ",", "False", ")", "for", "axis", "in", "axes", "]", ")", "(", "x", ")", "return", "theano", ".", "tensor", ".", "opt", ".", "apply_rebroadcast_opt", "(", "rval", ")" ]
make the input impossible to broadcast in the specified axes .
train
false
44,249
def ones_like(a, dtype=None): if (dtype is None): dtype = a.dtype return ones(a.shape, dtype)
[ "def", "ones_like", "(", "a", ",", "dtype", "=", "None", ")", ":", "if", "(", "dtype", "is", "None", ")", ":", "dtype", "=", "a", ".", "dtype", "return", "ones", "(", "a", ".", "shape", ",", "dtype", ")" ]
creates a one-filled :class:cupy .
train
false
44,251
def s_mutate(): return blocks.CURRENT.mutate()
[ "def", "s_mutate", "(", ")", ":", "return", "blocks", ".", "CURRENT", ".", "mutate", "(", ")" ]
mutate the current request and return false if mutations are exhausted .
train
false
44,253
def _calculate_to_transitions(trans_probs): transitions = dict() for (from_state, to_state) in trans_probs: try: transitions[to_state].append(from_state) except KeyError: transitions[to_state] = [from_state] return transitions
[ "def", "_calculate_to_transitions", "(", "trans_probs", ")", ":", "transitions", "=", "dict", "(", ")", "for", "(", "from_state", ",", "to_state", ")", "in", "trans_probs", ":", "try", ":", "transitions", "[", "to_state", "]", ".", "append", "(", "from_state", ")", "except", "KeyError", ":", "transitions", "[", "to_state", "]", "=", "[", "from_state", "]", "return", "transitions" ]
calculate which to transitions are allowed for each state this looks through all of the trans_probs .
train
false
44,254
def test_recalculate_max_depth(): X = iris.data clf = IsolationForest().fit(X) for est in clf.estimators_: assert_equal(est.max_depth, int(np.ceil(np.log2(X.shape[0]))))
[ "def", "test_recalculate_max_depth", "(", ")", ":", "X", "=", "iris", ".", "data", "clf", "=", "IsolationForest", "(", ")", ".", "fit", "(", "X", ")", "for", "est", "in", "clf", ".", "estimators_", ":", "assert_equal", "(", "est", ".", "max_depth", ",", "int", "(", "np", ".", "ceil", "(", "np", ".", "log2", "(", "X", ".", "shape", "[", "0", "]", ")", ")", ")", ")" ]
check max_depth recalculation when max_samples is reset to n_samples .
train
false
44,256
def test_tf_mxne_vs_mxne(): alpha_space = 60.0 alpha_time = 0.0 (M, G, active_set) = _generate_tf_data() (X_hat_tf, active_set_hat_tf, E) = tf_mixed_norm_solver(M, G, alpha_space, alpha_time, maxit=200, tol=1e-08, verbose=True, debias=False, n_orient=1, tstep=4, wsize=32) (X_hat_l21, _, _) = mixed_norm_solver(M, G, alpha_space, maxit=200, tol=1e-08, verbose=False, n_orient=1, active_set_size=None, debias=False) assert_allclose(X_hat_tf, X_hat_l21, rtol=0.1)
[ "def", "test_tf_mxne_vs_mxne", "(", ")", ":", "alpha_space", "=", "60.0", "alpha_time", "=", "0.0", "(", "M", ",", "G", ",", "active_set", ")", "=", "_generate_tf_data", "(", ")", "(", "X_hat_tf", ",", "active_set_hat_tf", ",", "E", ")", "=", "tf_mixed_norm_solver", "(", "M", ",", "G", ",", "alpha_space", ",", "alpha_time", ",", "maxit", "=", "200", ",", "tol", "=", "1e-08", ",", "verbose", "=", "True", ",", "debias", "=", "False", ",", "n_orient", "=", "1", ",", "tstep", "=", "4", ",", "wsize", "=", "32", ")", "(", "X_hat_l21", ",", "_", ",", "_", ")", "=", "mixed_norm_solver", "(", "M", ",", "G", ",", "alpha_space", ",", "maxit", "=", "200", ",", "tol", "=", "1e-08", ",", "verbose", "=", "False", ",", "n_orient", "=", "1", ",", "active_set_size", "=", "None", ",", "debias", "=", "False", ")", "assert_allclose", "(", "X_hat_tf", ",", "X_hat_l21", ",", "rtol", "=", "0.1", ")" ]
test equivalence of tf-mxne and mxne .
train
false
44,258
def count_seqs(fasta_filepath, parser=parse_fasta): return count_seqs_from_file(open(fasta_filepath, 'U'), parser=parser)
[ "def", "count_seqs", "(", "fasta_filepath", ",", "parser", "=", "parse_fasta", ")", ":", "return", "count_seqs_from_file", "(", "open", "(", "fasta_filepath", ",", "'U'", ")", ",", "parser", "=", "parser", ")" ]
count the sequences in fasta_filepath fasta_filepath: string indicating the full path to the file .
train
false
44,259
def DumpPyTree(tree, target_stream=sys.stdout): dumper = PyTreeDumper(target_stream) dumper.Visit(tree)
[ "def", "DumpPyTree", "(", "tree", ",", "target_stream", "=", "sys", ".", "stdout", ")", ":", "dumper", "=", "PyTreeDumper", "(", "target_stream", ")", "dumper", ".", "Visit", "(", "tree", ")" ]
convenience function for dumping a given pytree .
train
false
44,260
def huge_deployment(): return _huge(Deployment(), Node(hostname=u'192.0.2.31'))
[ "def", "huge_deployment", "(", ")", ":", "return", "_huge", "(", "Deployment", "(", ")", ",", "Node", "(", "hostname", "=", "u'192.0.2.31'", ")", ")" ]
return a configuration with many containers .
train
false
44,261
def parse_csr(csr_file, certificate_type, http_dc_validation=False): valid_certs = set(['QuickSSL Premium', 'RapidSSL', 'RapidSSL Wildcard', 'PremiumSSL', 'InstantSSL', 'PositiveSSL', 'PositiveSSL Wildcard', 'True BusinessID with EV', 'True BusinessID', 'True BusinessID Wildcard', 'True BusinessID Multi Domain', 'True BusinessID with EV Multi Domain', 'Secure Site', 'Secure Site Pro', 'Secure Site with EV', 'Secure Site Pro with EV', 'EssentialSSL', 'EssentialSSL Wildcard', 'InstantSSL Pro', 'PremiumSSL Wildcard', 'EV SSL', 'EV SSL SGC', 'SSL123', 'SSL Web Server', 'SGC Supercert', 'SSL Webserver EV', 'EV Multi Domain SSL', 'Multi Domain SSL', 'PositiveSSL Multi Domain', 'Unified Communications']) if (certificate_type not in valid_certs): salt.utils.namecheap.log.error(('Invalid option for certificate_type=' + certificate_type)) raise Exception(('Invalid option for certificate_type=' + certificate_type)) opts = salt.utils.namecheap.get_opts('namecheap.ssl.parseCSR') csr_handle = open(csr_file, 'rb') opts['csr'] = csr_handle.read() opts['CertificateType'] = certificate_type if http_dc_validation: opts['HTTPDCValidation'] = 'true' csr_handle.close() response_xml = salt.utils.namecheap.post_request(opts) sslparseresult = response_xml.getElementsByTagName('SSLParseCSRResult')[0] return salt.utils.namecheap.xml_to_dict(sslparseresult)
[ "def", "parse_csr", "(", "csr_file", ",", "certificate_type", ",", "http_dc_validation", "=", "False", ")", ":", "valid_certs", "=", "set", "(", "[", "'QuickSSL Premium'", ",", "'RapidSSL'", ",", "'RapidSSL Wildcard'", ",", "'PremiumSSL'", ",", "'InstantSSL'", ",", "'PositiveSSL'", ",", "'PositiveSSL Wildcard'", ",", "'True BusinessID with EV'", ",", "'True BusinessID'", ",", "'True BusinessID Wildcard'", ",", "'True BusinessID Multi Domain'", ",", "'True BusinessID with EV Multi Domain'", ",", "'Secure Site'", ",", "'Secure Site Pro'", ",", "'Secure Site with EV'", ",", "'Secure Site Pro with EV'", ",", "'EssentialSSL'", ",", "'EssentialSSL Wildcard'", ",", "'InstantSSL Pro'", ",", "'PremiumSSL Wildcard'", ",", "'EV SSL'", ",", "'EV SSL SGC'", ",", "'SSL123'", ",", "'SSL Web Server'", ",", "'SGC Supercert'", ",", "'SSL Webserver EV'", ",", "'EV Multi Domain SSL'", ",", "'Multi Domain SSL'", ",", "'PositiveSSL Multi Domain'", ",", "'Unified Communications'", "]", ")", "if", "(", "certificate_type", "not", "in", "valid_certs", ")", ":", "salt", ".", "utils", ".", "namecheap", ".", "log", ".", "error", "(", "(", "'Invalid option for certificate_type='", "+", "certificate_type", ")", ")", "raise", "Exception", "(", "(", "'Invalid option for certificate_type='", "+", "certificate_type", ")", ")", "opts", "=", "salt", ".", "utils", ".", "namecheap", ".", "get_opts", "(", "'namecheap.ssl.parseCSR'", ")", "csr_handle", "=", "open", "(", "csr_file", ",", "'rb'", ")", "opts", "[", "'csr'", "]", "=", "csr_handle", ".", "read", "(", ")", "opts", "[", "'CertificateType'", "]", "=", "certificate_type", "if", "http_dc_validation", ":", "opts", "[", "'HTTPDCValidation'", "]", "=", "'true'", "csr_handle", ".", "close", "(", ")", "response_xml", "=", "salt", ".", "utils", ".", "namecheap", ".", "post_request", "(", "opts", ")", "sslparseresult", "=", "response_xml", ".", "getElementsByTagName", "(", "'SSLParseCSRResult'", ")", "[", "0", "]", "return", "salt", ".", "utils", ".", "namecheap", ".", "xml_to_dict", "(", "sslparseresult", ")" ]
parses the csr returns a dictionary of result values required parameters: csr_file string certificate signing request file certificate_type string type of ssl certificate .
train
false
44,262
def integral_image(img): S = img for i in range(img.ndim): S = S.cumsum(axis=i) return S
[ "def", "integral_image", "(", "img", ")", ":", "S", "=", "img", "for", "i", "in", "range", "(", "img", ".", "ndim", ")", ":", "S", "=", "S", ".", "cumsum", "(", "axis", "=", "i", ")", "return", "S" ]
integral image / summed area table .
train
false
44,263
def clear_buffer(remote_conn): if remote_conn.recv_ready(): return remote_conn.recv(MAX_BUFFER)
[ "def", "clear_buffer", "(", "remote_conn", ")", ":", "if", "remote_conn", ".", "recv_ready", "(", ")", ":", "return", "remote_conn", ".", "recv", "(", "MAX_BUFFER", ")" ]
clear any data in the receive buffer .
train
false
44,265
@require_context def attachment_specs_delete(context, attachment_id, key): session = get_session() with session.begin(): _attachment_specs_get_item(context, attachment_id, key, session) _attachment_specs_query(context, attachment_id, session).filter_by(key=key).update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')})
[ "@", "require_context", "def", "attachment_specs_delete", "(", "context", ",", "attachment_id", ",", "key", ")", ":", "session", "=", "get_session", "(", ")", "with", "session", ".", "begin", "(", ")", ":", "_attachment_specs_get_item", "(", "context", ",", "attachment_id", ",", "key", ",", "session", ")", "_attachment_specs_query", "(", "context", ",", "attachment_id", ",", "session", ")", ".", "filter_by", "(", "key", "=", "key", ")", ".", "update", "(", "{", "'deleted'", ":", "True", ",", "'deleted_at'", ":", "timeutils", ".", "utcnow", "(", ")", ",", "'updated_at'", ":", "literal_column", "(", "'updated_at'", ")", "}", ")" ]
delete attachment_specs for the specified attachment record .
train
false
44,267
def parse_numeral(value, int_enabled=True, roman_enabled=True, word_enabled=True, clean=True): if int_enabled: try: if clean: match = _clean_re.match(value) if match: clean_value = match.group(1) return int(clean_value) return int(value) except ValueError: pass if roman_enabled: try: if clean: for word in value.split(): try: return __parse_roman(word.upper()) except ValueError: pass return __parse_roman(value) except ValueError: pass if word_enabled: try: if clean: for word in value.split(): try: return __parse_word(word) except ValueError: pass return __parse_word(value) except ValueError: pass raise ValueError(('Invalid numeral: ' + value))
[ "def", "parse_numeral", "(", "value", ",", "int_enabled", "=", "True", ",", "roman_enabled", "=", "True", ",", "word_enabled", "=", "True", ",", "clean", "=", "True", ")", ":", "if", "int_enabled", ":", "try", ":", "if", "clean", ":", "match", "=", "_clean_re", ".", "match", "(", "value", ")", "if", "match", ":", "clean_value", "=", "match", ".", "group", "(", "1", ")", "return", "int", "(", "clean_value", ")", "return", "int", "(", "value", ")", "except", "ValueError", ":", "pass", "if", "roman_enabled", ":", "try", ":", "if", "clean", ":", "for", "word", "in", "value", ".", "split", "(", ")", ":", "try", ":", "return", "__parse_roman", "(", "word", ".", "upper", "(", ")", ")", "except", "ValueError", ":", "pass", "return", "__parse_roman", "(", "value", ")", "except", "ValueError", ":", "pass", "if", "word_enabled", ":", "try", ":", "if", "clean", ":", "for", "word", "in", "value", ".", "split", "(", ")", ":", "try", ":", "return", "__parse_word", "(", "word", ")", "except", "ValueError", ":", "pass", "return", "__parse_word", "(", "value", ")", "except", "ValueError", ":", "pass", "raise", "ValueError", "(", "(", "'Invalid numeral: '", "+", "value", ")", ")" ]
parse a numeric value into integer .
train
false
44,269
def custom_popen(cmd): creationflags = 0 if (sys.platform == 'win32'): creationflags = 134217728 try: p = Popen(cmd, bufsize=0, stdout=PIPE, stdin=PIPE, stderr=STDOUT, creationflags=creationflags) except OSError: ex = sys.exc_info()[1] if (ex.errno == errno.ENOENT): raise RarCannotExec(('Unrar not installed? (rarfile.UNRAR_TOOL=%r)' % UNRAR_TOOL)) raise return p
[ "def", "custom_popen", "(", "cmd", ")", ":", "creationflags", "=", "0", "if", "(", "sys", ".", "platform", "==", "'win32'", ")", ":", "creationflags", "=", "134217728", "try", ":", "p", "=", "Popen", "(", "cmd", ",", "bufsize", "=", "0", ",", "stdout", "=", "PIPE", ",", "stdin", "=", "PIPE", ",", "stderr", "=", "STDOUT", ",", "creationflags", "=", "creationflags", ")", "except", "OSError", ":", "ex", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "if", "(", "ex", ".", "errno", "==", "errno", ".", "ENOENT", ")", ":", "raise", "RarCannotExec", "(", "(", "'Unrar not installed? (rarfile.UNRAR_TOOL=%r)'", "%", "UNRAR_TOOL", ")", ")", "raise", "return", "p" ]
disconnect cmd from parent fds .
train
false
44,270
def is_keyword(text): import keyword return (text in keyword.kwlist)
[ "def", "is_keyword", "(", "text", ")", ":", "import", "keyword", "return", "(", "text", "in", "keyword", ".", "kwlist", ")" ]
test if passed string is the name of a python keyword .
train
false
44,272
def _AddSerializePartialToStringMethod(message_descriptor, cls): def SerializePartialToString(self): out = StringIO() self._InternalSerialize(out.write) return out.getvalue() cls.SerializePartialToString = SerializePartialToString def InternalSerialize(self, write_bytes): for (field_descriptor, field_value) in self.ListFields(): field_descriptor._encoder(write_bytes, field_value) for (tag_bytes, value_bytes) in self._unknown_fields: write_bytes(tag_bytes) write_bytes(value_bytes) cls._InternalSerialize = InternalSerialize
[ "def", "_AddSerializePartialToStringMethod", "(", "message_descriptor", ",", "cls", ")", ":", "def", "SerializePartialToString", "(", "self", ")", ":", "out", "=", "StringIO", "(", ")", "self", ".", "_InternalSerialize", "(", "out", ".", "write", ")", "return", "out", ".", "getvalue", "(", ")", "cls", ".", "SerializePartialToString", "=", "SerializePartialToString", "def", "InternalSerialize", "(", "self", ",", "write_bytes", ")", ":", "for", "(", "field_descriptor", ",", "field_value", ")", "in", "self", ".", "ListFields", "(", ")", ":", "field_descriptor", ".", "_encoder", "(", "write_bytes", ",", "field_value", ")", "for", "(", "tag_bytes", ",", "value_bytes", ")", "in", "self", ".", "_unknown_fields", ":", "write_bytes", "(", "tag_bytes", ")", "write_bytes", "(", "value_bytes", ")", "cls", ".", "_InternalSerialize", "=", "InternalSerialize" ]
helper for _addmessagemethods() .
train
true
44,275
def one_or_more(schema, unique_items=False): schema.setdefault(u'title', u'single value') return {u'oneOf': [{u'title': u'multiple values', u'type': u'array', u'items': schema, u'minItems': 1, u'uniqueItems': unique_items}, schema]}
[ "def", "one_or_more", "(", "schema", ",", "unique_items", "=", "False", ")", ":", "schema", ".", "setdefault", "(", "u'title'", ",", "u'single value'", ")", "return", "{", "u'oneOf'", ":", "[", "{", "u'title'", ":", "u'multiple values'", ",", "u'type'", ":", "u'array'", ",", "u'items'", ":", "schema", ",", "u'minItems'", ":", "1", ",", "u'uniqueItems'", ":", "unique_items", "}", ",", "schema", "]", "}" ]
helper function to construct a schema that validates items matching schema or an array containing items matching schema .
train
false
44,276
def get_source_node(node): source = (node.registered_from or node.forked_from) if (source is None): return None if check_node(source): return source return get_source_node(source)
[ "def", "get_source_node", "(", "node", ")", ":", "source", "=", "(", "node", ".", "registered_from", "or", "node", ".", "forked_from", ")", "if", "(", "source", "is", "None", ")", ":", "return", "None", "if", "check_node", "(", "source", ")", ":", "return", "source", "return", "get_source_node", "(", "source", ")" ]
recursively search for source node .
train
false
44,277
def _determine_local_import_names(start_dir): file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] return [basename for (basename, extension) in file_ext_pairs if ((extension == '.py') or (os.path.isdir(os.path.join(start_dir, basename)) and (basename not in '__pycache__')))]
[ "def", "_determine_local_import_names", "(", "start_dir", ")", ":", "file_ext_pairs", "=", "[", "os", ".", "path", ".", "splitext", "(", "path", ")", "for", "path", "in", "os", ".", "listdir", "(", "start_dir", ")", "]", "return", "[", "basename", "for", "(", "basename", ",", "extension", ")", "in", "file_ext_pairs", "if", "(", "(", "extension", "==", "'.py'", ")", "or", "(", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "start_dir", ",", "basename", ")", ")", "and", "(", "basename", "not", "in", "'__pycache__'", ")", ")", ")", "]" ]
determines all import names that should be considered "local" .
train
false
44,278
def get_config_var(name): return get_config_vars().get(name)
[ "def", "get_config_var", "(", "name", ")", ":", "return", "get_config_vars", "(", ")", ".", "get", "(", "name", ")" ]
return the value of a single variable using the dictionary returned by get_config_vars() .
train
false
44,279
def dict_from_yaml(yaml_str): try: retrieved_dict = yaml.safe_load(yaml_str) assert isinstance(retrieved_dict, dict) return retrieved_dict except yaml.YAMLError as e: raise InvalidInputException(e)
[ "def", "dict_from_yaml", "(", "yaml_str", ")", ":", "try", ":", "retrieved_dict", "=", "yaml", ".", "safe_load", "(", "yaml_str", ")", "assert", "isinstance", "(", "retrieved_dict", ",", "dict", ")", "return", "retrieved_dict", "except", "yaml", ".", "YAMLError", "as", "e", ":", "raise", "InvalidInputException", "(", "e", ")" ]
gets the dict representation of a yaml string .
train
false
44,281
def _set_version_locations(config): split_branches = False version_paths = [_get_version_branch_path(config)] for release in RELEASES: for branch in MIGRATION_BRANCHES: version_path = _get_version_branch_path(config, release, branch) if (split_branches or os.path.exists(version_path)): split_branches = True version_paths.append(version_path) config.set_main_option('version_locations', ' '.join(version_paths))
[ "def", "_set_version_locations", "(", "config", ")", ":", "split_branches", "=", "False", "version_paths", "=", "[", "_get_version_branch_path", "(", "config", ")", "]", "for", "release", "in", "RELEASES", ":", "for", "branch", "in", "MIGRATION_BRANCHES", ":", "version_path", "=", "_get_version_branch_path", "(", "config", ",", "release", ",", "branch", ")", "if", "(", "split_branches", "or", "os", ".", "path", ".", "exists", "(", "version_path", ")", ")", ":", "split_branches", "=", "True", "version_paths", ".", "append", "(", "version_path", ")", "config", ".", "set_main_option", "(", "'version_locations'", ",", "' '", ".", "join", "(", "version_paths", ")", ")" ]
make alembic see all revisions in all migration branches .
train
false
44,282
def itersubclasses(cls, _seen=None): if (not isinstance(cls, type)): raise TypeError('itersubclasses must be called with new-style classes, not {:.100r}'.format(cls)) if (_seen is None): _seen = set() try: subs = cls.__subclasses__() except TypeError: subs = cls.__subclasses__(cls) for sub in sorted(subs, key=operator.attrgetter('__name__')): if (sub not in _seen): _seen.add(sub) (yield sub) for sub in itersubclasses(sub, _seen): (yield sub)
[ "def", "itersubclasses", "(", "cls", ",", "_seen", "=", "None", ")", ":", "if", "(", "not", "isinstance", "(", "cls", ",", "type", ")", ")", ":", "raise", "TypeError", "(", "'itersubclasses must be called with new-style classes, not {:.100r}'", ".", "format", "(", "cls", ")", ")", "if", "(", "_seen", "is", "None", ")", ":", "_seen", "=", "set", "(", ")", "try", ":", "subs", "=", "cls", ".", "__subclasses__", "(", ")", "except", "TypeError", ":", "subs", "=", "cls", ".", "__subclasses__", "(", "cls", ")", "for", "sub", "in", "sorted", "(", "subs", ",", "key", "=", "operator", ".", "attrgetter", "(", "'__name__'", ")", ")", ":", "if", "(", "sub", "not", "in", "_seen", ")", ":", "_seen", ".", "add", "(", "sub", ")", "(", "yield", "sub", ")", "for", "sub", "in", "itersubclasses", "(", "sub", ",", "_seen", ")", ":", "(", "yield", "sub", ")" ]
itersubclasses generator over all subclasses of a given class .
train
false
44,284
def make_one_liner(script): return ('python -c "%s"' % script.strip().replace('\n', ';'))
[ "def", "make_one_liner", "(", "script", ")", ":", "return", "(", "'python -c \"%s\"'", "%", "script", ".", "strip", "(", ")", ".", "replace", "(", "'\\n'", ",", "';'", ")", ")" ]
returns command to execute python script as a one-line python program e .
train
false
44,285
def param_init_gru(options, params, prefix='gru', nin=None, dim=None): if (nin == None): nin = options['dim_proj'] if (dim == None): dim = options['dim_proj'] W = numpy.concatenate([norm_weight(nin, dim), norm_weight(nin, dim)], axis=1) params[_p(prefix, 'W')] = W params[_p(prefix, 'b')] = numpy.zeros(((2 * dim),)).astype('float32') U = numpy.concatenate([ortho_weight(dim), ortho_weight(dim)], axis=1) params[_p(prefix, 'U')] = U Wx = norm_weight(nin, dim) params[_p(prefix, 'Wx')] = Wx Ux = ortho_weight(dim) params[_p(prefix, 'Ux')] = Ux params[_p(prefix, 'bx')] = numpy.zeros((dim,)).astype('float32') return params
[ "def", "param_init_gru", "(", "options", ",", "params", ",", "prefix", "=", "'gru'", ",", "nin", "=", "None", ",", "dim", "=", "None", ")", ":", "if", "(", "nin", "==", "None", ")", ":", "nin", "=", "options", "[", "'dim_proj'", "]", "if", "(", "dim", "==", "None", ")", ":", "dim", "=", "options", "[", "'dim_proj'", "]", "W", "=", "numpy", ".", "concatenate", "(", "[", "norm_weight", "(", "nin", ",", "dim", ")", ",", "norm_weight", "(", "nin", ",", "dim", ")", "]", ",", "axis", "=", "1", ")", "params", "[", "_p", "(", "prefix", ",", "'W'", ")", "]", "=", "W", "params", "[", "_p", "(", "prefix", ",", "'b'", ")", "]", "=", "numpy", ".", "zeros", "(", "(", "(", "2", "*", "dim", ")", ",", ")", ")", ".", "astype", "(", "'float32'", ")", "U", "=", "numpy", ".", "concatenate", "(", "[", "ortho_weight", "(", "dim", ")", ",", "ortho_weight", "(", "dim", ")", "]", ",", "axis", "=", "1", ")", "params", "[", "_p", "(", "prefix", ",", "'U'", ")", "]", "=", "U", "Wx", "=", "norm_weight", "(", "nin", ",", "dim", ")", "params", "[", "_p", "(", "prefix", ",", "'Wx'", ")", "]", "=", "Wx", "Ux", "=", "ortho_weight", "(", "dim", ")", "params", "[", "_p", "(", "prefix", ",", "'Ux'", ")", "]", "=", "Ux", "params", "[", "_p", "(", "prefix", ",", "'bx'", ")", "]", "=", "numpy", ".", "zeros", "(", "(", "dim", ",", ")", ")", ".", "astype", "(", "'float32'", ")", "return", "params" ]
parameter init for gru .
train
false
44,286
def test_pl(): o = nikola.utils.slugify(u'za\u017c\xf3\u0142\u0107g\u0119\u015bl\u0105ja\u017a\u0144', lang=u'pl') assert (o == u'zazolcgeslajazn') assert isinstance(o, nikola.utils.unicode_str)
[ "def", "test_pl", "(", ")", ":", "o", "=", "nikola", ".", "utils", ".", "slugify", "(", "u'za\\u017c\\xf3\\u0142\\u0107g\\u0119\\u015bl\\u0105ja\\u017a\\u0144'", ",", "lang", "=", "u'pl'", ")", "assert", "(", "o", "==", "u'zazolcgeslajazn'", ")", "assert", "isinstance", "(", "o", ",", "nikola", ".", "utils", ".", "unicode_str", ")" ]
test a string with polish diacritical characters .
train
false
44,287
def vip_create(request, **kwargs): body = {'vip': {'address': kwargs['address'], 'name': kwargs['name'], 'description': kwargs['description'], 'subnet_id': kwargs['subnet_id'], 'protocol_port': kwargs['protocol_port'], 'protocol': kwargs['protocol'], 'pool_id': kwargs['pool_id'], 'session_persistence': kwargs['session_persistence'], 'connection_limit': kwargs['connection_limit'], 'admin_state_up': kwargs['admin_state_up']}} vip = quantumclient(request).create_vip(body).get('vip') return Vip(vip)
[ "def", "vip_create", "(", "request", ",", "**", "kwargs", ")", ":", "body", "=", "{", "'vip'", ":", "{", "'address'", ":", "kwargs", "[", "'address'", "]", ",", "'name'", ":", "kwargs", "[", "'name'", "]", ",", "'description'", ":", "kwargs", "[", "'description'", "]", ",", "'subnet_id'", ":", "kwargs", "[", "'subnet_id'", "]", ",", "'protocol_port'", ":", "kwargs", "[", "'protocol_port'", "]", ",", "'protocol'", ":", "kwargs", "[", "'protocol'", "]", ",", "'pool_id'", ":", "kwargs", "[", "'pool_id'", "]", ",", "'session_persistence'", ":", "kwargs", "[", "'session_persistence'", "]", ",", "'connection_limit'", ":", "kwargs", "[", "'connection_limit'", "]", ",", "'admin_state_up'", ":", "kwargs", "[", "'admin_state_up'", "]", "}", "}", "vip", "=", "quantumclient", "(", "request", ")", ".", "create_vip", "(", "body", ")", ".", "get", "(", "'vip'", ")", "return", "Vip", "(", "vip", ")" ]
create a vip for a specified pool .
train
false
44,288
def download_all_package_dicts(num_greenlets=5, limit=None): package_names = xmlrpclib.ServerProxy('https://pypi.python.org/pypi').list_packages()[:limit] logger.info('Downloading %s packages', len(package_names)) return Pool(num_greenlets).imap(download_package_dict, package_names)
[ "def", "download_all_package_dicts", "(", "num_greenlets", "=", "5", ",", "limit", "=", "None", ")", ":", "package_names", "=", "xmlrpclib", ".", "ServerProxy", "(", "'https://pypi.python.org/pypi'", ")", ".", "list_packages", "(", ")", "[", ":", "limit", "]", "logger", ".", "info", "(", "'Downloading %s packages'", ",", "len", "(", "package_names", ")", ")", "return", "Pool", "(", "num_greenlets", ")", ".", "imap", "(", "download_package_dict", ",", "package_names", ")" ]
download all the package dicts from pypi .
train
false
44,289
def _rec_clear_denoms(g, v, K0, K1): common = K1.one if (not v): for c in g: common = K1.lcm(common, K0.denom(c)) else: w = (v - 1) for c in g: common = K1.lcm(common, _rec_clear_denoms(c, w, K0, K1)) return common
[ "def", "_rec_clear_denoms", "(", "g", ",", "v", ",", "K0", ",", "K1", ")", ":", "common", "=", "K1", ".", "one", "if", "(", "not", "v", ")", ":", "for", "c", "in", "g", ":", "common", "=", "K1", ".", "lcm", "(", "common", ",", "K0", ".", "denom", "(", "c", ")", ")", "else", ":", "w", "=", "(", "v", "-", "1", ")", "for", "c", "in", "g", ":", "common", "=", "K1", ".", "lcm", "(", "common", ",", "_rec_clear_denoms", "(", "c", ",", "w", ",", "K0", ",", "K1", ")", ")", "return", "common" ]
recursive helper for :func:dmp_clear_denoms .
train
false
44,290
def run_inference_on_image(image): if (not tf.gfile.Exists(image)): tf.logging.fatal('File does not exist %s', image) image_data = tf.gfile.FastGFile(image, 'rb').read() create_graph() with tf.Session() as sess: softmax_tensor = sess.graph.get_tensor_by_name('softmax:0') predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0': image_data}) predictions = np.squeeze(predictions) node_lookup = NodeLookup() top_k = predictions.argsort()[(- FLAGS.num_top_predictions):][::(-1)] for node_id in top_k: human_string = node_lookup.id_to_string(node_id) score = predictions[node_id] print(('%s (score = %.5f)' % (human_string, score)))
[ "def", "run_inference_on_image", "(", "image", ")", ":", "if", "(", "not", "tf", ".", "gfile", ".", "Exists", "(", "image", ")", ")", ":", "tf", ".", "logging", ".", "fatal", "(", "'File does not exist %s'", ",", "image", ")", "image_data", "=", "tf", ".", "gfile", ".", "FastGFile", "(", "image", ",", "'rb'", ")", ".", "read", "(", ")", "create_graph", "(", ")", "with", "tf", ".", "Session", "(", ")", "as", "sess", ":", "softmax_tensor", "=", "sess", ".", "graph", ".", "get_tensor_by_name", "(", "'softmax:0'", ")", "predictions", "=", "sess", ".", "run", "(", "softmax_tensor", ",", "{", "'DecodeJpeg/contents:0'", ":", "image_data", "}", ")", "predictions", "=", "np", ".", "squeeze", "(", "predictions", ")", "node_lookup", "=", "NodeLookup", "(", ")", "top_k", "=", "predictions", ".", "argsort", "(", ")", "[", "(", "-", "FLAGS", ".", "num_top_predictions", ")", ":", "]", "[", ":", ":", "(", "-", "1", ")", "]", "for", "node_id", "in", "top_k", ":", "human_string", "=", "node_lookup", ".", "id_to_string", "(", "node_id", ")", "score", "=", "predictions", "[", "node_id", "]", "print", "(", "(", "'%s (score = %.5f)'", "%", "(", "human_string", ",", "score", ")", ")", ")" ]
runs inference on an image .
train
true
44,291
def SetInfo(userName=None): if (userName is None): userName = win32api.GetUserName() oldData = win32net.NetUserGetInfo(server, userName, 3) try: d = oldData.copy() d['usr_comment'] = 'Test comment' win32net.NetUserSetInfo(server, userName, 3, d) new = win32net.NetUserGetInfo(server, userName, 3)['usr_comment'] if (str(new) != 'Test comment'): raise RuntimeError(('Could not read the same comment back - got %s' % new)) print 'Changed the data for the user' finally: win32net.NetUserSetInfo(server, userName, 3, oldData)
[ "def", "SetInfo", "(", "userName", "=", "None", ")", ":", "if", "(", "userName", "is", "None", ")", ":", "userName", "=", "win32api", ".", "GetUserName", "(", ")", "oldData", "=", "win32net", ".", "NetUserGetInfo", "(", "server", ",", "userName", ",", "3", ")", "try", ":", "d", "=", "oldData", ".", "copy", "(", ")", "d", "[", "'usr_comment'", "]", "=", "'Test comment'", "win32net", ".", "NetUserSetInfo", "(", "server", ",", "userName", ",", "3", ",", "d", ")", "new", "=", "win32net", ".", "NetUserGetInfo", "(", "server", ",", "userName", ",", "3", ")", "[", "'usr_comment'", "]", "if", "(", "str", "(", "new", ")", "!=", "'Test comment'", ")", ":", "raise", "RuntimeError", "(", "(", "'Could not read the same comment back - got %s'", "%", "new", ")", ")", "print", "'Changed the data for the user'", "finally", ":", "win32net", ".", "NetUserSetInfo", "(", "server", ",", "userName", ",", "3", ",", "oldData", ")" ]
attempts to change the current users comment .
train
false
44,292
def get_file_extension(fileName): return os.path.splitext(fileName.lower())[(-1)][1:]
[ "def", "get_file_extension", "(", "fileName", ")", ":", "return", "os", ".", "path", ".", "splitext", "(", "fileName", ".", "lower", "(", ")", ")", "[", "(", "-", "1", ")", "]", "[", "1", ":", "]" ]
get the file extension in the form of: py .
train
false
44,293
def _get_created(created): if (created == 'now'): return datetime.now() elif created: return created else: return datetime(2011, random.randint(1, 12), random.randint(1, 28), random.randint(0, 23), random.randint(0, 59), random.randint(0, 59))
[ "def", "_get_created", "(", "created", ")", ":", "if", "(", "created", "==", "'now'", ")", ":", "return", "datetime", ".", "now", "(", ")", "elif", "created", ":", "return", "created", "else", ":", "return", "datetime", "(", "2011", ",", "random", ".", "randint", "(", "1", ",", "12", ")", ",", "random", ".", "randint", "(", "1", ",", "28", ")", ",", "random", ".", "randint", "(", "0", ",", "23", ")", ",", "random", ".", "randint", "(", "0", ",", "59", ")", ",", "random", ".", "randint", "(", "0", ",", "59", ")", ")" ]
returns a datetime .
train
false
44,295
def get_representations_of_kind(kind, start=None, end=None): q = Property.all() q.ancestor(Property.key_for_kind(kind)) if ((start is not None) and (start != '')): q.filter('__key__ >=', Property.key_for_property(kind, start)) if (end is not None): if (end == ''): return {} q.filter('__key__ <', Property.key_for_property(kind, end)) result = {} for property in q.run(): result[property.property_name] = property.property_representation return result
[ "def", "get_representations_of_kind", "(", "kind", ",", "start", "=", "None", ",", "end", "=", "None", ")", ":", "q", "=", "Property", ".", "all", "(", ")", "q", ".", "ancestor", "(", "Property", ".", "key_for_kind", "(", "kind", ")", ")", "if", "(", "(", "start", "is", "not", "None", ")", "and", "(", "start", "!=", "''", ")", ")", ":", "q", ".", "filter", "(", "'__key__ >='", ",", "Property", ".", "key_for_property", "(", "kind", ",", "start", ")", ")", "if", "(", "end", "is", "not", "None", ")", ":", "if", "(", "end", "==", "''", ")", ":", "return", "{", "}", "q", ".", "filter", "(", "'__key__ <'", ",", "Property", ".", "key_for_property", "(", "kind", ",", "end", ")", ")", "result", "=", "{", "}", "for", "property", "in", "q", ".", "run", "(", ")", ":", "result", "[", "property", ".", "property_name", "]", "=", "property", ".", "property_representation", "return", "result" ]
return all representations of properties of kind in the specified range .
train
false
44,297
def test_as_import(script): import pip.commands.install as inst assert (inst is not None)
[ "def", "test_as_import", "(", "script", ")", ":", "import", "pip", ".", "commands", ".", "install", "as", "inst", "assert", "(", "inst", "is", "not", "None", ")" ]
test that pip .
train
false
44,300
def is_above_limit(count): return ((count is not None) and (count >= UNLIMITED))
[ "def", "is_above_limit", "(", "count", ")", ":", "return", "(", "(", "count", "is", "not", "None", ")", "and", "(", "count", ">=", "UNLIMITED", ")", ")" ]
checks whether a count is above the maximum .
train
false
44,301
def target_list_option(s): return _convert(s, (list, tuple))
[ "def", "target_list_option", "(", "s", ")", ":", "return", "_convert", "(", "s", ",", "(", "list", ",", "tuple", ")", ")" ]
same type as list_option .
train
false
44,302
def download_daily_bars(instrument, year, csvFile): bars = download_csv(instrument, datetime.date(year, 1, 1), datetime.date(year, 12, 31), 'd') f = open(csvFile, 'w') f.write(bars) f.close()
[ "def", "download_daily_bars", "(", "instrument", ",", "year", ",", "csvFile", ")", ":", "bars", "=", "download_csv", "(", "instrument", ",", "datetime", ".", "date", "(", "year", ",", "1", ",", "1", ")", ",", "datetime", ".", "date", "(", "year", ",", "12", ",", "31", ")", ",", "'d'", ")", "f", "=", "open", "(", "csvFile", ",", "'w'", ")", "f", ".", "write", "(", "bars", ")", "f", ".", "close", "(", ")" ]
download daily bars from google finance for a given year .
train
false
44,303
def _assert_equal_entries(utest, found, output, count=None): utest.assertEqual(found[0], output[0]) utest.assertEqual(found[1], (count or output[1])) (found_time, output_time) = (MyTime.localtime(found[2]), MyTime.localtime(output[2])) try: utest.assertEqual(found_time, output_time) except AssertionError as e: utest.assertEqual((float(found[2]), found_time), (float(output[2]), output_time)) if ((len(output) > 3) and (count is None)): if ((os.linesep != '\n') or sys.platform.startswith('cygwin')): srepr = (lambda x: repr(x).replace('\\r', '')) else: srepr = repr utest.assertEqual(srepr(found[3]), srepr(output[3]))
[ "def", "_assert_equal_entries", "(", "utest", ",", "found", ",", "output", ",", "count", "=", "None", ")", ":", "utest", ".", "assertEqual", "(", "found", "[", "0", "]", ",", "output", "[", "0", "]", ")", "utest", ".", "assertEqual", "(", "found", "[", "1", "]", ",", "(", "count", "or", "output", "[", "1", "]", ")", ")", "(", "found_time", ",", "output_time", ")", "=", "(", "MyTime", ".", "localtime", "(", "found", "[", "2", "]", ")", ",", "MyTime", ".", "localtime", "(", "output", "[", "2", "]", ")", ")", "try", ":", "utest", ".", "assertEqual", "(", "found_time", ",", "output_time", ")", "except", "AssertionError", "as", "e", ":", "utest", ".", "assertEqual", "(", "(", "float", "(", "found", "[", "2", "]", ")", ",", "found_time", ")", ",", "(", "float", "(", "output", "[", "2", "]", ")", ",", "output_time", ")", ")", "if", "(", "(", "len", "(", "output", ")", ">", "3", ")", "and", "(", "count", "is", "None", ")", ")", ":", "if", "(", "(", "os", ".", "linesep", "!=", "'\\n'", ")", "or", "sys", ".", "platform", ".", "startswith", "(", "'cygwin'", ")", ")", ":", "srepr", "=", "(", "lambda", "x", ":", "repr", "(", "x", ")", ".", "replace", "(", "'\\\\r'", ",", "''", ")", ")", "else", ":", "srepr", "=", "repr", "utest", ".", "assertEqual", "(", "srepr", "(", "found", "[", "3", "]", ")", ",", "srepr", "(", "output", "[", "3", "]", ")", ")" ]
little helper to unify comparisons with the target entries and report helpful failure reports instead of millions of seconds ;) .
train
false
44,304
def log_ratio_measure(segmented_topics, per_topic_postings, num_docs, normalize=False): m_lr = [] for s_i in segmented_topics: for (w_prime, w_star) in s_i: w_prime_docs = per_topic_postings[w_prime] w_star_docs = per_topic_postings[w_star] co_docs = w_prime_docs.intersection(w_star_docs) if normalize: numerator = log_ratio_measure([[(w_prime, w_star)]], per_topic_postings, num_docs)[0] co_doc_prob = (len(co_docs) / float(num_docs)) m_lr_i = (numerator / (- np.log((co_doc_prob + EPSILON)))) else: numerator = ((len(co_docs) / float(num_docs)) + EPSILON) denominator = ((len(w_prime_docs) / float(num_docs)) * (len(w_star_docs) / float(num_docs))) m_lr_i = np.log((numerator / denominator)) m_lr.append(m_lr_i) return m_lr
[ "def", "log_ratio_measure", "(", "segmented_topics", ",", "per_topic_postings", ",", "num_docs", ",", "normalize", "=", "False", ")", ":", "m_lr", "=", "[", "]", "for", "s_i", "in", "segmented_topics", ":", "for", "(", "w_prime", ",", "w_star", ")", "in", "s_i", ":", "w_prime_docs", "=", "per_topic_postings", "[", "w_prime", "]", "w_star_docs", "=", "per_topic_postings", "[", "w_star", "]", "co_docs", "=", "w_prime_docs", ".", "intersection", "(", "w_star_docs", ")", "if", "normalize", ":", "numerator", "=", "log_ratio_measure", "(", "[", "[", "(", "w_prime", ",", "w_star", ")", "]", "]", ",", "per_topic_postings", ",", "num_docs", ")", "[", "0", "]", "co_doc_prob", "=", "(", "len", "(", "co_docs", ")", "/", "float", "(", "num_docs", ")", ")", "m_lr_i", "=", "(", "numerator", "/", "(", "-", "np", ".", "log", "(", "(", "co_doc_prob", "+", "EPSILON", ")", ")", ")", ")", "else", ":", "numerator", "=", "(", "(", "len", "(", "co_docs", ")", "/", "float", "(", "num_docs", ")", ")", "+", "EPSILON", ")", "denominator", "=", "(", "(", "len", "(", "w_prime_docs", ")", "/", "float", "(", "num_docs", ")", ")", "*", "(", "len", "(", "w_star_docs", ")", "/", "float", "(", "num_docs", ")", ")", ")", "m_lr_i", "=", "np", ".", "log", "(", "(", "numerator", "/", "denominator", ")", ")", "m_lr", ".", "append", "(", "m_lr_i", ")", "return", "m_lr" ]
if normalize=false: popularly known as pmi .
train
false
44,305
def getAdditionalLoopLength(loop, point, pointIndex): afterPoint = loop[pointIndex] beforePoint = loop[(((pointIndex + len(loop)) - 1) % len(loop))] return ((abs((point - beforePoint)) + abs((point - afterPoint))) - abs((afterPoint - beforePoint)))
[ "def", "getAdditionalLoopLength", "(", "loop", ",", "point", ",", "pointIndex", ")", ":", "afterPoint", "=", "loop", "[", "pointIndex", "]", "beforePoint", "=", "loop", "[", "(", "(", "(", "pointIndex", "+", "len", "(", "loop", ")", ")", "-", "1", ")", "%", "len", "(", "loop", ")", ")", "]", "return", "(", "(", "abs", "(", "(", "point", "-", "beforePoint", ")", ")", "+", "abs", "(", "(", "point", "-", "afterPoint", ")", ")", ")", "-", "abs", "(", "(", "afterPoint", "-", "beforePoint", ")", ")", ")" ]
get the additional length added by inserting a point into a loop .
train
false
44,306
@register.tag def staticpage_url(parser, token): bits = token.split_contents() syntax_message = ("%(tag_name)s expects a syntax of %(tag_name)s 'virtual/path'" % dict(tag_name=bits[0])) quote_message = ("%s tag's argument should be in quotes" % bits[0]) if (len(bits) == 2): virtual_path = bits[1] if (not ((virtual_path[0] == virtual_path[(-1)]) and (virtual_path[0] in ('"', "'")))): raise template.TemplateSyntaxError(quote_message) return StaticPageURLNode(virtual_path[1:(-1)]) raise template.TemplateSyntaxError(syntax_message)
[ "@", "register", ".", "tag", "def", "staticpage_url", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "syntax_message", "=", "(", "\"%(tag_name)s expects a syntax of %(tag_name)s 'virtual/path'\"", "%", "dict", "(", "tag_name", "=", "bits", "[", "0", "]", ")", ")", "quote_message", "=", "(", "\"%s tag's argument should be in quotes\"", "%", "bits", "[", "0", "]", ")", "if", "(", "len", "(", "bits", ")", "==", "2", ")", ":", "virtual_path", "=", "bits", "[", "1", "]", "if", "(", "not", "(", "(", "virtual_path", "[", "0", "]", "==", "virtual_path", "[", "(", "-", "1", ")", "]", ")", "and", "(", "virtual_path", "[", "0", "]", "in", "(", "'\"'", ",", "\"'\"", ")", ")", ")", ")", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "quote_message", ")", "return", "StaticPageURLNode", "(", "virtual_path", "[", "1", ":", "(", "-", "1", ")", "]", ")", "raise", "template", ".", "TemplateSyntaxError", "(", "syntax_message", ")" ]
returns the internal url for a static page based on its virtual path .
train
false
44,307
def _MakeAsyncCall(method, request, response, get_result_hook=None, rpc=None): if (rpc is None): rpc = create_rpc() assert (rpc.service == 'taskqueue'), repr(rpc.service) rpc.make_call(method, request, response, get_result_hook, None) return rpc
[ "def", "_MakeAsyncCall", "(", "method", ",", "request", ",", "response", ",", "get_result_hook", "=", "None", ",", "rpc", "=", "None", ")", ":", "if", "(", "rpc", "is", "None", ")", ":", "rpc", "=", "create_rpc", "(", ")", "assert", "(", "rpc", ".", "service", "==", "'taskqueue'", ")", ",", "repr", "(", "rpc", ".", "service", ")", "rpc", ".", "make_call", "(", "method", ",", "request", ",", "response", ",", "get_result_hook", ",", "None", ")", "return", "rpc" ]
internal helper to schedule an asynchronous rpc .
train
false
44,308
def test_label_subject(): label = read_label(label_fname) assert_is(label.subject, None) assert_true(('unknown' in repr(label))) label = read_label(label_fname, subject='fsaverage') assert_true((label.subject == 'fsaverage')) assert_true(('fsaverage' in repr(label)))
[ "def", "test_label_subject", "(", ")", ":", "label", "=", "read_label", "(", "label_fname", ")", "assert_is", "(", "label", ".", "subject", ",", "None", ")", "assert_true", "(", "(", "'unknown'", "in", "repr", "(", "label", ")", ")", ")", "label", "=", "read_label", "(", "label_fname", ",", "subject", "=", "'fsaverage'", ")", "assert_true", "(", "(", "label", ".", "subject", "==", "'fsaverage'", ")", ")", "assert_true", "(", "(", "'fsaverage'", "in", "repr", "(", "label", ")", ")", ")" ]
test label subject name extraction .
train
false
44,309
def log_writer(): while True: mpstate.logfile_raw.write(mpstate.logqueue_raw.get()) while (not mpstate.logqueue_raw.empty()): mpstate.logfile_raw.write(mpstate.logqueue_raw.get()) while (not mpstate.logqueue.empty()): mpstate.logfile.write(mpstate.logqueue.get()) if mpstate.settings.flushlogs: mpstate.logfile.flush() mpstate.logfile_raw.flush()
[ "def", "log_writer", "(", ")", ":", "while", "True", ":", "mpstate", ".", "logfile_raw", ".", "write", "(", "mpstate", ".", "logqueue_raw", ".", "get", "(", ")", ")", "while", "(", "not", "mpstate", ".", "logqueue_raw", ".", "empty", "(", ")", ")", ":", "mpstate", ".", "logfile_raw", ".", "write", "(", "mpstate", ".", "logqueue_raw", ".", "get", "(", ")", ")", "while", "(", "not", "mpstate", ".", "logqueue", ".", "empty", "(", ")", ")", ":", "mpstate", ".", "logfile", ".", "write", "(", "mpstate", ".", "logqueue", ".", "get", "(", ")", ")", "if", "mpstate", ".", "settings", ".", "flushlogs", ":", "mpstate", ".", "logfile", ".", "flush", "(", ")", "mpstate", ".", "logfile_raw", ".", "flush", "(", ")" ]
log writing thread .
train
true
44,310
def _parse_fmadm_config(output): result = [] output = output.split('\n') header = [field for field in output[0].lower().split(' ') if field] del output[0] for entry in output: entry = [item for item in entry.split(' ') if item] entry = (entry[0:3] + [' '.join(entry[3:])]) component = OrderedDict() for field in header: component[field] = entry[header.index(field)] result.append(component) keyed_result = OrderedDict() for component in result: keyed_result[component['module']] = component del keyed_result[component['module']]['module'] result = keyed_result return result
[ "def", "_parse_fmadm_config", "(", "output", ")", ":", "result", "=", "[", "]", "output", "=", "output", ".", "split", "(", "'\\n'", ")", "header", "=", "[", "field", "for", "field", "in", "output", "[", "0", "]", ".", "lower", "(", ")", ".", "split", "(", "' '", ")", "if", "field", "]", "del", "output", "[", "0", "]", "for", "entry", "in", "output", ":", "entry", "=", "[", "item", "for", "item", "in", "entry", ".", "split", "(", "' '", ")", "if", "item", "]", "entry", "=", "(", "entry", "[", "0", ":", "3", "]", "+", "[", "' '", ".", "join", "(", "entry", "[", "3", ":", "]", ")", "]", ")", "component", "=", "OrderedDict", "(", ")", "for", "field", "in", "header", ":", "component", "[", "field", "]", "=", "entry", "[", "header", ".", "index", "(", "field", ")", "]", "result", ".", "append", "(", "component", ")", "keyed_result", "=", "OrderedDict", "(", ")", "for", "component", "in", "result", ":", "keyed_result", "[", "component", "[", "'module'", "]", "]", "=", "component", "del", "keyed_result", "[", "component", "[", "'module'", "]", "]", "[", "'module'", "]", "result", "=", "keyed_result", "return", "result" ]
parsbb fmdump/fmadm output .
train
true
44,311
def c_logout(client): return '@quit'
[ "def", "c_logout", "(", "client", ")", ":", "return", "'@quit'" ]
logouts of the game .
train
false
44,314
def update_dict_of_lists(master, newdata): for (key, values) in newdata.items(): master.setdefault(key, []).extend(values)
[ "def", "update_dict_of_lists", "(", "master", ",", "newdata", ")", ":", "for", "(", "key", ",", "values", ")", "in", "newdata", ".", "items", "(", ")", ":", "master", ".", "setdefault", "(", "key", ",", "[", "]", ")", ".", "extend", "(", "values", ")" ]
extend the list values of master with those from newdata .
train
false
44,315
def pootle_context(request): return {'settings': {'POOTLE_CUSTOM_LOGO': getattr(settings, 'POOTLE_CUSTOM_LOGO', ''), 'POOTLE_TITLE': settings.POOTLE_TITLE, 'POOTLE_INSTANCE_ID': settings.POOTLE_INSTANCE_ID, 'POOTLE_CONTACT_ENABLED': (settings.POOTLE_CONTACT_ENABLED and settings.POOTLE_CONTACT_EMAIL), 'POOTLE_MARKUP_FILTER': get_markup_filter_name(), 'POOTLE_SIGNUP_ENABLED': settings.POOTLE_SIGNUP_ENABLED, 'SCRIPT_NAME': settings.SCRIPT_NAME, 'POOTLE_CACHE_TIMEOUT': settings.POOTLE_CACHE_TIMEOUT, 'DEBUG': settings.DEBUG}, 'custom': settings.POOTLE_CUSTOM_TEMPLATE_CONTEXT, 'ALL_LANGUAGES': Language.live.cached_dict(translation.get_language(), request.user.is_superuser), 'ALL_PROJECTS': Project.objects.cached_dict(request.user), 'SOCIAL_AUTH_PROVIDERS': _get_social_auth_providers(request), 'display_agreement': _agreement_context(request)}
[ "def", "pootle_context", "(", "request", ")", ":", "return", "{", "'settings'", ":", "{", "'POOTLE_CUSTOM_LOGO'", ":", "getattr", "(", "settings", ",", "'POOTLE_CUSTOM_LOGO'", ",", "''", ")", ",", "'POOTLE_TITLE'", ":", "settings", ".", "POOTLE_TITLE", ",", "'POOTLE_INSTANCE_ID'", ":", "settings", ".", "POOTLE_INSTANCE_ID", ",", "'POOTLE_CONTACT_ENABLED'", ":", "(", "settings", ".", "POOTLE_CONTACT_ENABLED", "and", "settings", ".", "POOTLE_CONTACT_EMAIL", ")", ",", "'POOTLE_MARKUP_FILTER'", ":", "get_markup_filter_name", "(", ")", ",", "'POOTLE_SIGNUP_ENABLED'", ":", "settings", ".", "POOTLE_SIGNUP_ENABLED", ",", "'SCRIPT_NAME'", ":", "settings", ".", "SCRIPT_NAME", ",", "'POOTLE_CACHE_TIMEOUT'", ":", "settings", ".", "POOTLE_CACHE_TIMEOUT", ",", "'DEBUG'", ":", "settings", ".", "DEBUG", "}", ",", "'custom'", ":", "settings", ".", "POOTLE_CUSTOM_TEMPLATE_CONTEXT", ",", "'ALL_LANGUAGES'", ":", "Language", ".", "live", ".", "cached_dict", "(", "translation", ".", "get_language", "(", ")", ",", "request", ".", "user", ".", "is_superuser", ")", ",", "'ALL_PROJECTS'", ":", "Project", ".", "objects", ".", "cached_dict", "(", "request", ".", "user", ")", ",", "'SOCIAL_AUTH_PROVIDERS'", ":", "_get_social_auth_providers", "(", "request", ")", ",", "'display_agreement'", ":", "_agreement_context", "(", "request", ")", "}" ]
exposes settings to templates .
train
false