column                 type            range / classes
id_within_dataset      int64           1 .. 55.5k
snippet                stringlengths   19 .. 14.2k
tokens                 listlengths     6 .. 1.63k
nl                     stringlengths   6 .. 352
split_within_dataset   stringclasses   1 value ("train")
is_duplicated          bool            2 classes (true / false)
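The schema above fully determines the shape of each record below. A minimal sketch of that shape as a Python type, assuming records are exposed as plain dicts (the dump itself does not name a loader API; field names and ranges are taken from the header):

```python
# A minimal sketch of one record under the schema above, assuming
# records are exposed as plain dicts; this TypedDict is illustrative,
# not a documented loader API.
from typing import List, TypedDict

class Record(TypedDict):
    id_within_dataset: int      # int64, observed range 1 .. 55.5k
    snippet: str                # source code, 19 .. 14.2k characters
    tokens: List[str]           # token list, 6 .. 1.63k items
    nl: str                     # natural-language summary, 6 .. 352 characters
    split_within_dataset: str   # single class: "train"
    is_duplicated: bool         # two classes: true / false
```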
5,662
def StemmingAnalyzer(expression=default_pattern, stoplist=STOP_WORDS, minsize=2, maxsize=None, gaps=False, stemfn=stem, ignore=None, cachesize=50000): ret = RegexTokenizer(expression=expression, gaps=gaps) chain = (ret | LowercaseFilter()) if (stoplist is not None): chain = (chain | StopFilter(stoplist=stoplist, minsize=minsize, maxsize=maxsize)) return (chain | StemFilter(stemfn=stemfn, ignore=ignore, cachesize=cachesize))
[ "def", "StemmingAnalyzer", "(", "expression", "=", "default_pattern", ",", "stoplist", "=", "STOP_WORDS", ",", "minsize", "=", "2", ",", "maxsize", "=", "None", ",", "gaps", "=", "False", ",", "stemfn", "=", "stem", ",", "ignore", "=", "None", ",", "cachesize", "=", "50000", ")", ":", "ret", "=", "RegexTokenizer", "(", "expression", "=", "expression", ",", "gaps", "=", "gaps", ")", "chain", "=", "(", "ret", "|", "LowercaseFilter", "(", ")", ")", "if", "(", "stoplist", "is", "not", "None", ")", ":", "chain", "=", "(", "chain", "|", "StopFilter", "(", "stoplist", "=", "stoplist", ",", "minsize", "=", "minsize", ",", "maxsize", "=", "maxsize", ")", ")", "return", "(", "chain", "|", "StemFilter", "(", "stemfn", "=", "stemfn", ",", "ignore", "=", "ignore", ",", "cachesize", "=", "cachesize", ")", ")" ]
composes a regextokenizer with a lower case filter , an optional stop filter , and a stem filter .
train
false
5,663
def write_items(lib, query, pretend, force): (items, albums) = _do_query(lib, query, False, False) for item in items: if (not os.path.exists(syspath(item.path))): log.info(u'missing file: {0}'.format(util.displayable_path(item.path))) continue try: clean_item = library.Item.from_path(item.path) except library.ReadError as exc: log.error(u'error reading {0}: {1}'.format(displayable_path(item.path), exc)) continue changed = ui.show_model_changes(item, clean_item, library.Item._media_fields, force) if ((changed or force) and (not pretend)): item.try_sync()
[ "def", "write_items", "(", "lib", ",", "query", ",", "pretend", ",", "force", ")", ":", "(", "items", ",", "albums", ")", "=", "_do_query", "(", "lib", ",", "query", ",", "False", ",", "False", ")", "for", "item", "in", "items", ":", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "syspath", "(", "item", ".", "path", ")", ")", ")", ":", "log", ".", "info", "(", "u'missing file: {0}'", ".", "format", "(", "util", ".", "displayable_path", "(", "item", ".", "path", ")", ")", ")", "continue", "try", ":", "clean_item", "=", "library", ".", "Item", ".", "from_path", "(", "item", ".", "path", ")", "except", "library", ".", "ReadError", "as", "exc", ":", "log", ".", "error", "(", "u'error reading {0}: {1}'", ".", "format", "(", "displayable_path", "(", "item", ".", "path", ")", ",", "exc", ")", ")", "continue", "changed", "=", "ui", ".", "show_model_changes", "(", "item", ",", "clean_item", ",", "library", ".", "Item", ".", "_media_fields", ",", "force", ")", "if", "(", "(", "changed", "or", "force", ")", "and", "(", "not", "pretend", ")", ")", ":", "item", ".", "try_sync", "(", ")" ]
write tag information from the database to the respective files in the filesystem .
train
false
5,664
def get_bracket_regions(settings, minimap): styles = settings.get('bracket_styles', DEFAULT_STYLES) icon_path = 'Packages/BracketHighlighter/icons' for (key, value) in DEFAULT_STYLES.items(): if (key not in styles): styles[key] = value continue for (k, v) in value.items(): if (k not in styles[key]): styles[key][k] = v default_settings = styles['default'] for (k, v) in styles.items(): (yield (k, StyleDefinition(k, v, default_settings, icon_path, minimap)))
[ "def", "get_bracket_regions", "(", "settings", ",", "minimap", ")", ":", "styles", "=", "settings", ".", "get", "(", "'bracket_styles'", ",", "DEFAULT_STYLES", ")", "icon_path", "=", "'Packages/BracketHighlighter/icons'", "for", "(", "key", ",", "value", ")", "in", "DEFAULT_STYLES", ".", "items", "(", ")", ":", "if", "(", "key", "not", "in", "styles", ")", ":", "styles", "[", "key", "]", "=", "value", "continue", "for", "(", "k", ",", "v", ")", "in", "value", ".", "items", "(", ")", ":", "if", "(", "k", "not", "in", "styles", "[", "key", "]", ")", ":", "styles", "[", "key", "]", "[", "k", "]", "=", "v", "default_settings", "=", "styles", "[", "'default'", "]", "for", "(", "k", ",", "v", ")", "in", "styles", ".", "items", "(", ")", ":", "(", "yield", "(", "k", ",", "StyleDefinition", "(", "k", ",", "v", ",", "default_settings", ",", "icon_path", ",", "minimap", ")", ")", ")" ]
get styled regions for brackets to use .
train
false
5,667
def CopyTool(flavor, out_path, generator_flags={}): prefix = {'aix': 'flock', 'solaris': 'flock', 'mac': 'mac', 'win': 'win'}.get(flavor, None) if (not prefix): return source_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), ('%s_tool.py' % prefix)) with open(source_path) as source_file: source = source_file.readlines() header = '# Generated by gyp. Do not edit.\n' mac_toolchain_dir = generator_flags.get('mac_toolchain_dir', None) if ((flavor == 'mac') and mac_toolchain_dir): header += ("import os;\nos.environ['DEVELOPER_DIR']='%s'\n" % mac_toolchain_dir) tool_path = os.path.join(out_path, ('gyp-%s-tool' % prefix)) with open(tool_path, 'w') as tool_file: tool_file.write(''.join(([source[0], header] + source[1:]))) os.chmod(tool_path, 493)
[ "def", "CopyTool", "(", "flavor", ",", "out_path", ",", "generator_flags", "=", "{", "}", ")", ":", "prefix", "=", "{", "'aix'", ":", "'flock'", ",", "'solaris'", ":", "'flock'", ",", "'mac'", ":", "'mac'", ",", "'win'", ":", "'win'", "}", ".", "get", "(", "flavor", ",", "None", ")", "if", "(", "not", "prefix", ")", ":", "return", "source_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", ",", "(", "'%s_tool.py'", "%", "prefix", ")", ")", "with", "open", "(", "source_path", ")", "as", "source_file", ":", "source", "=", "source_file", ".", "readlines", "(", ")", "header", "=", "'# Generated by gyp. Do not edit.\\n'", "mac_toolchain_dir", "=", "generator_flags", ".", "get", "(", "'mac_toolchain_dir'", ",", "None", ")", "if", "(", "(", "flavor", "==", "'mac'", ")", "and", "mac_toolchain_dir", ")", ":", "header", "+=", "(", "\"import os;\\nos.environ['DEVELOPER_DIR']='%s'\\n\"", "%", "mac_toolchain_dir", ")", "tool_path", "=", "os", ".", "path", ".", "join", "(", "out_path", ",", "(", "'gyp-%s-tool'", "%", "prefix", ")", ")", "with", "open", "(", "tool_path", ",", "'w'", ")", "as", "tool_file", ":", "tool_file", ".", "write", "(", "''", ".", "join", "(", "(", "[", "source", "[", "0", "]", ",", "header", "]", "+", "source", "[", "1", ":", "]", ")", ")", ")", "os", ".", "chmod", "(", "tool_path", ",", "493", ")" ]
finds the platform-specific _tool script , copies it to out_path and specializes it for the current platform .
train
false
5,668
def broken_view(request): raise KeyError('Oops! Looks like you wrote some bad code.')
[ "def", "broken_view", "(", "request", ")", ":", "raise", "KeyError", "(", "'Oops! Looks like you wrote some bad code.'", ")" ]
a view which just raises an exception .
train
false
5,669
def _setup(text): root = text._root() engine = searchengine.get(root) if (not hasattr(engine, '_searchdialog')): engine._searchdialog = SearchDialog(root, engine) return engine._searchdialog
[ "def", "_setup", "(", "text", ")", ":", "root", "=", "text", ".", "_root", "(", ")", "engine", "=", "searchengine", ".", "get", "(", "root", ")", "if", "(", "not", "hasattr", "(", "engine", ",", "'_searchdialog'", ")", ")", ":", "engine", ".", "_searchdialog", "=", "SearchDialog", "(", "root", ",", "engine", ")", "return", "engine", ".", "_searchdialog" ]
return the searchdialog instance for the text widget's root , creating and caching it on the search engine if needed .
train
false
5,670
@pytest.mark.django_db @pytest.mark.parametrize('show_all_pages', [True, False]) def test_page_links_plugin_hide_expired(show_all_pages): context = get_jinja_context() page = create_page(eternal=True, visible_in_menu=True) another_page = create_page(eternal=True, visible_in_menu=True) plugin = PageLinksPlugin({'pages': [page.pk, another_page.pk], 'show_all_pages': show_all_pages}) assert (page in plugin.get_context_data(context)['pages']) page.available_from = None page.available_to = None page.save() assert (page in plugin.get_context_data(context)['pages']) plugin.config['hide_expired'] = True pages_in_context = plugin.get_context_data(context)['pages'] assert (page not in pages_in_context) assert (another_page in pages_in_context)
[ "@", "pytest", ".", "mark", ".", "django_db", "@", "pytest", ".", "mark", ".", "parametrize", "(", "'show_all_pages'", ",", "[", "True", ",", "False", "]", ")", "def", "test_page_links_plugin_hide_expired", "(", "show_all_pages", ")", ":", "context", "=", "get_jinja_context", "(", ")", "page", "=", "create_page", "(", "eternal", "=", "True", ",", "visible_in_menu", "=", "True", ")", "another_page", "=", "create_page", "(", "eternal", "=", "True", ",", "visible_in_menu", "=", "True", ")", "plugin", "=", "PageLinksPlugin", "(", "{", "'pages'", ":", "[", "page", ".", "pk", ",", "another_page", ".", "pk", "]", ",", "'show_all_pages'", ":", "show_all_pages", "}", ")", "assert", "(", "page", "in", "plugin", ".", "get_context_data", "(", "context", ")", "[", "'pages'", "]", ")", "page", ".", "available_from", "=", "None", "page", ".", "available_to", "=", "None", "page", ".", "save", "(", ")", "assert", "(", "page", "in", "plugin", ".", "get_context_data", "(", "context", ")", "[", "'pages'", "]", ")", "plugin", ".", "config", "[", "'hide_expired'", "]", "=", "True", "pages_in_context", "=", "plugin", ".", "get_context_data", "(", "context", ")", "[", "'pages'", "]", "assert", "(", "page", "not", "in", "pages_in_context", ")", "assert", "(", "another_page", "in", "pages_in_context", ")" ]
make sure plugin correctly filters out expired pages based on plugin configuration .
train
false
5,671
def find_up(l_node, f_node): if isinstance(l_node, gof.Apply): l_outs = l_node.outputs else: l_outs = l_node l_ins = gof.graph.inputs(l_outs) nodes = gof.graph.io_toposort(l_ins, l_outs) return (f_node in nodes)
[ "def", "find_up", "(", "l_node", ",", "f_node", ")", ":", "if", "isinstance", "(", "l_node", ",", "gof", ".", "Apply", ")", ":", "l_outs", "=", "l_node", ".", "outputs", "else", ":", "l_outs", "=", "l_node", "l_ins", "=", "gof", ".", "graph", ".", "inputs", "(", "l_outs", ")", "nodes", "=", "gof", ".", "graph", ".", "io_toposort", "(", "l_ins", ",", "l_outs", ")", "return", "(", "f_node", "in", "nodes", ")" ]
goes up in the graph from l_node and returns true if f_node is found .
train
false
5,672
@require_POST @ensure_csrf_cookie @cache_control(no_cache=True, no_store=True, must_revalidate=True) @require_level('staff') def list_instructor_tasks(request, course_id): course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id) problem_location_str = strip_if_string(request.POST.get('problem_location_str', False)) student = request.POST.get('unique_student_identifier', None) if (student is not None): student = get_student_from_identifier(student) if (student and (not problem_location_str)): return HttpResponseBadRequest('unique_student_identifier must accompany problem_location_str') if problem_location_str: try: module_state_key = course_id.make_usage_key_from_deprecated_string(problem_location_str) except InvalidKeyError: return HttpResponseBadRequest() if student: tasks = lms.djangoapps.instructor_task.api.get_instructor_task_history(course_id, module_state_key, student) else: tasks = lms.djangoapps.instructor_task.api.get_instructor_task_history(course_id, module_state_key) else: tasks = lms.djangoapps.instructor_task.api.get_running_instructor_tasks(course_id) response_payload = {'tasks': map(extract_task_features, tasks)} return JsonResponse(response_payload)
[ "@", "require_POST", "@", "ensure_csrf_cookie", "@", "cache_control", "(", "no_cache", "=", "True", ",", "no_store", "=", "True", ",", "must_revalidate", "=", "True", ")", "@", "require_level", "(", "'staff'", ")", "def", "list_instructor_tasks", "(", "request", ",", "course_id", ")", ":", "course_id", "=", "SlashSeparatedCourseKey", ".", "from_deprecated_string", "(", "course_id", ")", "problem_location_str", "=", "strip_if_string", "(", "request", ".", "POST", ".", "get", "(", "'problem_location_str'", ",", "False", ")", ")", "student", "=", "request", ".", "POST", ".", "get", "(", "'unique_student_identifier'", ",", "None", ")", "if", "(", "student", "is", "not", "None", ")", ":", "student", "=", "get_student_from_identifier", "(", "student", ")", "if", "(", "student", "and", "(", "not", "problem_location_str", ")", ")", ":", "return", "HttpResponseBadRequest", "(", "'unique_student_identifier must accompany problem_location_str'", ")", "if", "problem_location_str", ":", "try", ":", "module_state_key", "=", "course_id", ".", "make_usage_key_from_deprecated_string", "(", "problem_location_str", ")", "except", "InvalidKeyError", ":", "return", "HttpResponseBadRequest", "(", ")", "if", "student", ":", "tasks", "=", "lms", ".", "djangoapps", ".", "instructor_task", ".", "api", ".", "get_instructor_task_history", "(", "course_id", ",", "module_state_key", ",", "student", ")", "else", ":", "tasks", "=", "lms", ".", "djangoapps", ".", "instructor_task", ".", "api", ".", "get_instructor_task_history", "(", "course_id", ",", "module_state_key", ")", "else", ":", "tasks", "=", "lms", ".", "djangoapps", ".", "instructor_task", ".", "api", ".", "get_running_instructor_tasks", "(", "course_id", ")", "response_payload", "=", "{", "'tasks'", ":", "map", "(", "extract_task_features", ",", "tasks", ")", "}", "return", "JsonResponse", "(", "response_payload", ")" ]
list instructor tasks .
train
false
5,673
def node_redundancy(G, nodes=None): if (nodes is None): nodes = G if any(((len(G[v]) < 2) for v in nodes)): raise NetworkXError('Cannot compute redundancy coefficient for a node that has fewer than two neighbors.') return {v: _node_redundancy(G, v) for v in nodes}
[ "def", "node_redundancy", "(", "G", ",", "nodes", "=", "None", ")", ":", "if", "(", "nodes", "is", "None", ")", ":", "nodes", "=", "G", "if", "any", "(", "(", "(", "len", "(", "G", "[", "v", "]", ")", "<", "2", ")", "for", "v", "in", "nodes", ")", ")", ":", "raise", "NetworkXError", "(", "'Cannot compute redundancy coefficient for a node that has fewer than two neighbors.'", ")", "return", "{", "v", ":", "_node_redundancy", "(", "G", ",", "v", ")", "for", "v", "in", "nodes", "}" ]
computes the node redundancy coefficients for the nodes in the bipartite graph g .
train
false
5,674
def task_cli_pkg_install(distribution, package_source=PackageSource()): commands = task_package_install('clusterhq-flocker-cli', distribution, package_source) return sequence([(Effect(Sudo(command=e.intent.command, log_command_filter=e.intent.log_command_filter)) if isinstance(e.intent, Run) else e) for e in commands.intent.effects])
[ "def", "task_cli_pkg_install", "(", "distribution", ",", "package_source", "=", "PackageSource", "(", ")", ")", ":", "commands", "=", "task_package_install", "(", "'clusterhq-flocker-cli'", ",", "distribution", ",", "package_source", ")", "return", "sequence", "(", "[", "(", "Effect", "(", "Sudo", "(", "command", "=", "e", ".", "intent", ".", "command", ",", "log_command_filter", "=", "e", ".", "intent", ".", "log_command_filter", ")", ")", "if", "isinstance", "(", "e", ".", "intent", ",", "Run", ")", "else", "e", ")", "for", "e", "in", "commands", ".", "intent", ".", "effects", "]", ")" ]
install the flocker cli package .
train
false
5,675
def pick_types_forward(orig, meg=True, eeg=False, ref_meg=True, seeg=False, ecog=False, include=[], exclude=[]): info = orig['info'] sel = pick_types(info, meg, eeg, ref_meg=ref_meg, seeg=seeg, ecog=ecog, include=include, exclude=exclude) if (len(sel) == 0): raise ValueError('No valid channels found') include_ch_names = [info['ch_names'][k] for k in sel] return pick_channels_forward(orig, include_ch_names)
[ "def", "pick_types_forward", "(", "orig", ",", "meg", "=", "True", ",", "eeg", "=", "False", ",", "ref_meg", "=", "True", ",", "seeg", "=", "False", ",", "ecog", "=", "False", ",", "include", "=", "[", "]", ",", "exclude", "=", "[", "]", ")", ":", "info", "=", "orig", "[", "'info'", "]", "sel", "=", "pick_types", "(", "info", ",", "meg", ",", "eeg", ",", "ref_meg", "=", "ref_meg", ",", "seeg", "=", "seeg", ",", "ecog", "=", "ecog", ",", "include", "=", "include", ",", "exclude", "=", "exclude", ")", "if", "(", "len", "(", "sel", ")", "==", "0", ")", ":", "raise", "ValueError", "(", "'No valid channels found'", ")", "include_ch_names", "=", "[", "info", "[", "'ch_names'", "]", "[", "k", "]", "for", "k", "in", "sel", "]", "return", "pick_channels_forward", "(", "orig", ",", "include_ch_names", ")" ]
pick by channel type and names from a forward operator .
train
false
5,676
def delete_role_policy(role_name, policy_name, region=None, key=None, keyid=None, profile=None): conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) _policy = get_role_policy(role_name, policy_name, region, key, keyid, profile) if (not _policy): return True try: conn.delete_role_policy(role_name, policy_name) msg = 'Successfully deleted {0} policy for role {1}.' log.info(msg.format(policy_name, role_name)) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete {0} policy for role {1}.' log.error(msg.format(policy_name, role_name)) return False
[ "def", "delete_role_policy", "(", "role_name", ",", "policy_name", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "_policy", "=", "get_role_policy", "(", "role_name", ",", "policy_name", ",", "region", ",", "key", ",", "keyid", ",", "profile", ")", "if", "(", "not", "_policy", ")", ":", "return", "True", "try", ":", "conn", ".", "delete_role_policy", "(", "role_name", ",", "policy_name", ")", "msg", "=", "'Successfully deleted {0} policy for role {1}.'", "log", ".", "info", "(", "msg", ".", "format", "(", "policy_name", ",", "role_name", ")", ")", "return", "True", "except", "boto", ".", "exception", ".", "BotoServerError", "as", "e", ":", "log", ".", "debug", "(", "e", ")", "msg", "=", "'Failed to delete {0} policy for role {1}.'", "log", ".", "error", "(", "msg", ".", "format", "(", "policy_name", ",", "role_name", ")", ")", "return", "False" ]
delete a role policy .
train
true
5,677
@app.route('/cookies/delete') def delete_cookies(): cookies = dict(request.args.items()) r = app.make_response(redirect(url_for('view_cookies'))) for (key, value) in cookies.items(): r.delete_cookie(key=key) return r
[ "@", "app", ".", "route", "(", "'/cookies/delete'", ")", "def", "delete_cookies", "(", ")", ":", "cookies", "=", "dict", "(", "request", ".", "args", ".", "items", "(", ")", ")", "r", "=", "app", ".", "make_response", "(", "redirect", "(", "url_for", "(", "'view_cookies'", ")", ")", ")", "for", "(", "key", ",", "value", ")", "in", "cookies", ".", "items", "(", ")", ":", "r", ".", "delete_cookie", "(", "key", "=", "key", ")", "return", "r" ]
deletes cookie(s) as provided by the query string and redirects to cookie list .
train
true
5,678
def _pretty_frame_relation_type(freltyp): outstr = u'<frame relation type ({0.ID}): {0.superFrameName} -- {0.name} -> {0.subFrameName}>'.format(freltyp) return outstr
[ "def", "_pretty_frame_relation_type", "(", "freltyp", ")", ":", "outstr", "=", "u'<frame relation type ({0.ID}): {0.superFrameName} -- {0.name} -> {0.subFrameName}>'", ".", "format", "(", "freltyp", ")", "return", "outstr" ]
helper function for pretty-printing a frame relation type .
train
false
5,680
def alter_subprocess_kwargs_by_platform(**kwargs): kwargs.setdefault('close_fds', (os.name == 'posix')) if (os.name == 'nt'): CONSOLE_CREATION_FLAGS = 0 CREATE_NO_WINDOW = 134217728 CONSOLE_CREATION_FLAGS |= CREATE_NO_WINDOW kwargs.setdefault('creationflags', CONSOLE_CREATION_FLAGS) return kwargs
[ "def", "alter_subprocess_kwargs_by_platform", "(", "**", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'close_fds'", ",", "(", "os", ".", "name", "==", "'posix'", ")", ")", "if", "(", "os", ".", "name", "==", "'nt'", ")", ":", "CONSOLE_CREATION_FLAGS", "=", "0", "CREATE_NO_WINDOW", "=", "134217728", "CONSOLE_CREATION_FLAGS", "|=", "CREATE_NO_WINDOW", "kwargs", ".", "setdefault", "(", "'creationflags'", ",", "CONSOLE_CREATION_FLAGS", ")", "return", "kwargs" ]
given a dict of subprocess keyword arguments , adjust them for the current platform and return the result .
train
true
5,684
def norm_lls_grad(y, params): (mu, sigma2) = params.T dllsdmu = ((y - mu) / sigma2) dllsdsigma2 = (((((y - mu) ** 2) / sigma2) - 1) / np.sqrt(sigma2)) return np.column_stack((dllsdmu, dllsdsigma2))
[ "def", "norm_lls_grad", "(", "y", ",", "params", ")", ":", "(", "mu", ",", "sigma2", ")", "=", "params", ".", "T", "dllsdmu", "=", "(", "(", "y", "-", "mu", ")", "/", "sigma2", ")", "dllsdsigma2", "=", "(", "(", "(", "(", "(", "y", "-", "mu", ")", "**", "2", ")", "/", "sigma2", ")", "-", "1", ")", "/", "np", ".", "sqrt", "(", "sigma2", ")", ")", "return", "np", ".", "column_stack", "(", "(", "dllsdmu", ",", "dllsdsigma2", ")", ")" ]
jacobian of the normal loglikelihood with respect to the mean mu and the variance sigma2 .
train
false
5,685
def decode_unicode_obj(obj): if isinstance(obj, dict): r = {} for (k, v) in iteritems(obj): r[decode_unicode_string(k)] = decode_unicode_obj(v) return r elif isinstance(obj, six.string_types): return decode_unicode_string(obj) elif isinstance(obj, (list, tuple)): return [decode_unicode_obj(x) for x in obj] else: return obj
[ "def", "decode_unicode_obj", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "dict", ")", ":", "r", "=", "{", "}", "for", "(", "k", ",", "v", ")", "in", "iteritems", "(", "obj", ")", ":", "r", "[", "decode_unicode_string", "(", "k", ")", "]", "=", "decode_unicode_obj", "(", "v", ")", "return", "r", "elif", "isinstance", "(", "obj", ",", "six", ".", "string_types", ")", ":", "return", "decode_unicode_string", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "[", "decode_unicode_obj", "(", "x", ")", "for", "x", "in", "obj", "]", "else", ":", "return", "obj" ]
decode unicoded dict/list/tuple encoded by unicode_obj .
train
true
5,686
def approximate_taylor_polynomial(f, x, degree, scale, order=None): if (order is None): order = degree n = (order + 1) xs = ((scale * np.cos(np.linspace(0, np.pi, n, endpoint=(n % 1)))) + x) P = KroghInterpolator(xs, f(xs)) d = P.derivatives(x, der=(degree + 1)) return np.poly1d((d / factorial(np.arange((degree + 1))))[::(-1)])
[ "def", "approximate_taylor_polynomial", "(", "f", ",", "x", ",", "degree", ",", "scale", ",", "order", "=", "None", ")", ":", "if", "(", "order", "is", "None", ")", ":", "order", "=", "degree", "n", "=", "(", "order", "+", "1", ")", "xs", "=", "(", "(", "scale", "*", "np", ".", "cos", "(", "np", ".", "linspace", "(", "0", ",", "np", ".", "pi", ",", "n", ",", "endpoint", "=", "(", "n", "%", "1", ")", ")", ")", ")", "+", "x", ")", "P", "=", "KroghInterpolator", "(", "xs", ",", "f", "(", "xs", ")", ")", "d", "=", "P", ".", "derivatives", "(", "x", ",", "der", "=", "(", "degree", "+", "1", ")", ")", "return", "np", ".", "poly1d", "(", "(", "d", "/", "factorial", "(", "np", ".", "arange", "(", "(", "degree", "+", "1", ")", ")", ")", ")", "[", ":", ":", "(", "-", "1", ")", "]", ")" ]
estimate the taylor polynomial of f at x by polynomial fitting .
train
false
5,687
def aslinearoperator(A): if isinstance(A, LinearOperator): return A elif (isinstance(A, np.ndarray) or isinstance(A, np.matrix)): if (A.ndim > 2): raise ValueError('array must have ndim <= 2') A = np.atleast_2d(np.asarray(A)) return MatrixLinearOperator(A) elif isspmatrix(A): return MatrixLinearOperator(A) elif (hasattr(A, 'shape') and hasattr(A, 'matvec')): rmatvec = None dtype = None if hasattr(A, 'rmatvec'): rmatvec = A.rmatvec if hasattr(A, 'dtype'): dtype = A.dtype return LinearOperator(A.shape, A.matvec, rmatvec=rmatvec, dtype=dtype) else: raise TypeError('type not understood')
[ "def", "aslinearoperator", "(", "A", ")", ":", "if", "isinstance", "(", "A", ",", "LinearOperator", ")", ":", "return", "A", "elif", "(", "isinstance", "(", "A", ",", "np", ".", "ndarray", ")", "or", "isinstance", "(", "A", ",", "np", ".", "matrix", ")", ")", ":", "if", "(", "A", ".", "ndim", ">", "2", ")", ":", "raise", "ValueError", "(", "'array must have ndim <= 2'", ")", "A", "=", "np", ".", "atleast_2d", "(", "np", ".", "asarray", "(", "A", ")", ")", "return", "MatrixLinearOperator", "(", "A", ")", "elif", "isspmatrix", "(", "A", ")", ":", "return", "MatrixLinearOperator", "(", "A", ")", "elif", "(", "hasattr", "(", "A", ",", "'shape'", ")", "and", "hasattr", "(", "A", ",", "'matvec'", ")", ")", ":", "rmatvec", "=", "None", "dtype", "=", "None", "if", "hasattr", "(", "A", ",", "'rmatvec'", ")", ":", "rmatvec", "=", "A", ".", "rmatvec", "if", "hasattr", "(", "A", ",", "'dtype'", ")", ":", "dtype", "=", "A", ".", "dtype", "return", "LinearOperator", "(", "A", ".", "shape", ",", "A", ".", "matvec", ",", "rmatvec", "=", "rmatvec", ",", "dtype", "=", "dtype", ")", "else", ":", "raise", "TypeError", "(", "'type not understood'", ")" ]
return a as a linearoperator .
train
false
5,688
def _process_text_args(override, fontdict=None, **kwargs): if (fontdict is not None): override.update(fontdict) override.update(kwargs) return override
[ "def", "_process_text_args", "(", "override", ",", "fontdict", "=", "None", ",", "**", "kwargs", ")", ":", "if", "(", "fontdict", "is", "not", "None", ")", ":", "override", ".", "update", "(", "fontdict", ")", "override", ".", "update", "(", "kwargs", ")", "return", "override" ]
return an override dict .
train
false
5,689
def clear_time_override(): utcnow.override_time = None
[ "def", "clear_time_override", "(", ")", ":", "utcnow", ".", "override_time", "=", "None" ]
remove the overridden time .
train
false
5,691
def find_executable_linenos(filename): try: prog = open(filename, 'rU').read() except IOError as err: print >>sys.stderr, ('Not printing coverage data for %r: %s' % (filename, err)) return {} code = compile(prog, filename, 'exec') strs = find_strings(filename) return find_lines(code, strs)
[ "def", "find_executable_linenos", "(", "filename", ")", ":", "try", ":", "prog", "=", "open", "(", "filename", ",", "'rU'", ")", ".", "read", "(", ")", "except", "IOError", "as", "err", ":", "print", ">>", "sys", ".", "stderr", ",", "(", "'Not printing coverage data for %r: %s'", "%", "(", "filename", ",", "err", ")", ")", "return", "{", "}", "code", "=", "compile", "(", "prog", ",", "filename", ",", "'exec'", ")", "strs", "=", "find_strings", "(", "filename", ")", "return", "find_lines", "(", "code", ",", "strs", ")" ]
return dict where keys are line numbers in the line number table .
train
false
5,692
@ensure_csrf_cookie @cache_control(no_cache=True, no_store=True, must_revalidate=True) @require_level('staff') @require_POST def get_registration_codes(request, course_id): course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id) registration_codes = CourseRegistrationCode.objects.filter(course_id=course_id).order_by('invoice_item__invoice__company_name') company_name = request.POST['download_company_name'] if company_name: registration_codes = registration_codes.filter(invoice_item__invoice__company_name=company_name) csv_type = 'download' return registration_codes_csv('Registration_Codes.csv', registration_codes, csv_type)
[ "@", "ensure_csrf_cookie", "@", "cache_control", "(", "no_cache", "=", "True", ",", "no_store", "=", "True", ",", "must_revalidate", "=", "True", ")", "@", "require_level", "(", "'staff'", ")", "@", "require_POST", "def", "get_registration_codes", "(", "request", ",", "course_id", ")", ":", "course_id", "=", "SlashSeparatedCourseKey", ".", "from_deprecated_string", "(", "course_id", ")", "registration_codes", "=", "CourseRegistrationCode", ".", "objects", ".", "filter", "(", "course_id", "=", "course_id", ")", ".", "order_by", "(", "'invoice_item__invoice__company_name'", ")", "company_name", "=", "request", ".", "POST", "[", "'download_company_name'", "]", "if", "company_name", ":", "registration_codes", "=", "registration_codes", ".", "filter", "(", "invoice_item__invoice__company_name", "=", "company_name", ")", "csv_type", "=", "'download'", "return", "registration_codes_csv", "(", "'Registration_Codes.csv'", ",", "registration_codes", ",", "csv_type", ")" ]
respond with csv which contains a summary of all registration codes .
train
false
5,693
def version_is_compatible(imp_version, version): version_parts = version.split('.') imp_version_parts = imp_version.split('.') if (int(version_parts[0]) != int(imp_version_parts[0])): return False if (int(version_parts[1]) > int(imp_version_parts[1])): return False return True
[ "def", "version_is_compatible", "(", "imp_version", ",", "version", ")", ":", "version_parts", "=", "version", ".", "split", "(", "'.'", ")", "imp_version_parts", "=", "imp_version", ".", "split", "(", "'.'", ")", "if", "(", "int", "(", "version_parts", "[", "0", "]", ")", "!=", "int", "(", "imp_version_parts", "[", "0", "]", ")", ")", ":", "return", "False", "if", "(", "int", "(", "version_parts", "[", "1", "]", ")", ">", "int", "(", "imp_version_parts", "[", "1", "]", ")", ")", ":", "return", "False", "return", "True" ]
determine whether versions are compatible .
train
false
5,694
def sanitize_redirect(host, redirect_to): if redirect_to: try: netloc = (urlparse(redirect_to)[1] or host) except (TypeError, AttributeError): pass else: if (netloc == host): return redirect_to
[ "def", "sanitize_redirect", "(", "host", ",", "redirect_to", ")", ":", "if", "redirect_to", ":", "try", ":", "netloc", "=", "(", "urlparse", "(", "redirect_to", ")", "[", "1", "]", "or", "host", ")", "except", "(", "TypeError", ",", "AttributeError", ")", ":", "pass", "else", ":", "if", "(", "netloc", "==", "host", ")", ":", "return", "redirect_to" ]
given the hostname and an untrusted url to redirect to , return the url only if its host matches the given hostname .
train
false
5,696
def get_collection_by_alias(bus, alias): service_obj = bus_get_object(bus, SS_PATH) service_iface = dbus.Interface(service_obj, SERVICE_IFACE) collection_path = service_iface.ReadAlias(alias, signature='s') if (len(collection_path) <= 1): raise ItemNotFoundException('No collection with such alias.') return Collection(bus, collection_path)
[ "def", "get_collection_by_alias", "(", "bus", ",", "alias", ")", ":", "service_obj", "=", "bus_get_object", "(", "bus", ",", "SS_PATH", ")", "service_iface", "=", "dbus", ".", "Interface", "(", "service_obj", ",", "SERVICE_IFACE", ")", "collection_path", "=", "service_iface", ".", "ReadAlias", "(", "alias", ",", "signature", "=", "'s'", ")", "if", "(", "len", "(", "collection_path", ")", "<=", "1", ")", ":", "raise", "ItemNotFoundException", "(", "'No collection with such alias.'", ")", "return", "Collection", "(", "bus", ",", "collection_path", ")" ]
returns the collection with the given alias .
train
false
5,698
def LocalGroupEnum(): resume = 0 nmembers = 0 while 1: (data, total, resume) = win32net.NetLocalGroupEnum(server, 1, resume) for group in data: verbose(('Found group %(name)s:%(comment)s ' % group)) memberresume = 0 while 1: (memberdata, total, memberresume) = win32net.NetLocalGroupGetMembers(server, group['name'], 2, resume) for member in memberdata: (username, domain, type) = win32security.LookupAccountSid(server, member['sid']) nmembers = (nmembers + 1) verbose((' Member %s (%s)' % (username, member['domainandname']))) if (memberresume == 0): break if (not resume): break assert nmembers, 'Couldnt find a single member in a single group!' print 'Enumerated all the local groups'
[ "def", "LocalGroupEnum", "(", ")", ":", "resume", "=", "0", "nmembers", "=", "0", "while", "1", ":", "(", "data", ",", "total", ",", "resume", ")", "=", "win32net", ".", "NetLocalGroupEnum", "(", "server", ",", "1", ",", "resume", ")", "for", "group", "in", "data", ":", "verbose", "(", "(", "'Found group %(name)s:%(comment)s '", "%", "group", ")", ")", "memberresume", "=", "0", "while", "1", ":", "(", "memberdata", ",", "total", ",", "memberresume", ")", "=", "win32net", ".", "NetLocalGroupGetMembers", "(", "server", ",", "group", "[", "'name'", "]", ",", "2", ",", "resume", ")", "for", "member", "in", "memberdata", ":", "(", "username", ",", "domain", ",", "type", ")", "=", "win32security", ".", "LookupAccountSid", "(", "server", ",", "member", "[", "'sid'", "]", ")", "nmembers", "=", "(", "nmembers", "+", "1", ")", "verbose", "(", "(", "' Member %s (%s)'", "%", "(", "username", ",", "member", "[", "'domainandname'", "]", ")", ")", ")", "if", "(", "memberresume", "==", "0", ")", ":", "break", "if", "(", "not", "resume", ")", ":", "break", "assert", "nmembers", ",", "'Couldnt find a single member in a single group!'", "print", "'Enumerated all the local groups'" ]
enumerates all the local groups .
train
false
5,699
def quantity_allclose(a, b, rtol=1e-05, atol=None, **kwargs): import numpy as np return np.allclose(*_unquantify_allclose_arguments(a, b, rtol, atol), **kwargs)
[ "def", "quantity_allclose", "(", "a", ",", "b", ",", "rtol", "=", "1e-05", ",", "atol", "=", "None", ",", "**", "kwargs", ")", ":", "import", "numpy", "as", "np", "return", "np", ".", "allclose", "(", "*", "_unquantify_allclose_arguments", "(", "a", ",", "b", ",", "rtol", ",", "atol", ")", ",", "**", "kwargs", ")" ]
returns true if two arrays are element-wise equal within a tolerance .
train
false
5,700
@command('(mv|sw)\\s*(\\d{1,4})\\s*[\\s,]\\s*(\\d{1,4})') def songlist_mv_sw(action, a, b): (i, j) = ((int(a) - 1), (int(b) - 1)) if (action == 'mv'): g.model.songs.insert(j, g.model.songs.pop(i)) g.message = (util.F('song move') % (g.model[j].title, b)) elif (action == 'sw'): (g.model[i], g.model[j]) = (g.model[j], g.model[i]) g.message = (util.F('song sw') % (min(a, b), max(a, b))) g.content = content.generate_songlist_display()
[ "@", "command", "(", "'(mv|sw)\\\\s*(\\\\d{1,4})\\\\s*[\\\\s,]\\\\s*(\\\\d{1,4})'", ")", "def", "songlist_mv_sw", "(", "action", ",", "a", ",", "b", ")", ":", "(", "i", ",", "j", ")", "=", "(", "(", "int", "(", "a", ")", "-", "1", ")", ",", "(", "int", "(", "b", ")", "-", "1", ")", ")", "if", "(", "action", "==", "'mv'", ")", ":", "g", ".", "model", ".", "songs", ".", "insert", "(", "j", ",", "g", ".", "model", ".", "songs", ".", "pop", "(", "i", ")", ")", "g", ".", "message", "=", "(", "util", ".", "F", "(", "'song move'", ")", "%", "(", "g", ".", "model", "[", "j", "]", ".", "title", ",", "b", ")", ")", "elif", "(", "action", "==", "'sw'", ")", ":", "(", "g", ".", "model", "[", "i", "]", ",", "g", ".", "model", "[", "j", "]", ")", "=", "(", "g", ".", "model", "[", "j", "]", ",", "g", ".", "model", "[", "i", "]", ")", "g", ".", "message", "=", "(", "util", ".", "F", "(", "'song sw'", ")", "%", "(", "min", "(", "a", ",", "b", ")", ",", "max", "(", "a", ",", "b", ")", ")", ")", "g", ".", "content", "=", "content", ".", "generate_songlist_display", "(", ")" ]
move a song or swap two songs .
train
false
5,701
def _RemoveRedundantHandlers(handler_list): no_duplicates = [] patterns_found_so_far = set() for i in xrange(len(handler_list)): current_handler = handler_list[i] matched_by_later = False for j in xrange((i + 1), len(handler_list)): if current_handler.IsFullyHandledBy(handler_list[j]): matched_by_later = True break if ((not matched_by_later) and (current_handler.pattern not in patterns_found_so_far)): no_duplicates.append(current_handler) patterns_found_so_far.add(current_handler.pattern) return no_duplicates
[ "def", "_RemoveRedundantHandlers", "(", "handler_list", ")", ":", "no_duplicates", "=", "[", "]", "patterns_found_so_far", "=", "set", "(", ")", "for", "i", "in", "xrange", "(", "len", "(", "handler_list", ")", ")", ":", "current_handler", "=", "handler_list", "[", "i", "]", "matched_by_later", "=", "False", "for", "j", "in", "xrange", "(", "(", "i", "+", "1", ")", ",", "len", "(", "handler_list", ")", ")", ":", "if", "current_handler", ".", "IsFullyHandledBy", "(", "handler_list", "[", "j", "]", ")", ":", "matched_by_later", "=", "True", "break", "if", "(", "(", "not", "matched_by_later", ")", "and", "(", "current_handler", ".", "pattern", "not", "in", "patterns_found_so_far", ")", ")", ":", "no_duplicates", ".", "append", "(", "current_handler", ")", "patterns_found_so_far", ".", "add", "(", "current_handler", ".", "pattern", ")", "return", "no_duplicates" ]
removes duplicated or unnecessary handlers from the list .
train
false
5,702
def remove_content_type(app_label, model_name): try: ct = ContentType.objects.get(app_label=app_label, model=model_name.lower()) ct.delete() except ContentType.DoesNotExist: pass
[ "def", "remove_content_type", "(", "app_label", ",", "model_name", ")", ":", "try", ":", "ct", "=", "ContentType", ".", "objects", ".", "get", "(", "app_label", "=", "app_label", ",", "model", "=", "model_name", ".", "lower", "(", ")", ")", "ct", ".", "delete", "(", ")", "except", "ContentType", ".", "DoesNotExist", ":", "pass" ]
delete the given model's entry from the django contenttype table .
train
false
5,705
def stub_verify(conn, cert, errno, errdepth, code): return True
[ "def", "stub_verify", "(", "conn", ",", "cert", ",", "errno", ",", "errdepth", ",", "code", ")", ":", "return", "True" ]
we don't verify the server when we attempt a mitm .
train
false
5,707
def server_message(response): message = None if (response.headers.get('content-type') and ('text/html' in response.headers.get('content-type'))): try: soup = BeautifulSoup(response.content, 'html5lib') except Exception: pass message = soup.find('body') elements = ('header', 'script', 'footer', 'nav', 'input', 'textarea') for element in elements: for tag in soup.find_all(element): tag.replaceWith('') message = (message.text if message else soup.text) message = message.strip() if (message is None): message = response.content.strip() if message: if (len(message) > 150): message = (message[:150] + '...') logger.debug('Server responded with message: %s', message)
[ "def", "server_message", "(", "response", ")", ":", "message", "=", "None", "if", "(", "response", ".", "headers", ".", "get", "(", "'content-type'", ")", "and", "(", "'text/html'", "in", "response", ".", "headers", ".", "get", "(", "'content-type'", ")", ")", ")", ":", "try", ":", "soup", "=", "BeautifulSoup", "(", "response", ".", "content", ",", "'html5lib'", ")", "except", "Exception", ":", "pass", "message", "=", "soup", ".", "find", "(", "'body'", ")", "elements", "=", "(", "'header'", ",", "'script'", ",", "'footer'", ",", "'nav'", ",", "'input'", ",", "'textarea'", ")", "for", "element", "in", "elements", ":", "for", "tag", "in", "soup", ".", "find_all", "(", "element", ")", ":", "tag", ".", "replaceWith", "(", "''", ")", "message", "=", "(", "message", ".", "text", "if", "message", "else", "soup", ".", "text", ")", "message", "=", "message", ".", "strip", "(", ")", "if", "(", "message", "is", "None", ")", ":", "message", "=", "response", ".", "content", ".", "strip", "(", ")", "if", "message", ":", "if", "(", "len", "(", "message", ")", ">", "150", ")", ":", "message", "=", "(", "message", "[", ":", "150", "]", "+", "'...'", ")", "logger", ".", "debug", "(", "'Server responded with message: %s'", ",", "message", ")" ]
extract the server message from the response and log it to the logger at debug level .
train
false
5,708
def exceptionFromStanza(stanza): children = [] condition = text = textLang = appCondition = type = code = None for element in stanza.elements(): if ((element.name == 'error') and (element.uri == stanza.uri)): code = element.getAttribute('code') type = element.getAttribute('type') error = _parseError(element, NS_XMPP_STANZAS) condition = error['condition'] text = error['text'] textLang = error['textLang'] appCondition = error['appCondition'] if ((not condition) and code): (condition, type) = CODES_TO_CONDITIONS[code] text = _getText(stanza.error) else: children.append(element) if (condition is None): return StanzaError(None) exception = StanzaError(condition, type, text, textLang, appCondition) exception.children = children exception.stanza = stanza return exception
[ "def", "exceptionFromStanza", "(", "stanza", ")", ":", "children", "=", "[", "]", "condition", "=", "text", "=", "textLang", "=", "appCondition", "=", "type", "=", "code", "=", "None", "for", "element", "in", "stanza", ".", "elements", "(", ")", ":", "if", "(", "(", "element", ".", "name", "==", "'error'", ")", "and", "(", "element", ".", "uri", "==", "stanza", ".", "uri", ")", ")", ":", "code", "=", "element", ".", "getAttribute", "(", "'code'", ")", "type", "=", "element", ".", "getAttribute", "(", "'type'", ")", "error", "=", "_parseError", "(", "element", ",", "NS_XMPP_STANZAS", ")", "condition", "=", "error", "[", "'condition'", "]", "text", "=", "error", "[", "'text'", "]", "textLang", "=", "error", "[", "'textLang'", "]", "appCondition", "=", "error", "[", "'appCondition'", "]", "if", "(", "(", "not", "condition", ")", "and", "code", ")", ":", "(", "condition", ",", "type", ")", "=", "CODES_TO_CONDITIONS", "[", "code", "]", "text", "=", "_getText", "(", "stanza", ".", "error", ")", "else", ":", "children", ".", "append", "(", "element", ")", "if", "(", "condition", "is", "None", ")", ":", "return", "StanzaError", "(", "None", ")", "exception", "=", "StanzaError", "(", "condition", ",", "type", ",", "text", ",", "textLang", ",", "appCondition", ")", "exception", ".", "children", "=", "children", "exception", ".", "stanza", "=", "stanza", "return", "exception" ]
build an exception object from an error stanza .
train
false
5,709
def batch_shuffle(index_array, batch_size): batch_count = int((len(index_array) / batch_size)) last_batch = index_array[(batch_count * batch_size):] index_array = index_array[:(batch_count * batch_size)] index_array = index_array.reshape((batch_count, batch_size)) np.random.shuffle(index_array) index_array = index_array.flatten() return np.append(index_array, last_batch)
[ "def", "batch_shuffle", "(", "index_array", ",", "batch_size", ")", ":", "batch_count", "=", "int", "(", "(", "len", "(", "index_array", ")", "/", "batch_size", ")", ")", "last_batch", "=", "index_array", "[", "(", "batch_count", "*", "batch_size", ")", ":", "]", "index_array", "=", "index_array", "[", ":", "(", "batch_count", "*", "batch_size", ")", "]", "index_array", "=", "index_array", ".", "reshape", "(", "(", "batch_count", ",", "batch_size", ")", ")", "np", ".", "random", ".", "shuffle", "(", "index_array", ")", "index_array", "=", "index_array", ".", "flatten", "(", ")", "return", "np", ".", "append", "(", "index_array", ",", "last_batch", ")" ]
this shuffles an array in a batch-wise fashion .
train
false
5,710
def _cond(condition, then_lambda, else_lambda): try: cond_fn = tf.cond except AttributeError: from tensorflow.python.ops import control_flow_ops cond_fn = control_flow_ops.cond return cond_fn(condition, then_lambda, else_lambda)
[ "def", "_cond", "(", "condition", ",", "then_lambda", ",", "else_lambda", ")", ":", "try", ":", "cond_fn", "=", "tf", ".", "cond", "except", "AttributeError", ":", "from", "tensorflow", ".", "python", ".", "ops", "import", "control_flow_ops", "cond_fn", "=", "control_flow_ops", ".", "cond", "return", "cond_fn", "(", "condition", ",", "then_lambda", ",", "else_lambda", ")" ]
backwards compatible interface to tf.cond .
train
false
5,711
def hrm_training_job_title(row): try: person_id = row.hrm_training.person_id except AttributeError: person_id = None if person_id: s3db = current.s3db table = s3db.hrm_human_resource jtable = s3db.hrm_job_title query = (((table.person_id == person_id) & (table.status != 2)) & (table.job_title_id == jtable.id)) jobs = current.db(query).select(jtable.name, distinct=True, orderby=jtable.name) if jobs: output = '' for job in jobs: repr = job.name if output: output = ('%s, %s' % (output, repr)) else: output = repr return output return current.messages['NONE']
[ "def", "hrm_training_job_title", "(", "row", ")", ":", "try", ":", "person_id", "=", "row", ".", "hrm_training", ".", "person_id", "except", "AttributeError", ":", "person_id", "=", "None", "if", "person_id", ":", "s3db", "=", "current", ".", "s3db", "table", "=", "s3db", ".", "hrm_human_resource", "jtable", "=", "s3db", ".", "hrm_job_title", "query", "=", "(", "(", "(", "table", ".", "person_id", "==", "person_id", ")", "&", "(", "table", ".", "status", "!=", "2", ")", ")", "&", "(", "table", ".", "job_title_id", "==", "jtable", ".", "id", ")", ")", "jobs", "=", "current", ".", "db", "(", "query", ")", ".", "select", "(", "jtable", ".", "name", ",", "distinct", "=", "True", ",", "orderby", "=", "jtable", ".", "name", ")", "if", "jobs", ":", "output", "=", "''", "for", "job", "in", "jobs", ":", "repr", "=", "job", ".", "name", "if", "output", ":", "output", "=", "(", "'%s, %s'", "%", "(", "output", ",", "repr", ")", ")", "else", ":", "output", "=", "repr", "return", "output", "return", "current", ".", "messages", "[", "'NONE'", "]" ]
which job title(s) the person is active with .
train
false
5,713
def print_queries(): for query in connection.queries: print (query['sql'] + ';\n')
[ "def", "print_queries", "(", ")", ":", "for", "query", "in", "connection", ".", "queries", ":", "print", "(", "query", "[", "'sql'", "]", "+", "';\\n'", ")" ]
print all sql queries executed so far .
train
false
5,714
def load_host_keys(filename): from paramiko.hostkeys import HostKeys return HostKeys(filename)
[ "def", "load_host_keys", "(", "filename", ")", ":", "from", "paramiko", ".", "hostkeys", "import", "HostKeys", "return", "HostKeys", "(", "filename", ")" ]
read a file of known ssh host keys .
train
false
5,716
def cas(key, value, old_value): store = load() if (key not in store): return False if (store[key] != old_value): return False store[key] = value dump(store) return True
[ "def", "cas", "(", "key", ",", "value", ",", "old_value", ")", ":", "store", "=", "load", "(", ")", "if", "(", "key", "not", "in", "store", ")", ":", "return", "False", "if", "(", "store", "[", "key", "]", "!=", "old_value", ")", ":", "return", "False", "store", "[", "key", "]", "=", "value", "dump", "(", "store", ")", "return", "True" ]
check and set a value in the minion datastore .
train
true
5,717
def find_lines_from_code(code, strs): linenos = {} for (_, lineno) in dis.findlinestarts(code): if (lineno not in strs): linenos[lineno] = 1 return linenos
[ "def", "find_lines_from_code", "(", "code", ",", "strs", ")", ":", "linenos", "=", "{", "}", "for", "(", "_", ",", "lineno", ")", "in", "dis", ".", "findlinestarts", "(", "code", ")", ":", "if", "(", "lineno", "not", "in", "strs", ")", ":", "linenos", "[", "lineno", "]", "=", "1", "return", "linenos" ]
return dict where keys are lines in the line number table .
train
false
5,718
def report_config_interaction(modified, modifiers): if isinstance(modified, str): modified = (modified,) if isinstance(modifiers, str): modifiers = (modifiers,) for var in modified: VAR_MODIFIERS.setdefault(var, set()).update(modifiers)
[ "def", "report_config_interaction", "(", "modified", ",", "modifiers", ")", ":", "if", "isinstance", "(", "modified", ",", "str", ")", ":", "modified", "=", "(", "modified", ",", ")", "if", "isinstance", "(", "modifiers", ",", "str", ")", ":", "modifiers", "=", "(", "modifiers", ",", ")", "for", "var", "in", "modified", ":", "VAR_MODIFIERS", ".", "setdefault", "(", "var", ",", "set", "(", ")", ")", ".", "update", "(", "modifiers", ")" ]
registers config option interaction to be checked by set_by_cli .
train
false
5,719
def _get_echo_exe_path(): if (sys.platform == 'win32'): return os.path.join(utils.abs_datapath(), 'userscripts', 'echo.bat') else: return 'echo'
[ "def", "_get_echo_exe_path", "(", ")", ":", "if", "(", "sys", ".", "platform", "==", "'win32'", ")", ":", "return", "os", ".", "path", ".", "join", "(", "utils", ".", "abs_datapath", "(", ")", ",", "'userscripts'", ",", "'echo.bat'", ")", "else", ":", "return", "'echo'" ]
return the path to an echo-like command .
train
false
5,720
def examplesquickTest(vm, prompt=Prompt): installPexpect(vm, prompt) vm.sendline('sudo -n python ~/mininet/examples/test/runner.py -v -quick')
[ "def", "examplesquickTest", "(", "vm", ",", "prompt", "=", "Prompt", ")", ":", "installPexpect", "(", "vm", ",", "prompt", ")", "vm", ".", "sendline", "(", "'sudo -n python ~/mininet/examples/test/runner.py -v -quick'", ")" ]
quick test of mininet examples .
train
false
5,723
def posts(request, forum_slug, thread_id, form=None, post_preview=None, is_reply=False): thread = get_object_or_404(Thread, pk=thread_id) forum = thread.forum if ((forum.slug != forum_slug) and (not is_reply)): new_forum = get_object_or_404(Forum, slug=forum_slug) if new_forum.allows_viewing_by(request.user): return HttpResponseRedirect(thread.get_absolute_url()) raise Http404 elif (forum.slug != forum_slug): raise Http404 if (not forum.allows_viewing_by(request.user)): raise Http404 posts_ = thread.post_set.all() count = posts_.count() if count: last_post = posts_[(count - 1)] else: last_post = None posts_ = posts_.select_related('author', 'updated_by') posts_ = posts_.extra(select={'author_post_count': 'SELECT COUNT(*) FROM forums_post WHERE forums_post.author_id = auth_user.id'}) posts_ = paginate(request, posts_, constants.POSTS_PER_PAGE, count=count) if (not form): form = ReplyForm() feed_urls = ((reverse('forums.posts.feed', kwargs={'forum_slug': forum_slug, 'thread_id': thread_id}), PostsFeed().title(thread)),) is_watching_thread = (request.user.is_authenticated() and NewPostEvent.is_notifying(request.user, thread)) return render(request, 'forums/posts.html', {'forum': forum, 'thread': thread, 'posts': posts_, 'form': form, 'count': count, 'last_post': last_post, 'post_preview': post_preview, 'is_watching_thread': is_watching_thread, 'feeds': feed_urls, 'forums': Forum.objects.all()})
[ "def", "posts", "(", "request", ",", "forum_slug", ",", "thread_id", ",", "form", "=", "None", ",", "post_preview", "=", "None", ",", "is_reply", "=", "False", ")", ":", "thread", "=", "get_object_or_404", "(", "Thread", ",", "pk", "=", "thread_id", ")", "forum", "=", "thread", ".", "forum", "if", "(", "(", "forum", ".", "slug", "!=", "forum_slug", ")", "and", "(", "not", "is_reply", ")", ")", ":", "new_forum", "=", "get_object_or_404", "(", "Forum", ",", "slug", "=", "forum_slug", ")", "if", "new_forum", ".", "allows_viewing_by", "(", "request", ".", "user", ")", ":", "return", "HttpResponseRedirect", "(", "thread", ".", "get_absolute_url", "(", ")", ")", "raise", "Http404", "elif", "(", "forum", ".", "slug", "!=", "forum_slug", ")", ":", "raise", "Http404", "if", "(", "not", "forum", ".", "allows_viewing_by", "(", "request", ".", "user", ")", ")", ":", "raise", "Http404", "posts_", "=", "thread", ".", "post_set", ".", "all", "(", ")", "count", "=", "posts_", ".", "count", "(", ")", "if", "count", ":", "last_post", "=", "posts_", "[", "(", "count", "-", "1", ")", "]", "else", ":", "last_post", "=", "None", "posts_", "=", "posts_", ".", "select_related", "(", "'author'", ",", "'updated_by'", ")", "posts_", "=", "posts_", ".", "extra", "(", "select", "=", "{", "'author_post_count'", ":", "'SELECT COUNT(*) FROM forums_post WHERE forums_post.author_id = auth_user.id'", "}", ")", "posts_", "=", "paginate", "(", "request", ",", "posts_", ",", "constants", ".", "POSTS_PER_PAGE", ",", "count", "=", "count", ")", "if", "(", "not", "form", ")", ":", "form", "=", "ReplyForm", "(", ")", "feed_urls", "=", "(", "(", "reverse", "(", "'forums.posts.feed'", ",", "kwargs", "=", "{", "'forum_slug'", ":", "forum_slug", ",", "'thread_id'", ":", "thread_id", "}", ")", ",", "PostsFeed", "(", ")", ".", "title", "(", "thread", ")", ")", ",", ")", "is_watching_thread", "=", "(", "request", ".", "user", ".", "is_authenticated", "(", ")", "and", "NewPostEvent", ".", "is_notifying", "(", "request", ".", "user", ",", "thread", ")", ")", "return", "render", "(", "request", ",", "'forums/posts.html'", ",", "{", "'forum'", ":", "forum", ",", "'thread'", ":", "thread", ",", "'posts'", ":", "posts_", ",", "'form'", ":", "form", ",", "'count'", ":", "count", ",", "'last_post'", ":", "last_post", ",", "'post_preview'", ":", "post_preview", ",", "'is_watching_thread'", ":", "is_watching_thread", ",", "'feeds'", ":", "feed_urls", ",", "'forums'", ":", "Forum", ".", "objects", ".", "all", "(", ")", "}", ")" ]
view all the posts in a thread .
train
false
5,725
def reduce(fn, sequences, outputs_info, non_sequences=None, go_backwards=False, mode=None, name=None): rval = scan(fn=fn, sequences=sequences, outputs_info=outputs_info, non_sequences=non_sequences, go_backwards=go_backwards, truncate_gradient=(-1), mode=mode, name=name) if isinstance(rval[0], (list, tuple)): return ([x[(-1)] for x in rval[0]], rval[1]) else: return (rval[0][(-1)], rval[1])
[ "def", "reduce", "(", "fn", ",", "sequences", ",", "outputs_info", ",", "non_sequences", "=", "None", ",", "go_backwards", "=", "False", ",", "mode", "=", "None", ",", "name", "=", "None", ")", ":", "rval", "=", "scan", "(", "fn", "=", "fn", ",", "sequences", "=", "sequences", ",", "outputs_info", "=", "outputs_info", ",", "non_sequences", "=", "non_sequences", ",", "go_backwards", "=", "go_backwards", ",", "truncate_gradient", "=", "(", "-", "1", ")", ",", "mode", "=", "mode", ",", "name", "=", "name", ")", "if", "isinstance", "(", "rval", "[", "0", "]", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "(", "[", "x", "[", "(", "-", "1", ")", "]", "for", "x", "in", "rval", "[", "0", "]", "]", ",", "rval", "[", "1", "]", ")", "else", ":", "return", "(", "rval", "[", "0", "]", "[", "(", "-", "1", ")", "]", ",", "rval", "[", "1", "]", ")" ]
similar behaviour to python's reduce .
train
false
5,726
def get_collections(): return _do_mb_query('collection', '')
[ "def", "get_collections", "(", ")", ":", "return", "_do_mb_query", "(", "'collection'", ",", "''", ")" ]
list the collections for the currently authenticated user as a dict with a collection-list key .
train
false
5,729
def test_negative_and_float__sparktext(): 'Test negative values' chart = Line() chart.add('_', [0.1, 0.2, 0.9, (-0.5)]) assert (chart.render_sparktext() == u('\xe2\x96\x81\xe2\x96\x82\xe2\x96\x88\xe2\x96\x81'))
[ "def", "test_negative_and_float__sparktext", "(", ")", ":", "chart", "=", "Line", "(", ")", "chart", ".", "add", "(", "'_'", ",", "[", "0.1", ",", "0.2", ",", "0.9", ",", "(", "-", "0.5", ")", "]", ")", "assert", "(", "chart", ".", "render_sparktext", "(", ")", "==", "u", "(", "'\\xe2\\x96\\x81\\xe2\\x96\\x82\\xe2\\x96\\x88\\xe2\\x96\\x81'", ")", ")" ]
test negative values .
train
false
5,730
def pressure_network(flow_rates, Qtot, k): P = (k * (flow_rates ** 2)) F = np.hstack(((P[1:] - P[0]), (flow_rates.sum() - Qtot))) return F
[ "def", "pressure_network", "(", "flow_rates", ",", "Qtot", ",", "k", ")", ":", "P", "=", "(", "k", "*", "(", "flow_rates", "**", "2", ")", ")", "F", "=", "np", ".", "hstack", "(", "(", "(", "P", "[", "1", ":", "]", "-", "P", "[", "0", "]", ")", ",", "(", "flow_rates", ".", "sum", "(", ")", "-", "Qtot", ")", ")", ")", "return", "F" ]
evaluate the non-linear equation system representing the pressures and flows in a system of n parallel pipes : f_i = p_i - p_0 .
train
false
5,731
def get_tensor_children_placeholders(tensor): placeholders_list = [] if (tensor.op.type == 'Placeholder'): placeholders_list.append(tensor) if tensor.op: for t in tensor.op.outputs: if (not ('read:0' in t.name)): placeholders_list += get_tensor_children_placeholders(t) return list(set(placeholders_list))
[ "def", "get_tensor_children_placeholders", "(", "tensor", ")", ":", "placeholders_list", "=", "[", "]", "if", "(", "tensor", ".", "op", ".", "type", "==", "'Placeholder'", ")", ":", "placeholders_list", ".", "append", "(", "tensor", ")", "if", "tensor", ".", "op", ":", "for", "t", "in", "tensor", ".", "op", ".", "outputs", ":", "if", "(", "not", "(", "'read:0'", "in", "t", ".", "name", ")", ")", ":", "placeholders_list", "+=", "get_tensor_children_placeholders", "(", "t", ")", "return", "list", "(", "set", "(", "placeholders_list", ")", ")" ]
get all placeholders that depend on the given tensor .
train
false
5,732
def change_settings(new_settings={}, file=None): gl = globals() if (file is not None): execfile(file) gl.update(locals()) gl.update(new_settings)
[ "def", "change_settings", "(", "new_settings", "=", "{", "}", ",", "file", "=", "None", ")", ":", "gl", "=", "globals", "(", ")", "if", "(", "file", "is", "not", "None", ")", ":", "execfile", "(", "file", ")", "gl", ".", "update", "(", "locals", "(", ")", ")", "gl", ".", "update", "(", "new_settings", ")" ]
changes the value of configuration variables .
train
false
5,733
def getfullnameof(mod, xtrapath=None): from distutils.sysconfig import get_python_lib numpy_core_paths = [os.path.join(get_python_lib(), 'numpy', 'core')] if is_venv: numpy_core_paths.append(os.path.join(base_prefix, 'Lib', 'site-packages', 'numpy', 'core')) epath = ((sys.path + numpy_core_paths) + winutils.get_system_path()) if (xtrapath is not None): if (type(xtrapath) == type('')): epath.insert(0, xtrapath) else: epath = (xtrapath + epath) for p in epath: npth = os.path.join(p, mod) if (os.path.exists(npth) and matchDLLArch(npth)): return npth for p in epath: npth = os.path.join(p, mod.lower()) if (os.path.exists(npth) and matchDLLArch(npth)): return npth return ''
[ "def", "getfullnameof", "(", "mod", ",", "xtrapath", "=", "None", ")", ":", "from", "distutils", ".", "sysconfig", "import", "get_python_lib", "numpy_core_paths", "=", "[", "os", ".", "path", ".", "join", "(", "get_python_lib", "(", ")", ",", "'numpy'", ",", "'core'", ")", "]", "if", "is_venv", ":", "numpy_core_paths", ".", "append", "(", "os", ".", "path", ".", "join", "(", "base_prefix", ",", "'Lib'", ",", "'site-packages'", ",", "'numpy'", ",", "'core'", ")", ")", "epath", "=", "(", "(", "sys", ".", "path", "+", "numpy_core_paths", ")", "+", "winutils", ".", "get_system_path", "(", ")", ")", "if", "(", "xtrapath", "is", "not", "None", ")", ":", "if", "(", "type", "(", "xtrapath", ")", "==", "type", "(", "''", ")", ")", ":", "epath", ".", "insert", "(", "0", ",", "xtrapath", ")", "else", ":", "epath", "=", "(", "xtrapath", "+", "epath", ")", "for", "p", "in", "epath", ":", "npth", "=", "os", ".", "path", ".", "join", "(", "p", ",", "mod", ")", "if", "(", "os", ".", "path", ".", "exists", "(", "npth", ")", "and", "matchDLLArch", "(", "npth", ")", ")", ":", "return", "npth", "for", "p", "in", "epath", ":", "npth", "=", "os", ".", "path", ".", "join", "(", "p", ",", "mod", ".", "lower", "(", ")", ")", "if", "(", "os", ".", "path", ".", "exists", "(", "npth", ")", "and", "matchDLLArch", "(", "npth", ")", ")", ":", "return", "npth", "return", "''" ]
return the full path name of mod .
train
false
5,734
@register.simple_tag def simple_unlimited_args_kwargs(one, two='hi', *args, **kwargs): sorted_kwarg = sorted(kwargs.iteritems(), key=operator.itemgetter(0)) return ('simple_unlimited_args_kwargs - Expected result: %s / %s' % (', '.join([unicode(arg) for arg in ([one, two] + list(args))]), ', '.join([('%s=%s' % (k, v)) for (k, v) in sorted_kwarg])))
[ "@", "register", ".", "simple_tag", "def", "simple_unlimited_args_kwargs", "(", "one", ",", "two", "=", "'hi'", ",", "*", "args", ",", "**", "kwargs", ")", ":", "sorted_kwarg", "=", "sorted", "(", "kwargs", ".", "iteritems", "(", ")", ",", "key", "=", "operator", ".", "itemgetter", "(", "0", ")", ")", "return", "(", "'simple_unlimited_args_kwargs - Expected result: %s / %s'", "%", "(", "', '", ".", "join", "(", "[", "unicode", "(", "arg", ")", "for", "arg", "in", "(", "[", "one", ",", "two", "]", "+", "list", "(", "args", ")", ")", "]", ")", ",", "', '", ".", "join", "(", "[", "(", "'%s=%s'", "%", "(", "k", ",", "v", ")", ")", "for", "(", "k", ",", "v", ")", "in", "sorted_kwarg", "]", ")", ")", ")" ]
expected simple_unlimited_args_kwargs __doc__ .
train
false
5,735
def image_meta_to_http_headers(image_meta): headers = {} for (k, v) in image_meta.items(): if (v is not None): if (k == 'properties'): for (pk, pv) in v.items(): if (pv is not None): headers[('x-image-meta-property-%s' % pk.lower())] = unicode(pv) else: headers[('x-image-meta-%s' % k.lower())] = unicode(v) return headers
[ "def", "image_meta_to_http_headers", "(", "image_meta", ")", ":", "headers", "=", "{", "}", "for", "(", "k", ",", "v", ")", "in", "image_meta", ".", "items", "(", ")", ":", "if", "(", "v", "is", "not", "None", ")", ":", "if", "(", "k", "==", "'properties'", ")", ":", "for", "(", "pk", ",", "pv", ")", "in", "v", ".", "items", "(", ")", ":", "if", "(", "pv", "is", "not", "None", ")", ":", "headers", "[", "(", "'x-image-meta-property-%s'", "%", "pk", ".", "lower", "(", ")", ")", "]", "=", "unicode", "(", "pv", ")", "else", ":", "headers", "[", "(", "'x-image-meta-%s'", "%", "k", ".", "lower", "(", ")", ")", "]", "=", "unicode", "(", "v", ")", "return", "headers" ]
translates a set of image metadata into a dict of http headers that can be fed to either a webob request object or an httplib connection .
train
false
5,737
def cleanQuery(query): retVal = query for sqlStatements in SQL_STATEMENTS.values(): for sqlStatement in sqlStatements: sqlStatementEsc = sqlStatement.replace('(', '\\(') queryMatch = re.search(('(%s)' % sqlStatementEsc), query, re.I) if (queryMatch and ('sys_exec' not in query)): retVal = retVal.replace(queryMatch.group(1), sqlStatement.upper()) return retVal
[ "def", "cleanQuery", "(", "query", ")", ":", "retVal", "=", "query", "for", "sqlStatements", "in", "SQL_STATEMENTS", ".", "values", "(", ")", ":", "for", "sqlStatement", "in", "sqlStatements", ":", "sqlStatementEsc", "=", "sqlStatement", ".", "replace", "(", "'('", ",", "'\\\\('", ")", "queryMatch", "=", "re", ".", "search", "(", "(", "'(%s)'", "%", "sqlStatementEsc", ")", ",", "query", ",", "re", ".", "I", ")", "if", "(", "queryMatch", "and", "(", "'sys_exec'", "not", "in", "query", ")", ")", ":", "retVal", "=", "retVal", ".", "replace", "(", "queryMatch", ".", "group", "(", "1", ")", ",", "sqlStatement", ".", "upper", "(", ")", ")", "return", "retVal" ]
switch all sql statement keywords to upper case .
train
false
5,741
def instance_decorator(decorator): class Decorator(object, ): def __init__(self, func=nop, *args, **kws): self.__name__ = func.__name__ self.__doc__ = func.__doc__ self._data_name = ('%s_%d_decorated_instance' % (func.__name__, id(self))) self._func = func self._args = args self._kws = kws def __get__(self, obj, cls=None): if (obj is None): return data_name = self._data_name try: return obj.__dict__[data_name] except KeyError: decorated = decorator(obj, self._func, *self._args, **self._kws) obj.__dict__[data_name] = decorated return decorated return Decorator
[ "def", "instance_decorator", "(", "decorator", ")", ":", "class", "Decorator", "(", "object", ",", ")", ":", "def", "__init__", "(", "self", ",", "func", "=", "nop", ",", "*", "args", ",", "**", "kws", ")", ":", "self", ".", "__name__", "=", "func", ".", "__name__", "self", ".", "__doc__", "=", "func", ".", "__doc__", "self", ".", "_data_name", "=", "(", "'%s_%d_decorated_instance'", "%", "(", "func", ".", "__name__", ",", "id", "(", "self", ")", ")", ")", "self", ".", "_func", "=", "func", "self", ".", "_args", "=", "args", "self", ".", "_kws", "=", "kws", "def", "__get__", "(", "self", ",", "obj", ",", "cls", "=", "None", ")", ":", "if", "(", "obj", "is", "None", ")", ":", "return", "data_name", "=", "self", ".", "_data_name", "try", ":", "return", "obj", ".", "__dict__", "[", "data_name", "]", "except", "KeyError", ":", "decorated", "=", "decorator", "(", "obj", ",", "self", ".", "_func", ",", "*", "self", ".", "_args", ",", "**", "self", ".", "_kws", ")", "obj", ".", "__dict__", "[", "data_name", "]", "=", "decorated", "return", "decorated", "return", "Decorator" ]
meta-decorator to define decorators that decorate a method in a concrete instance .
train
false
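A sketch of how the meta-decorator above is meant to be consumed, assuming the per-instance decorator receives (instance, function) and returns the callable to cache; `logged` and `Widget` are illustrative names, not from the source.

```python
def logged(obj, func):
    # Built once per instance by Decorator.__get__, then cached in obj.__dict__.
    def wrapper(*args, **kws):
        print('calling %s on %r' % (func.__name__, obj))
        return func(obj, *args, **kws)
    return wrapper

class Widget(object):
    @instance_decorator(logged)
    def ping(self):
        return 'pong'

w = Widget()
print(w.ping())   # logs, then returns 'pong'; the wrapper is reused on later calls
```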
5,742
def strftime(date, date_format): def strip_zeros(x): return (x.lstrip(u'0') or u'0') c89_directives = u'aAbBcdfHIjmMpSUwWxXyYzZ%' format_options = u'%[-]?.' candidates = re.findall(format_options, date_format) template = re.sub(format_options, u'%s', date_format) (lang_code, enc) = locale.getlocale(locale.LC_TIME) formatted_candidates = [] for candidate in candidates: if (candidate[(-1)] in c89_directives): if (len(candidate) == 3): candidate = u'%{}'.format(candidate[(-1)]) conversion = strip_zeros else: conversion = None if isinstance(date, SafeDatetime): formatted = date.strftime(candidate, safe=False) else: formatted = date.strftime(candidate) if ((not six.PY3) and (enc is not None)): formatted = formatted.decode(enc) if conversion: formatted = conversion(formatted) else: formatted = candidate formatted_candidates.append(formatted) return (template % tuple(formatted_candidates))
[ "def", "strftime", "(", "date", ",", "date_format", ")", ":", "def", "strip_zeros", "(", "x", ")", ":", "return", "(", "x", ".", "lstrip", "(", "u'0'", ")", "or", "u'0'", ")", "c89_directives", "=", "u'aAbBcdfHIjmMpSUwWxXyYzZ%'", "format_options", "=", "u'%[-]?.'", "candidates", "=", "re", ".", "findall", "(", "format_options", ",", "date_format", ")", "template", "=", "re", ".", "sub", "(", "format_options", ",", "u'%s'", ",", "date_format", ")", "(", "lang_code", ",", "enc", ")", "=", "locale", ".", "getlocale", "(", "locale", ".", "LC_TIME", ")", "formatted_candidates", "=", "[", "]", "for", "candidate", "in", "candidates", ":", "if", "(", "candidate", "[", "(", "-", "1", ")", "]", "in", "c89_directives", ")", ":", "if", "(", "len", "(", "candidate", ")", "==", "3", ")", ":", "candidate", "=", "u'%{}'", ".", "format", "(", "candidate", "[", "(", "-", "1", ")", "]", ")", "conversion", "=", "strip_zeros", "else", ":", "conversion", "=", "None", "if", "isinstance", "(", "date", ",", "SafeDatetime", ")", ":", "formatted", "=", "date", ".", "strftime", "(", "candidate", ",", "safe", "=", "False", ")", "else", ":", "formatted", "=", "date", ".", "strftime", "(", "candidate", ")", "if", "(", "(", "not", "six", ".", "PY3", ")", "and", "(", "enc", "is", "not", "None", ")", ")", ":", "formatted", "=", "formatted", ".", "decode", "(", "enc", ")", "if", "conversion", ":", "formatted", "=", "conversion", "(", "formatted", ")", "else", ":", "formatted", "=", "candidate", "formatted_candidates", ".", "append", "(", "formatted", ")", "return", "(", "template", "%", "tuple", "(", "formatted_candidates", ")", ")" ]
a version of strftime that returns unicode strings and tries to handle dates before 1900 .
train
false
5,743
@pytest.mark.parametrize('parallel', [True, False]) def test_commented_header(parallel, read_commented_header): text = '\n # A B C\n 1 2 3\n 4 5 6\n' t1 = read_commented_header(text, parallel=parallel) expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C')) assert_table_equal(t1, expected) text = ('# first commented line\n # second commented line\n\n' + text) t2 = read_commented_header(text, header_start=2, data_start=0, parallel=parallel) assert_table_equal(t2, expected) t3 = read_commented_header(text, header_start=(-1), data_start=0, parallel=parallel) assert_table_equal(t3, expected) text += '7 8 9' t4 = read_commented_header(text, header_start=2, data_start=2, parallel=parallel) expected = Table([[7], [8], [9]], names=('A', 'B', 'C')) assert_table_equal(t4, expected) with pytest.raises(ParameterError): read_commented_header(text, header_start=(-1), data_start=(-1), parallel=parallel)
[ "@", "pytest", ".", "mark", ".", "parametrize", "(", "'parallel'", ",", "[", "True", ",", "False", "]", ")", "def", "test_commented_header", "(", "parallel", ",", "read_commented_header", ")", ":", "text", "=", "'\\n # A B C\\n 1 2 3\\n 4 5 6\\n'", "t1", "=", "read_commented_header", "(", "text", ",", "parallel", "=", "parallel", ")", "expected", "=", "Table", "(", "[", "[", "1", ",", "4", "]", ",", "[", "2", ",", "5", "]", ",", "[", "3", ",", "6", "]", "]", ",", "names", "=", "(", "'A'", ",", "'B'", ",", "'C'", ")", ")", "assert_table_equal", "(", "t1", ",", "expected", ")", "text", "=", "(", "'# first commented line\\n # second commented line\\n\\n'", "+", "text", ")", "t2", "=", "read_commented_header", "(", "text", ",", "header_start", "=", "2", ",", "data_start", "=", "0", ",", "parallel", "=", "parallel", ")", "assert_table_equal", "(", "t2", ",", "expected", ")", "t3", "=", "read_commented_header", "(", "text", ",", "header_start", "=", "(", "-", "1", ")", ",", "data_start", "=", "0", ",", "parallel", "=", "parallel", ")", "assert_table_equal", "(", "t3", ",", "expected", ")", "text", "+=", "'7 8 9'", "t4", "=", "read_commented_header", "(", "text", ",", "header_start", "=", "2", ",", "data_start", "=", "2", ",", "parallel", "=", "parallel", ")", "expected", "=", "Table", "(", "[", "[", "7", "]", ",", "[", "8", "]", ",", "[", "9", "]", "]", ",", "names", "=", "(", "'A'", ",", "'B'", ",", "'C'", ")", ")", "assert_table_equal", "(", "t4", ",", "expected", ")", "with", "pytest", ".", "raises", "(", "ParameterError", ")", ":", "read_commented_header", "(", "text", ",", "header_start", "=", "(", "-", "1", ")", ",", "data_start", "=", "(", "-", "1", ")", ",", "parallel", "=", "parallel", ")" ]
the fastcommentedheader reader should mimic the behavior of the commentedheader by overriding the default header behavior of fastbasic .
train
false
5,744
def maxdists(Z): Z = np.asarray(Z, order='c', dtype=np.double) is_valid_linkage(Z, throw=True, name='Z') n = (Z.shape[0] + 1) MD = np.zeros(((n - 1),)) [Z] = _copy_arrays_if_base_present([Z]) _hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n)) return MD
[ "def", "maxdists", "(", "Z", ")", ":", "Z", "=", "np", ".", "asarray", "(", "Z", ",", "order", "=", "'c'", ",", "dtype", "=", "np", ".", "double", ")", "is_valid_linkage", "(", "Z", ",", "throw", "=", "True", ",", "name", "=", "'Z'", ")", "n", "=", "(", "Z", ".", "shape", "[", "0", "]", "+", "1", ")", "MD", "=", "np", ".", "zeros", "(", "(", "(", "n", "-", "1", ")", ",", ")", ")", "[", "Z", "]", "=", "_copy_arrays_if_base_present", "(", "[", "Z", "]", ")", "_hierarchy", ".", "get_max_dist_for_each_cluster", "(", "Z", ",", "MD", ",", "int", "(", "n", ")", ")", "return", "MD" ]
returns the maximum distance between any non-singleton cluster .
train
false
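Assuming SciPy is installed, the same computation is reachable through the public scipy.cluster.hierarchy API, which this sketch exercises on arbitrary random points.

```python
import numpy as np
from scipy.cluster.hierarchy import linkage, maxdists

X = np.random.RandomState(0).rand(6, 2)   # six random 2-D observations
Z = linkage(X, method='single')           # (n - 1) x 4 linkage matrix
print(maxdists(Z))                        # one max distance per non-singleton cluster
```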
5,746
def import_dynamic(modname): if (sys.version_info >= (3, 3)): import importlib importlib.invalidate_caches() __import__(modname) return sys.modules[modname]
[ "def", "import_dynamic", "(", "modname", ")", ":", "if", "(", "sys", ".", "version_info", ">=", "(", "3", ",", "3", ")", ")", ":", "import", "importlib", "importlib", ".", "invalidate_caches", "(", ")", "__import__", "(", "modname", ")", "return", "sys", ".", "modules", "[", "modname", "]" ]
import and return a module of the given name .
train
false
5,747
def _service_get(s_name, **connection_args): nitro = _connect(**connection_args) if (nitro is None): return None service = NSService() service.set_name(s_name) try: service = NSService.get(nitro, service) except NSNitroError as error: log.debug('netscaler module error - NSService.get() failed: {0}'.format(error)) service = None _disconnect(nitro) return service
[ "def", "_service_get", "(", "s_name", ",", "**", "connection_args", ")", ":", "nitro", "=", "_connect", "(", "**", "connection_args", ")", "if", "(", "nitro", "is", "None", ")", ":", "return", "None", "service", "=", "NSService", "(", ")", "service", ".", "set_name", "(", "s_name", ")", "try", ":", "service", "=", "NSService", ".", "get", "(", "nitro", ",", "service", ")", "except", "NSNitroError", "as", "error", ":", "log", ".", "debug", "(", "'netscaler module error - NSService.get() failed: {0}'", ".", "format", "(", "error", ")", ")", "service", "=", "None", "_disconnect", "(", "nitro", ")", "return", "service" ]
returns a service resource or none .
train
true
5,748
def validate_pre_plugin_load(): if (cfg.CONF.core_plugin is None): msg = _('Neutron core_plugin not configured!') return msg
[ "def", "validate_pre_plugin_load", "(", ")", ":", "if", "(", "cfg", ".", "CONF", ".", "core_plugin", "is", "None", ")", ":", "msg", "=", "_", "(", "'Neutron core_plugin not configured!'", ")", "return", "msg" ]
checks if the configuration variables are valid .
train
false
5,749
def _get_models_to_patch(): return list(itertools.chain(*[app_config.get_models(include_auto_created=False) for app_config in apps.get_app_configs() if ((app_config.label == 'osf') or app_config.label.startswith('addons_'))]))
[ "def", "_get_models_to_patch", "(", ")", ":", "return", "list", "(", "itertools", ".", "chain", "(", "*", "[", "app_config", ".", "get_models", "(", "include_auto_created", "=", "False", ")", "for", "app_config", "in", "apps", ".", "get_app_configs", "(", ")", "if", "(", "(", "app_config", ".", "label", "==", "'osf'", ")", "or", "app_config", ".", "label", ".", "startswith", "(", "'addons_'", ")", ")", "]", ")", ")" ]
return all models from osf and addons .
train
false
5,750
def match_str(filter_str, dct): return all((_match_one(filter_part, dct) for filter_part in filter_str.split(u'&')))
[ "def", "match_str", "(", "filter_str", ",", "dct", ")", ":", "return", "all", "(", "(", "_match_one", "(", "filter_part", ",", "dct", ")", "for", "filter_part", "in", "filter_str", ".", "split", "(", "u'&'", ")", ")", ")" ]
filter a dictionary with a simple string syntax .
train
false
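A usage sketch of the filter syntax, assuming youtube-dl is installed so that match_str (and the _match_one helper it relies on) can be imported.

```python
from youtube_dl.utils import match_str

info = {'duration': 120, 'view_count': 5000, 'like_count': 10}
print(match_str('duration > 60 & view_count >= 1000', info))  # True
print(match_str('like_count > 100', info))                    # False
```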
5,751
def delete_api_deployment(restApiId, deploymentId, region=None, key=None, keyid=None, profile=None): try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.delete_deployment(restApiId=restApiId, deploymentId=deploymentId) return {'deleted': True} except ClientError as e: return {'deleted': False, 'error': salt.utils.boto3.get_error(e)}
[ "def", "delete_api_deployment", "(", "restApiId", ",", "deploymentId", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "conn", ".", "delete_deployment", "(", "restApiId", "=", "restApiId", ",", "deploymentId", "=", "deploymentId", ")", "return", "{", "'deleted'", ":", "True", "}", "except", "ClientError", "as", "e", ":", "return", "{", "'deleted'", ":", "False", ",", "'error'", ":", "salt", ".", "utils", ".", "boto3", ".", "get_error", "(", "e", ")", "}" ]
deletes api deployment for a given restapiid and deploymentid .
train
false
5,753
def possible_filename(filename): if isinstance(filename, six.text_type): return True elif isinstance(filename, six.binary_type): return (not ((sys.platform == u'win32') and (sys.version_info[:2] >= (3, 3)))) return False
[ "def", "possible_filename", "(", "filename", ")", ":", "if", "isinstance", "(", "filename", ",", "six", ".", "text_type", ")", ":", "return", "True", "elif", "isinstance", "(", "filename", ",", "six", ".", "binary_type", ")", ":", "return", "(", "not", "(", "(", "sys", ".", "platform", "==", "u'win32'", ")", "and", "(", "sys", ".", "version_info", "[", ":", "2", "]", ">=", "(", "3", ",", "3", ")", ")", ")", ")", "return", "False" ]
determine if the filename argument is an allowable type for a filename .
train
false
5,754
def auto_through(field): return ((not field.rel.through) or getattr(getattr(field.rel.through, '_meta', None), 'auto_created', False))
[ "def", "auto_through", "(", "field", ")", ":", "return", "(", "(", "not", "field", ".", "rel", ".", "through", ")", "or", "getattr", "(", "getattr", "(", "field", ".", "rel", ".", "through", ",", "'_meta'", ",", "None", ")", ",", "'auto_created'", ",", "False", ")", ")" ]
returns whether the m2m field passed in has an autogenerated through table or not .
train
false
5,756
def st_mode_to_octal(mode): try: return oct(mode)[(-4):] except (TypeError, IndexError): return ''
[ "def", "st_mode_to_octal", "(", "mode", ")", ":", "try", ":", "return", "oct", "(", "mode", ")", "[", "(", "-", "4", ")", ":", "]", "except", "(", "TypeError", ",", "IndexError", ")", ":", "return", "''" ]
convert the st_mode value from a stat(2) call (as returned from os.stat()) to an octal string .
train
false
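A self-contained sketch of the helper above applied to a real stat(2) result; the path is arbitrary.

```python
import os

def st_mode_to_octal(mode):
    try:
        return oct(mode)[-4:]
    except (TypeError, IndexError):
        return ''

mode = os.stat('.').st_mode      # e.g. 0o40755 for a directory
print(st_mode_to_octal(mode))    # -> '0755'
print(st_mode_to_octal(None))    # -> '' (graceful fallback on bad input)
```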
5,757
def _req_json_rpc(url, method, *args, **kwargs): data = json.dumps({'method': method, 'params': args}) try: res = requests.post(url, data=data, timeout=5, **kwargs) except requests.exceptions.Timeout: _LOGGER.exception('Connection to the router timed out') return if (res.status_code == 200): try: result = res.json() except ValueError: _LOGGER.exception('Failed to parse response from luci') return try: return result['result'] except KeyError: _LOGGER.exception('No result in response from luci') return elif (res.status_code == 401): _LOGGER.exception('Failed to authenticate, please check your username and password') return else: _LOGGER.error('Invalid response from luci: %s', res)
[ "def", "_req_json_rpc", "(", "url", ",", "method", ",", "*", "args", ",", "**", "kwargs", ")", ":", "data", "=", "json", ".", "dumps", "(", "{", "'method'", ":", "method", ",", "'params'", ":", "args", "}", ")", "try", ":", "res", "=", "requests", ".", "post", "(", "url", ",", "data", "=", "data", ",", "timeout", "=", "5", ",", "**", "kwargs", ")", "except", "requests", ".", "exceptions", ".", "Timeout", ":", "_LOGGER", ".", "exception", "(", "'Connection to the router timed out'", ")", "return", "if", "(", "res", ".", "status_code", "==", "200", ")", ":", "try", ":", "result", "=", "res", ".", "json", "(", ")", "except", "ValueError", ":", "_LOGGER", ".", "exception", "(", "'Failed to parse response from luci'", ")", "return", "try", ":", "return", "result", "[", "'result'", "]", "except", "KeyError", ":", "_LOGGER", ".", "exception", "(", "'No result in response from luci'", ")", "return", "elif", "(", "res", ".", "status_code", "==", "401", ")", ":", "_LOGGER", ".", "exception", "(", "'Failed to authenticate, please check your username and password'", ")", "return", "else", ":", "_LOGGER", ".", "error", "(", "'Invalid response from luci: %s'", ",", "res", ")" ]
perform one json rpc operation .
train
false
5,758
def _Replacement_write_data(writer, data, is_attrib=False): data = data.replace('&', '&amp;').replace('<', '&lt;') data = data.replace('"', '&quot;').replace('>', '&gt;') if is_attrib: data = data.replace('\r', '&#xD;').replace('\n', '&#xA;').replace(' DCTB ', '&#x9;') writer.write(data)
[ "def", "_Replacement_write_data", "(", "writer", ",", "data", ",", "is_attrib", "=", "False", ")", ":", "data", "=", "data", ".", "replace", "(", "'&'", ",", "'&amp;'", ")", ".", "replace", "(", "'<'", ",", "'&lt;'", ")", "data", "=", "data", ".", "replace", "(", "'\"'", ",", "'&quot;'", ")", ".", "replace", "(", "'>'", ",", "'&gt;'", ")", "if", "is_attrib", ":", "data", "=", "data", ".", "replace", "(", "'\\r'", ",", "'&#xD;'", ")", ".", "replace", "(", "'\\n'", ",", "'&#xA;'", ")", ".", "replace", "(", "' DCTB '", ",", "'&#x9;'", ")", "writer", ".", "write", "(", "data", ")" ]
writes data characters to writer , escaping special xml characters .
train
false
5,759
def _offset_or_limit_clause_asint(clause, attrname): if (clause is None): return None try: value = clause._limit_offset_value except AttributeError: raise exc.CompileError(('This SELECT structure does not use a simple integer value for %s' % attrname)) else: return util.asint(value)
[ "def", "_offset_or_limit_clause_asint", "(", "clause", ",", "attrname", ")", ":", "if", "(", "clause", "is", "None", ")", ":", "return", "None", "try", ":", "value", "=", "clause", ".", "_limit_offset_value", "except", "AttributeError", ":", "raise", "exc", ".", "CompileError", "(", "(", "'This SELECT structure does not use a simple integer value for %s'", "%", "attrname", ")", ")", "else", ":", "return", "util", ".", "asint", "(", "value", ")" ]
convert the "offset or limit" clause of a select construct to an integer .
train
false
5,760
def event_elapsed_time(evtstart, evtend): msec = c_float() driver.cuEventElapsedTime(byref(msec), evtstart.handle, evtend.handle) return msec.value
[ "def", "event_elapsed_time", "(", "evtstart", ",", "evtend", ")", ":", "msec", "=", "c_float", "(", ")", "driver", ".", "cuEventElapsedTime", "(", "byref", "(", "msec", ")", ",", "evtstart", ".", "handle", ",", "evtend", ".", "handle", ")", "return", "msec", ".", "value" ]
compute the elapsed time between two events in milliseconds .
train
false
5,761
def get_video_from_youtube_id(youtube_id): for (channel, language) in available_content_databases(): video = _get_video_from_youtube_id(channel=channel, language=language, youtube_id=youtube_id) if video: return video
[ "def", "get_video_from_youtube_id", "(", "youtube_id", ")", ":", "for", "(", "channel", ",", "language", ")", "in", "available_content_databases", "(", ")", ":", "video", "=", "_get_video_from_youtube_id", "(", "channel", "=", "channel", ",", "language", "=", "language", ",", "youtube_id", "=", "youtube_id", ")", "if", "video", ":", "return", "video" ]
this function is provided to ensure that the data migration 0029_set_video_id_for_realz in the main app is still able to be run if needed .
train
false
5,762
def _import_type_text(dest, elem, type=None): if (type is None): type = elem.attrib['type'] if _should_skip_elem(elem, type, dest): return dest[type] = _text(elem)
[ "def", "_import_type_text", "(", "dest", ",", "elem", ",", "type", "=", "None", ")", ":", "if", "(", "type", "is", "None", ")", ":", "type", "=", "elem", ".", "attrib", "[", "'type'", "]", "if", "_should_skip_elem", "(", "elem", ",", "type", ",", "dest", ")", ":", "return", "dest", "[", "type", "]", "=", "_text", "(", "elem", ")" ]
conditionally import the element's inner text(s) into the dest dict .
train
false
5,764
def InstallModule(conf_module_name, params, options, log=(lambda *args: None)): if (not hasattr(sys, 'frozen')): conf_module_name = os.path.abspath(conf_module_name) if (not os.path.isfile(conf_module_name)): raise ConfigurationError(('%s does not exist' % (conf_module_name,))) loader_dll = GetLoaderModuleName(conf_module_name) _PatchParamsModule(params, loader_dll) Install(params, options) log(1, 'Installation complete.')
[ "def", "InstallModule", "(", "conf_module_name", ",", "params", ",", "options", ",", "log", "=", "(", "lambda", "*", "args", ":", "None", ")", ")", ":", "if", "(", "not", "hasattr", "(", "sys", ",", "'frozen'", ")", ")", ":", "conf_module_name", "=", "os", ".", "path", ".", "abspath", "(", "conf_module_name", ")", "if", "(", "not", "os", ".", "path", ".", "isfile", "(", "conf_module_name", ")", ")", ":", "raise", "ConfigurationError", "(", "(", "'%s does not exist'", "%", "(", "conf_module_name", ",", ")", ")", ")", "loader_dll", "=", "GetLoaderModuleName", "(", "conf_module_name", ")", "_PatchParamsModule", "(", "params", ",", "loader_dll", ")", "Install", "(", "params", ",", "options", ")", "log", "(", "1", ",", "'Installation complete.'", ")" ]
install the extension .
train
false
5,765
def getDiagonalFlippedLoop(loop): diagonalFlippedLoop = [] for point in loop: diagonalFlippedLoop.append(complex(point.imag, point.real)) return diagonalFlippedLoop
[ "def", "getDiagonalFlippedLoop", "(", "loop", ")", ":", "diagonalFlippedLoop", "=", "[", "]", "for", "point", "in", "loop", ":", "diagonalFlippedLoop", ".", "append", "(", "complex", "(", "point", ".", "imag", ",", "point", ".", "real", ")", ")", "return", "diagonalFlippedLoop" ]
get loop flipped over the diagonal .
train
false
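The flip simply swaps real and imaginary parts, mirroring each point across the line y = x; a condensed restatement for quick verification:

```python
def getDiagonalFlippedLoop(loop):
    return [complex(point.imag, point.real) for point in loop]

print(getDiagonalFlippedLoop([complex(1, 2), complex(3, 4)]))
# [(2+1j), (4+3j)] -- each point mirrored across y = x
```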
5,767
def auto_configure_disk(session, vdi_ref, new_gb): with vdi_attached_here(session, vdi_ref, read_only=False) as dev: partitions = _get_partitions(dev) if (len(partitions) != 1): return (_num, start, old_sectors, ptype) = partitions[0] if (ptype in ('ext3', 'ext4')): new_sectors = ((((new_gb * 1024) * 1024) * 1024) / SECTOR_SIZE) _resize_part_and_fs(dev, start, old_sectors, new_sectors)
[ "def", "auto_configure_disk", "(", "session", ",", "vdi_ref", ",", "new_gb", ")", ":", "with", "vdi_attached_here", "(", "session", ",", "vdi_ref", ",", "read_only", "=", "False", ")", "as", "dev", ":", "partitions", "=", "_get_partitions", "(", "dev", ")", "if", "(", "len", "(", "partitions", ")", "!=", "1", ")", ":", "return", "(", "_num", ",", "start", ",", "old_sectors", ",", "ptype", ")", "=", "partitions", "[", "0", "]", "if", "(", "ptype", "in", "(", "'ext3'", ",", "'ext4'", ")", ")", ":", "new_sectors", "=", "(", "(", "(", "(", "new_gb", "*", "1024", ")", "*", "1024", ")", "*", "1024", ")", "/", "SECTOR_SIZE", ")", "_resize_part_and_fs", "(", "dev", ",", "start", ",", "old_sectors", ",", "new_sectors", ")" ]
partition and resize fs to match the size specified by instance_types .
train
false
5,768
def has_app_context(): return (_app_ctx_stack.top is not None)
[ "def", "has_app_context", "(", ")", ":", "return", "(", "_app_ctx_stack", ".", "top", "is", "not", "None", ")" ]
works like :func:`has_request_context` but for the application context .
train
false
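Assuming Flask is installed, the public API behaves as this sketch shows.

```python
from flask import Flask, has_app_context

app = Flask(__name__)
print(has_app_context())        # False -- no application context pushed yet
with app.app_context():
    print(has_app_context())    # True inside the block
```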
5,769
def namignizer_iterator(names, counts, batch_size, num_steps, epoch_size): name_distribution = (counts / counts.sum()) for i in range(epoch_size): data = np.zeros(((batch_size * num_steps) + 1)) samples = np.random.choice(names, size=((batch_size * num_steps) // 2), replace=True, p=name_distribution) data_index = 0 for sample in samples: if (data_index >= (batch_size * num_steps)): break for letter in (map(_letter_to_number, sample) + [_EON]): if (data_index >= (batch_size * num_steps)): break data[data_index] = letter data_index += 1 x = data[:(batch_size * num_steps)].reshape((batch_size, num_steps)) y = data[1:((batch_size * num_steps) + 1)].reshape((batch_size, num_steps)) (yield (x, y))
[ "def", "namignizer_iterator", "(", "names", ",", "counts", ",", "batch_size", ",", "num_steps", ",", "epoch_size", ")", ":", "name_distribution", "=", "(", "counts", "/", "counts", ".", "sum", "(", ")", ")", "for", "i", "in", "range", "(", "epoch_size", ")", ":", "data", "=", "np", ".", "zeros", "(", "(", "(", "batch_size", "*", "num_steps", ")", "+", "1", ")", ")", "samples", "=", "np", ".", "random", ".", "choice", "(", "names", ",", "size", "=", "(", "(", "batch_size", "*", "num_steps", ")", "//", "2", ")", ",", "replace", "=", "True", ",", "p", "=", "name_distribution", ")", "data_index", "=", "0", "for", "sample", "in", "samples", ":", "if", "(", "data_index", ">=", "(", "batch_size", "*", "num_steps", ")", ")", ":", "break", "for", "letter", "in", "(", "map", "(", "_letter_to_number", ",", "sample", ")", "+", "[", "_EON", "]", ")", ":", "if", "(", "data_index", ">=", "(", "batch_size", "*", "num_steps", ")", ")", ":", "break", "data", "[", "data_index", "]", "=", "letter", "data_index", "+=", "1", "x", "=", "data", "[", ":", "(", "batch_size", "*", "num_steps", ")", "]", ".", "reshape", "(", "(", "batch_size", ",", "num_steps", ")", ")", "y", "=", "data", "[", "1", ":", "(", "(", "batch_size", "*", "num_steps", ")", "+", "1", ")", "]", ".", "reshape", "(", "(", "batch_size", ",", "num_steps", ")", ")", "(", "yield", "(", "x", ",", "y", ")", ")" ]
takes a list of names and counts like those output from read_names .
train
false
5,771
def convert_kvp_str_to_list(data): kvp = [x.strip() for x in data.split('=', 1)] if ((len(kvp) == 2) and kvp[0]): return kvp msg = (_("'%s' is not of the form <key>=[value]") % data) raise q_exc.InvalidInput(error_message=msg)
[ "def", "convert_kvp_str_to_list", "(", "data", ")", ":", "kvp", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "data", ".", "split", "(", "'='", ",", "1", ")", "]", "if", "(", "(", "len", "(", "kvp", ")", "==", "2", ")", "and", "kvp", "[", "0", "]", ")", ":", "return", "kvp", "msg", "=", "(", "_", "(", "\"'%s' is not of the form <key>=[value]\"", ")", "%", "data", ")", "raise", "q_exc", ".", "InvalidInput", "(", "error_message", "=", "msg", ")" ]
convert a value of the form key=value to [key, value] .
train
false
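A behaviour sketch; plain ValueError stands in for Neutron's InvalidInput so the snippet runs without the Neutron codebase.

```python
def convert_kvp_str_to_list(data):
    kvp = [x.strip() for x in data.split('=', 1)]
    if len(kvp) == 2 and kvp[0]:
        return kvp
    raise ValueError("'%s' is not of the form <key>=[value]" % data)

print(convert_kvp_str_to_list('colour = blue'))  # ['colour', 'blue']
print(convert_kvp_str_to_list('a=b=c'))          # ['a', 'b=c'] (maxsplit=1)
```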
5,772
def index_db(index_filename, filenames=None, format=None, alphabet=None, key_function=None): if (not isinstance(index_filename, basestring)): raise TypeError('Need a string for the index filename') if isinstance(filenames, basestring): filenames = [filenames] if ((filenames is not None) and (not isinstance(filenames, list))): raise TypeError('Need a list of filenames (as strings), or one filename') if ((format is not None) and (not isinstance(format, basestring))): raise TypeError('Need a string for the file format (lower case)') if (format and (format != format.lower())): raise ValueError(("Format string '%s' should be lower case" % format)) if ((alphabet is not None) and (not (isinstance(alphabet, Alphabet) or isinstance(alphabet, AlphabetEncoder)))): raise ValueError(('Invalid alphabet, %r' % alphabet)) from ._index import _FormatToRandomAccess from Bio.File import _SQLiteManySeqFilesDict repr = ('SeqIO.index_db(%r, filenames=%r, format=%r, alphabet=%r, key_function=%r)' % (index_filename, filenames, format, alphabet, key_function)) def proxy_factory(format, filename=None): 'Given a filename returns proxy object, else boolean if format OK.' if filename: return _FormatToRandomAccess[format](filename, format, alphabet) else: return (format in _FormatToRandomAccess) return _SQLiteManySeqFilesDict(index_filename, filenames, proxy_factory, format, key_function, repr)
[ "def", "index_db", "(", "index_filename", ",", "filenames", "=", "None", ",", "format", "=", "None", ",", "alphabet", "=", "None", ",", "key_function", "=", "None", ")", ":", "if", "(", "not", "isinstance", "(", "index_filename", ",", "basestring", ")", ")", ":", "raise", "TypeError", "(", "'Need a string for the index filename'", ")", "if", "isinstance", "(", "filenames", ",", "basestring", ")", ":", "filenames", "=", "[", "filenames", "]", "if", "(", "(", "filenames", "is", "not", "None", ")", "and", "(", "not", "isinstance", "(", "filenames", ",", "list", ")", ")", ")", ":", "raise", "TypeError", "(", "'Need a list of filenames (as strings), or one filename'", ")", "if", "(", "(", "format", "is", "not", "None", ")", "and", "(", "not", "isinstance", "(", "format", ",", "basestring", ")", ")", ")", ":", "raise", "TypeError", "(", "'Need a string for the file format (lower case)'", ")", "if", "(", "format", "and", "(", "format", "!=", "format", ".", "lower", "(", ")", ")", ")", ":", "raise", "ValueError", "(", "(", "\"Format string '%s' should be lower case\"", "%", "format", ")", ")", "if", "(", "(", "alphabet", "is", "not", "None", ")", "and", "(", "not", "(", "isinstance", "(", "alphabet", ",", "Alphabet", ")", "or", "isinstance", "(", "alphabet", ",", "AlphabetEncoder", ")", ")", ")", ")", ":", "raise", "ValueError", "(", "(", "'Invalid alphabet, %r'", "%", "alphabet", ")", ")", "from", ".", "_index", "import", "_FormatToRandomAccess", "from", "Bio", ".", "File", "import", "_SQLiteManySeqFilesDict", "repr", "=", "(", "'SeqIO.index_db(%r, filenames=%r, format=%r, alphabet=%r, key_function=%r)'", "%", "(", "index_filename", ",", "filenames", ",", "format", ",", "alphabet", ",", "key_function", ")", ")", "def", "proxy_factory", "(", "format", ",", "filename", "=", "None", ")", ":", "if", "filename", ":", "return", "_FormatToRandomAccess", "[", "format", "]", "(", "filename", ",", "format", ",", "alphabet", ")", "else", ":", "return", "(", "format", "in", "_FormatToRandomAccess", ")", "return", "_SQLiteManySeqFilesDict", "(", "index_filename", ",", "filenames", ",", "proxy_factory", ",", "format", ",", "key_function", ",", "repr", ")" ]
indexes several sequence files into an sqlite database .
train
false
5,773
def get_blocks_with_unallocated(module, cp_driver, lb_driver, network_domain): total_unallocated_ips = 0 all_blocks = list_public_ip_blocks(module, cp_driver, network_domain) unalloc_blocks = [] unalloc_addresses = [] for block in all_blocks: d_blocks = get_block_allocation(module, cp_driver, lb_driver, network_domain, block) i = 0 for addr in d_blocks['addresses']: if (addr['allocated'] is False): if (i == 0): unalloc_blocks.append(d_blocks) unalloc_addresses.append(addr['address']) total_unallocated_ips += 1 i += 1 return {'unallocated_count': total_unallocated_ips, 'ip_blocks': unalloc_blocks, 'unallocated_addresses': unalloc_addresses}
[ "def", "get_blocks_with_unallocated", "(", "module", ",", "cp_driver", ",", "lb_driver", ",", "network_domain", ")", ":", "total_unallocated_ips", "=", "0", "all_blocks", "=", "list_public_ip_blocks", "(", "module", ",", "cp_driver", ",", "network_domain", ")", "unalloc_blocks", "=", "[", "]", "unalloc_addresses", "=", "[", "]", "for", "block", "in", "all_blocks", ":", "d_blocks", "=", "get_block_allocation", "(", "module", ",", "cp_driver", ",", "lb_driver", ",", "network_domain", ",", "block", ")", "i", "=", "0", "for", "addr", "in", "d_blocks", "[", "'addresses'", "]", ":", "if", "(", "addr", "[", "'allocated'", "]", "is", "False", ")", ":", "if", "(", "i", "==", "0", ")", ":", "unalloc_blocks", ".", "append", "(", "d_blocks", ")", "unalloc_addresses", ".", "append", "(", "addr", "[", "'address'", "]", ")", "total_unallocated_ips", "+=", "1", "i", "+=", "1", "return", "{", "'unallocated_count'", ":", "total_unallocated_ips", ",", "'ip_blocks'", ":", "unalloc_blocks", ",", "'unallocated_addresses'", ":", "unalloc_addresses", "}" ]
gets ip blocks with one or more unallocated ips .
train
false
5,775
def approx_jacobian(x, func, epsilon, *args): x0 = asfarray(x) f0 = atleast_1d(func(*((x0,) + args))) jac = zeros([len(x0), len(f0)]) dx = zeros(len(x0)) for i in range(len(x0)): dx[i] = epsilon jac[i] = ((func(*(((x0 + dx),) + args)) - f0) / epsilon) dx[i] = 0.0 return jac.transpose()
[ "def", "approx_jacobian", "(", "x", ",", "func", ",", "epsilon", ",", "*", "args", ")", ":", "x0", "=", "asfarray", "(", "x", ")", "f0", "=", "atleast_1d", "(", "func", "(", "*", "(", "(", "x0", ",", ")", "+", "args", ")", ")", ")", "jac", "=", "zeros", "(", "[", "len", "(", "x0", ")", ",", "len", "(", "f0", ")", "]", ")", "dx", "=", "zeros", "(", "len", "(", "x0", ")", ")", "for", "i", "in", "range", "(", "len", "(", "x0", ")", ")", ":", "dx", "[", "i", "]", "=", "epsilon", "jac", "[", "i", "]", "=", "(", "(", "func", "(", "*", "(", "(", "(", "x0", "+", "dx", ")", ",", ")", "+", "args", ")", ")", "-", "f0", ")", "/", "epsilon", ")", "dx", "[", "i", "]", "=", "0.0", "return", "jac", ".", "transpose", "(", ")" ]
approximate the jacobian matrix of a callable function .
train
false
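A finite-difference check against a function with a known Jacobian, assuming approx_jacobian above sits in a module where asfarray, atleast_1d and zeros are imported from NumPy; f, x0 and the step size are illustrative choices.

```python
import numpy as np

def f(x):
    return np.array([x[0] ** 2 * x[1], 5.0 * x[0] + np.sin(x[1])])

x0 = np.array([1.0, 2.0])
print(approx_jacobian(x0, f, 1e-8))
# analytic Jacobian at (1, 2): [[2*x0*x1, x0**2], [5, cos(x1)]]
#                            = [[4.0, 1.0], [5.0, -0.4161...]]
```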
5,777
def EncodeRspFileList(args): if (not args): return '' if args[0].startswith('call '): (call, program) = args[0].split(' ', 1) program = ((call + ' ') + os.path.normpath(program)) else: program = os.path.normpath(args[0]) return ((program + ' ') + ' '.join((QuoteForRspFile(arg) for arg in args[1:])))
[ "def", "EncodeRspFileList", "(", "args", ")", ":", "if", "(", "not", "args", ")", ":", "return", "''", "if", "args", "[", "0", "]", ".", "startswith", "(", "'call '", ")", ":", "(", "call", ",", "program", ")", "=", "args", "[", "0", "]", ".", "split", "(", "' '", ",", "1", ")", "program", "=", "(", "(", "call", "+", "' '", ")", "+", "os", ".", "path", ".", "normpath", "(", "program", ")", ")", "else", ":", "program", "=", "os", ".", "path", ".", "normpath", "(", "args", "[", "0", "]", ")", "return", "(", "(", "program", "+", "' '", ")", "+", "' '", ".", "join", "(", "(", "QuoteForRspFile", "(", "arg", ")", "for", "arg", "in", "args", "[", "1", ":", "]", ")", ")", ")" ]
process a list of arguments using quoteforrspfile .
train
false
5,779
def _compare_combo(raw, new, times, n_times): for ti in times: orig = raw[:, (ti % n_times)][0] assert_allclose(orig, new[:, ti][0])
[ "def", "_compare_combo", "(", "raw", ",", "new", ",", "times", ",", "n_times", ")", ":", "for", "ti", "in", "times", ":", "orig", "=", "raw", "[", ":", ",", "(", "ti", "%", "n_times", ")", "]", "[", "0", "]", "assert_allclose", "(", "orig", ",", "new", "[", ":", ",", "ti", "]", "[", "0", "]", ")" ]
compare data .
train
false
5,780
def _restore_service(service): _apply_service(service, SonosDevice.restore)
[ "def", "_restore_service", "(", "service", ")", ":", "_apply_service", "(", "service", ",", "SonosDevice", ".", "restore", ")" ]
restore a snapshot .
train
false
5,781
def pil_from_ndarray(ndarray): try: if ((ndarray.dtype == 'float32') or (ndarray.dtype == 'float64')): assert (ndarray.min() >= 0.0) assert (ndarray.max() <= 1.0) ndarray = np.cast['uint8']((ndarray * 255)) if ((len(ndarray.shape) == 3) and (ndarray.shape[2] == 1)): ndarray = ndarray[:, :, 0] ensure_Image() rval = Image.fromarray(ndarray) return rval except Exception as e: logger.exception('original exception: ') logger.exception(e) logger.exception('ndarray.dtype: {0}'.format(ndarray.dtype)) logger.exception('ndarray.shape: {0}'.format(ndarray.shape)) raise assert False
[ "def", "pil_from_ndarray", "(", "ndarray", ")", ":", "try", ":", "if", "(", "(", "ndarray", ".", "dtype", "==", "'float32'", ")", "or", "(", "ndarray", ".", "dtype", "==", "'float64'", ")", ")", ":", "assert", "(", "ndarray", ".", "min", "(", ")", ">=", "0.0", ")", "assert", "(", "ndarray", ".", "max", "(", ")", "<=", "1.0", ")", "ndarray", "=", "np", ".", "cast", "[", "'uint8'", "]", "(", "(", "ndarray", "*", "255", ")", ")", "if", "(", "(", "len", "(", "ndarray", ".", "shape", ")", "==", "3", ")", "and", "(", "ndarray", ".", "shape", "[", "2", "]", "==", "1", ")", ")", ":", "ndarray", "=", "ndarray", "[", ":", ",", ":", ",", "0", "]", "ensure_Image", "(", ")", "rval", "=", "Image", ".", "fromarray", "(", "ndarray", ")", "return", "rval", "except", "Exception", "as", "e", ":", "logger", ".", "exception", "(", "'original exception: '", ")", "logger", ".", "exception", "(", "e", ")", "logger", ".", "exception", "(", "'ndarray.dtype: {0}'", ".", "format", "(", "ndarray", ".", "dtype", ")", ")", "logger", ".", "exception", "(", "'ndarray.shape: {0}'", ".", "format", "(", "ndarray", ".", "shape", ")", ")", "raise", "assert", "False" ]
converts an ndarray to a pil image .
train
false
5,782
@pytest.fixture def config_tmpdir(monkeypatch, tmpdir): confdir = (tmpdir / 'config') path = str(confdir) os.mkdir(path) monkeypatch.setattr('qutebrowser.utils.standarddir.config', (lambda : path)) return confdir
[ "@", "pytest", ".", "fixture", "def", "config_tmpdir", "(", "monkeypatch", ",", "tmpdir", ")", ":", "confdir", "=", "(", "tmpdir", "/", "'config'", ")", "path", "=", "str", "(", "confdir", ")", "os", ".", "mkdir", "(", "path", ")", "monkeypatch", ".", "setattr", "(", "'qutebrowser.utils.standarddir.config'", ",", "(", "lambda", ":", "path", ")", ")", "return", "confdir" ]
set tmpdir/config as the configdir .
train
false
5,786
def GetRegisteredHelpFile(helpDesc): try: return GetRegistryDefaultValue(((BuildDefaultPythonKey() + '\\Help\\') + helpDesc)) except win32api.error: try: return GetRegistryDefaultValue(((BuildDefaultPythonKey() + '\\Help\\') + helpDesc), win32con.HKEY_CURRENT_USER) except win32api.error: pass return None
[ "def", "GetRegisteredHelpFile", "(", "helpDesc", ")", ":", "try", ":", "return", "GetRegistryDefaultValue", "(", "(", "(", "BuildDefaultPythonKey", "(", ")", "+", "'\\\\Help\\\\'", ")", "+", "helpDesc", ")", ")", "except", "win32api", ".", "error", ":", "try", ":", "return", "GetRegistryDefaultValue", "(", "(", "(", "BuildDefaultPythonKey", "(", ")", "+", "'\\\\Help\\\\'", ")", "+", "helpDesc", ")", ",", "win32con", ".", "HKEY_CURRENT_USER", ")", "except", "win32api", ".", "error", ":", "pass", "return", "None" ]
given a description , return the full path of the registered help file .
train
false
5,787
def block_comments_begin_with_a_space(physical_line, line_number): MESSAGE = "K002 block comments should start with '# '" if ((line_number == 1) and physical_line.startswith('#!')): return text = physical_line.strip() if text.startswith('#'): if ((len(text) > 1) and (not text[1].isspace())): return (physical_line.index('#'), MESSAGE)
[ "def", "block_comments_begin_with_a_space", "(", "physical_line", ",", "line_number", ")", ":", "MESSAGE", "=", "\"K002 block comments should start with '# '\"", "if", "(", "(", "line_number", "==", "1", ")", "and", "physical_line", ".", "startswith", "(", "'#!'", ")", ")", ":", "return", "text", "=", "physical_line", ".", "strip", "(", ")", "if", "text", ".", "startswith", "(", "'#'", ")", ":", "if", "(", "(", "len", "(", "text", ")", ">", "1", ")", "and", "(", "not", "text", "[", "1", "]", ".", "isspace", "(", ")", ")", ")", ":", "return", "(", "physical_line", ".", "index", "(", "'#'", ")", ",", "MESSAGE", ")" ]
there should be a space after the # of block comments .
train
false
5,788
def test_compare_fiff(): check_usage(mne_compare_fiff)
[ "def", "test_compare_fiff", "(", ")", ":", "check_usage", "(", "mne_compare_fiff", ")" ]
test mne compare_fiff .
train
false
5,789
def get_icon(name, default=None, resample=False): icon_path = get_image_path(name, default=None) if (icon_path is not None): icon = QIcon(icon_path) elif isinstance(default, QIcon): icon = default elif (default is None): try: icon = get_std_icon(name[:(-4)]) except AttributeError: icon = QIcon(get_image_path(name, default)) else: icon = QIcon(get_image_path(name, default)) if resample: icon0 = QIcon() for size in (16, 24, 32, 48, 96, 128, 256, 512): icon0.addPixmap(icon.pixmap(size, size)) return icon0 else: return icon
[ "def", "get_icon", "(", "name", ",", "default", "=", "None", ",", "resample", "=", "False", ")", ":", "icon_path", "=", "get_image_path", "(", "name", ",", "default", "=", "None", ")", "if", "(", "icon_path", "is", "not", "None", ")", ":", "icon", "=", "QIcon", "(", "icon_path", ")", "elif", "isinstance", "(", "default", ",", "QIcon", ")", ":", "icon", "=", "default", "elif", "(", "default", "is", "None", ")", ":", "try", ":", "icon", "=", "get_std_icon", "(", "name", "[", ":", "(", "-", "4", ")", "]", ")", "except", "AttributeError", ":", "icon", "=", "QIcon", "(", "get_image_path", "(", "name", ",", "default", ")", ")", "else", ":", "icon", "=", "QIcon", "(", "get_image_path", "(", "name", ",", "default", ")", ")", "if", "resample", ":", "icon0", "=", "QIcon", "(", ")", "for", "size", "in", "(", "16", ",", "24", ",", "32", ",", "48", ",", "96", ",", "128", ",", "256", ",", "512", ")", ":", "icon0", ".", "addPixmap", "(", "icon", ".", "pixmap", "(", "size", ",", "size", ")", ")", "return", "icon0", "else", ":", "return", "icon" ]
retrieve a qicon for the named image from the zip file if it exists .
train
true
5,790
def compileADF(expr, psets): adfdict = {} func = None for (pset, subexpr) in reversed(zip(psets, expr)): pset.context.update(adfdict) func = compile(subexpr, pset) adfdict.update({pset.name: func}) return func
[ "def", "compileADF", "(", "expr", ",", "psets", ")", ":", "adfdict", "=", "{", "}", "func", "=", "None", "for", "(", "pset", ",", "subexpr", ")", "in", "reversed", "(", "zip", "(", "psets", ",", "expr", ")", ")", ":", "pset", ".", "context", ".", "update", "(", "adfdict", ")", "func", "=", "compile", "(", "subexpr", ",", "pset", ")", "adfdict", ".", "update", "(", "{", "pset", ".", "name", ":", "func", "}", ")", "return", "func" ]
compile the expression represented by a list of trees .
train
false
5,791
def watch(no_compress): setup() import time compile_less() build(no_compress=True) while True: compile_less() if files_dirty(): build(no_compress=True) time.sleep(3)
[ "def", "watch", "(", "no_compress", ")", ":", "setup", "(", ")", "import", "time", "compile_less", "(", ")", "build", "(", "no_compress", "=", "True", ")", "while", "True", ":", "compile_less", "(", ")", "if", "files_dirty", "(", ")", ":", "build", "(", "no_compress", "=", "True", ")", "time", ".", "sleep", "(", "3", ")" ]
watch source files and rebuild assets whenever they change .
train
false
5,792
@docfiller def gaussian_gradient_magnitude(input, sigma, output=None, mode='reflect', cval=0.0, **kwargs): input = numpy.asarray(input) def derivative(input, axis, output, mode, cval, sigma, **kwargs): order = ([0] * input.ndim) order[axis] = 1 return gaussian_filter(input, sigma, order, output, mode, cval, **kwargs) return generic_gradient_magnitude(input, derivative, output, mode, cval, extra_arguments=(sigma,), extra_keywords=kwargs)
[ "@", "docfiller", "def", "gaussian_gradient_magnitude", "(", "input", ",", "sigma", ",", "output", "=", "None", ",", "mode", "=", "'reflect'", ",", "cval", "=", "0.0", ",", "**", "kwargs", ")", ":", "input", "=", "numpy", ".", "asarray", "(", "input", ")", "def", "derivative", "(", "input", ",", "axis", ",", "output", ",", "mode", ",", "cval", ",", "sigma", ",", "**", "kwargs", ")", ":", "order", "=", "(", "[", "0", "]", "*", "input", ".", "ndim", ")", "order", "[", "axis", "]", "=", "1", "return", "gaussian_filter", "(", "input", ",", "sigma", ",", "order", ",", "output", ",", "mode", ",", "cval", ",", "**", "kwargs", ")", "return", "generic_gradient_magnitude", "(", "input", ",", "derivative", ",", "output", ",", "mode", ",", "cval", ",", "extra_arguments", "=", "(", "sigma", ",", ")", ",", "extra_keywords", "=", "kwargs", ")" ]
multidimensional gradient magnitude using gaussian derivatives .
train
false
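Assuming SciPy is installed, the public scipy.ndimage entry point can be exercised on a synthetic image; the strongest response lands on the square's edges.

```python
import numpy as np
from scipy import ndimage

img = np.zeros((32, 32))
img[8:24, 8:24] = 1.0                                # bright square on black
edges = ndimage.gaussian_gradient_magnitude(img, sigma=2.0)
print(edges.max(), edges[16, 8])                     # edge pixels respond most
```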
5,793
def init_subsystem(subsystem_type, options=None): init_subsystems([subsystem_type], options)
[ "def", "init_subsystem", "(", "subsystem_type", ",", "options", "=", "None", ")", ":", "init_subsystems", "(", "[", "subsystem_type", "]", ",", "options", ")" ]
singular form of :func:pants_test .
train
false
5,796
def gjrconvertparams(self, params, nar, nma): (p, q) = (nar, nma) ar = np.concatenate(([1], params[:p])) ma = np.zeros(((q + 1), 3)) ma[(0, 0)] = params[(-1)] ma[:, 1] = np.concatenate(([0], params[p:(p + q)])) ma[:, 2] = np.concatenate(([0], params[(p + q):(p + (2 * q))])) mu = params[(-1)] params2 = (ar, ma) return paramsclass
[ "def", "gjrconvertparams", "(", "self", ",", "params", ",", "nar", ",", "nma", ")", ":", "(", "p", ",", "q", ")", "=", "(", "nar", ",", "nma", ")", "ar", "=", "np", ".", "concatenate", "(", "(", "[", "1", "]", ",", "params", "[", ":", "p", "]", ")", ")", "ma", "=", "np", ".", "zeros", "(", "(", "(", "q", "+", "1", ")", ",", "3", ")", ")", "ma", "[", "(", "0", ",", "0", ")", "]", "=", "params", "[", "(", "-", "1", ")", "]", "ma", "[", ":", ",", "1", "]", "=", "np", ".", "concatenate", "(", "(", "[", "0", "]", ",", "params", "[", "p", ":", "(", "p", "+", "q", ")", "]", ")", ")", "ma", "[", ":", ",", "2", "]", "=", "np", ".", "concatenate", "(", "(", "[", "0", "]", ",", "params", "[", "(", "p", "+", "q", ")", ":", "(", "p", "+", "(", "2", "*", "q", ")", ")", "]", ")", ")", "mu", "=", "params", "[", "(", "-", "1", ")", "]", "params2", "=", "(", "ar", ",", "ma", ")", "return", "paramsclass" ]
convert parameters from flat to matrix form ; needs to be overwritten by subclass .
train
false
5,797
def str(val): return format('%.12g', val)
[ "def", "str", "(", "val", ")", ":", "return", "format", "(", "'%.12g'", ",", "val", ")" ]
convert float to string .
train
false
5,798
def get_network_timezone(network): if (network is None): return sr_timezone try: return (tz.gettz(network_dict[network]) or sr_timezone) except Exception: return sr_timezone
[ "def", "get_network_timezone", "(", "network", ")", ":", "if", "(", "network", "is", "None", ")", ":", "return", "sr_timezone", "try", ":", "return", "(", "tz", ".", "gettz", "(", "network_dict", "[", "network", "]", ")", "or", "sr_timezone", ")", "except", "Exception", ":", "return", "sr_timezone" ]
get a timezone of a network from a given network dict .
train
false