Column                 Type    Value range
id_within_dataset      int64   1 to 55.5k
snippet                string  lengths 19 to 14.2k
tokens                 list    lengths 6 to 1.63k
nl                     string  lengths 6 to 352
split_within_dataset   string  1 class
is_duplicated          bool    2 classes
52,131
def ecitmatch(**keywds):
    cgi = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/ecitmatch.cgi'
    variables = _update_ecitmatch_variables(keywds)
    return _open(cgi, variables, ecitmatch=True)
ECitMatch retrieves PubMed IDs that correspond to a set of input citation strings.
train
false
52,132
def make_context(context, request=None, **kwargs):
    if context is not None and not isinstance(context, dict):
        raise TypeError('context must be a dict rather than %s.' % context.__class__.__name__)
    if request is None:
        context = Context(context, **kwargs)
    else:
        original_context = context
        context = RequestContext(request, **kwargs)
        if original_context:
            context.push(original_context)
    return context
Create a suitable Context from a plain dict and, optionally, an HttpRequest.
train
false
52,133
def str_extract(arr, pat, flags=0, expand=None):
    if expand is None:
        warnings.warn('currently extract(expand=None) '
                      'means expand=False (return Index/Series/DataFrame) '
                      'but in a future version of pandas this will be changed '
                      'to expand=True (return DataFrame)',
                      FutureWarning, stacklevel=3)
        expand = False
    if not isinstance(expand, bool):
        raise ValueError('expand must be True or False')
    if expand:
        return _str_extract_frame(arr._orig, pat, flags=flags)
    else:
        result, name = _str_extract_noexpand(arr._data, pat, flags=flags)
        return arr._wrap_result(result, name=name, expand=expand)
For each subject string in the Series, extract groups from the first match of regular expression pat.
train
false
52,135
def kivy_register_post_configuration(callback):
    __kivy_post_configuration.append(callback)
Register a function to be called when kivy_configure() is called.
train
false
52,137
def get_explore_recommendations(user, request):
    data = generate_recommendation_data()
    exercise_parents_table = get_exercise_parents_lookup_table()
    recent_exercises = get_most_recent_exercises(user)
    recent_subtopics = list(set([exercise_parents_table[ex]['subtopic_id']
                                 for ex in recent_exercises
                                 if ex in exercise_parents_table]))
    sampleNum = min(len(recent_subtopics), settings.TOPIC_RECOMMENDATION_DEPTH)
    random_subtopics = random.sample(recent_subtopics, sampleNum)
    added = []
    final = []
    for subtopic_id in random_subtopics:
        related_subtopics = data[subtopic_id]['related_subtopics'][2:7]
        recommended_topic = next(topic for topic in related_subtopics
                                 if topic not in added and topic not in recent_subtopics)
        if recommended_topic:
            final.append({
                'suggested_topic': get_content_item(language=request.language,
                                                    content_id=recommended_topic,
                                                    topic=True) or {},
                'interest_topic': get_content_item(language=request.language,
                                                   content_id=subtopic_id,
                                                   topic=True) or {},
            })
            added.append(recommended_topic)
    return final
Get the recommendations for the explore section.
train
false
52,139
def collect_uncollected_logs(host):
    if host.job:
        try:
            logs = host.job.get_client_logs()
            for hostname, remote_path, local_path in logs:
                if hostname == host.hostname:
                    logging.info('Retrieving logs from %s:%s into %s',
                                 hostname, remote_path, local_path)
                    host.get_file(remote_path + '/', local_path + '/')
        except Exception as e:
            logging.warning('Error while trying to collect stranded '
                            'Autotest client logs: %s', e)
Collects any leftover uncollected logs from the client.
train
false
52,140
def num_obs_dm(d):
    d = np.asarray(d, order='c')
    is_valid_dm(d, tol=np.inf, throw=True, name='d')
    return d.shape[0]
Returns the number of original observations that correspond to a square, redundant distance matrix.
train
false
52,142
def prune_constants(constraints):
    pruned_constraints = []
    for constr in constraints:
        constr_type = type(constr)
        expr = copy.deepcopy(constr.expr)
        is_constant = prune_expr(expr)
        if is_constant:
            expr = lo.LinOp(lo.NO_OP, expr.size, [], None)
        pruned = constr_type(expr, constr.constr_id, constr.size)
        pruned_constraints.append(pruned)
    return pruned_constraints
Returns a new list of constraints with constant terms removed.
train
false
52,144
def _size_of(object_):
    retval = sys.getsizeof(object_, DEFAULT_SIZE_OF)
    if isinstance(object_, dict):
        retval += sum(_size_of(_) for _ in itertools.chain.from_iterable(object_.items()))
    elif hasattr(object_, '__iter__'):
        retval += sum(_size_of(_) for _ in object_)
    return retval
Returns the total size of a given object_.
train
false
52,145
def _process_match(rule, syms):
    subs = {}
    varlist = rule._varlist
    if len(varlist) != len(syms):
        raise RuntimeError("length of varlist doesn't match length of syms.")
    for v, s in zip(varlist, syms):
        if v in subs and subs[v] != s:
            return None
        else:
            subs[v] = s
    return subs
Process a match to determine if it is correct.
train
false
52,146
def _clipped_image_as_bitmap(image, bbox):
    l, b, width, height = bbox.get_bounds()
    r = l + width
    t = b + height
    srcBmp = wx.BitmapFromImage(image)
    srcDC = wx.MemoryDC()
    srcDC.SelectObject(srcBmp)
    destBmp = wx.EmptyBitmap(int(width), int(height))
    destDC = wx.MemoryDC()
    destDC.SelectObject(destBmp)
    destDC.BeginDrawing()
    x = int(l)
    y = int(image.GetHeight() - t)
    destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
    destDC.EndDrawing()
    srcDC.SelectObject(wx.NullBitmap)
    destDC.SelectObject(wx.NullBitmap)
    return destBmp
Convert the region of a wx.Image bounded by bbox to a wx.Bitmap.
train
false
52,149
def get_cli_body_ssh(command, response, module):
    if '^' == response[0]:
        body = []
    elif 'running' in command:
        body = response
    else:
        if command in response[0]:
            response = [response[0].split(command)[1]]
        try:
            body = [json.loads(response[0])]
        except ValueError:
            module.fail_json(msg='Command does not support JSON output',
                             command=command)
    return body
Get response for when transport=cli.
train
false
52,150
def _auth(uri):
    user, password = _get_credentials()
    if user is False or password is False:
        return False
    basic = _HTTPBasicAuthHandler()
    basic.add_password(realm='Tomcat Manager Application', uri=uri,
                       user=user, passwd=password)
    digest = _HTTPDigestAuthHandler()
    digest.add_password(realm='Tomcat Manager Application', uri=uri,
                        user=user, passwd=password)
    return _build_opener(basic, digest)
Returns an authentication handler.
train
true
52,152
def intersecting_ranges(ranges):
    ranges = sorted(ranges, key=op.attrgetter('start'))
    return sorted_diff(ranges, group_ranges(ranges))
Return any ranges that intersect.
train
true
52,153
def log_event(event):
    tracker.send(event)
Capture an event by sending it to the registered trackers.
train
false
52,154
def tensor_indices(s, typ):
    if isinstance(s, str):
        a = [x.name for x in symbols(s, seq=True)]
    else:
        raise ValueError('expecting a string')
    tilist = [TensorIndex(i, typ) for i in a]
    if len(tilist) == 1:
        return tilist[0]
    return tilist
Returns a list of tensor indices given their names and their types. Parameters: s, a string of comma-separated names of indices; typ, the TensorIndexType of the indices.
train
false
52,155
def render_panel(request):
    toolbar = DebugToolbar.fetch(request.GET[u'store_id'])
    if toolbar is None:
        content = _(u"Data for this panel isn't available anymore. Please reload the page and retry.")
        content = u'<p>%s</p>' % escape(content)
    else:
        panel = toolbar.get_panel_by_id(request.GET[u'panel_id'])
        content = panel.content
    return HttpResponse(content)
Render the contents of a panel.
train
false
52,156
def types_msg(instance, types):
    reprs = []
    for type in types:
        try:
            reprs.append(repr(type['name']))
        except Exception:
            reprs.append(repr(type))
    return '%r is not of type %s' % (instance, ', '.join(reprs))
Create an error message for a failure to match the given types.
train
true
52,157
def get_gating_milestone(course_key, content_key, relationship):
    try:
        return find_gating_milestones(course_key, content_key, relationship)[0]
    except IndexError:
        return None
Gets a single gating milestone dict related to the supplied parameters.
train
false
52,158
def _is_connected_by_alternating_path(G, v, matching, targets):
    matched_edges = {(u, v) for (u, v) in matching.items() if u <= v}
    unmatched_edges = set(G.edges()) - matched_edges

    def _alternating_dfs(u, depth, along_matched=True):
        """Returns True if and only if `u` is connected to one of the
        targets by an alternating path.

        `u` is a vertex in the graph `G`.

        `depth` specifies the maximum recursion depth of the depth-first
        search.

        If `along_matched` is True, this step of the depth-first search
        will continue only through edges in the given matching. Otherwise, it
        will continue only through edges *not* in the given matching.

        """
        if u in targets:
            return True
        if depth < 0:
            return False
        valid_edges = matched_edges if along_matched else unmatched_edges
        for v in G[u]:
            if (u, v) in valid_edges or (v, u) in valid_edges:
                return _alternating_dfs(v, depth - 1, not along_matched)
        return False

    return (_alternating_dfs(v, len(G), along_matched=True) or
            _alternating_dfs(v, len(G), along_matched=False))
Returns True if and only if the vertex v is connected to one of the target vertices by an alternating path in G.
train
false
52,159
def p_parameter_list_2(t):
    pass
parameter_list : parameter_list COMMA parameter_declaration.
train
false
52,160
@task
@needs('pavelib.i18n.i18n_extract')
@cmdopts([
    ('settings=', 's', 'The settings to use (defaults to devstack)'),
])
@timed
def i18n_dummy(options):
    settings = options.get('settings', DEFAULT_SETTINGS)
    sh('i18n_tool dummy')
    sh('i18n_tool generate')
    for system in ['lms', 'cms']:
        sh(django_cmd(system, settings, 'compilejsi18n'))
Simulate international translation by generating dummy strings corresponding to source strings.
train
false
52,161
def app_uninstall(app, request):
    try:
        path = apath(app, request)
        rmtree(path)
        return True
    except Exception:
        return False
Uninstalls the application.
train
false
52,162
def coins():
    return load('coins.png')
Greek coins from Pompeii.
train
false
52,164
def new_private_link(name, user, nodes, anonymous):
    key = str(uuid.uuid4()).replace('-', '')
    if name:
        name = strip_html(name)
        if name is None or not name.strip():
            raise ValidationValueError('Invalid link name.')
    else:
        name = 'Shared project link'
    private_link = PrivateLink(key=key, name=name, creator=user, anonymous=anonymous)
    private_link.save()
    private_link.nodes.add(*nodes)
    private_link.save()
    return private_link
Create a new private link.
train
false
52,165
def add_from_csv(csv_file, version=None, overwrite=False):
    diagnostic_messages = []
    for module_name, new_metadata in parse_assigned_metadata_initial(csv_file):
        filename = module_loader.find_plugin(module_name, mod_type='.py')
        if filename is None:
            diagnostic_messages.append('Unable to find the module file for {}'.format(module_name))
            continue
        try:
            write_metadata(filename, new_metadata, version, overwrite)
        except ParseError as e:
            diagnostic_messages.append(e.args[0])
            continue
    if diagnostic_messages:
        pprint(diagnostic_messages)
    return 0
Implement the subcommand to add metadata from a CSV file.
train
false
52,166
@RegisterWithArgChecks(name='evpn_prefix.delete_local',
                       req_args=[EVPN_ROUTE_TYPE, ROUTE_DISTINGUISHER],
                       opt_args=[EVPN_ESI, EVPN_ETHERNET_TAG_ID, MAC_ADDR,
                                 IP_ADDR, IP_PREFIX, EVPN_VNI])
def delete_evpn_local(route_type, route_dist, **kwargs):
    try:
        tm = CORE_MANAGER.get_core_service().table_manager
        tm.update_vrf_table(route_dist, route_family=VRF_RF_L2_EVPN,
                            route_type=route_type, is_withdraw=True, **kwargs)
        return [{EVPN_ROUTE_TYPE: route_type,
                 ROUTE_DISTINGUISHER: route_dist,
                 VRF_RF: VRF_RF_L2_EVPN}.update(kwargs)]
    except BgpCoreError as e:
        raise PrefixError(desc=e)
Deletes/withdraws an EVPN route from the VRF identified by *route_dist*.
train
false
52,167
def _mult_gf2(f1, f2):
    if f2 > f1:
        f1, f2 = f2, f1
    z = 0
    while f2:
        if f2 & 1:
            z ^= f1
        f1 <<= 1
        f2 >>= 1
    return z
Multiply two polynomials in GF(2).
train
false
52,168
def _create_user(user_id, email):
    user_settings = get_user_settings(user_id, strict=False)
    if user_settings is not None:
        raise Exception('User %s already exists.' % user_id)
    user_settings = UserSettings(
        user_id, email,
        preferred_language_codes=[feconf.DEFAULT_LANGUAGE_CODE])
    _save_user_settings(user_settings)
    create_user_contributions(user_id, [], [])
    return user_settings
Creates a new user.
train
false
52,169
def _betmap(G_normalized_weight_sources_tuple):
    return nx.betweenness_centrality_source(*G_normalized_weight_sources_tuple)
Pool for multiprocessing only accepts functions with one argument.
train
false
52,171
def _routestopped_config(routestopped):
    if routestopped is None:
        routestopped = []
    lines = [ROUTESTOPPED_HEADER]
    for entry in routestopped:
        entry.setdefault('interface', 'eth0')
        entry.setdefault('host', '0.0.0.0/0')
        entry.setdefault('options', '-')
        entry.setdefault('proto', '-')
        entry.setdefault('dest_port', '-')
        entry.setdefault('source_port', '-')
        if isinstance(entry['host'], list):
            entry['host'] = ','.join(entry['host'])
        if isinstance(entry['options'], list):
            entry['options'] = ','.join(entry['options'])
        lines.append(ROUTESTOPPED_FORMAT % entry)
    file('/etc/shorewall/routestopped', contents=''.join(lines), use_sudo=True)
Routestopped configuration: this lists the hosts that should be accessible when the firewall is stopped or starting.
train
false
52,172
def get_sample_indices(cat_sam_groups, bt):
    cat_sam_indices = defaultdict(list)
    for k, v in cat_sam_groups.iteritems():
        cat_sam_indices[k] = [bt.index(i, axis='sample') for i in v]
    return cat_sam_indices
Create a {category_value: index_of_sample_with_that_value} dict.
train
false
52,174
def base62_decode(string, alphabet=ALPHABET):
    base = len(alphabet)
    strlen = len(string)
    num = 0
    idx = 0
    for char in string:
        power = strlen - (idx + 1)
        num += alphabet.index(char) * (base ** power)
        idx += 1
    return num
Decode a base-X encoded string into the number. Arguments: string, the encoded string; alphabet, the alphabet to use for encoding.
train
true
52,176
def gens_products(*v):
    res_size, res_base, res_gens = tensor_gens(*v[0])
    for i in range(1, len(v)):
        size, base, gens = tensor_gens(*v[i])
        res_base, res_gens = bsgs_direct_product(res_base, res_gens, base, gens, 1)
    res_size = res_gens[0].size
    id_af = list(range(res_size))
    res_gens = [h for h in res_gens if h != id_af]
    if not res_gens:
        res_gens = [id_af]
    return res_size, res_base, res_gens
Returns size, res_base, res_gens BSGS for n tensors of different types.
train
false
52,177
def name_to_uid(name):
    try:
        uid = int(name)
    except ValueError:
        try:
            pwdrec = pwd.getpwnam(name)
        except KeyError:
            raise ValueError('Invalid user name %s' % name)
        uid = pwdrec[2]
    else:
        try:
            pwd.getpwuid(uid)
        except KeyError:
            raise ValueError('Invalid user id %s' % name)
    return uid
Find a user ID from a string containing a user name or ID.
train
false
52,178
@run.command()
@click.option('--pidfile', help='Optional file used to store the process pid. The program will not start if this file already exists and the pid is still alive.')
@click.option('--logfile', '-f', help='Path to log file. If no logfile is specified, stderr is used.')
@click.option('--quiet', '-q', is_flag=True, default=False)
@click.option('--no-color', is_flag=True, default=False)
@click.option('--autoreload', is_flag=True, default=False, help='Enable autoreloading.')
@click.option('--without-gossip', is_flag=True, default=False)
@click.option('--without-mingle', is_flag=True, default=False)
@click.option('--without-heartbeat', is_flag=True, default=False)
@log_options()
@configuration
def cron(**options):
    from django.conf import settings
    if settings.CELERY_ALWAYS_EAGER:
        raise click.ClickException('Disable CELERY_ALWAYS_EAGER in your settings file to spawn workers.')
    from sentry.celery import app
    app.Beat(**options).run()
Run the periodic task dispatcher.
train
false
52,179
def central_server_test(f):
    return x_server_test(f, settings.CENTRAL_SERVER, 'Central server test')
Run the test only on the central server.
train
false
52,180
def user_exists_in_group(user_name, group_name, region=None, key=None,
                         keyid=None, profile=None):
    users = get_group_members(group_name=group_name, region=region, key=key,
                              keyid=keyid, profile=profile)
    if users:
        for _user in users:
            if user_name == _user['user_name']:
                msg = 'Username : {0} is already in group {1}.'
                log.info(msg.format(user_name, group_name))
                return True
    return False
Check if user exists in group.
train
false
52,181
def _write_w(filename, vertices, data):
    assert len(vertices) == len(data)
    fid = open(filename, 'wb')
    fid.write(np.zeros(2, dtype=np.uint8).tostring())
    vertices_n = len(vertices)
    _write_3(fid, vertices_n)
    for i in range(vertices_n):
        _write_3(fid, vertices[i])
        fid.write(np.array(float(data[i]), dtype='>f4').tostring())
    fid.close()
Write a w file.
train
false
52,182
def update_course_creator_group(caller, user, add):
    if add:
        auth.add_users(caller, CourseCreatorRole(), user)
    else:
        auth.remove_users(caller, CourseCreatorRole(), user)
Method for adding and removing users from the creator group.
train
false
52,183
def release_eip_address(public_ip=None, allocation_id=None, region=None,
                        key=None, keyid=None, profile=None):
    if not salt.utils.exactly_one((public_ip, allocation_id)):
        raise SaltInvocationError("Exactly one of 'public_ip' OR 'allocation_id' must be provided")
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        return conn.release_address(public_ip, allocation_id)
    except boto.exception.BotoServerError as e:
        log.error(e)
        return False
Free an Elastic IP address.
train
true
52,184
def notes_enabled_for_course(course):
    tab_found = 'notes' in course.advanced_modules
    feature_enabled = settings.FEATURES.get('ENABLE_STUDENT_NOTES')
    return feature_enabled and tab_found
Returns True if the notes app is enabled for the course.
train
false
52,186
def set_value(dictionary, keys, value):
    if not keys:
        dictionary.update(value)
        return
    for key in keys[:-1]:
        if key not in dictionary:
            dictionary[key] = {}
        dictionary = dictionary[key]
    dictionary[keys[-1]] = value
Sets a value in a nested dictionary, given a list of keys.
train
true
52,188
def conference_results(meeting):
    try:
        conf = Conference.find_one(Q('endpoint', 'iexact', meeting))
    except ModularOdmException:
        raise HTTPError(httplib.NOT_FOUND)
    data = conference_data(meeting)
    return {'data': data, 'label': meeting,
            'meeting': serialize_conference(conf), 'settings': settings}
Return the data for the grid view for a conference.
train
false
52,189
def select_volume(filename, which):
    from nibabel import load
    import numpy as np
    if which.lower() == u'first':
        idx = 0
    elif which.lower() == u'middle':
        idx = int(np.ceil(load(filename).shape[3] / 2))
    else:
        raise Exception(u'unknown value for volume selection : %s' % which)
    return idx
Return the middle index of a file.
train
false
52,190
def _config_key_value(line, splitchar):
    try:
        k, v = line.split(splitchar, 1)
    except ValueError:
        k = line
        v = u'true'
    return k, _config_to_python(v)
Split a config line into a pair.
train
false
52,191
def test_completion_for_default_parameters(script):
    res, env = setup_completion(script, 'pip --', '1')
    assert '--help' in res.stdout, 'autocomplete function could not complete ``--``'
Test getting completion for --; it should contain --help.
train
false
52,193
def isFileWithFileTypeWithoutWords(fileType, fileName, words):
    fileName = os.path.basename(fileName)
    fileTypeDot = '.' + fileType
    if not fileName.endswith(fileTypeDot):
        return False
    for word in words:
        if fileName.find(word) >= 0:
            return False
    return True
Determine if the file has a given file type.
train
false
52,194
def get_galaxy_test_tmp_dir():
    galaxy_test_tmp_dir = os.environ.get('GALAXY_TEST_TMP_DIR', None)
    if galaxy_test_tmp_dir is None:
        galaxy_test_tmp_dir = tempfile.mkdtemp()
    return galaxy_test_tmp_dir
Create a test directory for use by the Galaxy server being set up for testing.
train
false
52,196
@pytest.fixture
def nb_lock():
    return lock(block=False)
Non-blocking lock fixture.
train
false
52,197
def _cache_server_process(queue, return_failed, cache_root):
    httpd = None
    try:
        with temporary_dir() as tmpdir:
            cache_root = cache_root if cache_root else tmpdir
            with pushd(cache_root):
                if return_failed:
                    handler = FailRESTHandler
                else:
                    handler = SimpleRESTHandler
                httpd = SocketServer.TCPServer((u'localhost', 0), handler)
                port = httpd.server_address[1]
                queue.put(port)
                httpd.serve_forever()
    finally:
        if httpd:
            httpd.shutdown()
A pickleable top-level function to wrap a SimpleRESTHandler.
train
false
52,199
def rs_client(h=client_context.host, p=client_context.port, **kwargs):
    return _mongo_client(h, p, **kwargs)
Connect to the replica set and authenticate if necessary.
train
false
52,200
def multiline_string_lines(source, include_docstrings=False):
    line_numbers = set()
    previous_token_type = u''
    try:
        for t in generate_tokens(source):
            token_type = t[0]
            start_row = t[2][0]
            end_row = t[3][0]
            if token_type == tokenize.STRING and start_row != end_row:
                if include_docstrings or previous_token_type != tokenize.INDENT:
                    line_numbers |= set(range(1 + start_row, 1 + end_row))
            previous_token_type = token_type
    except (SyntaxError, tokenize.TokenError):
        pass
    return line_numbers
Return line numbers that are within multiline strings.
train
true
52,201
def isXSegmentIntersectingPaths(paths, segmentFirstX, segmentSecondX, segmentYMirror, y):
    for path in paths:
        if isXSegmentIntersectingPath(path, segmentFirstX, segmentSecondX, segmentYMirror, y):
            return True
    return False
Determine if a path list is crossing inside the x segment.
train
false
52,202
def delete_keys(context, qos_specs_id, keys):
    if qos_specs_id is None:
        msg = _('id cannot be None')
        raise exception.InvalidQoSSpecs(reason=msg)
    qos_spec = objects.QualityOfServiceSpecs.get_by_id(context, qos_specs_id)
    try:
        for key in keys:
            try:
                del qos_spec.specs[key]
            except KeyError:
                raise exception.QoSSpecsKeyNotFound(specs_key=key,
                                                    specs_id=qos_specs_id)
    finally:
        qos_spec.save()
Marks specified key of target qos specs as deleted.
train
false
52,203
def reduceCopyRegistered(cr):
    return (CopyRegisteredLoaded, ())
Externally implement C{__reduce__} for L{CopyRegistered}.
train
false
52,204
def get_os_id(uname_func=None):
    uname_func = uname_func or os.uname
    sysname, _, release, _, machine = uname_func()
    os_id = _ID_BY_OS.get(sysname.lower())
    if os_id:
        return os_id(release, machine)
    return None
Return an OS identifier sensitive only to its major version.
train
false
52,206
def vsprint(expr, **settings):
    string_printer = VectorStrPrinter(settings)
    return string_printer.doprint(expr)
Function for displaying expressions generated in the sympy.physics.vector package.
train
false
52,208
def async_test(method):
    @functools.wraps(method)
    def Wrapper(self):
        method(self)
        self.wait()
    return Wrapper
Decorator for tests running in test cases derived from tornado.testing.AsyncTestCase.
train
false
52,209
def is_on_path(name):
    for loader, name_, ispkg in pkgutil.iter_modules(sys.path):
        if name == name_:
            return True
    else:
        return False
Is a top-level package/module found on sys.path?
train
false
52,210
def rCopy(d, f=identityConversion, discardNoneKeys=True, deepCopy=True): if deepCopy: d = copy.deepcopy(d) newDict = {} toCopy = [(k, v, newDict, ()) for (k, v) in d.iteritems()] while (len(toCopy) > 0): (k, v, d, prevKeys) = toCopy.pop() prevKeys = (prevKeys + (k,)) if isinstance(v, dict): d[k] = dict() toCopy[0:0] = [(innerK, innerV, d[k], prevKeys) for (innerK, innerV) in v.iteritems()] else: newV = f(v, prevKeys) if ((not discardNoneKeys) or (newV is not None)): d[k] = newV return newDict
[ "def", "rCopy", "(", "d", ",", "f", "=", "identityConversion", ",", "discardNoneKeys", "=", "True", ",", "deepCopy", "=", "True", ")", ":", "if", "deepCopy", ":", "d", "=", "copy", ".", "deepcopy", "(", "d", ")", "newDict", "=", "{", "}", "toCopy", "=", "[", "(", "k", ",", "v", ",", "newDict", ",", "(", ")", ")", "for", "(", "k", ",", "v", ")", "in", "d", ".", "iteritems", "(", ")", "]", "while", "(", "len", "(", "toCopy", ")", ">", "0", ")", ":", "(", "k", ",", "v", ",", "d", ",", "prevKeys", ")", "=", "toCopy", ".", "pop", "(", ")", "prevKeys", "=", "(", "prevKeys", "+", "(", "k", ",", ")", ")", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "d", "[", "k", "]", "=", "dict", "(", ")", "toCopy", "[", "0", ":", "0", "]", "=", "[", "(", "innerK", ",", "innerV", ",", "d", "[", "k", "]", ",", "prevKeys", ")", "for", "(", "innerK", ",", "innerV", ")", "in", "v", ".", "iteritems", "(", ")", "]", "else", ":", "newV", "=", "f", "(", "v", ",", "prevKeys", ")", "if", "(", "(", "not", "discardNoneKeys", ")", "or", "(", "newV", "is", "not", "None", ")", ")", ":", "d", "[", "k", "]", "=", "newV", "return", "newDict" ]
recursively copies a dict, applying a conversion function to each non-dict value, and returns the result .
train
true
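A minimal recursive equivalent of rCopy for illustration; it simplifies the key-path bookkeeping to just the leaf key, so it is a sketch of the idea rather than a drop-in replacement:

import copy

def r_copy(d, f=lambda v, keys: v, discard_none=True):
    out = {}
    for k, v in d.items():
        if isinstance(v, dict):
            out[k] = r_copy(v, f, discard_none)
        else:
            new_v = f(copy.deepcopy(v), (k,))
            if not discard_none or new_v is not None:
                out[k] = new_v
    return out

# None leaves are discarded by default, mirroring discardNoneKeys=True:
print(r_copy({'a': 1, 'b': {'c': None}}))  # {'a': 1, 'b': {}}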
52,212
def get_docstring_and_rest(filename): with open(filename, 'rb') as fid: content = fid.read() content = content.replace('\r\n', '\n') try: node = ast.parse(content) except SyntaxError: return (SYNTAX_ERROR_DOCSTRING, content.decode('utf-8')) if (not isinstance(node, ast.Module)): raise TypeError('This function only supports modules. You provided {0}'.format(node.__class__.__name__)) if (node.body and isinstance(node.body[0], ast.Expr) and isinstance(node.body[0].value, ast.Str)): docstring_node = node.body[0] docstring = docstring_node.value.s if hasattr(docstring, 'decode'): docstring = docstring.decode('utf-8') rest = content.decode('utf-8').split('\n', docstring_node.lineno)[(-1)] return (docstring, rest) else: raise ValueError('Could not find docstring in file "{0}". A docstring is required by sphinx-gallery'.format(filename))
[ "def", "get_docstring_and_rest", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "fid", ":", "content", "=", "fid", ".", "read", "(", ")", "content", "=", "content", ".", "replace", "(", "'\\r\\n'", ",", "'\\n'", ")", "try", ":", "node", "=", "ast", ".", "parse", "(", "content", ")", "except", "SyntaxError", ":", "return", "(", "SYNTAX_ERROR_DOCSTRING", ",", "content", ".", "decode", "(", "'utf-8'", ")", ")", "if", "(", "not", "isinstance", "(", "node", ",", "ast", ".", "Module", ")", ")", ":", "raise", "TypeError", "(", "'This function only supports modules. You provided {0}'", ".", "format", "(", "node", ".", "__class__", ".", "__name__", ")", ")", "if", "(", "node", ".", "body", "and", "isinstance", "(", "node", ".", "body", "[", "0", "]", ",", "ast", ".", "Expr", ")", "and", "isinstance", "(", "node", ".", "body", "[", "0", "]", ".", "value", ",", "ast", ".", "Str", ")", ")", ":", "docstring_node", "=", "node", ".", "body", "[", "0", "]", "docstring", "=", "docstring_node", ".", "value", ".", "s", "if", "hasattr", "(", "docstring", ",", "'decode'", ")", ":", "docstring", "=", "docstring", ".", "decode", "(", "'utf-8'", ")", "rest", "=", "content", ".", "decode", "(", "'utf-8'", ")", ".", "split", "(", "'\\n'", ",", "docstring_node", ".", "lineno", ")", "[", "(", "-", "1", ")", "]", "return", "(", "docstring", ",", "rest", ")", "else", ":", "raise", "ValueError", "(", "'Could not find docstring in file \"{0}\". A docstring is required by sphinx-gallery'", ".", "format", "(", "filename", ")", ")" ]
separate a file's content into its docstring and the rest ; strongly inspired by ast.get_docstring .
train
false
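For the docstring half of this split, the stdlib already offers a shortcut; a small sketch of the same ast-based extraction, independent of the snippet's full docstring/rest split:

import ast

source = '"""Module docstring."""\nrest = "of the file"\n'
tree = ast.parse(source)
# ast.get_docstring performs the isinstance checks the snippet does by hand.
print(ast.get_docstring(tree))  # 'Module docstring.'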
52,213
def getComplexByPrefixes(prefixes, valueComplex, xmlElement): for prefix in prefixes: valueComplex = getComplexByPrefix(prefix, valueComplex, xmlElement) return valueComplex
[ "def", "getComplexByPrefixes", "(", "prefixes", ",", "valueComplex", ",", "xmlElement", ")", ":", "for", "prefix", "in", "prefixes", ":", "valueComplex", "=", "getComplexByPrefix", "(", "prefix", ",", "valueComplex", ",", "xmlElement", ")", "return", "valueComplex" ]
get complex from prefixes and xml element .
train
false
52,214
def rep_innerproduct(expr, **options): if (not isinstance(expr, (KetBase, BraBase))): raise TypeError('expr passed is not a Bra or Ket') basis = get_basis(expr, **options) if (not isinstance(basis, StateBase)): raise NotImplementedError("Can't form this representation!") if (not ('index' in options)): options['index'] = 1 basis_kets = enumerate_states(basis, options['index'], 2) if isinstance(expr, BraBase): bra = expr ket = (basis_kets[1] if (basis_kets[0].dual == expr) else basis_kets[0]) else: bra = (basis_kets[1].dual if (basis_kets[0] == expr) else basis_kets[0].dual) ket = expr prod = InnerProduct(bra, ket) result = prod.doit() format = options.get('format', 'sympy') return expr._format_represent(result, format)
[ "def", "rep_innerproduct", "(", "expr", ",", "**", "options", ")", ":", "if", "(", "not", "isinstance", "(", "expr", ",", "(", "KetBase", ",", "BraBase", ")", ")", ")", ":", "raise", "TypeError", "(", "'expr passed is not a Bra or Ket'", ")", "basis", "=", "get_basis", "(", "expr", ",", "**", "options", ")", "if", "(", "not", "isinstance", "(", "basis", ",", "StateBase", ")", ")", ":", "raise", "NotImplementedError", "(", "\"Can't form this representation!\"", ")", "if", "(", "not", "(", "'index'", "in", "options", ")", ")", ":", "options", "[", "'index'", "]", "=", "1", "basis_kets", "=", "enumerate_states", "(", "basis", ",", "options", "[", "'index'", "]", ",", "2", ")", "if", "isinstance", "(", "expr", ",", "BraBase", ")", ":", "bra", "=", "expr", "ket", "=", "(", "basis_kets", "[", "1", "]", "if", "(", "basis_kets", "[", "0", "]", ".", "dual", "==", "expr", ")", "else", "basis_kets", "[", "0", "]", ")", "else", ":", "bra", "=", "(", "basis_kets", "[", "1", "]", ".", "dual", "if", "(", "basis_kets", "[", "0", "]", "==", "expr", ")", "else", "basis_kets", "[", "0", "]", ".", "dual", ")", "ket", "=", "expr", "prod", "=", "InnerProduct", "(", "bra", ",", "ket", ")", "result", "=", "prod", ".", "doit", "(", ")", "format", "=", "options", ".", "get", "(", "'format'", ",", "'sympy'", ")", "return", "expr", ".", "_format_represent", "(", "result", ",", "format", ")" ]
returns an InnerProduct-like representation for the given state .
train
false
52,215
def fake_access(path, mode): if (mode & (os.W_OK | os.X_OK)): return False else: return FakeFile.is_file_accessible(path)
[ "def", "fake_access", "(", "path", ",", "mode", ")", ":", "if", "(", "mode", "&", "(", "os", ".", "W_OK", "|", "os", ".", "X_OK", ")", ")", ":", "return", "False", "else", ":", "return", "FakeFile", ".", "is_file_accessible", "(", "path", ")" ]
fake version of os.access : denies write/execute permission and delegates read checks to FakeFile.is_file_accessible .
train
false
52,216
def mentions(): t = Twitter(auth=authen()) num = c['HOME_TWEET_NUM'] if g['stuff'].isdigit(): num = int(g['stuff']) for tweet in reversed(t.statuses.mentions_timeline(count=num)): draw(t=tweet) printNicely('')
[ "def", "mentions", "(", ")", ":", "t", "=", "Twitter", "(", "auth", "=", "authen", "(", ")", ")", "num", "=", "c", "[", "'HOME_TWEET_NUM'", "]", "if", "g", "[", "'stuff'", "]", ".", "isdigit", "(", ")", ":", "num", "=", "int", "(", "g", "[", "'stuff'", "]", ")", "for", "tweet", "in", "reversed", "(", "t", ".", "statuses", ".", "mentions_timeline", "(", "count", "=", "num", ")", ")", ":", "draw", "(", "t", "=", "tweet", ")", "printNicely", "(", "''", ")" ]
mentions timeline .
train
false
52,217
def candlestick_ohlc(ax, quotes, width=0.2, colorup=u'k', colordown=u'r', alpha=1.0): return _candlestick(ax, quotes, width=width, colorup=colorup, colordown=colordown, alpha=alpha, ochl=False)
[ "def", "candlestick_ohlc", "(", "ax", ",", "quotes", ",", "width", "=", "0.2", ",", "colorup", "=", "u'k'", ",", "colordown", "=", "u'r'", ",", "alpha", "=", "1.0", ")", ":", "return", "_candlestick", "(", "ax", ",", "quotes", ",", "width", "=", "width", ",", "colorup", "=", "colorup", ",", "colordown", "=", "colordown", ",", "alpha", "=", "alpha", ",", "ochl", "=", "False", ")" ]
plot the time, open, high, low, close as candlesticks .
train
false
52,218
def get_default_pants_config_file(): return os.path.join(get_buildroot(), u'pants.ini')
[ "def", "get_default_pants_config_file", "(", ")", ":", "return", "os", ".", "path", ".", "join", "(", "get_buildroot", "(", ")", ",", "u'pants.ini'", ")" ]
return the default location of the pants config file .
train
false
52,220
def asin(x): np = import_module('numpy') if isinstance(x, (int, float)): if (abs(x) > 1): return interval((- np.inf), np.inf, is_valid=False) else: return interval(np.arcsin(x), np.arcsin(x)) elif isinstance(x, interval): if ((x.is_valid is False) or (x.start > 1) or (x.end < (-1))): return interval((- np.inf), np.inf, is_valid=False) elif ((x.start < (-1)) or (x.end > 1)): return interval((- np.inf), np.inf, is_valid=None) else: start = np.arcsin(x.start) end = np.arcsin(x.end) return interval(start, end, is_valid=x.is_valid)
[ "def", "asin", "(", "x", ")", ":", "np", "=", "import_module", "(", "'numpy'", ")", "if", "isinstance", "(", "x", ",", "(", "int", ",", "float", ")", ")", ":", "if", "(", "abs", "(", "x", ")", ">", "1", ")", ":", "return", "interval", "(", "(", "-", "np", ".", "inf", ")", ",", "np", ".", "inf", ",", "is_valid", "=", "False", ")", "else", ":", "return", "interval", "(", "np", ".", "arcsin", "(", "x", ")", ",", "np", ".", "arcsin", "(", "x", ")", ")", "elif", "isinstance", "(", "x", ",", "interval", ")", ":", "if", "(", "(", "x", ".", "is_valid", "is", "False", ")", "or", "(", "x", ".", "start", ">", "1", ")", "or", "(", "x", ".", "end", "<", "(", "-", "1", ")", ")", ")", ":", "return", "interval", "(", "(", "-", "np", ".", "inf", ")", ",", "np", ".", "inf", ",", "is_valid", "=", "False", ")", "elif", "(", "(", "x", ".", "start", "<", "(", "-", "1", ")", ")", "or", "(", "x", ".", "end", ">", "1", ")", ")", ":", "return", "interval", "(", "(", "-", "np", ".", "inf", ")", ",", "np", ".", "inf", ",", "is_valid", "=", "None", ")", "else", ":", "start", "=", "np", ".", "arcsin", "(", "x", ".", "start", ")", "end", "=", "np", ".", "arcsin", "(", "x", ".", "end", ")", "return", "interval", "(", "start", ",", "end", ",", "is_valid", "=", "x", ".", "is_valid", ")" ]
evaluates the inverse sine of an interval .
train
false
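The interval logic rests on arcsin being monotonically increasing on [-1, 1], so a valid interval maps to the interval of its endpoint images; a sketch of that core fact:

import numpy as np

start, end = -0.5, 0.5
# Monotonicity means the image of [start, end] is [arcsin(start), arcsin(end)].
print(np.arcsin(start), np.arcsin(end))  # -0.5235... 0.5235...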
52,221
def test_write_table_html_fill_values_optional_columns(): buffer_output = StringIO() t = Table([[1], [1]], names=('a', 'b')) ascii.write(t, buffer_output, fill_values=('1', 'Hello world', 'b'), format='html') t_expected = Table([[1], ['Hello world']], names=('a', 'b')) buffer_expected = StringIO() ascii.write(t_expected, buffer_expected, format='html') assert (buffer_output.getvalue() == buffer_expected.getvalue())
[ "def", "test_write_table_html_fill_values_optional_columns", "(", ")", ":", "buffer_output", "=", "StringIO", "(", ")", "t", "=", "Table", "(", "[", "[", "1", "]", ",", "[", "1", "]", "]", ",", "names", "=", "(", "'a'", ",", "'b'", ")", ")", "ascii", ".", "write", "(", "t", ",", "buffer_output", ",", "fill_values", "=", "(", "'1'", ",", "'Hello world'", ",", "'b'", ")", ",", "format", "=", "'html'", ")", "t_expected", "=", "Table", "(", "[", "[", "1", "]", ",", "[", "'Hello world'", "]", "]", ",", "names", "=", "(", "'a'", ",", "'b'", ")", ")", "buffer_expected", "=", "StringIO", "(", ")", "ascii", ".", "write", "(", "t_expected", ",", "buffer_expected", ",", "format", "=", "'html'", ")", "assert", "(", "buffer_output", ".", "getvalue", "(", ")", "==", "buffer_expected", ".", "getvalue", "(", ")", ")" ]
test that passing an optional column in fill_values only replaces matching columns .
train
false
52,222
def extract_cookies_to_jar(jar, request, response): req = MockRequest(request) res = MockResponse(response._original_response.msg) jar.extract_cookies(res, req)
[ "def", "extract_cookies_to_jar", "(", "jar", ",", "request", ",", "response", ")", ":", "req", "=", "MockRequest", "(", "request", ")", "res", "=", "MockResponse", "(", "response", ".", "_original_response", ".", "msg", ")", "jar", ".", "extract_cookies", "(", "res", ",", "req", ")" ]
extract the cookies from the response into a cookiejar .
train
true
52,225
def _convert_format(format, reverse=False): if reverse: return _convert_record2fits(format) else: return _convert_fits2record(format)
[ "def", "_convert_format", "(", "format", ",", "reverse", "=", "False", ")", ":", "if", "reverse", ":", "return", "_convert_record2fits", "(", "format", ")", "else", ":", "return", "_convert_fits2record", "(", "format", ")" ]
convert a fits format spec to a record format spec, or the reverse when reverse is true .
train
false
52,226
def importxml(db, xmlinput): import cStringIO import xml.dom.minidom try: doc = xml.dom.minidom.parseString(xmlinput) except: raise Exception('XML parse error') parent = doc.childNodes[0].tagName csvout = csvheader(parent, doc.childNodes[0].childNodes) for subnode in doc.childNodes: csvout = (csvout + csvdata(subnode.childNodes)) fh = cStringIO.StringIO() fh.write(csvout) fh.seek(0, 0) db[parent].import_from_csv_file(fh)
[ "def", "importxml", "(", "db", ",", "xmlinput", ")", ":", "import", "cStringIO", "import", "xml", ".", "dom", ".", "minidom", "try", ":", "doc", "=", "xml", ".", "dom", ".", "minidom", ".", "parseString", "(", "xmlinput", ")", "except", ":", "raise", "Exception", "(", "'XML parse error'", ")", "parent", "=", "doc", ".", "childNodes", "[", "0", "]", ".", "tagName", "csvout", "=", "csvheader", "(", "parent", ",", "doc", ".", "childNodes", "[", "0", "]", ".", "childNodes", ")", "for", "subnode", "in", "doc", ".", "childNodes", ":", "csvout", "=", "(", "csvout", "+", "csvdata", "(", "subnode", ".", "childNodes", ")", ")", "fh", "=", "cStringIO", ".", "StringIO", "(", ")", "fh", ".", "write", "(", "csvout", ")", "fh", ".", "seek", "(", "0", ",", "0", ")", "db", "[", "parent", "]", ".", "import_from_csv_file", "(", "fh", ")" ]
converts the xml to a csv compatible with the import_from_csv_file of web2py . @todo: rewrite this to go via s3resource for proper auth checking .
train
false
52,228
def _get_out_class(tables): out_class = tables[0].__class__ for t in tables[1:]: if issubclass(t.__class__, out_class): out_class = t.__class__ return out_class
[ "def", "_get_out_class", "(", "tables", ")", ":", "out_class", "=", "tables", "[", "0", "]", ".", "__class__", "for", "t", "in", "tables", "[", "1", ":", "]", ":", "if", "issubclass", "(", "t", ".", "__class__", ",", "out_class", ")", ":", "out_class", "=", "t", ".", "__class__", "return", "out_class" ]
from a list of table instances get the merged output table class .
train
false
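A self-contained sketch with dummy classes standing in for real table types; the loop keeps the most derived class seen:

class Table(object):
    pass

class QTable(Table):
    pass

def _get_out_class(tables):
    out_class = tables[0].__class__
    for t in tables[1:]:
        if issubclass(t.__class__, out_class):
            out_class = t.__class__
    return out_class

print(_get_out_class([Table(), QTable()]).__name__)  # 'QTable'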
52,229
def timedelta_to_seconds(td): return (td.seconds + (((td.days * 24) * 60) * 60))
[ "def", "timedelta_to_seconds", "(", "td", ")", ":", "return", "(", "td", ".", "seconds", "+", "(", "(", "(", "td", ".", "days", "*", "24", ")", "*", "60", ")", "*", "60", ")", ")" ]
converts a timedelta instance to seconds .
train
false
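Usage sketch; note the helper ignores the microseconds component, whereas the stdlib's timedelta.total_seconds() includes it:

from datetime import timedelta

def timedelta_to_seconds(td):
    return td.seconds + td.days * 24 * 60 * 60

td = timedelta(days=2, hours=3, minutes=4)
print(timedelta_to_seconds(td))  # 183840
print(td.total_seconds())        # 183840.0 (also counts microseconds)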
52,230
def create_service_from_json(task_handle, data, srv, event_id, service_ids=None): if (service_ids is None): service_ids = {} global CUR_ID data.sort(key=(lambda k: k['id'])) ids = {} ct = 0 total = len(data) for obj in data: ct += 1 update_state(task_handle, ('Importing %s (%d/%d)' % (srv[0], ct, total))) (old_id, obj) = _trim_id(obj) CUR_ID = old_id obj = _delete_fields(srv, obj) obj = _fix_related_fields(srv, obj, service_ids) new_obj = srv[1].create(event_id, obj, 'dont')[0] ids[old_id] = new_obj.id _upload_media_queue(srv, new_obj) return ids
[ "def", "create_service_from_json", "(", "task_handle", ",", "data", ",", "srv", ",", "event_id", ",", "service_ids", "=", "None", ")", ":", "if", "(", "service_ids", "is", "None", ")", ":", "service_ids", "=", "{", "}", "global", "CUR_ID", "data", ".", "sort", "(", "key", "=", "(", "lambda", "k", ":", "k", "[", "'id'", "]", ")", ")", "ids", "=", "{", "}", "ct", "=", "0", "total", "=", "len", "(", "data", ")", "for", "obj", "in", "data", ":", "ct", "+=", "1", "update_state", "(", "task_handle", ",", "(", "'Importing %s (%d/%d)'", "%", "(", "srv", "[", "0", "]", ",", "ct", ",", "total", ")", ")", ")", "(", "old_id", ",", "obj", ")", "=", "_trim_id", "(", "obj", ")", "CUR_ID", "=", "old_id", "obj", "=", "_delete_fields", "(", "srv", ",", "obj", ")", "obj", "=", "_fix_related_fields", "(", "srv", ",", "obj", ",", "service_ids", ")", "new_obj", "=", "srv", "[", "1", "]", ".", "create", "(", "event_id", ",", "obj", ",", "'dont'", ")", "[", "0", "]", "ids", "[", "old_id", "]", "=", "new_obj", ".", "id", "_upload_media_queue", "(", "srv", ",", "new_obj", ")", "return", "ids" ]
given :data as json, create the service objects for the event and return a mapping from old ids to new ids .
train
false
52,231
def s3_text_represent(text, truncate=True, lines=5, _class=None): if (not text): text = current.messages['NONE'] if (_class is None): selector = '.text-body' _class = 'text-body' else: selector = ('.%s' % _class) _class = ('text-body %s' % _class) if truncate: s3_trunk8(selector=selector, lines=lines) return DIV(text, _class='text-body')
[ "def", "s3_text_represent", "(", "text", ",", "truncate", "=", "True", ",", "lines", "=", "5", ",", "_class", "=", "None", ")", ":", "if", "(", "not", "text", ")", ":", "text", "=", "current", ".", "messages", "[", "'NONE'", "]", "if", "(", "_class", "is", "None", ")", ":", "selector", "=", "'.text-body'", "_class", "=", "'text-body'", "else", ":", "selector", "=", "(", "'.%s'", "%", "_class", ")", "_class", "=", "(", "'text-body %s'", "%", "_class", ")", "if", "truncate", ":", "s3_trunk8", "(", "selector", "=", "selector", ",", "lines", "=", "lines", ")", "return", "DIV", "(", "text", ",", "_class", "=", "'text-body'", ")" ]
representation function for text fields with intelligent truncation and preserving whitespace .
train
false
52,232
@functools.lru_cache() def resolve_address(host, port): with support.transient_internet(host): return socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)[0][4]
[ "@", "functools", ".", "lru_cache", "(", ")", "def", "resolve_address", "(", "host", ",", "port", ")", ":", "with", "support", ".", "transient_internet", "(", "host", ")", ":", "return", "socket", ".", "getaddrinfo", "(", "host", ",", "port", ",", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "[", "0", "]", "[", "4", "]" ]
resolve a (host, port) pair to an address .
train
false
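The underlying resolution, without the lru_cache and transient_internet guard; localhost keeps this sketch runnable offline:

import socket

info = socket.getaddrinfo('localhost', 80, socket.AF_INET, socket.SOCK_STREAM)
# Element 4 of each entry is the (address, port) sockaddr the helper returns.
print(info[0][4])  # e.g. ('127.0.0.1', 80)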
52,233
@click.command('production') @click.argument('user') def setup_production(user): from bench.config.production_setup import setup_production setup_production(user=user)
[ "@", "click", ".", "command", "(", "'production'", ")", "@", "click", ".", "argument", "(", "'user'", ")", "def", "setup_production", "(", "user", ")", ":", "from", "bench", ".", "config", ".", "production_setup", "import", "setup_production", "setup_production", "(", "user", "=", "user", ")" ]
setup bench for production .
train
false
52,234
def norm_diff(A, norm=2, msg=True): if msg: print ('... computing %s norm ...' % norm) if (norm == 2): value = sp.sparse.linalg.svds(A, k=1, return_singular_vectors=False) elif sp.sparse.issparse(A): value = sp.sparse.linalg.norm(A, ord=norm) else: value = sp.linalg.norm(A, ord=norm) return value
[ "def", "norm_diff", "(", "A", ",", "norm", "=", "2", ",", "msg", "=", "True", ")", ":", "if", "msg", ":", "print", "(", "'... computing %s norm ...'", "%", "norm", ")", "if", "(", "norm", "==", "2", ")", ":", "value", "=", "sp", ".", "sparse", ".", "linalg", ".", "svds", "(", "A", ",", "k", "=", "1", ",", "return_singular_vectors", "=", "False", ")", "elif", "sp", ".", "sparse", ".", "issparse", "(", "A", ")", ":", "value", "=", "sp", ".", "sparse", ".", "linalg", ".", "norm", "(", "A", ",", "ord", "=", "norm", ")", "else", ":", "value", "=", "sp", ".", "linalg", ".", "norm", "(", "A", ",", "ord", "=", "norm", ")", "return", "value" ]
compute the requested norm of a matrix, typically the difference with the original matrix .
train
false
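The norm==2 branch uses the largest singular value, which equals the spectral norm; a sketch verifying that against a dense computation:

import numpy as np
import scipy.sparse
import scipy.sparse.linalg

A = scipy.sparse.random(50, 40, density=0.1, random_state=0)
sigma_max = scipy.sparse.linalg.svds(A, k=1, return_singular_vectors=False)
print(sigma_max[0])                        # spectral norm via svds
print(np.linalg.norm(A.toarray(), ord=2))  # dense check, same value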
52,235
def set_wake_on_modem(enabled): state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonmodem {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated(state, get_wake_on_modem)
[ "def", "set_wake_on_modem", "(", "enabled", ")", ":", "state", "=", "salt", ".", "utils", ".", "mac_utils", ".", "validate_enabled", "(", "enabled", ")", "cmd", "=", "'systemsetup -setwakeonmodem {0}'", ".", "format", "(", "state", ")", "salt", ".", "utils", ".", "mac_utils", ".", "execute_return_success", "(", "cmd", ")", "return", "salt", ".", "utils", ".", "mac_utils", ".", "confirm_updated", "(", "state", ",", "get_wake_on_modem", ")" ]
set whether or not the computer will wake from sleep when modem activity is detected .
train
true
52,236
def InitServerLog(persistor=None): persistor = (persistor or LogBatchPersistor()) LogBatchPersistor.SetInstance(persistor) server_log_handler = ServerLogHandler() server_log_handler.setLevel(logging.INFO) error_log_handler = ErrorLogHandler() error_log_handler.setLevel(logging.WARNING) logging.getLogger().addHandler(server_log_handler) logging.getLogger().addHandler(error_log_handler)
[ "def", "InitServerLog", "(", "persistor", "=", "None", ")", ":", "persistor", "=", "(", "persistor", "or", "LogBatchPersistor", "(", ")", ")", "LogBatchPersistor", ".", "SetInstance", "(", "persistor", ")", "server_log_handler", "=", "ServerLogHandler", "(", ")", "server_log_handler", ".", "setLevel", "(", "logging", ".", "INFO", ")", "error_log_handler", "=", "ErrorLogHandler", "(", ")", "error_log_handler", ".", "setLevel", "(", "logging", ".", "WARNING", ")", "logging", ".", "getLogger", "(", ")", ".", "addHandler", "(", "server_log_handler", ")", "logging", ".", "getLogger", "(", ")", ".", "addHandler", "(", "error_log_handler", ")" ]
establishes an instance of logbatchpersistor and attaches server and error log handlers to the root logger .
train
false
52,237
def static_url(path, request, **kw): if (not os.path.isabs(path)): if (':' not in path): package = caller_package() path = ('%s:%s' % (package.__name__, path)) return request.static_url(path, **kw)
[ "def", "static_url", "(", "path", ",", "request", ",", "**", "kw", ")", ":", "if", "(", "not", "os", ".", "path", ".", "isabs", "(", "path", ")", ")", ":", "if", "(", "':'", "not", "in", "path", ")", ":", "package", "=", "caller_package", "(", ")", "path", "=", "(", "'%s:%s'", "%", "(", "package", ".", "__name__", ",", "path", ")", ")", "return", "request", ".", "static_url", "(", "path", ",", "**", "kw", ")" ]
this is a backwards compatibility function .
train
false
52,238
def safe_peekline(handle): line = handle.peekline() if (not line): raise ValueError('Unexpected end of stream.') return line
[ "def", "safe_peekline", "(", "handle", ")", ":", "line", "=", "handle", ".", "peekline", "(", ")", "if", "(", "not", "line", ")", ":", "raise", "ValueError", "(", "'Unexpected end of stream.'", ")", "return", "line" ]
safe_peekline(handle) -> line ; peek at the next line in an undohandle and return it, raising ValueError at end of stream .
train
false
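peekline comes from Biopython's UndoHandle; the same look-ahead can be sketched on any seekable stream by reading a line and seeking back:

import io

def peekline(handle):
    # Read one line, then rewind so the next read still sees it.
    pos = handle.tell()
    line = handle.readline()
    handle.seek(pos)
    return line

h = io.StringIO('first\nsecond\n')
print(repr(peekline(h)))   # 'first\n'
print(repr(h.readline()))  # 'first\n' again -- the peek did not consume it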
52,239
def _compute_multivariate_pacf_from_coefficients(constrained, error_variance, order=None, k_endog=None): if (type(constrained) == list): order = len(constrained) k_endog = constrained[0].shape[0] else: (k_endog, order) = constrained.shape order //= k_endog _acovf = _compute_multivariate_acovf_from_coefficients autocovariances = [autocovariance.T for autocovariance in _acovf(constrained, error_variance, maxlag=order)] return _compute_multivariate_pacf_from_autocovariances(autocovariances)
[ "def", "_compute_multivariate_pacf_from_coefficients", "(", "constrained", ",", "error_variance", ",", "order", "=", "None", ",", "k_endog", "=", "None", ")", ":", "if", "(", "type", "(", "constrained", ")", "==", "list", ")", ":", "order", "=", "len", "(", "constrained", ")", "k_endog", "=", "constrained", "[", "0", "]", ".", "shape", "[", "0", "]", "else", ":", "(", "k_endog", ",", "order", ")", "=", "constrained", ".", "shape", "order", "//=", "k_endog", "_acovf", "=", "_compute_multivariate_acovf_from_coefficients", "autocovariances", "=", "[", "autocovariance", ".", "T", "for", "autocovariance", "in", "_acovf", "(", "constrained", ",", "error_variance", ",", "maxlag", "=", "order", ")", "]", "return", "_compute_multivariate_pacf_from_autocovariances", "(", "autocovariances", ")" ]
compute multivariate partial autocorrelation matrices from vector autoregression coefficient matrices and an error variance .
train
false
52,240
def embed64(filename=None, file=None, data=None, extension='image/gif'): if (filename and os.path.exists(filename)): fp = open(filename, 'rb') data = fp.read() fp.close() data = base64.b64encode(data) return ('data:%s;base64,%s' % (extension, data))
[ "def", "embed64", "(", "filename", "=", "None", ",", "file", "=", "None", ",", "data", "=", "None", ",", "extension", "=", "'image/gif'", ")", ":", "if", "(", "filename", "and", "os", ".", "path", ".", "exists", "(", "file", ")", ")", ":", "fp", "=", "open", "(", "filename", ",", "'rb'", ")", "data", "=", "fp", ".", "read", "(", ")", "fp", ".", "close", "(", ")", "data", "=", "base64", ".", "b64encode", "(", "data", ")", "return", "(", "'data:%s;base64,%s'", "%", "(", "extension", ",", "data", ")", ")" ]
helper to encode the provided file or data into a base64 data uri .
train
false
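A sketch of the data-URI assembly on its own; the bytes here are just the GIF magic header, not a real image:

import base64

data = b'GIF89a'  # GIF magic bytes, used only as sample data
encoded = base64.b64encode(data).decode('ascii')
print('data:%s;base64,%s' % ('image/gif', encoded))  # data:image/gif;base64,R0lGODlh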
52,241
def parse_address_family(address_mapper, path, build_files_content): if (not build_files_content.dependencies): raise ResolveError(u'Directory "{}" does not contain build files.'.format(path)) address_maps = [] for filecontent_product in build_files_content.dependencies: address_maps.append(AddressMap.parse(filecontent_product.path, filecontent_product.content, address_mapper.symbol_table_cls, address_mapper.parser_cls, address_mapper.exclude_patterns)) return AddressFamily.create(path.path, address_maps)
[ "def", "parse_address_family", "(", "address_mapper", ",", "path", ",", "build_files_content", ")", ":", "if", "(", "not", "build_files_content", ".", "dependencies", ")", ":", "raise", "ResolveError", "(", "u'Directory \"{}\" does not contain build files.'", ".", "format", "(", "path", ")", ")", "address_maps", "=", "[", "]", "for", "filecontent_product", "in", "build_files_content", ".", "dependencies", ":", "address_maps", ".", "append", "(", "AddressMap", ".", "parse", "(", "filecontent_product", ".", "path", ",", "filecontent_product", ".", "content", ",", "address_mapper", ".", "symbol_table_cls", ",", "address_mapper", ".", "parser_cls", ",", "address_mapper", ".", "exclude_patterns", ")", ")", "return", "AddressFamily", ".", "create", "(", "path", ".", "path", ",", "address_maps", ")" ]
given the contents of the build files in one directory, parse them into an address family .
train
false
52,242
def dup_quo(f, g, K): return dup_div(f, g, K)[0]
[ "def", "dup_quo", "(", "f", ",", "g", ",", "K", ")", ":", "return", "dup_div", "(", "f", ",", "g", ",", "K", ")", "[", "0", "]" ]
returns exact polynomial quotient in k[x] .
train
false
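Usage sketch against SymPy's dense polynomial representation (highest-degree coefficient first), assuming a SymPy install where dup_quo lives in sympy.polys.densearith:

from sympy.polys.densearith import dup_quo
from sympy.polys.domains import ZZ

f = [ZZ(1), ZZ(0), ZZ(-1)]  # x**2 - 1
g = [ZZ(1), ZZ(-1)]         # x - 1
print(dup_quo(f, g, ZZ))    # [1, 1], i.e. x + 1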
52,243
def all_pairs_shortest_path_length(G, cutoff=None): length = single_source_shortest_path_length for n in G: (yield (n, dict(length(G, n, cutoff=cutoff))))
[ "def", "all_pairs_shortest_path_length", "(", "G", ",", "cutoff", "=", "None", ")", ":", "length", "=", "single_source_shortest_path_length", "for", "n", "in", "G", ":", "(", "yield", "(", "n", ",", "dict", "(", "length", "(", "G", ",", "n", ",", "cutoff", "=", "cutoff", ")", ")", ")", ")" ]
computes the shortest path lengths between all nodes in g .
train
false
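Usage sketch with networkx; the generator yields one (node, lengths) pair per node, and cutoff bounds the search depth:

import networkx as nx

G = nx.path_graph(4)  # nodes 0-1-2-3 in a line
for node, lengths in nx.all_pairs_shortest_path_length(G, cutoff=2):
    print(node, lengths)
# 0 {0: 0, 1: 1, 2: 2}
# 1 {1: 0, 0: 1, 2: 1, 3: 2}
# ...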
52,244
@pytest.mark.parametrize('uri, expected', (('http://example.com/fiz?buz=%25ppicture', 'http://example.com/fiz?buz=%25ppicture'), ('http://example.com/fiz?buz=%ppicture', 'http://example.com/fiz?buz=%25ppicture'))) def test_requote_uri_with_unquoted_percents(uri, expected): assert (requote_uri(uri) == expected)
[ "@", "pytest", ".", "mark", ".", "parametrize", "(", "'uri, expected'", ",", "(", "(", "'http://example.com/fiz?buz=%25ppicture'", ",", "'http://example.com/fiz?buz=%25ppicture'", ")", ",", "(", "'http://example.com/fiz?buz=%ppicture'", ",", "'http://example.com/fiz?buz=%25ppicture'", ")", ")", ")", "def", "test_requote_uri_with_unquoted_percents", "(", "uri", ",", "expected", ")", ":", "assert", "(", "requote_uri", "(", "uri", ")", "==", "expected", ")" ]
see: URL .
train
false
52,245
def _parse_args(): parser = optparse.OptionParser() parser.add_option('--user', dest='user_install', action='store_true', default=False, help='install in user site package (requires Python 2.6 or later)') parser.add_option('--download-base', dest='download_base', metavar='URL', default=DEFAULT_URL, help='alternative URL from where to download the setuptools package') parser.add_option('--insecure', dest='downloader_factory', action='store_const', const=(lambda : download_file_insecure), default=get_best_downloader, help='Use internal, non-validating downloader') parser.add_option('--version', help='Specify which version to download', default=DEFAULT_VERSION) (options, args) = parser.parse_args() return options
[ "def", "_parse_args", "(", ")", ":", "parser", "=", "optparse", ".", "OptionParser", "(", ")", "parser", ".", "add_option", "(", "'--user'", ",", "dest", "=", "'user_install'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'install in user site package (requires Python 2.6 or later)'", ")", "parser", ".", "add_option", "(", "'--download-base'", ",", "dest", "=", "'download_base'", ",", "metavar", "=", "'URL'", ",", "default", "=", "DEFAULT_URL", ",", "help", "=", "'alternative URL from where to download the setuptools package'", ")", "parser", ".", "add_option", "(", "'--insecure'", ",", "dest", "=", "'downloader_factory'", ",", "action", "=", "'store_const'", ",", "const", "=", "(", "lambda", ":", "download_file_insecure", ")", ",", "default", "=", "get_best_downloader", ",", "help", "=", "'Use internal, non-validating downloader'", ")", "parser", ".", "add_option", "(", "'--version'", ",", "help", "=", "'Specify which version to download'", ",", "default", "=", "DEFAULT_VERSION", ")", "(", "options", ",", "args", ")", "=", "parser", ".", "parse_args", "(", ")", "return", "options" ]
parse the command line for options .
train
true
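optparse parsing can be exercised directly by handing parse_args an explicit argv list; a reduced sketch with just the --user flag from the snippet:

import optparse

parser = optparse.OptionParser()
parser.add_option('--user', dest='user_install', action='store_true', default=False)
options, args = parser.parse_args(['--user'])
print(options.user_install)  # True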
52,247
def test_scenario_with_table_and_no_step_fails(): assert_raises(LettuceSyntaxError, Scenario.from_string, SCENARIO_FAILED)
[ "def", "test_scenario_with_table_and_no_step_fails", "(", ")", ":", "assert_raises", "(", "LettuceSyntaxError", ",", "Scenario", ".", "from_string", ",", "SCENARIO_FAILED", ")" ]
a step table immediately after the scenario line, with no step, fails with a lettuce syntax error .
train
false
52,248
def replace_links(container, link_map, frag_map=(lambda name, frag: frag), replace_in_opf=False): for (name, media_type) in container.mime_map.iteritems(): if ((name == container.opf_name) and (not replace_in_opf)): continue repl = LinkReplacer(name, container, link_map, frag_map) container.replace_links(name, repl)
[ "def", "replace_links", "(", "container", ",", "link_map", ",", "frag_map", "=", "(", "lambda", "name", ",", "frag", ":", "frag", ")", ",", "replace_in_opf", "=", "False", ")", ":", "for", "(", "name", ",", "media_type", ")", "in", "container", ".", "mime_map", ".", "iteritems", "(", ")", ":", "if", "(", "(", "name", "==", "container", ".", "opf_name", ")", "and", "(", "not", "replace_in_opf", ")", ")", ":", "continue", "repl", "=", "LinkReplacer", "(", "name", ",", "container", ",", "link_map", ",", "frag_map", ")", "container", ".", "replace_links", "(", "name", ",", "repl", ")" ]
replace links to files in the container .
train
false
52,250
def monomial_pow(A, n): return tuple([(a * n) for a in A])
[ "def", "monomial_pow", "(", "A", ",", "n", ")", ":", "return", "tuple", "(", "[", "(", "a", "*", "n", ")", "for", "a", "in", "A", "]", ")" ]
return the n-th power of the monomial .
train
false
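Monomials here are exponent tuples, so raising to the n-th power just scales every exponent; a quick sketch:

def monomial_pow(A, n):
    return tuple(a * n for a in A)

# (x**2 * y) ** 3 == x**6 * y**3 in exponent-tuple form:
print(monomial_pow((2, 1), 3))  # (6, 3)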
52,251
def close_pymongo_connection(app): if ('pymongo' not in app.extensions): return del app.extensions['pymongo'] del app.media
[ "def", "close_pymongo_connection", "(", "app", ")", ":", "if", "(", "'pymongo'", "not", "in", "app", ".", "extensions", ")", ":", "return", "del", "app", ".", "extensions", "[", "'pymongo'", "]", "del", "app", ".", "media" ]
close the pymongo connection in an eve/flask app .
train
false
52,254
def get_image_diff_json(image_id): diff_json = get_image_diff_cache(image_id) if diff_json: return diff_json ancestry_path = store.image_ancestry_path(image_id) ancestry = store.get_json(ancestry_path)[1:] files = json.loads(get_image_files_json(image_id)) info_map = get_file_info_map(files) deleted = {} changed = {} created = {} for id in ancestry: ancestor_files = json.loads(get_image_files_json(id)) ancestor_map = get_file_info_map(ancestor_files) for (filename, info) in info_map.items(): ancestor_info = ancestor_map.get(filename) if info[1]: deleted[filename] = info del info_map[filename] elif ancestor_info: if ancestor_info[1]: created[filename] = info else: changed[filename] = info del info_map[filename] created.update(info_map) diff_json = json.dumps({'deleted': deleted, 'changed': changed, 'created': created}) set_image_diff_cache(image_id, diff_json) return diff_json
[ "def", "get_image_diff_json", "(", "image_id", ")", ":", "diff_json", "=", "get_image_diff_cache", "(", "image_id", ")", "if", "diff_json", ":", "return", "diff_json", "ancestry_path", "=", "store", ".", "image_ancestry_path", "(", "image_id", ")", "ancestry", "=", "store", ".", "get_json", "(", "ancestry_path", ")", "[", "1", ":", "]", "files", "=", "json", ".", "loads", "(", "get_image_files_json", "(", "image_id", ")", ")", "info_map", "=", "get_file_info_map", "(", "files", ")", "deleted", "=", "{", "}", "changed", "=", "{", "}", "created", "=", "{", "}", "for", "id", "in", "ancestry", ":", "ancestor_files", "=", "json", ".", "loads", "(", "get_image_files_json", "(", "id", ")", ")", "ancestor_map", "=", "get_file_info_map", "(", "ancestor_files", ")", "for", "(", "filename", ",", "info", ")", "in", "info_map", ".", "items", "(", ")", ":", "ancestor_info", "=", "ancestor_map", ".", "get", "(", "filename", ")", "if", "info", "[", "1", "]", ":", "deleted", "[", "filename", "]", "=", "info", "del", "info_map", "[", "filename", "]", "elif", "ancestor_info", ":", "if", "ancestor_info", "[", "1", "]", ":", "created", "[", "filename", "]", "=", "info", "else", ":", "changed", "[", "filename", "]", "=", "info", "del", "info_map", "[", "filename", "]", "created", ".", "update", "(", "info_map", ")", "diff_json", "=", "json", ".", "dumps", "(", "{", "'deleted'", ":", "deleted", ",", "'changed'", ":", "changed", ",", "'created'", ":", "created", "}", ")", "set_image_diff_cache", "(", "image_id", ",", "diff_json", ")", "return", "diff_json" ]
get json describing file differences in a layer ; calculate the diff information for the files contained within the layer .
train
false
52,255
def estimate_transform(ttype, src, dst, **kwargs): ttype = ttype.lower() if (ttype not in TRANSFORMS): raise ValueError(("the transformation type '%s' is notimplemented" % ttype)) tform = TRANSFORMS[ttype]() tform.estimate(src, dst, **kwargs) return tform
[ "def", "estimate_transform", "(", "ttype", ",", "src", ",", "dst", ",", "**", "kwargs", ")", ":", "ttype", "=", "ttype", ".", "lower", "(", ")", "if", "(", "ttype", "not", "in", "TRANSFORMS", ")", ":", "raise", "ValueError", "(", "(", "\"the transformation type '%s' is notimplemented\"", "%", "ttype", ")", ")", "tform", "=", "TRANSFORMS", "[", "ttype", "]", "(", ")", "tform", ".", "estimate", "(", "src", ",", "dst", ",", "**", "kwargs", ")", "return", "tform" ]
estimate 2d geometric transformation parameters .
train
false
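Usage sketch with scikit-image, recovering a known scale-and-shift from matching point sets:

import numpy as np
from skimage.transform import estimate_transform

src = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
dst = src * 2 + 3  # a scaled, translated copy of src

tform = estimate_transform('similarity', src, dst)
print(np.allclose(tform(src), dst))  # True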
52,256
def backtracking(A, g, x, p, theta, p_dot_g, lb, ub): alpha = 1 while True: (x_new, _) = reflective_transformation((x + (alpha * p)), lb, ub) step = (x_new - x) cost_change = (- evaluate_quadratic(A, g, step)) if (cost_change > (((-0.1) * alpha) * p_dot_g)): break active = find_active_constraints(x_new, lb, ub) if np.any((active != 0)): (x_new, _) = reflective_transformation((x + ((theta * alpha) * p)), lb, ub) x_new = make_strictly_feasible(x_new, lb, ub, rstep=0) step = (x_new - x) cost_change = (- evaluate_quadratic(A, g, step)) return (x, step, cost_change)
[ "def", "backtracking", "(", "A", ",", "g", ",", "x", ",", "p", ",", "theta", ",", "p_dot_g", ",", "lb", ",", "ub", ")", ":", "alpha", "=", "1", "while", "True", ":", "(", "x_new", ",", "_", ")", "=", "reflective_transformation", "(", "(", "x", "+", "(", "alpha", "*", "p", ")", ")", ",", "lb", ",", "ub", ")", "step", "=", "(", "x_new", "-", "x", ")", "cost_change", "=", "(", "-", "evaluate_quadratic", "(", "A", ",", "g", ",", "step", ")", ")", "if", "(", "cost_change", ">", "(", "(", "(", "-", "0.1", ")", "*", "alpha", ")", "*", "p_dot_g", ")", ")", ":", "break", "active", "=", "find_active_constraints", "(", "x_new", ",", "lb", ",", "ub", ")", "if", "np", ".", "any", "(", "(", "active", "!=", "0", ")", ")", ":", "(", "x_new", ",", "_", ")", "=", "reflective_transformation", "(", "(", "x", "+", "(", "(", "theta", "*", "alpha", ")", "*", "p", ")", ")", ",", "lb", ",", "ub", ")", "x_new", "=", "make_strictly_feasible", "(", "x_new", ",", "lb", ",", "ub", ",", "rstep", "=", "0", ")", "step", "=", "(", "x_new", "-", "x", ")", "cost_change", "=", "(", "-", "evaluate_quadratic", "(", "A", ",", "g", ",", "step", ")", ")", "return", "(", "x", ",", "step", ",", "cost_change", ")" ]
find an appropriate step size using backtracking line search .
train
false
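The core idea is Armijo backtracking: shrink the step until the decrease beats a fraction of the predicted linear decrease. A generic sketch, without the bound handling and reflective transformation of the snippet:

import numpy as np

def armijo_backtracking(f, x, p, grad_dot_p, c=0.1, shrink=0.5):
    # Halve alpha until f(x + alpha*p) <= f(x) + c*alpha*<grad, p>.
    alpha = 1.0
    f0 = f(x)
    while f(x + alpha * p) > f0 + c * alpha * grad_dot_p:
        alpha *= shrink
    return alpha

f = lambda x: float(np.dot(x, x))
x = np.array([2.0, 0.0])
p = np.array([-1.0, 0.0])             # a descent direction
grad_dot_p = float(np.dot(2 * x, p))  # gradient of x.x is 2x
print(armijo_backtracking(f, x, p, grad_dot_p))  # 1.0 here; the full step suffices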
52,257
def mod(a, b): return (a % b)
[ "def", "mod", "(", "a", ",", "b", ")", ":", "return", "(", "a", "%", "b", ")" ]
same as a % b .
train
false
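Worth noting that Python's % takes the sign of the divisor, unlike C's remainder; a two-line sketch:

def mod(a, b):
    return a % b

print(mod(7, 3))   # 1
print(mod(-7, 3))  # 2 -- result follows the divisor's sign in Python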
52,258
def pyfunc(): return 'pyfunc'
[ "def", "pyfunc", "(", ")", ":", "return", "'pyfunc'" ]
some pure python tests .
train
false