Columns:
id_within_dataset: int64 (values 1 to 55.5k)
snippet: string (lengths 19 to 14.2k)
tokens: list (lengths 6 to 1.63k)
nl: string (lengths 6 to 352)
split_within_dataset: string (1 class)
is_duplicated: bool (2 classes)
4,168
def split_on_groups(string, groups):
    if (not groups):
        return [string]
    boundaries = sorted(set(functools.reduce((lambda l, x: (l + list(x))), groups, [])))
    if (boundaries[0] != 0):
        boundaries.insert(0, 0)
    if (boundaries[(-1)] != len(string)):
        boundaries.append(len(string))
    groups = [string[start:end] for (start, end) in zip(boundaries[:(-1)], boundaries[1:])]
    return [g for g in groups if g]
[ "def", "split_on_groups", "(", "string", ",", "groups", ")", ":", "if", "(", "not", "groups", ")", ":", "return", "[", "string", "]", "boundaries", "=", "sorted", "(", "set", "(", "functools", ".", "reduce", "(", "(", "lambda", "l", ",", "x", ":", "(", "l", "+", "list", "(", "x", ")", ")", ")", ",", "groups", ",", "[", "]", ")", ")", ")", "if", "(", "boundaries", "[", "0", "]", "!=", "0", ")", ":", "boundaries", ".", "insert", "(", "0", ",", "0", ")", "if", "(", "boundaries", "[", "(", "-", "1", ")", "]", "!=", "len", "(", "string", ")", ")", ":", "boundaries", ".", "append", "(", "len", "(", "string", ")", ")", "groups", "=", "[", "string", "[", "start", ":", "end", "]", "for", "(", "start", ",", "end", ")", "in", "zip", "(", "boundaries", "[", ":", "(", "-", "1", ")", "]", ",", "boundaries", "[", "1", ":", "]", ")", "]", "return", "[", "g", "for", "g", "in", "groups", "if", "g", "]" ]
split the given string using the different known groups for boundaries .
train
false
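A quick sanity check of split_on_groups (a minimal sketch, assuming the function above is in scope together with functools, which it relies on):

import functools  # split_on_groups uses functools.reduce

# each group is a (start, end) index pair; the endpoints become cut points
print(split_on_groups('abcdef', [(1, 3)]))  # -> ['a', 'bc', 'def'] (cuts at 0, 1, 3, 6)
print(split_on_groups('abcdef', []))        # -> ['abcdef'] (no groups: string returned whole)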
4,169
def _GetUserSecret(user):
    secret = secrets.GetSecret(_SecretName(user))
    if (not secret):
        raise LookupError('no secret has been created for {0}'.format(user))
    return secret
[ "def", "_GetUserSecret", "(", "user", ")", ":", "secret", "=", "secrets", ".", "GetSecret", "(", "_SecretName", "(", "user", ")", ")", "if", "(", "not", "secret", ")", ":", "raise", "LookupError", "(", "'no secret has been created for {0}'", ".", "format", "(", "user", ")", ")", "return", "secret" ]
returns the user secret by consulting the secrets database .
train
false
4,173
def find_hessian_diag(point, vars=None, model=None):
    model = modelcontext(model)
    H = model.fastfn(hessian_diag(model.logpt, vars))
    return H(Point(point, model=model))
[ "def", "find_hessian_diag", "(", "point", ",", "vars", "=", "None", ",", "model", "=", "None", ")", ":", "model", "=", "modelcontext", "(", "model", ")", "H", "=", "model", ".", "fastfn", "(", "hessian_diag", "(", "model", ".", "logpt", ",", "vars", ")", ")", "return", "H", "(", "Point", "(", "point", ",", "model", "=", "model", ")", ")" ]
returns hessian of logp at the point passed .
train
false
4,174
def neg(a):
    return (- a)
[ "def", "neg", "(", "a", ")", ":", "return", "(", "-", "a", ")" ]
alias for -min_elemwise{x .
train
false
4,175
@with_setup(prepare_stdout, registry.clear)
def test_jsonreport_output_with_one_error():
    with check_jsonreport(u'error_traceback'):
        runner = Runner(feature_name(u'error_traceback'), enable_jsonreport=True)
        runner.run()
[ "@", "with_setup", "(", "prepare_stdout", ",", "registry", ".", "clear", ")", "def", "test_jsonreport_output_with_one_error", "(", ")", ":", "with", "check_jsonreport", "(", "u'error_traceback'", ")", ":", "runner", "=", "Runner", "(", "feature_name", "(", "u'error_traceback'", ")", ",", "enable_jsonreport", "=", "True", ")", "runner", ".", "run", "(", ")" ]
test jsonreport output with one errors .
train
false
4,178
def partition_entropy_by(inputs, attribute):
    partitions = partition_by(inputs, attribute)
    return partition_entropy(partitions.values())
[ "def", "partition_entropy_by", "(", "inputs", ",", "attribute", ")", ":", "partitions", "=", "partition_by", "(", "inputs", ",", "attribute", ")", "return", "partition_entropy", "(", "partitions", ".", "values", "(", ")", ")" ]
computes the entropy corresponding to the given partition .
train
false
4,180
def imports_on_separate_lines(logical_line):
    line = logical_line
    if line.startswith('import '):
        found = line.find(',')
        if (((-1) < found) and (';' not in line[:found])):
            (yield (found, 'E401 multiple imports on one line'))
[ "def", "imports_on_separate_lines", "(", "logical_line", ")", ":", "line", "=", "logical_line", "if", "line", ".", "startswith", "(", "'import '", ")", ":", "found", "=", "line", ".", "find", "(", "','", ")", "if", "(", "(", "(", "-", "1", ")", "<", "found", ")", "and", "(", "';'", "not", "in", "line", "[", ":", "found", "]", ")", ")", ":", "(", "yield", "(", "found", ",", "'E401 multiple imports on one line'", ")", ")" ]
imports should usually be on separate lines .
train
true
4,181
def modified_time(file_path):
    if os.path.exists(file_path):
        return os.path.getmtime(file_path)
    else:
        return 0.0
[ "def", "modified_time", "(", "file_path", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "file_path", ")", ":", "return", "os", ".", "path", ".", "getmtime", "(", "file_path", ")", "else", ":", "return", "0.0" ]
return the modified time of the supplied file .
train
false
4,183
def webapi_deprecated(deprecated_in, force_error_http_status=None, default_api_format=None, encoders=[]):
    def _dec(view_func):
        def _view(*args, **kwargs):
            if default_api_format:
                request = args[0]
                assert isinstance(request, HttpRequest)
                method_args = getattr(request, request.method, None)
                if (method_args and (u'api_format' not in method_args)):
                    method_args = method_args.copy()
                    method_args[u'api_format'] = default_api_format
                    setattr(request, request.method, method_args)
            response = view_func(*args, **kwargs)
            if isinstance(response, WebAPIResponse):
                response.encoders = encoders
            if isinstance(response, WebAPIResponseError):
                response.api_data[u'deprecated'] = {u'in_version': deprecated_in}
            if (force_error_http_status and isinstance(response, WebAPIResponseError)):
                response.status_code = force_error_http_status
            return response
        return _view
    return _dec
[ "def", "webapi_deprecated", "(", "deprecated_in", ",", "force_error_http_status", "=", "None", ",", "default_api_format", "=", "None", ",", "encoders", "=", "[", "]", ")", ":", "def", "_dec", "(", "view_func", ")", ":", "def", "_view", "(", "*", "args", ",", "**", "kwargs", ")", ":", "if", "default_api_format", ":", "request", "=", "args", "[", "0", "]", "assert", "isinstance", "(", "request", ",", "HttpRequest", ")", "method_args", "=", "getattr", "(", "request", ",", "request", ".", "method", ",", "None", ")", "if", "(", "method_args", "and", "(", "u'api_format'", "not", "in", "method_args", ")", ")", ":", "method_args", "=", "method_args", ".", "copy", "(", ")", "method_args", "[", "u'api_format'", "]", "=", "default_api_format", "setattr", "(", "request", ",", "request", ".", "method", ",", "method_args", ")", "response", "=", "view_func", "(", "*", "args", ",", "**", "kwargs", ")", "if", "isinstance", "(", "response", ",", "WebAPIResponse", ")", ":", "response", ".", "encoders", "=", "encoders", "if", "isinstance", "(", "response", ",", "WebAPIResponseError", ")", ":", "response", ".", "api_data", "[", "u'deprecated'", "]", "=", "{", "u'in_version'", ":", "deprecated_in", "}", "if", "(", "force_error_http_status", "and", "isinstance", "(", "response", ",", "WebAPIResponseError", ")", ")", ":", "response", ".", "status_code", "=", "force_error_http_status", "return", "response", "return", "_view", "return", "_dec" ]
marks an api handler as deprecated .
train
false
4,184
def primary_key_names(model):
    mapper = sqlalchemy_inspect(model)
    return [column.name for column in mapper.primary_key]
[ "def", "primary_key_names", "(", "model", ")", ":", "mapper", "=", "sqlalchemy_inspect", "(", "model", ")", "return", "[", "column", ".", "name", "for", "column", "in", "mapper", ".", "primary_key", "]" ]
returns a list of all the primary keys for a model .
train
false
4,185
def stem(word, cached=True, history=10000, **kwargs):
    stem = word.lower()
    if (cached and (stem in cache)):
        return case_sensitive(cache[stem], word)
    if (cached and (len(cache) > history)):
        cache.clear()
    if (len(stem) <= 2):
        return case_sensitive(stem, word)
    if (stem in exceptions):
        return case_sensitive(exceptions[stem], word)
    if (stem in uninflected):
        return case_sensitive(stem, word)
    stem = upper_consonant_y(stem)
    for f in (step_1a, step_1b, step_1c, step_2, step_3, step_4, step_5a, step_5b):
        stem = f(stem)
    stem = stem.lower()
    stem = case_sensitive(stem, word)
    if cached:
        cache[word.lower()] = stem.lower()
    return stem
[ "def", "stem", "(", "word", ",", "cached", "=", "True", ",", "history", "=", "10000", ",", "**", "kwargs", ")", ":", "stem", "=", "word", ".", "lower", "(", ")", "if", "(", "cached", "and", "(", "stem", "in", "cache", ")", ")", ":", "return", "case_sensitive", "(", "cache", "[", "stem", "]", ",", "word", ")", "if", "(", "cached", "and", "(", "len", "(", "cache", ")", ">", "history", ")", ")", ":", "cache", ".", "clear", "(", ")", "if", "(", "len", "(", "stem", ")", "<=", "2", ")", ":", "return", "case_sensitive", "(", "stem", ",", "word", ")", "if", "(", "stem", "in", "exceptions", ")", ":", "return", "case_sensitive", "(", "exceptions", "[", "stem", "]", ",", "word", ")", "if", "(", "stem", "in", "uninflected", ")", ":", "return", "case_sensitive", "(", "stem", ",", "word", ")", "stem", "=", "upper_consonant_y", "(", "stem", ")", "for", "f", "in", "(", "step_1a", ",", "step_1b", ",", "step_1c", ",", "step_2", ",", "step_3", ",", "step_4", ",", "step_5a", ",", "step_5b", ")", ":", "stem", "=", "f", "(", "stem", ")", "stem", "=", "stem", ".", "lower", "(", ")", "stem", "=", "case_sensitive", "(", "stem", ",", "word", ")", "if", "cached", ":", "cache", "[", "word", ".", "lower", "(", ")", "]", "=", "stem", ".", "lower", "(", ")", "return", "stem" ]
returns the stemmed version of the argument string .
train
false
4,186
def unit_propagate(clauses, symbol):
    output = []
    for c in clauses:
        if (c.func != Or):
            output.append(c)
            continue
        for arg in c.args:
            if (arg == (~ symbol)):
                output.append(Or(*[x for x in c.args if (x != (~ symbol))]))
                break
            if (arg == symbol):
                break
        else:
            output.append(c)
    return output
[ "def", "unit_propagate", "(", "clauses", ",", "symbol", ")", ":", "output", "=", "[", "]", "for", "c", "in", "clauses", ":", "if", "(", "c", ".", "func", "!=", "Or", ")", ":", "output", ".", "append", "(", "c", ")", "continue", "for", "arg", "in", "c", ".", "args", ":", "if", "(", "arg", "==", "(", "~", "symbol", ")", ")", ":", "output", ".", "append", "(", "Or", "(", "*", "[", "x", "for", "x", "in", "c", ".", "args", "if", "(", "x", "!=", "(", "~", "symbol", ")", ")", "]", ")", ")", "break", "if", "(", "arg", "==", "symbol", ")", ":", "break", "else", ":", "output", ".", "append", "(", "c", ")", "return", "output" ]
returns an equivalent set of clauses if a set of clauses contains the unit clause l .
train
false
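For example, propagating the unit literal A drops every clause that A satisfies and strips ~A from the rest (a sketch assuming SymPy's Or and symbols, consistent with the DPLL-style code above):

from sympy import symbols, Or

A, B, C, D = symbols('A B C D')
clauses = [Or(A, B), Or(~A, C), D]
print(unit_propagate(clauses, A))
# -> [C, D]: Or(A, B) is satisfied and removed, and ~A is stripped from Or(~A, C)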
4,188
def extract_components_from_tuple(repository_components_tuple):
    toolshed = repository_components_tuple[0]
    name = repository_components_tuple[1]
    owner = repository_components_tuple[2]
    changeset_revision = repository_components_tuple[3]
    components_list = [toolshed, name, owner, changeset_revision]
    if (len(repository_components_tuple) == 5):
        (toolshed, name, owner, changeset_revision, prior_installation_required) = repository_components_tuple
        components_list = [toolshed, name, owner, changeset_revision, prior_installation_required]
    elif (len(repository_components_tuple) == 6):
        (toolshed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td) = repository_components_tuple
        components_list = [toolshed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td]
    return components_list
[ "def", "extract_components_from_tuple", "(", "repository_components_tuple", ")", ":", "toolshed", "=", "repository_components_tuple", "[", "0", "]", "name", "=", "repository_components_tuple", "[", "1", "]", "owner", "=", "repository_components_tuple", "[", "2", "]", "changeset_revision", "=", "repository_components_tuple", "[", "3", "]", "components_list", "=", "[", "toolshed", ",", "name", ",", "owner", ",", "changeset_revision", "]", "if", "(", "len", "(", "repository_components_tuple", ")", "==", "5", ")", ":", "(", "toolshed", ",", "name", ",", "owner", ",", "changeset_revision", ",", "prior_installation_required", ")", "=", "repository_components_tuple", "components_list", "=", "[", "toolshed", ",", "name", ",", "owner", ",", "changeset_revision", ",", "prior_installation_required", "]", "elif", "(", "len", "(", "repository_components_tuple", ")", "==", "6", ")", ":", "(", "toolshed", ",", "name", ",", "owner", ",", "changeset_revision", ",", "prior_installation_required", ",", "only_if_compiling_contained_td", ")", "=", "repository_components_tuple", "components_list", "=", "[", "toolshed", ",", "name", ",", "owner", ",", "changeset_revision", ",", "prior_installation_required", ",", "only_if_compiling_contained_td", "]", "return", "components_list" ]
extract the repository components from the provided tuple in a backward-compatible manner .
train
false
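Behaviour on the extended tuple shapes, shown with made-up component values (pure Python, so this runs against the snippet as-is):

four = ('toolshed.g2.bx.psu.edu', 'package_x', 'devteam', 'abc123')
print(extract_components_from_tuple(four))
# -> ['toolshed.g2.bx.psu.edu', 'package_x', 'devteam', 'abc123']
five = four + ('True',)
print(extract_components_from_tuple(five))
# -> the same four fields plus prior_installation_required ('True')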
4,189
def bin2long(text, endian):
    assert (endian in (LITTLE_ENDIAN, BIG_ENDIAN))
    bits = [(ord(character) - ord('0')) for character in text if (character in '01')]
    assert (len(bits) != 0)
    if (endian is not BIG_ENDIAN):
        bits = reversed(bits)
    value = 0
    for bit in bits:
        value *= 2
        value += bit
    return value
[ "def", "bin2long", "(", "text", ",", "endian", ")", ":", "assert", "(", "endian", "in", "(", "LITTLE_ENDIAN", ",", "BIG_ENDIAN", ")", ")", "bits", "=", "[", "(", "ord", "(", "character", ")", "-", "ord", "(", "'0'", ")", ")", "for", "character", "in", "text", "if", "(", "character", "in", "'01'", ")", "]", "assert", "(", "len", "(", "bits", ")", "!=", "0", ")", "if", "(", "endian", "is", "not", "BIG_ENDIAN", ")", ":", "bits", "=", "reversed", "(", "bits", ")", "value", "=", "0", "for", "bit", "in", "bits", ":", "value", "*=", "2", "value", "+=", "bit", "return", "value" ]
convert binary number written in a string into an integer .
train
false
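The endian flag decides whether the first character is the most or least significant bit, and non-'01' characters are simply skipped (a sketch; LITTLE_ENDIAN and BIG_ENDIAN are module constants in the source, so the values below are placeholders):

LITTLE_ENDIAN, BIG_ENDIAN = 'little', 'big'  # placeholder sentinels

print(bin2long('110', BIG_ENDIAN))     # 0b110 -> 6
print(bin2long('110', LITTLE_ENDIAN))  # bits reversed: 0b011 -> 3
print(bin2long('1 0 1', BIG_ENDIAN))   # spaces are ignored -> 5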
4,190
def absdir(path):
    if (not os.path.isabs(path)):
        path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(), path)))
    if ((path is None) or (not os.path.isdir(path))):
        return None
    return path
[ "def", "absdir", "(", "path", ")", ":", "if", "(", "not", "os", ".", "path", ".", "isabs", "(", "path", ")", ")", ":", "path", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "path", ")", ")", ")", "if", "(", "(", "path", "is", "None", ")", "or", "(", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ")", ")", ":", "return", "None", "return", "path" ]
return absolute .
train
true
4,191
def initialize_scheduler():
    from headphones import updater, searcher, librarysync, postprocessor, torrentfinished
    with SCHED_LOCK:
        start_jobs = (not len(SCHED.get_jobs()))
        minutes = CONFIG.SEARCH_INTERVAL
        schedule_job(searcher.searchforalbum, 'Search for Wanted', hours=0, minutes=minutes)
        minutes = CONFIG.DOWNLOAD_SCAN_INTERVAL
        schedule_job(postprocessor.checkFolder, 'Download Scan', hours=0, minutes=minutes)
        hours = CONFIG.LIBRARYSCAN_INTERVAL
        schedule_job(librarysync.libraryScan, 'Library Scan', hours=hours, minutes=0)
        hours = CONFIG.UPDATE_DB_INTERVAL
        schedule_job(updater.dbUpdate, 'MusicBrainz Update', hours=hours, minutes=0)
        if CONFIG.CHECK_GITHUB:
            if CONFIG.CHECK_GITHUB_INTERVAL:
                minutes = CONFIG.CHECK_GITHUB_INTERVAL
            else:
                minutes = 0
            schedule_job(versioncheck.checkGithub, 'Check GitHub for updates', hours=0, minutes=minutes)
        minutes = CONFIG.TORRENT_REMOVAL_INTERVAL
        schedule_job(torrentfinished.checkTorrentFinished, 'Torrent removal check', hours=0, minutes=minutes)
        if (start_jobs and len(SCHED.get_jobs())):
            try:
                SCHED.start()
            except Exception as e:
                logger.info(e)
[ "def", "initialize_scheduler", "(", ")", ":", "from", "headphones", "import", "updater", ",", "searcher", ",", "librarysync", ",", "postprocessor", ",", "torrentfinished", "with", "SCHED_LOCK", ":", "start_jobs", "=", "(", "not", "len", "(", "SCHED", ".", "get_jobs", "(", ")", ")", ")", "minutes", "=", "CONFIG", ".", "SEARCH_INTERVAL", "schedule_job", "(", "searcher", ".", "searchforalbum", ",", "'Search for Wanted'", ",", "hours", "=", "0", ",", "minutes", "=", "minutes", ")", "minutes", "=", "CONFIG", ".", "DOWNLOAD_SCAN_INTERVAL", "schedule_job", "(", "postprocessor", ".", "checkFolder", ",", "'Download Scan'", ",", "hours", "=", "0", ",", "minutes", "=", "minutes", ")", "hours", "=", "CONFIG", ".", "LIBRARYSCAN_INTERVAL", "schedule_job", "(", "librarysync", ".", "libraryScan", ",", "'Library Scan'", ",", "hours", "=", "hours", ",", "minutes", "=", "0", ")", "hours", "=", "CONFIG", ".", "UPDATE_DB_INTERVAL", "schedule_job", "(", "updater", ".", "dbUpdate", ",", "'MusicBrainz Update'", ",", "hours", "=", "hours", ",", "minutes", "=", "0", ")", "if", "CONFIG", ".", "CHECK_GITHUB", ":", "if", "CONFIG", ".", "CHECK_GITHUB_INTERVAL", ":", "minutes", "=", "CONFIG", ".", "CHECK_GITHUB_INTERVAL", "else", ":", "minutes", "=", "0", "schedule_job", "(", "versioncheck", ".", "checkGithub", ",", "'Check GitHub for updates'", ",", "hours", "=", "0", ",", "minutes", "=", "minutes", ")", "minutes", "=", "CONFIG", ".", "TORRENT_REMOVAL_INTERVAL", "schedule_job", "(", "torrentfinished", ".", "checkTorrentFinished", ",", "'Torrent removal check'", ",", "hours", "=", "0", ",", "minutes", "=", "minutes", ")", "if", "(", "start_jobs", "and", "len", "(", "SCHED", ".", "get_jobs", "(", ")", ")", ")", ":", "try", ":", "SCHED", ".", "start", "(", ")", "except", "Exception", "as", "e", ":", "logger", ".", "info", "(", "e", ")" ]
start the scheduled background tasks .
train
false
4,192
def buildDecomposableNetwork():
    n = buildNetwork(2, 3, 2, bias=False)
    ndc = NeuronDecomposableNetwork.convertNormalNetwork(n)
    ndc._setParameters(ones(12))
    return ndc
[ "def", "buildDecomposableNetwork", "(", ")", ":", "n", "=", "buildNetwork", "(", "2", ",", "3", ",", "2", ",", "bias", "=", "False", ")", "ndc", "=", "NeuronDecomposableNetwork", ".", "convertNormalNetwork", "(", "n", ")", "ndc", ".", "_setParameters", "(", "ones", "(", "12", ")", ")", "return", "ndc" ]
three hidden neurons .
train
false
4,196
def _edge_betweenness(G, source, nodes=None, cutoff=False):
    (pred, length) = nx.predecessor(G, source, cutoff=cutoff, return_seen=True)
    onodes = [n for (n, d) in sorted(length.items(), key=itemgetter(1))]
    between = {}
    for (u, v) in G.edges(nodes):
        between[(u, v)] = 1.0
        between[(v, u)] = 1.0
    while onodes:
        v = onodes.pop()
        if (v in pred):
            num_paths = len(pred[v])
            for w in pred[v]:
                if (w in pred):
                    num_paths = len(pred[w])
                    for x in pred[w]:
                        between[(w, x)] += (between[(v, w)] / num_paths)
                        between[(x, w)] += (between[(w, v)] / num_paths)
    return between
[ "def", "_edge_betweenness", "(", "G", ",", "source", ",", "nodes", "=", "None", ",", "cutoff", "=", "False", ")", ":", "(", "pred", ",", "length", ")", "=", "nx", ".", "predecessor", "(", "G", ",", "source", ",", "cutoff", "=", "cutoff", ",", "return_seen", "=", "True", ")", "onodes", "=", "[", "n", "for", "(", "n", ",", "d", ")", "in", "sorted", "(", "length", ".", "items", "(", ")", ",", "key", "=", "itemgetter", "(", "1", ")", ")", "]", "between", "=", "{", "}", "for", "(", "u", ",", "v", ")", "in", "G", ".", "edges", "(", "nodes", ")", ":", "between", "[", "(", "u", ",", "v", ")", "]", "=", "1.0", "between", "[", "(", "v", ",", "u", ")", "]", "=", "1.0", "while", "onodes", ":", "v", "=", "onodes", ".", "pop", "(", ")", "if", "(", "v", "in", "pred", ")", ":", "num_paths", "=", "len", "(", "pred", "[", "v", "]", ")", "for", "w", "in", "pred", "[", "v", "]", ":", "if", "(", "w", "in", "pred", ")", ":", "num_paths", "=", "len", "(", "pred", "[", "w", "]", ")", "for", "x", "in", "pred", "[", "w", "]", ":", "between", "[", "(", "w", ",", "x", ")", "]", "+=", "(", "between", "[", "(", "v", ",", "w", ")", "]", "/", "num_paths", ")", "between", "[", "(", "x", ",", "w", ")", "]", "+=", "(", "between", "[", "(", "w", ",", "v", ")", "]", "/", "num_paths", ")", "return", "between" ]
edge betweenness helper .
train
false
4,197
def referenced(word, article=INDEFINITE, gender=MALE, role=SUBJECT):
    return ('%s %s' % (_article(word, article, gender, role), word))
[ "def", "referenced", "(", "word", ",", "article", "=", "INDEFINITE", ",", "gender", "=", "MALE", ",", "role", "=", "SUBJECT", ")", ":", "return", "(", "'%s %s'", "%", "(", "_article", "(", "word", ",", "article", ",", "gender", ",", "role", ")", ",", "word", ")", ")" ]
returns a string with the article + the word .
train
true
4,198
def p_boolean_document(corpus, segmented_topics):
    top_ids = _ret_top_ids(segmented_topics)
    per_topic_postings = {}
    for id in top_ids:
        id_list = set()
        for (n, document) in enumerate(corpus):
            if (id in frozenset((x[0] for x in document))):
                id_list.add(n)
        per_topic_postings[id] = id_list
    num_docs = len(corpus)
    return (per_topic_postings, num_docs)
[ "def", "p_boolean_document", "(", "corpus", ",", "segmented_topics", ")", ":", "top_ids", "=", "_ret_top_ids", "(", "segmented_topics", ")", "per_topic_postings", "=", "{", "}", "for", "id", "in", "top_ids", ":", "id_list", "=", "set", "(", ")", "for", "(", "n", ",", "document", ")", "in", "enumerate", "(", "corpus", ")", ":", "if", "(", "id", "in", "frozenset", "(", "(", "x", "[", "0", "]", "for", "x", "in", "document", ")", ")", ")", ":", "id_list", ".", "add", "(", "n", ")", "per_topic_postings", "[", "id", "]", "=", "id_list", "num_docs", "=", "len", "(", "corpus", ")", "return", "(", "per_topic_postings", ",", "num_docs", ")" ]
this function performs the boolean document probability estimation .
train
false
4,199
def parse_denoiser_mapping(denoiser_map):
    result = {}
    for line in denoiser_map:
        line = line.strip().split(' DCTB ')
        denoised_id = line[0].rstrip(':')
        original_ids = ([denoised_id] + line[1:])
        if (denoised_id in result):
            raise ValueError('Duplicated identifiers in denoiser mapping file: are you sure you merged the correct files?')
        else:
            result[denoised_id] = original_ids
    return result
[ "def", "parse_denoiser_mapping", "(", "denoiser_map", ")", ":", "result", "=", "{", "}", "for", "line", "in", "denoiser_map", ":", "line", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "' DCTB '", ")", "denoised_id", "=", "line", "[", "0", "]", ".", "rstrip", "(", "':'", ")", "original_ids", "=", "(", "[", "denoised_id", "]", "+", "line", "[", "1", ":", "]", ")", "if", "(", "denoised_id", "in", "result", ")", ":", "raise", "ValueError", "(", "'Duplicated identifiers in denoiser mapping file: are you sure you merged the correct files?'", ")", "else", ":", "result", "[", "denoised_id", "]", "=", "original_ids", "return", "result" ]
read a denoiser mapping file into a dictionary .
train
false
4,200
def save_reg(data):
    reg_dir = _reg_dir()
    regfile = os.path.join(reg_dir, 'register')
    try:
        if (not os.path.exists(reg_dir)):
            os.makedirs(reg_dir)
    except OSError as exc:
        if (exc.errno == errno.EEXIST):
            pass
        else:
            raise
    try:
        with salt.utils.fopen(regfile, 'a') as fh_:
            msgpack.dump(data, fh_)
            fh_.close()
    except:
        log.error('Could not write to msgpack file {0}'.format(__opts__['outdir']))
        raise
[ "def", "save_reg", "(", "data", ")", ":", "reg_dir", "=", "_reg_dir", "(", ")", "regfile", "=", "os", ".", "path", ".", "join", "(", "reg_dir", ",", "'register'", ")", "try", ":", "if", "(", "not", "os", ".", "path", ".", "exists", "(", ")", ")", ":", "os", ".", "makedirs", "(", "reg_dir", ")", "except", "OSError", "as", "exc", ":", "if", "(", "exc", ".", "errno", "==", "errno", ".", "EEXIST", ")", ":", "pass", "else", ":", "raise", "try", ":", "with", "salt", ".", "utils", ".", "fopen", "(", "regfile", ",", "'a'", ")", "as", "fh_", ":", "msgpack", ".", "dump", "(", "data", ",", "fh_", ")", "fh_", ".", "close", "(", ")", "except", ":", "log", ".", "error", "(", "'Could not write to msgpack file {0}'", ".", "format", "(", "__opts__", "[", "'outdir'", "]", ")", ")", "raise" ]
save the register to msgpack files .
train
true
4,201
def GetOwnerIDs(region):
    ec2 = _Connect(region)
    return [g.owner_id for g in ec2.get_all_security_groups()]
[ "def", "GetOwnerIDs", "(", "region", ")", ":", "ec2", "=", "_Connect", "(", "region", ")", "return", "[", "g", ".", "owner_id", "for", "g", "in", "ec2", ".", "get_all_security_groups", "(", ")", "]" ]
return the list of owner ids in this regions security groups .
train
false
4,202
def get_model_field_name(field):
    field = slugify(field)
    field = field.replace('-', '_')
    field = field.replace(':', '_')
    if (field in ('id',)):
        field += '_'
    if (field.upper() in PG_RESERVED_KEYWORDS):
        field += '_'
    if (field[(-1):] == '_'):
        field += 'field'
    try:
        int(field)
        float(field)
        field = ('_%s' % field)
    except ValueError:
        pass
    return field
[ "def", "get_model_field_name", "(", "field", ")", ":", "field", "=", "slugify", "(", "field", ")", "field", "=", "field", ".", "replace", "(", "'-'", ",", "'_'", ")", "field", "=", "field", ".", "replace", "(", "':'", ",", "'_'", ")", "if", "(", "field", "in", "(", "'id'", ",", ")", ")", ":", "field", "+=", "'_'", "if", "(", "field", ".", "upper", "(", ")", "in", "PG_RESERVED_KEYWORDS", ")", ":", "field", "+=", "'_'", "if", "(", "field", "[", "(", "-", "1", ")", ":", "]", "==", "'_'", ")", ":", "field", "+=", "'field'", "try", ":", "int", "(", "field", ")", "float", "(", "field", ")", "field", "=", "(", "'_%s'", "%", "field", ")", "except", "ValueError", ":", "pass", "return", "field" ]
get the field name usable without quotes .
train
false
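Typical transformations, using stand-ins for the module-level slugify and PG_RESERVED_KEYWORDS (the stubs are assumptions, not the real helpers):

PG_RESERVED_KEYWORDS = {'SELECT', 'TABLE', 'USER'}  # stub
slugify = lambda s: s.lower().replace(' ', '-')     # stub

print(get_model_field_name('First Name'))  # -> 'first_name'
print(get_model_field_name('id'))          # -> 'id_field'
print(get_model_field_name('select'))      # -> 'select_field'
print(get_model_field_name('2017'))        # -> '_2017' (purely numeric names get a leading underscore)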
4,204
def appendTextElements(e, contentsList, se):
    def uconcat(text, newText, se):
        if (type(newText) != type(text)):
            if (type(text) is str):
                text = text.decode(se)
            else:
                newText = newText.decode(se)
        return (text + newText)
    e.text = ''
    lastElement = None
    for content in contentsList:
        if (not isinstance(content, Text)):
            newElement = content.toElement(se)
            if (newElement is None):
                continue
            lastElement = newElement
            lastElement.tail = ''
            e.append(lastElement)
        elif (lastElement is None):
            e.text = uconcat(e.text, content.text, se)
        else:
            lastElement.tail = uconcat(lastElement.tail, content.text, se)
[ "def", "appendTextElements", "(", "e", ",", "contentsList", ",", "se", ")", ":", "def", "uconcat", "(", "text", ",", "newText", ",", "se", ")", ":", "if", "(", "type", "(", "newText", ")", "!=", "type", "(", "text", ")", ")", ":", "if", "(", "type", "(", "text", ")", "is", "str", ")", ":", "text", "=", "text", ".", "decode", "(", "se", ")", "else", ":", "newText", "=", "newText", ".", "decode", "(", "se", ")", "return", "(", "text", "+", "newText", ")", "e", ".", "text", "=", "''", "lastElement", "=", "None", "for", "content", "in", "contentsList", ":", "if", "(", "not", "isinstance", "(", "content", ",", "Text", ")", ")", ":", "newElement", "=", "content", ".", "toElement", "(", "se", ")", "if", "(", "newElement", "is", "None", ")", ":", "continue", "lastElement", "=", "newElement", "lastElement", ".", "tail", "=", "''", "e", ".", "append", "(", "lastElement", ")", "elif", "(", "lastElement", "is", "None", ")", ":", "e", ".", "text", "=", "uconcat", "(", "e", ".", "text", ",", "content", ".", "text", ",", "se", ")", "else", ":", "lastElement", ".", "tail", "=", "uconcat", "(", "lastElement", ".", "tail", ",", "content", ".", "text", ",", "se", ")" ]
a helper function to convert text streams into the proper elements .
train
false
4,206
def update_patch_log(patchmodule):
    frappe.get_doc({u'doctype': u'Patch Log', u'patch': patchmodule}).insert()
[ "def", "update_patch_log", "(", "patchmodule", ")", ":", "frappe", ".", "get_doc", "(", "{", "u'doctype'", ":", "u'Patch Log'", ",", "u'patch'", ":", "patchmodule", "}", ")", ".", "insert", "(", ")" ]
update patch_file in patch log .
train
false
4,209
def ExampleGen(data_path, num_epochs=None):
    epoch = 0
    while True:
        if ((num_epochs is not None) and (epoch >= num_epochs)):
            break
        filelist = glob.glob(data_path)
        assert filelist, 'Empty filelist.'
        random.shuffle(filelist)
        for f in filelist:
            reader = open(f, 'rb')
            while True:
                len_bytes = reader.read(8)
                if (not len_bytes):
                    break
                str_len = struct.unpack('q', len_bytes)[0]
                example_str = struct.unpack(('%ds' % str_len), reader.read(str_len))[0]
                (yield example_pb2.Example.FromString(example_str))
        epoch += 1
[ "def", "ExampleGen", "(", "data_path", ",", "num_epochs", "=", "None", ")", ":", "epoch", "=", "0", "while", "True", ":", "if", "(", "(", "num_epochs", "is", "not", "None", ")", "and", "(", "epoch", ">=", "num_epochs", ")", ")", ":", "break", "filelist", "=", "glob", ".", "glob", "(", "data_path", ")", "assert", "filelist", ",", "'Empty filelist.'", "random", ".", "shuffle", "(", "filelist", ")", "for", "f", "in", "filelist", ":", "reader", "=", "open", "(", "f", ",", "'rb'", ")", "while", "True", ":", "len_bytes", "=", "reader", ".", "read", "(", "8", ")", "if", "(", "not", "len_bytes", ")", ":", "break", "str_len", "=", "struct", ".", "unpack", "(", "'q'", ",", "len_bytes", ")", "[", "0", "]", "example_str", "=", "struct", ".", "unpack", "(", "(", "'%ds'", "%", "str_len", ")", ",", "reader", ".", "read", "(", "str_len", ")", ")", "[", "0", "]", "(", "yield", "example_pb2", ".", "Example", ".", "FromString", "(", "example_str", ")", ")", "epoch", "+=", "1" ]
generates tf .
train
false
4,212
def is_installed(pkg_name):
    with settings(warn_only=True):
        res = run(('pkg_info -e %s' % pkg_name))
        return (res.succeeded is True)
[ "def", "is_installed", "(", "pkg_name", ")", ":", "with", "settings", "(", "warn_only", "=", "True", ")", ":", "res", "=", "run", "(", "(", "'pkg_info -e %s'", "%", "pkg_name", ")", ")", "return", "(", "res", ".", "succeeded", "is", "True", ")" ]
returns true if a model_class is installed .
train
false
4,213
def true(*args, **kwargs):
    return True
[ "def", "true", "(", "*", "args", ",", "**", "kwargs", ")", ":", "return", "True" ]
always returns true .
train
false
4,214
def print_name_status(changes):
    for change in changes:
        if (not change):
            continue
        if (type(change) is list):
            change = change[0]
        if (change.type == CHANGE_ADD):
            path1 = change.new.path
            path2 = ''
            kind = 'A'
        elif (change.type == CHANGE_DELETE):
            path1 = change.old.path
            path2 = ''
            kind = 'D'
        elif (change.type == CHANGE_MODIFY):
            path1 = change.new.path
            path2 = ''
            kind = 'M'
        elif (change.type in RENAME_CHANGE_TYPES):
            path1 = change.old.path
            path2 = change.new.path
            if (change.type == CHANGE_RENAME):
                kind = 'R'
            elif (change.type == CHANGE_COPY):
                kind = 'C'
        (yield ('%-8s%-20s%-20s' % (kind, path1, path2)))
[ "def", "print_name_status", "(", "changes", ")", ":", "for", "change", "in", "changes", ":", "if", "(", "not", "change", ")", ":", "continue", "if", "(", "type", "(", "change", ")", "is", "list", ")", ":", "change", "=", "change", "[", "0", "]", "if", "(", "change", ".", "type", "==", "CHANGE_ADD", ")", ":", "path1", "=", "change", ".", "new", ".", "path", "path2", "=", "''", "kind", "=", "'A'", "elif", "(", "change", ".", "type", "==", "CHANGE_DELETE", ")", ":", "path1", "=", "change", ".", "old", ".", "path", "path2", "=", "''", "kind", "=", "'D'", "elif", "(", "change", ".", "type", "==", "CHANGE_MODIFY", ")", ":", "path1", "=", "change", ".", "new", ".", "path", "path2", "=", "''", "kind", "=", "'M'", "elif", "(", "change", ".", "type", "in", "RENAME_CHANGE_TYPES", ")", ":", "path1", "=", "change", ".", "old", ".", "path", "path2", "=", "change", ".", "new", ".", "path", "if", "(", "change", ".", "type", "==", "CHANGE_RENAME", ")", ":", "kind", "=", "'R'", "elif", "(", "change", ".", "type", "==", "CHANGE_COPY", ")", ":", "kind", "=", "'C'", "(", "yield", "(", "'%-8s%-20s%-20s'", "%", "(", "kind", ",", "path1", ",", "path2", ")", ")", ")" ]
print a simple status summary .
train
false
4,215
def fixReturns(line):
    return re.sub('returns:', '@returns', line)
[ "def", "fixReturns", "(", "line", ")", ":", "return", "re", ".", "sub", "(", "'returns:'", ",", "'@returns'", ",", "line", ")" ]
change returns: foo to @return foo .
train
false
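A one-line check of fixReturns (needs the re import, which the snippet assumes):

import re

print(fixReturns('returns: the parsed value'))  # -> '@returns the parsed value'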
4,216
def is_sys_meta(server_type, key):
    if (len(key) <= (11 + len(server_type))):
        return False
    return key.lower().startswith(get_sys_meta_prefix(server_type))
[ "def", "is_sys_meta", "(", "server_type", ",", "key", ")", ":", "if", "(", "len", "(", "key", ")", "<=", "(", "11", "+", "len", "(", "server_type", ")", ")", ")", ":", "return", "False", "return", "key", ".", "lower", "(", ")", ".", "startswith", "(", "get_sys_meta_prefix", "(", "server_type", ")", ")" ]
tests if a header key starts with and is longer than the system metadata prefix for given server type .
train
false
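In Swift the system-metadata prefix is x-<server_type>-sysmeta-, i.e. 11 characters plus the type name, which is exactly what the length guard tests before the prefix match. A hedged sketch with a stand-in for get_sys_meta_prefix:

def get_sys_meta_prefix(server_type):
    # stand-in mirroring Swift's documented prefix format
    return 'x-%s-sysmeta-' % server_type.lower()

print(is_sys_meta('object', 'X-Object-Sysmeta-Color'))  # True
print(is_sys_meta('object', 'X-Object-Sysmeta-'))       # False: prefix alone is too short
print(is_sys_meta('object', 'X-Object-Meta-Color'))     # False: user metadata, not sysmeta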
4,218
def _StrOrUnicode(value):
    try:
        return str(value)
    except UnicodeEncodeError:
        return unicode(value)
[ "def", "_StrOrUnicode", "(", "value", ")", ":", "try", ":", "return", "str", "(", "value", ")", "except", "UnicodeEncodeError", ":", "return", "unicode", "(", "value", ")" ]
converts value to a python string or .
train
false
4,220
def makeDir(path):
    if (not ek(os.path.isdir, path)):
        try:
            ek(os.makedirs, path)
            sickbeard.notifiers.synoindex_notifier.addFolder(path)
        except OSError:
            return False
    return True
[ "def", "makeDir", "(", "path", ")", ":", "if", "(", "not", "ek", "(", "os", ".", "path", ".", "isdir", ",", "path", ")", ")", ":", "try", ":", "ek", "(", "os", ".", "makedirs", ",", "path", ")", "sickbeard", ".", "notifiers", ".", "synoindex_notifier", ".", "addFolder", "(", "path", ")", "except", "OSError", ":", "return", "False", "return", "True" ]
make a directory on the filesystem .
train
false
4,221
def FriendlyExceptionDlg(message):
    class w3af_message_dialog(gtk.MessageDialog, ):
        def dialog_response_cb(self, widget, response_id):
            '\n        http://faq.pygtk.org/index.py?req=show&file=faq10.017.htp\n        '
            self.destroy()
        def dialog_run(self):
            '\n        http://faq.pygtk.org/index.py?req=show&file=faq10.017.htp\n        '
            if (not self.modal):
                self.set_modal(True)
            self.connect('response', self.dialog_response_cb)
            self.show()
    dlg = w3af_message_dialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_WARNING, gtk.BUTTONS_OK, message)
    dlg.set_icon_from_file(W3AF_ICON)
    dlg.set_title('Error')
    dlg.dialog_run()
    return
[ "def", "FriendlyExceptionDlg", "(", "message", ")", ":", "class", "w3af_message_dialog", "(", "gtk", ".", "MessageDialog", ",", ")", ":", "def", "dialog_response_cb", "(", "self", ",", "widget", ",", "response_id", ")", ":", "self", ".", "destroy", "(", ")", "def", "dialog_run", "(", "self", ")", ":", "if", "(", "not", "self", ".", "modal", ")", ":", "self", ".", "set_modal", "(", "True", ")", "self", ".", "connect", "(", "'response'", ",", "self", ".", "dialog_response_cb", ")", "self", ".", "show", "(", ")", "dlg", "=", "w3af_message_dialog", "(", "None", ",", "gtk", ".", "DIALOG_MODAL", ",", "gtk", ".", "MESSAGE_WARNING", ",", "gtk", ".", "BUTTONS_OK", ",", "message", ")", "dlg", ".", "set_icon_from_file", "(", "W3AF_ICON", ")", "dlg", ".", "set_title", "(", "'Error'", ")", "dlg", ".", "dialog_run", "(", ")", "return" ]
creates the dialog showing the message .
train
false
4,222
def _add_schema_entry(model_type, name, add_entry):
    (schema_type, entity_meaning) = _GetSchemaEntryForPropertyType(model_type)
    if (not schema_type):
        return
    entry = add_entry()
    entry.set_name(name)
    entry.set_type(schema_type)
    if entity_meaning:
        entry.set_meaning(entity_meaning)
[ "def", "_add_schema_entry", "(", "model_type", ",", "name", ",", "add_entry", ")", ":", "(", "schema_type", ",", "entity_meaning", ")", "=", "_GetSchemaEntryForPropertyType", "(", "model_type", ")", "if", "(", "not", "schema_type", ")", ":", "return", "entry", "=", "add_entry", "(", ")", "entry", ".", "set_name", "(", "name", ")", "entry", ".", "set_type", "(", "schema_type", ")", "if", "entity_meaning", ":", "entry", ".", "set_meaning", "(", "entity_meaning", ")" ]
add single entry to schemaentries by invoking add_entry .
train
false
4,223
def make_2d(a):
    a = np.atleast_2d(a.T).T
    n = a.shape[0]
    newshape = np.product(a.shape[1:]).astype(int)
    a = a.reshape((n, newshape), order='F')
    return a
[ "def", "make_2d", "(", "a", ")", ":", "a", "=", "np", ".", "atleast_2d", "(", "a", ".", "T", ")", ".", "T", "n", "=", "a", ".", "shape", "[", "0", "]", "newshape", "=", "np", ".", "product", "(", "a", ".", "shape", "[", "1", ":", "]", ")", ".", "astype", "(", "int", ")", "a", "=", "a", ".", "reshape", "(", "(", "n", ",", "newshape", ")", ",", "order", "=", "'F'", ")", "return", "a" ]
ravel the dimensions after the first .
train
false
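make_2d keeps the first axis and ravels the rest in Fortran order; 1-D input gains a singleton column. For instance (note the snippet's np.product is the pre-NumPy-2.0 alias of np.prod):

import numpy as np

a = np.arange(24).reshape(2, 3, 4)
print(make_2d(a).shape)             # (2, 12): axes after the first are raveled
print(make_2d(np.arange(5)).shape)  # (5, 1): 1-D input becomes a column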
4,224
def _make_cipher(initialization_vector, secret):
    return AES.new(secret[:KEY_SIZE], AES.MODE_CBC, initialization_vector[:AES.block_size])
[ "def", "_make_cipher", "(", "initialization_vector", ",", "secret", ")", ":", "return", "AES", ".", "new", "(", "secret", "[", ":", "KEY_SIZE", "]", ",", "AES", ".", "MODE_CBC", ",", "initialization_vector", "[", ":", "AES", ".", "block_size", "]", ")" ]
return a block cipher object for use in encrypt and decrypt .
train
false
4,228
def swapaxes(x, axis1, axis2):
    return Swapaxes(axis1, axis2)(x)
[ "def", "swapaxes", "(", "x", ",", "axis1", ",", "axis2", ")", ":", "return", "Swapaxes", "(", "axis1", ",", "axis2", ")", "(", "x", ")" ]
swap axes of inputted tensor .
train
false
4,229
@pytest.mark.network
def test_download_vcs_link(script):
    result = script.pip('install', '-d', '.', 'git+git://github.com/pypa/pip-test-package.git', expect_stderr=True)
    assert ((Path('scratch') / 'pip-test-package-0.1.1.zip') in result.files_created)
    assert ((script.site_packages / 'piptestpackage') not in result.files_created)
[ "@", "pytest", ".", "mark", ".", "network", "def", "test_download_vcs_link", "(", "script", ")", ":", "result", "=", "script", ".", "pip", "(", "'install'", ",", "'-d'", ",", "'.'", ",", "'git+git://github.com/pypa/pip-test-package.git'", ",", "expect_stderr", "=", "True", ")", "assert", "(", "(", "Path", "(", "'scratch'", ")", "/", "'pip-test-package-0.1.1.zip'", ")", "in", "result", ".", "files_created", ")", "assert", "(", "(", "script", ".", "site_packages", "/", "'piptestpackage'", ")", "not", "in", "result", ".", "files_created", ")" ]
it should allow -d flag for vcs links .
train
false
4,231
def order_at_oo(a, d, t):
    if a.is_zero:
        return oo
    return (d.degree(t) - a.degree(t))
[ "def", "order_at_oo", "(", "a", ",", "d", ",", "t", ")", ":", "if", "a", ".", "is_zero", ":", "return", "oo", "return", "(", "d", ".", "degree", "(", "t", ")", "-", "a", ".", "degree", "(", "t", ")", ")" ]
computes the order of a/d at oo .
train
false
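Here the order of the rational function a/d at infinity is deg(d) - deg(a): positive for a zero at infinity, negative for a pole. A sketch with SymPy Poly objects, which is the kind of input this helper handles in SymPy's risch module:

from sympy import Poly, symbols, oo

t = symbols('t')
print(order_at_oo(Poly(t + 1, t), Poly(t**3, t), t))  # 3 - 1 = 2: a/d vanishes at oo
print(order_at_oo(Poly(0, t), Poly(t**3, t), t))      # oo for the zero numerator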
4,233
def CreateCustomizerFeed(client, feed_name):
    ad_customizer_feed_service = client.GetService('AdCustomizerFeedService')
    customizer_feed = {'feedName': feed_name,
                       'feedAttributes': [{'type': 'STRING', 'name': 'Name'},
                                          {'type': 'STRING', 'name': 'Price'},
                                          {'type': 'DATE_TIME', 'name': 'Date'}]}
    feed_service_operation = {'operator': 'ADD', 'operand': customizer_feed}
    response = ad_customizer_feed_service.mutate([feed_service_operation])
    if (response and ('value' in response)):
        feed = response['value'][0]
        feed_data = {'feedId': feed['feedId'],
                     'nameId': feed['feedAttributes'][0]['id'],
                     'priceId': feed['feedAttributes'][1]['id'],
                     'dateId': feed['feedAttributes'][2]['id']}
        print ("Feed with name '%s' and ID %s was added with:\n DCTB Name attribute ID %s and price attribute ID %s and date attributeID %s" % (feed['feedName'], feed['feedId'], feed_data['nameId'], feed_data['priceId'], feed_data['dateId']))
        return feed
    else:
        raise errors.GoogleAdsError('No feeds were added')
[ "def", "CreateCustomizerFeed", "(", "client", ",", "feed_name", ")", ":", "ad_customizer_feed_service", "=", "client", ".", "GetService", "(", "'AdCustomizerFeedService'", ")", "customizer_feed", "=", "{", "'feedName'", ":", "feed_name", ",", "'feedAttributes'", ":", "[", "{", "'type'", ":", "'STRING'", ",", "'name'", ":", "'Name'", "}", ",", "{", "'type'", ":", "'STRING'", ",", "'name'", ":", "'Price'", "}", ",", "{", "'type'", ":", "'DATE_TIME'", ",", "'name'", ":", "'Date'", "}", "]", "}", "feed_service_operation", "=", "{", "'operator'", ":", "'ADD'", ",", "'operand'", ":", "customizer_feed", "}", "response", "=", "ad_customizer_feed_service", ".", "mutate", "(", "[", "feed_service_operation", "]", ")", "if", "(", "response", "and", "(", "'value'", "in", "response", ")", ")", ":", "feed", "=", "response", "[", "'value'", "]", "[", "0", "]", "feed_data", "=", "{", "'feedId'", ":", "feed", "[", "'feedId'", "]", ",", "'nameId'", ":", "feed", "[", "'feedAttributes'", "]", "[", "0", "]", "[", "'id'", "]", ",", "'priceId'", ":", "feed", "[", "'feedAttributes'", "]", "[", "1", "]", "[", "'id'", "]", ",", "'dateId'", ":", "feed", "[", "'feedAttributes'", "]", "[", "2", "]", "[", "'id'", "]", "}", "print", "(", "\"Feed with name '%s' and ID %s was added with:\\n DCTB Name attribute ID %s and price attribute ID %s and date attributeID %s\"", "%", "(", "feed", "[", "'feedName'", "]", ",", "feed", "[", "'feedId'", "]", ",", "feed_data", "[", "'nameId'", "]", ",", "feed_data", "[", "'priceId'", "]", ",", "feed_data", "[", "'dateId'", "]", ")", ")", "return", "feed", "else", ":", "raise", "errors", ".", "GoogleAdsError", "(", "'No feeds were added'", ")" ]
creates a new adcustomizerfeed .
train
true
4,234
def argument(*param_decls, **attrs):
    def decorator(f):
        ArgumentClass = attrs.pop('cls', Argument)
        _param_memo(f, ArgumentClass(param_decls, **attrs))
        return f
    return decorator
[ "def", "argument", "(", "*", "param_decls", ",", "**", "attrs", ")", ":", "def", "decorator", "(", "f", ")", ":", "ArgumentClass", "=", "attrs", ".", "pop", "(", "'cls'", ",", "Argument", ")", "_param_memo", "(", "f", ",", "ArgumentClass", "(", "param_decls", ",", "**", "attrs", ")", ")", "return", "f", "return", "decorator" ]
attaches an argument to the command .
train
true
4,235
@register.inclusion_tag('zinnia/tags/dummy.html')
def get_featured_entries(number=5, template='zinnia/tags/entries_featured.html'):
    return {'template': template, 'entries': Entry.published.filter(featured=True)[:number]}
[ "@", "register", ".", "inclusion_tag", "(", "'zinnia/tags/dummy.html'", ")", "def", "get_featured_entries", "(", "number", "=", "5", ",", "template", "=", "'zinnia/tags/entries_featured.html'", ")", ":", "return", "{", "'template'", ":", "template", ",", "'entries'", ":", "Entry", ".", "published", ".", "filter", "(", "featured", "=", "True", ")", "[", ":", "number", "]", "}" ]
return the featured entries .
train
true
4,236
def validate_gs_bucket_name(bucket_name):
    if (len(bucket_name) > MAX_BUCKET_LEN):
        raise BackupValidationException(('Bucket name length should not be longer than %d' % MAX_BUCKET_LEN))
    if (len(bucket_name) < MIN_BUCKET_LEN):
        raise BackupValidationException(('Bucket name length should be longer than %d' % MIN_BUCKET_LEN))
    if bucket_name.lower().startswith('goog'):
        raise BackupValidationException('Bucket name should not start with a "goog" prefix')
    bucket_elements = bucket_name.split('.')
    for bucket_element in bucket_elements:
        if (len(bucket_element) > MAX_BUCKET_SEGMENT_LEN):
            raise BackupValidationException(('Segment length of bucket name should not be longer than %d' % MAX_BUCKET_SEGMENT_LEN))
    if (not re.match(BUCKET_PATTERN, bucket_name)):
        raise BackupValidationException(('Invalid bucket name "%s"' % bucket_name))
[ "def", "validate_gs_bucket_name", "(", "bucket_name", ")", ":", "if", "(", "len", "(", "bucket_name", ")", ">", "MAX_BUCKET_LEN", ")", ":", "raise", "BackupValidationException", "(", "(", "'Bucket name length should not be longer than %d'", "%", "MAX_BUCKET_LEN", ")", ")", "if", "(", "len", "(", "bucket_name", ")", "<", "MIN_BUCKET_LEN", ")", ":", "raise", "BackupValidationException", "(", "(", "'Bucket name length should be longer than %d'", "%", "MIN_BUCKET_LEN", ")", ")", "if", "bucket_name", ".", "lower", "(", ")", ".", "startswith", "(", "'goog'", ")", ":", "raise", "BackupValidationException", "(", "'Bucket name should not start with a \"goog\" prefix'", ")", "bucket_elements", "=", "bucket_name", ".", "split", "(", "'.'", ")", "for", "bucket_element", "in", "bucket_elements", ":", "if", "(", "len", "(", "bucket_element", ")", ">", "MAX_BUCKET_SEGMENT_LEN", ")", ":", "raise", "BackupValidationException", "(", "(", "'Segment length of bucket name should not be longer than %d'", "%", "MAX_BUCKET_SEGMENT_LEN", ")", ")", "if", "(", "not", "re", ".", "match", "(", "BUCKET_PATTERN", ",", "bucket_name", ")", ")", ":", "raise", "BackupValidationException", "(", "(", "'Invalid bucket name \"%s\"'", "%", "bucket_name", ")", ")" ]
validate the format of the given bucket_name .
train
false
4,237
def start_event_loop_qt4(app=None):
    if (app is None):
        app = get_app_qt4([''])
    if (not is_event_loop_running_qt4(app)):
        app._in_event_loop = True
        app.exec_()
        app._in_event_loop = False
    else:
        app._in_event_loop = True
[ "def", "start_event_loop_qt4", "(", "app", "=", "None", ")", ":", "if", "(", "app", "is", "None", ")", ":", "app", "=", "get_app_qt4", "(", "[", "''", "]", ")", "if", "(", "not", "is_event_loop_running_qt4", "(", "app", ")", ")", ":", "app", ".", "_in_event_loop", "=", "True", "app", ".", "exec_", "(", ")", "app", ".", "_in_event_loop", "=", "False", "else", ":", "app", ".", "_in_event_loop", "=", "True" ]
start the qt4 event loop in a consistent manner .
train
true
4,238
def get_era_names(width='wide', locale=LC_TIME):
    return Locale.parse(locale).eras[width]
[ "def", "get_era_names", "(", "width", "=", "'wide'", ",", "locale", "=", "LC_TIME", ")", ":", "return", "Locale", ".", "parse", "(", "locale", ")", ".", "eras", "[", "width", "]" ]
return the era names used by the locale for the specified format .
train
false
4,241
def dccDescribe(data):
    orig_data = data
    data = string.split(data)
    if (len(data) < 4):
        return orig_data
    (dcctype, arg, address, port) = data[:4]
    if ('.' in address):
        pass
    else:
        try:
            address = long(address)
        except ValueError:
            pass
        else:
            address = (((address >> 24) & 255), ((address >> 16) & 255), ((address >> 8) & 255), (address & 255))
            address = string.join(map(str, map(int, address)), '.')
    if (dcctype == 'SEND'):
        filename = arg
        size_txt = ''
        if (len(data) >= 5):
            try:
                size = int(data[4])
                size_txt = (' of size %d bytes' % (size,))
            except ValueError:
                pass
        dcc_text = ("SEND for file '%s'%s at host %s, port %s" % (filename, size_txt, address, port))
    elif (dcctype == 'CHAT'):
        dcc_text = ('CHAT for host %s, port %s' % (address, port))
    else:
        dcc_text = orig_data
    return dcc_text
[ "def", "dccDescribe", "(", "data", ")", ":", "orig_data", "=", "data", "data", "=", "string", ".", "split", "(", "data", ")", "if", "(", "len", "(", "data", ")", "<", "4", ")", ":", "return", "orig_data", "(", "dcctype", ",", "arg", ",", "address", ",", "port", ")", "=", "data", "[", ":", "4", "]", "if", "(", "'.'", "in", "address", ")", ":", "pass", "else", ":", "try", ":", "address", "=", "long", "(", "address", ")", "except", "ValueError", ":", "pass", "else", ":", "address", "=", "(", "(", "(", "address", ">>", "24", ")", "&", "255", ")", ",", "(", "(", "address", ">>", "16", ")", "&", "255", ")", ",", "(", "(", "address", ">>", "8", ")", "&", "255", ")", ",", "(", "address", "&", "255", ")", ")", "address", "=", "string", ".", "join", "(", "map", "(", "str", ",", "map", "(", "int", ",", "address", ")", ")", ",", "'.'", ")", "if", "(", "dcctype", "==", "'SEND'", ")", ":", "filename", "=", "arg", "size_txt", "=", "''", "if", "(", "len", "(", "data", ")", ">=", "5", ")", ":", "try", ":", "size", "=", "int", "(", "data", "[", "4", "]", ")", "size_txt", "=", "(", "' of size %d bytes'", "%", "(", "size", ",", ")", ")", "except", "ValueError", ":", "pass", "dcc_text", "=", "(", "\"SEND for file '%s'%s at host %s, port %s\"", "%", "(", "filename", ",", "size_txt", ",", "address", ",", "port", ")", ")", "elif", "(", "dcctype", "==", "'CHAT'", ")", ":", "dcc_text", "=", "(", "'CHAT for host %s, port %s'", "%", "(", "address", ",", "port", ")", ")", "else", ":", "dcc_text", "=", "orig_data", "return", "dcc_text" ]
given the data chunk from a dcc query .
train
false
4,242
def rpc(cmd=None, dest=None, format='xml', *args, **kwargs):
    conn = __proxy__['junos.conn']()
    ret = dict()
    ret['out'] = True
    op = dict()
    if ('__pub_arg' in kwargs):
        if isinstance(kwargs['__pub_arg'][(-1)], dict):
            op.update(kwargs['__pub_arg'][(-1)])
    else:
        op.update(kwargs)
    if ((dest is None) and (format != 'xml')):
        log.warning('Format ignored as it is only used for output which is dumped in the file.')
    write_response = ''
    try:
        if (cmd in ['get-config', 'get_config']):
            filter_reply = None
            if ('filter' in op):
                filter_reply = etree.XML(op['filter'])
            xml_reply = getattr(conn.rpc, cmd.replace('-', '_'))(filter_reply, options=op)
            ret['message'] = jxmlease.parse(etree.tostring(xml_reply))
            write_response = etree.tostring(xml_reply)
            if ((dest is not None) and (format != 'xml')):
                op.update({'format': format})
                rpc_reply = getattr(conn.rpc, cmd.replace('-', '_'))(filter_reply, options=op)
                if (format == 'json'):
                    write_response = json.dumps(rpc_reply, indent=1)
                else:
                    write_response = rpc_reply.text
        else:
            xml_reply = getattr(conn.rpc, cmd.replace('-', '_'))(**op)
            ret['message'] = jxmlease.parse(etree.tostring(xml_reply))
            write_response = etree.tostring(xml_reply)
            if ((dest is not None) and (format != 'xml')):
                rpc_reply = getattr(conn.rpc, cmd.replace('-', '_'))({'format': format}, **op)
                if (format == 'json'):
                    write_response = json.dumps(rpc_reply, indent=1)
                else:
                    write_response = rpc_reply.text
    except Exception as exception:
        ret['message'] = 'Execution failed due to "{0}"'.format(exception)
        ret['out'] = False
    if (dest is not None):
        with fopen(dest, 'w') as fp:
            fp.write(write_response)
    return ret
[ "def", "rpc", "(", "cmd", "=", "None", ",", "dest", "=", "None", ",", "format", "=", "'xml'", ",", "*", "args", ",", "**", "kwargs", ")", ":", "conn", "=", "__proxy__", "[", "'junos.conn'", "]", "(", ")", "ret", "=", "dict", "(", ")", "ret", "[", "'out'", "]", "=", "True", "op", "=", "dict", "(", ")", "if", "(", "'__pub_arg'", "in", "kwargs", ")", ":", "if", "isinstance", "(", "kwargs", "[", "'__pub_arg'", "]", "[", "(", "-", "1", ")", "]", ",", "dict", ")", ":", "op", ".", "update", "(", "kwargs", "[", "'__pub_arg'", "]", "[", "(", "-", "1", ")", "]", ")", "else", ":", "op", ".", "update", "(", "kwargs", ")", "if", "(", "(", "dest", "is", "None", ")", "and", "(", "format", "!=", "'xml'", ")", ")", ":", "log", ".", "warning", "(", "'Format ignored as it is only used for output which is dumped in the file.'", ")", "write_response", "=", "''", "try", ":", "if", "(", "cmd", "in", "[", "'get-config'", ",", "'get_config'", "]", ")", ":", "filter_reply", "=", "None", "if", "(", "'filter'", "in", "op", ")", ":", "filter_reply", "=", "etree", ".", "XML", "(", "op", "[", "'filter'", "]", ")", "xml_reply", "=", "getattr", "(", "conn", ".", "rpc", ",", "cmd", ".", "replace", "(", "'-'", ",", "'_'", ")", ")", "(", "filter_reply", ",", "options", "=", "op", ")", "ret", "[", "'message'", "]", "=", "jxmlease", ".", "parse", "(", "etree", ".", "tostring", "(", "xml_reply", ")", ")", "write_response", "=", "etree", ".", "tostring", "(", "xml_reply", ")", "if", "(", "(", "dest", "is", "not", "None", ")", "and", "(", "format", "!=", "'xml'", ")", ")", ":", "op", ".", "update", "(", "{", "'format'", ":", "format", "}", ")", "rpc_reply", "=", "getattr", "(", "conn", ".", "rpc", ",", "cmd", ".", "replace", "(", "'-'", ",", "'_'", ")", ")", "(", "filter_reply", ",", "options", "=", "op", ")", "if", "(", "format", "==", "'json'", ")", ":", "write_response", "=", "json", ".", "dumps", "(", "rpc_reply", ",", "indent", "=", "1", ")", "else", ":", "write_response", "=", "rpc_reply", ".", "text", "else", ":", "xml_reply", "=", "getattr", "(", "conn", ".", "rpc", ",", "cmd", ".", "replace", "(", "'-'", ",", "'_'", ")", ")", "(", "**", "op", ")", "ret", "[", "'message'", "]", "=", "jxmlease", ".", "parse", "(", "etree", ".", "tostring", "(", "xml_reply", ")", ")", "write_response", "=", "etree", ".", "tostring", "(", "xml_reply", ")", "if", "(", "(", "dest", "is", "not", "None", ")", "and", "(", "format", "!=", "'xml'", ")", ")", ":", "rpc_reply", "=", "getattr", "(", "conn", ".", "rpc", ",", "cmd", ".", "replace", "(", "'-'", ",", "'_'", ")", ")", "(", "{", "'format'", ":", "format", "}", ",", "**", "op", ")", "if", "(", "format", "==", "'json'", ")", ":", "write_response", "=", "json", ".", "dumps", "(", "rpc_reply", ",", "indent", "=", "1", ")", "else", ":", "write_response", "=", "rpc_reply", ".", "text", "except", "Exception", "as", "exception", ":", "ret", "[", "'message'", "]", "=", "'Execution failed due to \"{0}\"'", ".", "format", "(", "exception", ")", "ret", "[", "'out'", "]", "=", "False", "if", "(", "dest", "is", "not", "None", ")", ":", "with", "fopen", "(", "dest", ",", "'w'", ")", "as", "fp", ":", "fp", ".", "write", "(", "write_response", ")", "return", "ret" ]
executes the given rpc .
train
false
4,243
def addValueToOutput(depth, keyInput, output, value):
    depthStart = (' ' * depth)
    output.write(('%s%s:' % (depthStart, keyInput)))
    if (value.__class__ == dict):
        output.write('\n')
        keys = value.keys()
        keys.sort()
        for key in keys:
            addValueToOutput((depth + 1), key, output, value[key])
        return
    if (value.__class__ == list):
        output.write('\n')
        for (elementIndex, element) in enumerate(value):
            addValueToOutput((depth + 1), elementIndex, output, element)
        return
    output.write((' %s\n' % value))
[ "def", "addValueToOutput", "(", "depth", ",", "keyInput", ",", "output", ",", "value", ")", ":", "depthStart", "=", "(", "' '", "*", "depth", ")", "output", ".", "write", "(", "(", "'%s%s:'", "%", "(", "depthStart", ",", "keyInput", ")", ")", ")", "if", "(", "value", ".", "__class__", "==", "dict", ")", ":", "output", ".", "write", "(", "'\\n'", ")", "keys", "=", "value", ".", "keys", "(", ")", "keys", ".", "sort", "(", ")", "for", "key", "in", "keys", ":", "addValueToOutput", "(", "(", "depth", "+", "1", ")", ",", "key", ",", "output", ",", "value", "[", "key", "]", ")", "return", "if", "(", "value", ".", "__class__", "==", "list", ")", ":", "output", ".", "write", "(", "'\\n'", ")", "for", "(", "elementIndex", ",", "element", ")", "in", "enumerate", "(", "value", ")", ":", "addValueToOutput", "(", "(", "depth", "+", "1", ")", ",", "elementIndex", ",", "output", ",", "element", ")", "return", "output", ".", "write", "(", "(", "' %s\\n'", "%", "value", ")", ")" ]
add value to the output .
train
false
4,244
def assign(obj, **kwargs):
    obj.__dict__.update(kwargs)
[ "def", "assign", "(", "obj", ",", "**", "kwargs", ")", ":", "obj", ".", "__dict__", ".", "update", "(", "kwargs", ")" ]
assign a single sysctl parameter for this minion cli example: .
train
false
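A minimal usage sketch for assign (plain attribute merging via __dict__):

class Point(object):
    pass

p = Point()
assign(p, x=1, y=2)
print(p.x, p.y)  # 1 2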
4,245
@pytest.mark.network
def test_editables_flag(script, data):
    script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
    result = script.pip('install', '-e', 'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package')
    result = script.pip('list', '--editable', '--format=legacy')
    assert ('simple (1.0)' not in result.stdout), str(result)
    assert (os.path.join('src', 'pip-test-package') in result.stdout), str(result)
[ "@", "pytest", ".", "mark", ".", "network", "def", "test_editables_flag", "(", "script", ",", "data", ")", ":", "script", ".", "pip", "(", "'install'", ",", "'-f'", ",", "data", ".", "find_links", ",", "'--no-index'", ",", "'simple==1.0'", ")", "result", "=", "script", ".", "pip", "(", "'install'", ",", "'-e'", ",", "'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package'", ")", "result", "=", "script", ".", "pip", "(", "'list'", ",", "'--editable'", ",", "'--format=legacy'", ")", "assert", "(", "'simple (1.0)'", "not", "in", "result", ".", "stdout", ")", ",", "str", "(", "result", ")", "assert", "(", "os", ".", "path", ".", "join", "(", "'src'", ",", "'pip-test-package'", ")", "in", "result", ".", "stdout", ")", ",", "str", "(", "result", ")" ]
test the behavior of --editables flag in the list command .
train
false
4,246
def gis_opacity():
    T = current.T
    OPACITY = T('Opacity')
    return S3ReusableField('opacity', 'double',
                           default=1.0,
                           label=OPACITY,
                           requires=IS_FLOAT_IN_RANGE(0, 1),
                           widget=S3SliderWidget(0.01, 'float'),
                           comment=DIV(_class='tooltip',
                                       _title=('%s|%s' % (OPACITY, T('Left-side is fully transparent (0), right-side is opaque (1.0).')))))
[ "def", "gis_opacity", "(", ")", ":", "T", "=", "current", ".", "T", "OPACITY", "=", "T", "(", "'Opacity'", ")", "return", "S3ReusableField", "(", "'opacity'", ",", "'double'", ",", "default", "=", "1.0", ",", "label", "=", "OPACITY", ",", "requires", "=", "IS_FLOAT_IN_RANGE", "(", "0", ",", "1", ")", ",", "widget", "=", "S3SliderWidget", "(", "0.01", ",", "'float'", ")", ",", "comment", "=", "DIV", "(", "_class", "=", "'tooltip'", ",", "_title", "=", "(", "'%s|%s'", "%", "(", "OPACITY", ",", "T", "(", "'Left-side is fully transparent (0), right-side is opaque (1.0).'", ")", ")", ")", ")", ")" ]
used by gis_style .
train
false
4,247
def disable_source(name):
    return _change_source_state(name, 'disable')
[ "def", "disable_source", "(", "name", ")", ":", "return", "_change_source_state", "(", "name", ",", "'disable'", ")" ]
instructs chocolatey to disable a source .
train
false
4,249
@handle_response_format @treeio_login_required def folder_delete(request, folder_id, response_format='html'): folder = get_object_or_404(Folder, pk=folder_id) if (not request.user.profile.has_permission(folder, mode='w')): return user_denied(request, message="You don't have access to this Folder") if request.POST: if ('delete' in request.POST): if ('trash' in request.POST): folder.trash = True folder.save() else: folder.delete() return HttpResponseRedirect(reverse('document_index')) elif ('cancel' in request.POST): return HttpResponseRedirect(reverse('documents_folder_view', args=[folder.id])) query = ((Q(object_type='treeio.documents.models.Document') | Q(object_type='treeio.documents.models.File')) | Q(object_type='treeio.documents.models.WebLink')) query = (query & ((Q(document__folder=folder) | Q(file__folder=folder)) | Q(weblink__folder=folder))) if request.GET: query = _get_filter_query(request.GET) objects = Object.filter_by_request(request, Object.objects.filter(query)) else: objects = Object.filter_by_request(request, Object.objects.filter(query)) context = _get_default_context(request) context.update({'folder': folder, 'objects': objects}) return render_to_response('documents/folder_delete', context, context_instance=RequestContext(request), response_format=response_format)
[ "@", "handle_response_format", "@", "treeio_login_required", "def", "folder_delete", "(", "request", ",", "folder_id", ",", "response_format", "=", "'html'", ")", ":", "folder", "=", "get_object_or_404", "(", "Folder", ",", "pk", "=", "folder_id", ")", "if", "(", "not", "request", ".", "user", ".", "profile", ".", "has_permission", "(", "folder", ",", "mode", "=", "'w'", ")", ")", ":", "return", "user_denied", "(", "request", ",", "message", "=", "\"You don't have access to this Folder\"", ")", "if", "request", ".", "POST", ":", "if", "(", "'delete'", "in", "request", ".", "POST", ")", ":", "if", "(", "'trash'", "in", "request", ".", "POST", ")", ":", "folder", ".", "trash", "=", "True", "folder", ".", "save", "(", ")", "else", ":", "folder", ".", "delete", "(", ")", "return", "HttpResponseRedirect", "(", "reverse", "(", "'document_index'", ")", ")", "elif", "(", "'cancel'", "in", "request", ".", "POST", ")", ":", "return", "HttpResponseRedirect", "(", "reverse", "(", "'documents_folder_view'", ",", "args", "=", "[", "folder", ".", "id", "]", ")", ")", "query", "=", "(", "(", "Q", "(", "object_type", "=", "'treeio.documents.models.Document'", ")", "|", "Q", "(", "object_type", "=", "'treeio.documents.models.File'", ")", ")", "|", "Q", "(", "object_type", "=", "'treeio.documents.models.WebLink'", ")", ")", "query", "=", "(", "query", "&", "(", "(", "Q", "(", "document__folder", "=", "folder", ")", "|", "Q", "(", "file__folder", "=", "folder", ")", ")", "|", "Q", "(", "weblink__folder", "=", "folder", ")", ")", ")", "if", "request", ".", "GET", ":", "query", "=", "_get_filter_query", "(", "request", ".", "GET", ")", "objects", "=", "Object", ".", "filter_by_request", "(", "request", ",", "Object", ".", "objects", ".", "filter", "(", "query", ")", ")", "else", ":", "objects", "=", "Object", ".", "filter_by_request", "(", "request", ",", "Object", ".", "objects", ".", "filter", "(", "query", ")", ")", "context", "=", "_get_default_context", "(", "request", ")", "context", ".", "update", "(", "{", "'folder'", ":", "folder", ",", "'objects'", ":", "objects", "}", ")", "return", "render_to_response", "(", "'documents/folder_delete'", ",", "context", ",", "context_instance", "=", "RequestContext", "(", "request", ")", ",", "response_format", "=", "response_format", ")" ]
delete a folder, with the option to move it to trash instead .
train
false
4,251
def _docs(): dev = _get_import_dir()[1] if (not dev): warnings.warn("Docstring test imports Vispy from Vispy's installation. It is recommended to setup Vispy using 'python setup.py develop' so that the latest sources are used automatically") try: from ..util.tests import test_docstring_parameters print('Running docstring test...') test_docstring_parameters.test_docstring_parameters() except AssertionError as docstring_violations: raise RuntimeError(docstring_violations)
[ "def", "_docs", "(", ")", ":", "dev", "=", "_get_import_dir", "(", ")", "[", "1", "]", "if", "(", "not", "dev", ")", ":", "warnings", ".", "warn", "(", "\"Docstring test imports Vispy from Vispy's installation. It is recommended to setup Vispy using 'python setup.py develop' so that the latest sources are used automatically\"", ")", "try", ":", "from", ".", ".", "util", ".", "tests", "import", "test_docstring_parameters", "print", "(", "'Running docstring test...'", ")", "test_docstring_parameters", ".", "test_docstring_parameters", "(", ")", "except", "AssertionError", "as", "docstring_violations", ":", "raise", "RuntimeError", "(", "docstring_violations", ")" ]
test docstring parameters using vispy/utils/tests/test_docstring_parameters .
train
false
4,252
@contextmanager def lock_file(filename, timeout=10, append=False, unlink=True): flags = (os.O_CREAT | os.O_RDWR) if append: flags |= os.O_APPEND mode = 'a+' else: mode = 'r+' fd = os.open(filename, flags) file_obj = os.fdopen(fd, mode) try: with LockTimeout(timeout, filename): while True: try: fcntl.flock(fd, (fcntl.LOCK_EX | fcntl.LOCK_NB)) break except IOError as err: if (err.errno != errno.EAGAIN): raise sleep(0.01) (yield file_obj) finally: try: file_obj.close() except UnboundLocalError: pass if unlink: os.unlink(filename)
[ "@", "contextmanager", "def", "lock_file", "(", "filename", ",", "timeout", "=", "10", ",", "append", "=", "False", ",", "unlink", "=", "True", ")", ":", "flags", "=", "(", "os", ".", "O_CREAT", "|", "os", ".", "O_RDWR", ")", "if", "append", ":", "flags", "|=", "os", ".", "O_APPEND", "mode", "=", "'a+'", "else", ":", "mode", "=", "'r+'", "fd", "=", "os", ".", "open", "(", "filename", ",", "flags", ")", "file_obj", "=", "os", ".", "fdopen", "(", "fd", ",", "mode", ")", "try", ":", "with", "LockTimeout", "(", "timeout", ",", "filename", ")", ":", "while", "True", ":", "try", ":", "fcntl", ".", "flock", "(", "fd", ",", "(", "fcntl", ".", "LOCK_EX", "|", "fcntl", ".", "LOCK_NB", ")", ")", "break", "except", "IOError", "as", "err", ":", "if", "(", "err", ".", "errno", "!=", "errno", ".", "EAGAIN", ")", ":", "raise", "sleep", "(", "0.01", ")", "(", "yield", "file_obj", ")", "finally", ":", "try", ":", "file_obj", ".", "close", "(", ")", "except", "UnboundLocalError", ":", "pass", "if", "unlink", ":", "os", ".", "unlink", "(", "filename", ")" ]
context manager that acquires a lock on a file .
train
false
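A hedged usage sketch; the lock path is illustrative, and the helper is POSIX-only since it relies on fcntl.flock.

with lock_file('/tmp/example.lock', timeout=5, append=True) as f:
    f.write('only one lock holder writes at a time\n')
# on exit the lock is released and the file is unlinked (unlink=True)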
4,253
def settings(request): settings = debug.get_safe_settings() sorted_settings = [{'key': key, 'value': settings[key]} for key in sorted(settings.keys())] return render_to_response('kadmin/settings.html', {'pythonpath': sys.path, 'settings': sorted_settings, 'title': 'Settings'}, RequestContext(request, {}))
[ "def", "settings", "(", "request", ")", ":", "settings", "=", "debug", ".", "get_safe_settings", "(", ")", "sorted_settings", "=", "[", "{", "'key'", ":", "key", ",", "'value'", ":", "settings", "[", "key", "]", "}", "for", "key", "in", "sorted", "(", "settings", ".", "keys", "(", ")", ")", "]", "return", "render_to_response", "(", "'kadmin/settings.html'", ",", "{", "'pythonpath'", ":", "sys", ".", "path", ",", "'settings'", ":", "sorted_settings", ",", "'title'", ":", "'Settings'", "}", ",", "RequestContext", "(", "request", ",", "{", "}", ")", ")" ]
display the sanitized django settings sorted by key, along with the python path .
train
false
4,254
def occurrence(request, event_id, template_name='schedule/occurrence.html', *args, **kwargs): (event, occurrence) = get_occurrence(event_id, *args, **kwargs) back_url = request.META.get('HTTP_REFERER', None) return render_to_response(template_name, {'event': event, 'occurrence': occurrence, 'back_url': back_url}, context_instance=RequestContext(request))
[ "def", "occurrence", "(", "request", ",", "event_id", ",", "template_name", "=", "'schedule/occurrence.html'", ",", "*", "args", ",", "**", "kwargs", ")", ":", "(", "event", ",", "occurrence", ")", "=", "get_occurrence", "(", "event_id", ",", "*", "args", ",", "**", "kwargs", ")", "back_url", "=", "request", ".", "META", ".", "get", "(", "'HTTP_REFERER'", ",", "None", ")", "return", "render_to_response", "(", "template_name", ",", "{", "'event'", ":", "event", ",", "'occurrence'", ":", "occurrence", ",", "'back_url'", ":", "back_url", "}", ",", "context_instance", "=", "RequestContext", "(", "request", ")", ")" ]
this view is used to display an occurrence .
train
false
4,256
def get_converter(from_unit, to_unit): try: scale = from_unit._to(to_unit) except UnitsError: return from_unit._apply_equivalencies(from_unit, to_unit, get_current_unit_registry().equivalencies) if (scale == 1.0): return None else: return (lambda val: (scale * val))
[ "def", "get_converter", "(", "from_unit", ",", "to_unit", ")", ":", "try", ":", "scale", "=", "from_unit", ".", "_to", "(", "to_unit", ")", "except", "UnitsError", ":", "return", "from_unit", ".", "_apply_equivalencies", "(", "from_unit", ",", "to_unit", ",", "get_current_unit_registry", "(", ")", ".", "equivalencies", ")", "if", "(", "scale", "==", "1.0", ")", ":", "return", "None", "else", ":", "return", "(", "lambda", "val", ":", "(", "scale", "*", "val", ")", ")" ]
create a new converter for the given arguments or raise exception if the converter does not exist .
train
false
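A hedged usage sketch assuming astropy.units, whose unit objects this helper expects.

from astropy import units as u

conv = get_converter(u.km, u.m)
conv(2.0)                  # 2000.0
get_converter(u.m, u.m)    # None, because the scale is exactly 1.0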
4,257
def raises_Invalid(function): def call_and_assert(*args, **kwargs): nose.tools.assert_raises(df.Invalid, function, *args, **kwargs) return call_and_assert
[ "def", "raises_Invalid", "(", "function", ")", ":", "def", "call_and_assert", "(", "*", "args", ",", "**", "kwargs", ")", ":", "nose", ".", "tools", ".", "assert_raises", "(", "df", ".", "Invalid", ",", "function", ",", "*", "args", ",", "**", "kwargs", ")", "return", "call_and_assert" ]
a decorator that asserts that the decorated function raises dictization_functions .
train
false
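A minimal usage sketch; validate_email is a hypothetical validator expected to raise df.Invalid on bad input.

checked = raises_Invalid(validate_email)
checked('not-an-email', {})   # passes silently only if df.Invalid is raised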
4,258
def parse_p2g(lines): description = next(lines).strip() G = networkx.MultiDiGraph(name=description, selfloops=True) (nnodes, nedges) = map(int, next(lines).split()) nodelabel = {} nbrs = {} for i in range(nnodes): n = next(lines).strip() nodelabel[i] = n G.add_node(n) nbrs[n] = map(int, next(lines).split()) for n in G: for nbr in nbrs[n]: G.add_edge(n, nodelabel[nbr]) return G
[ "def", "parse_p2g", "(", "lines", ")", ":", "description", "=", "next", "(", "lines", ")", ".", "strip", "(", ")", "G", "=", "networkx", ".", "MultiDiGraph", "(", "name", "=", "description", ",", "selfloops", "=", "True", ")", "(", "nnodes", ",", "nedges", ")", "=", "map", "(", "int", ",", "next", "(", "lines", ")", ".", "split", "(", ")", ")", "nodelabel", "=", "{", "}", "nbrs", "=", "{", "}", "for", "i", "in", "range", "(", "nnodes", ")", ":", "n", "=", "next", "(", "lines", ")", ".", "strip", "(", ")", "nodelabel", "[", "i", "]", "=", "n", "G", ".", "add_node", "(", "n", ")", "nbrs", "[", "n", "]", "=", "map", "(", "int", ",", "next", "(", "lines", ")", ".", "split", "(", ")", ")", "for", "n", "in", "G", ":", "for", "nbr", "in", "nbrs", "[", "n", "]", ":", "G", ".", "add_edge", "(", "n", ",", "nodelabel", "[", "nbr", "]", ")", "return", "G" ]
parse p2g format graph from string or iterable .
train
false
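A hedged usage sketch; the iterable carries a description line, a "nnodes nedges" line, then a label line and a neighbour-index line per node.

lines = iter(['tiny graph', '2 1', 'a', '1', 'b', ''])
G = parse_p2g(lines)
sorted(G.edges())   # [('a', 'b')]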
4,260
def get_cloud_init_mime(cloud_init): if isinstance(cloud_init, six.string_types): cloud_init = json.loads(cloud_init) _cloud_init = email.mime.multipart.MIMEMultipart() if ('boothooks' in cloud_init): for (script_name, script) in six.iteritems(cloud_init['boothooks']): _script = email.mime.text.MIMEText(script, 'cloud-boothook') _cloud_init.attach(_script) if ('scripts' in cloud_init): for (script_name, script) in six.iteritems(cloud_init['scripts']): _script = email.mime.text.MIMEText(script, 'x-shellscript') _cloud_init.attach(_script) if ('cloud-config' in cloud_init): cloud_config = cloud_init['cloud-config'] _cloud_config = email.mime.text.MIMEText(_safe_dump(cloud_config), 'cloud-config') _cloud_init.attach(_cloud_config) return _cloud_init.as_string()
[ "def", "get_cloud_init_mime", "(", "cloud_init", ")", ":", "if", "isinstance", "(", "cloud_init", ",", "six", ".", "string_types", ")", ":", "cloud_init", "=", "json", ".", "loads", "(", "cloud_init", ")", "_cloud_init", "=", "email", ".", "mime", ".", "multipart", ".", "MIMEMultipart", "(", ")", "if", "(", "'boothooks'", "in", "cloud_init", ")", ":", "for", "(", "script_name", ",", "script", ")", "in", "six", ".", "iteritems", "(", "cloud_init", "[", "'boothooks'", "]", ")", ":", "_script", "=", "email", ".", "mime", ".", "text", ".", "MIMEText", "(", "script", ",", "'cloud-boothook'", ")", "_cloud_init", ".", "attach", "(", "_script", ")", "if", "(", "'scripts'", "in", "cloud_init", ")", ":", "for", "(", "script_name", ",", "script", ")", "in", "six", ".", "iteritems", "(", "cloud_init", "[", "'scripts'", "]", ")", ":", "_script", "=", "email", ".", "mime", ".", "text", ".", "MIMEText", "(", "script", ",", "'x-shellscript'", ")", "_cloud_init", ".", "attach", "(", "_script", ")", "if", "(", "'cloud-config'", "in", "cloud_init", ")", ":", "cloud_config", "=", "cloud_init", "[", "'cloud-config'", "]", "_cloud_config", "=", "email", ".", "mime", ".", "text", ".", "MIMEText", "(", "_safe_dump", "(", "cloud_config", ")", ",", "'cloud-config'", ")", "_cloud_init", ".", "attach", "(", "_cloud_config", ")", "return", "_cloud_init", ".", "as_string", "(", ")" ]
get a mime multipart encoded string from a cloud-init dict .
train
false
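A minimal usage sketch; the dict keys mirror the three sections the function inspects, and the scripts are placeholders.

cloud_init = {
    'boothooks': {'hook': '#!/bin/sh\necho boothook'},
    'scripts': {'setup': '#!/bin/sh\necho setup'},
    'cloud-config': {'packages': ['git']},
}
mime_string = get_cloud_init_mime(cloud_init)   # multipart MIME text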
4,261
@pytest.mark.django_db def test_reject_suggestion_with_comment(client, request_users): Comment = get_comment_model() unit = Unit.objects.filter(suggestion__state='pending', state=UNTRANSLATED)[0] sugg = Suggestion.objects.filter(unit=unit, state='pending')[0] comment = 'This is a comment!' user = request_users['user'] if (user.username != 'nobody'): client.login(username=user.username, password=request_users['password']) url = ('/xhr/units/%d/suggestions/%d/' % (unit.id, sugg.id)) response = client.delete(url, ('comment=%s' % comment), HTTP_X_REQUESTED_WITH='XMLHttpRequest') can_reject = (check_permission('review', response.wsgi_request) or (sugg.user.id == user.id)) if can_reject: assert (response.status_code == 200) rejected_suggestion = Suggestion.objects.get(id=sugg.id) assert (rejected_suggestion.state == 'rejected') assert (Comment.objects.for_model(rejected_suggestion).get().comment == comment) else: assert (response.status_code == 403)
[ "@", "pytest", ".", "mark", ".", "django_db", "def", "test_reject_suggestion_with_comment", "(", "client", ",", "request_users", ")", ":", "Comment", "=", "get_comment_model", "(", ")", "unit", "=", "Unit", ".", "objects", ".", "filter", "(", "suggestion__state", "=", "'pending'", ",", "state", "=", "UNTRANSLATED", ")", "[", "0", "]", "sugg", "=", "Suggestion", ".", "objects", ".", "filter", "(", "unit", "=", "unit", ",", "state", "=", "'pending'", ")", "[", "0", "]", "comment", "=", "'This is a comment!'", "user", "=", "request_users", "[", "'user'", "]", "if", "(", "user", ".", "username", "!=", "'nobody'", ")", ":", "client", ".", "login", "(", "username", "=", "user", ".", "username", ",", "password", "=", "request_users", "[", "'password'", "]", ")", "url", "=", "(", "'/xhr/units/%d/suggestions/%d/'", "%", "(", "unit", ".", "id", ",", "sugg", ".", "id", ")", ")", "response", "=", "client", ".", "delete", "(", "url", ",", "(", "'comment=%s'", "%", "comment", ")", ",", "HTTP_X_REQUESTED_WITH", "=", "'XMLHttpRequest'", ")", "can_reject", "=", "(", "check_permission", "(", "'review'", ",", "response", ".", "wsgi_request", ")", "or", "(", "sugg", ".", "user", ".", "id", "==", "user", ".", "id", ")", ")", "if", "can_reject", ":", "assert", "(", "response", ".", "status_code", "==", "200", ")", "rejected_suggestion", "=", "Suggestion", ".", "objects", ".", "get", "(", "id", "=", "sugg", ".", "id", ")", "assert", "(", "rejected_suggestion", ".", "state", "==", "'rejected'", ")", "assert", "(", "Comment", ".", "objects", ".", "for_model", "(", "rejected_suggestion", ")", ".", "get", "(", ")", ".", "comment", "==", "comment", ")", "else", ":", "assert", "(", "response", ".", "status_code", "==", "403", ")" ]
tests suggestion can be rejected with a comment .
train
false
4,262
def init_test(options, testdir): locals_dict = locals().copy() globals_dict = globals().copy() locals_dict['testdir'] = testdir job = setup_job(options=options) locals_dict['job'] = job test_name = os.path.split(testdir)[(-1)] outputdir = os.path.join(job.resultdir, test_name) try: os.makedirs(outputdir) except OSError: pass locals_dict['outputdir'] = outputdir sys.path.insert(0, testdir) client_test = None try: import_stmt = ('import %s' % test_name) init_stmt = ('auto_test = %s.%s(job, testdir, outputdir)' % (test_name, test_name)) exec ((import_stmt + '\n') + init_stmt) in locals_dict, globals_dict client_test = globals_dict['auto_test'] except ImportError as e: if re.search(test_name, str(e)): pass else: logging.error(('%s import error: %s. Skipping %s' % (test_name, e, test_name))) except Exception as e: logging.error('%s: %s', test_name, e) finally: sys.path.pop(0) return client_test
[ "def", "init_test", "(", "options", ",", "testdir", ")", ":", "locals_dict", "=", "locals", "(", ")", ".", "copy", "(", ")", "globals_dict", "=", "globals", "(", ")", ".", "copy", "(", ")", "locals_dict", "[", "'testdir'", "]", "=", "testdir", "job", "=", "setup_job", "(", "options", "=", "options", ")", "locals_dict", "[", "'job'", "]", "=", "job", "test_name", "=", "os", ".", "path", ".", "split", "(", "testdir", ")", "[", "(", "-", "1", ")", "]", "outputdir", "=", "os", ".", "path", ".", "join", "(", "job", ".", "resultdir", ",", "test_name", ")", "try", ":", "os", ".", "makedirs", "(", "outputdir", ")", "except", "OSError", ":", "pass", "locals_dict", "[", "'outputdir'", "]", "=", "outputdir", "sys", ".", "path", ".", "insert", "(", "0", ",", "testdir", ")", "client_test", "=", "None", "try", ":", "import_stmt", "=", "(", "'import %s'", "%", "test_name", ")", "init_stmt", "=", "(", "'auto_test = %s.%s(job, testdir, outputdir)'", "%", "(", "test_name", ",", "test_name", ")", ")", "exec", "(", "(", "import_stmt", "+", "'\\n'", ")", "+", "init_stmt", ")", "in", "locals_dict", ",", "globals_dict", "client_test", "=", "globals_dict", "[", "'auto_test'", "]", "except", "ImportError", "as", "e", ":", "if", "re", ".", "search", "(", "test_name", ",", "str", "(", "e", ")", ")", ":", "pass", "else", ":", "logging", ".", "error", "(", "(", "'%s import error: %s. Skipping %s'", "%", "(", "test_name", ",", "e", ",", "test_name", ")", ")", ")", "except", "Exception", "as", "e", ":", "logging", ".", "error", "(", "'%s: %s'", ",", "test_name", ",", "e", ")", "finally", ":", "sys", ".", "path", ".", "pop", "(", "0", ")", "return", "client_test" ]
instantiate a client test object from a given test directory .
train
false
4,264
def _retrieve_dummy(job_path): pass
[ "def", "_retrieve_dummy", "(", "job_path", ")", ":", "pass" ]
dummy function for retrieving host and logs .
train
false
4,266
def validate_kwargs(fname, kwargs, compat_args): kwds = kwargs.copy() _check_for_invalid_keys(fname, kwargs, compat_args) _check_for_default_values(fname, kwds, compat_args)
[ "def", "validate_kwargs", "(", "fname", ",", "kwargs", ",", "compat_args", ")", ":", "kwds", "=", "kwargs", ".", "copy", "(", ")", "_check_for_invalid_keys", "(", "fname", ",", "kwargs", ",", "compat_args", ")", "_check_for_default_values", "(", "fname", ",", "kwds", ",", "compat_args", ")" ]
checks whether parameters passed to the **kwargs argument in a function fname are valid parameters as specified in *compat_args and whether or not they are set to their default values .
train
true
4,267
def _get_offset_param(params): offset = params.pop('offset', 0) return utils.validate_integer(offset, 'offset', 0, constants.DB_MAX_INT)
[ "def", "_get_offset_param", "(", "params", ")", ":", "offset", "=", "params", ".", "pop", "(", "'offset'", ",", "0", ")", "return", "utils", ".", "validate_integer", "(", "offset", ",", "'offset'", ",", "0", ",", "constants", ".", "DB_MAX_INT", ")" ]
extract offset id from the request's dictionary or fail .
train
false
4,268
def evaluate_deltas(e): accepted_functions = (Add,) if isinstance(e, accepted_functions): return e.func(*[evaluate_deltas(arg) for arg in e.args]) elif isinstance(e, Mul): deltas = [] indices = {} for i in e.args: for s in i.free_symbols: if (s in indices): indices[s] += 1 else: indices[s] = 0 if isinstance(i, KroneckerDelta): deltas.append(i) for d in deltas: if indices[d.killable_index]: e = e.subs(d.killable_index, d.preferred_index) if (len(deltas) > 1): return evaluate_deltas(e) elif (indices[d.preferred_index] and d.indices_contain_equal_information): e = e.subs(d.preferred_index, d.killable_index) if (len(deltas) > 1): return evaluate_deltas(e) else: pass return e else: return e
[ "def", "evaluate_deltas", "(", "e", ")", ":", "accepted_functions", "=", "(", "Add", ",", ")", "if", "isinstance", "(", "e", ",", "accepted_functions", ")", ":", "return", "e", ".", "func", "(", "*", "[", "evaluate_deltas", "(", "arg", ")", "for", "arg", "in", "e", ".", "args", "]", ")", "elif", "isinstance", "(", "e", ",", "Mul", ")", ":", "deltas", "=", "[", "]", "indices", "=", "{", "}", "for", "i", "in", "e", ".", "args", ":", "for", "s", "in", "i", ".", "free_symbols", ":", "if", "(", "s", "in", "indices", ")", ":", "indices", "[", "s", "]", "+=", "1", "else", ":", "indices", "[", "s", "]", "=", "0", "if", "isinstance", "(", "i", ",", "KroneckerDelta", ")", ":", "deltas", ".", "append", "(", "i", ")", "for", "d", "in", "deltas", ":", "if", "indices", "[", "d", ".", "killable_index", "]", ":", "e", "=", "e", ".", "subs", "(", "d", ".", "killable_index", ",", "d", ".", "preferred_index", ")", "if", "(", "len", "(", "deltas", ")", ">", "1", ")", ":", "return", "evaluate_deltas", "(", "e", ")", "elif", "(", "indices", "[", "d", ".", "preferred_index", "]", "and", "d", ".", "indices_contain_equal_information", ")", ":", "e", "=", "e", ".", "subs", "(", "d", ".", "preferred_index", ",", "d", ".", "killable_index", ")", "if", "(", "len", "(", "deltas", ")", ">", "1", ")", ":", "return", "evaluate_deltas", "(", "e", ")", "else", ":", "pass", "return", "e", "else", ":", "return", "e" ]
we evaluate kroneckerdelta symbols in the expression assuming einstein summation .
train
false
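A minimal sketch with SymPy objects (this helper mirrors sympy.physics.secondquant.evaluate_deltas); the delta collapses the index j onto i.

from sympy import Function, KroneckerDelta, symbols

i, j = symbols('i j')
f = Function('f')
evaluate_deltas(KroneckerDelta(i, j) * f(j))   # f(i)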
4,269
def get_ipython_package_dir(): ipdir = os.path.dirname(IPython.__file__) return py3compat.cast_unicode(ipdir, fs_encoding)
[ "def", "get_ipython_package_dir", "(", ")", ":", "ipdir", "=", "os", ".", "path", ".", "dirname", "(", "IPython", ".", "__file__", ")", "return", "py3compat", ".", "cast_unicode", "(", "ipdir", ",", "fs_encoding", ")" ]
get the base directory where ipython itself is installed .
train
true
4,271
@not_implemented_for('directed') def generalized_degree(G, nodes=None): if (nodes in G): return next(_triangles_and_degree_iter(G, nodes))[3] return {v: gd for (v, d, t, gd) in _triangles_and_degree_iter(G, nodes)}
[ "@", "not_implemented_for", "(", "'directed'", ")", "def", "generalized_degree", "(", "G", ",", "nodes", "=", "None", ")", ":", "if", "(", "nodes", "in", "G", ")", ":", "return", "next", "(", "_triangles_and_degree_iter", "(", "G", ",", "nodes", ")", ")", "[", "3", "]", "return", "{", "v", ":", "gd", "for", "(", "v", ",", "d", ",", "t", ",", "gd", ")", "in", "_triangles_and_degree_iter", "(", "G", ",", "nodes", ")", "}" ]
compute the generalized degree for nodes .
train
false
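A minimal usage sketch: in the complete graph K5 each of a node's 4 edges lies in 3 triangles, so all 4 edges fall into the bin for 3 triangles.

import networkx as nx

G = nx.complete_graph(5)
generalized_degree(G, 0)   # Counter({3: 4})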
4,272
def jail_path_configured(): try: jc = JailsConfiguration.objects.latest('id') except JailsConfiguration.DoesNotExist: jc = None return (jc and jc.jc_path and os.path.exists(jc.jc_path))
[ "def", "jail_path_configured", "(", ")", ":", "try", ":", "jc", "=", "JailsConfiguration", ".", "objects", ".", "latest", "(", "'id'", ")", "except", "JailsConfiguration", ".", "DoesNotExist", ":", "jc", "=", "None", "return", "(", "jc", "and", "jc", ".", "jc_path", "and", "os", ".", "path", ".", "exists", "(", "jc", ".", "jc_path", ")", ")" ]
check if the jail system is configured by looking at the jailsconfiguration model and jc_path field :returns: boolean .
train
false
4,273
@webob.dec.wsgify @util.check_accept('application/json') def list_usages(req): context = req.environ['placement.context'] uuid = util.wsgi_path_item(req.environ, 'uuid') try: resource_provider = objects.ResourceProvider.get_by_uuid(context, uuid) except exception.NotFound as exc: raise webob.exc.HTTPNotFound((_('No resource provider with uuid %(uuid)s found: %(error)s') % {'uuid': uuid, 'error': exc}), json_formatter=util.json_error_formatter) usage = objects.UsageList.get_all_by_resource_provider_uuid(context, uuid) response = req.response response.body = encodeutils.to_utf8(jsonutils.dumps(_serialize_usages(resource_provider, usage))) req.response.content_type = 'application/json' return req.response
[ "@", "webob", ".", "dec", ".", "wsgify", "@", "util", ".", "check_accept", "(", "'application/json'", ")", "def", "list_usages", "(", "req", ")", ":", "context", "=", "req", ".", "environ", "[", "'placement.context'", "]", "uuid", "=", "util", ".", "wsgi_path_item", "(", "req", ".", "environ", ",", "'uuid'", ")", "try", ":", "resource_provider", "=", "objects", ".", "ResourceProvider", ".", "get_by_uuid", "(", "context", ",", "uuid", ")", "except", "exception", ".", "NotFound", "as", "exc", ":", "raise", "webob", ".", "exc", ".", "HTTPNotFound", "(", "(", "_", "(", "'No resource provider with uuid %(uuid)s found: %(error)s'", ")", "%", "{", "'uuid'", ":", "uuid", ",", "'error'", ":", "exc", "}", ")", ",", "json_formatter", "=", "util", ".", "json_error_formatter", ")", "usage", "=", "objects", ".", "UsageList", ".", "get_all_by_resource_provider_uuid", "(", "context", ",", "uuid", ")", "response", "=", "req", ".", "response", "response", ".", "body", "=", "encodeutils", ".", "to_utf8", "(", "jsonutils", ".", "dumps", "(", "_serialize_usages", "(", "resource_provider", ",", "usage", ")", ")", ")", "req", ".", "response", ".", "content_type", "=", "'application/json'", "return", "req", ".", "response" ]
get a dictionary of resource provider usage by resource class .
train
false
4,274
def skipUnless(condition, reason): if (not condition): return skip(reason) return _id
[ "def", "skipUnless", "(", "condition", ",", "reason", ")", ":", "if", "(", "not", "condition", ")", ":", "return", "skip", "(", "reason", ")", "return", "_id" ]
skip a test unless the condition is true .
train
false
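A minimal usage sketch in the unittest style this helper mirrors; the test body is a placeholder.

import sys

@skipUnless(sys.platform.startswith('linux'), 'requires Linux')
def test_epoll_backend():
    pass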
4,275
def numpy_cupy_array_almost_equal_nulp(nulp=1, name='xp', type_check=True, accept_error=False): def check_func(x, y): array.assert_array_almost_equal_nulp(x, y, nulp) return _make_decorator(check_func, name, type_check, accept_error)
[ "def", "numpy_cupy_array_almost_equal_nulp", "(", "nulp", "=", "1", ",", "name", "=", "'xp'", ",", "type_check", "=", "True", ",", "accept_error", "=", "False", ")", ":", "def", "check_func", "(", "x", ",", "y", ")", ":", "array", ".", "assert_array_almost_equal_nulp", "(", "x", ",", "y", ",", "nulp", ")", "return", "_make_decorator", "(", "check_func", ",", "name", ",", "type_check", ",", "accept_error", ")" ]
decorator that checks results of numpy and cupy are equal w.r.t. nulp (units in the last place) .
train
false
4,276
def setlocale(category, locale=None): if (locale and (not isinstance(locale, _builtin_str))): locale = normalize(_build_localename(locale)) return _setlocale(category, locale)
[ "def", "setlocale", "(", "category", ",", "locale", "=", "None", ")", ":", "if", "(", "locale", "and", "(", "not", "isinstance", "(", "locale", ",", "_builtin_str", ")", ")", ")", ":", "locale", "=", "normalize", "(", "_build_localename", "(", "locale", ")", ")", "return", "_setlocale", "(", "category", ",", "locale", ")" ]
set the locale for the given category .
train
false
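A hedged usage sketch; passing locale=None only queries the current setting, while '' adopts the user's environment default (availability varies by platform).

import locale

current = setlocale(locale.LC_ALL)   # query without changing anything
setlocale(locale.LC_ALL, '')         # switch to the user's default locale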
4,277
def verify_signed_data(token, data): if data.startswith(MAC_MARKER): try: data = data[len(MAC_MARKER):] mac_data = json.loads(base64.b64decode(data)) mac = compute_mac(token, mac_data['serialized_data']) if (mac != mac_data['mac']): raise InvalidMacError(('invalid MAC; expect=%s, actual=%s' % (mac_data['mac'], mac))) return json.loads(mac_data['serialized_data']) except: raise InvalidMacError('invalid MAC; data appeared to be corrupted') else: return data
[ "def", "verify_signed_data", "(", "token", ",", "data", ")", ":", "if", "data", ".", "startswith", "(", "MAC_MARKER", ")", ":", "try", ":", "data", "=", "data", "[", "len", "(", "MAC_MARKER", ")", ":", "]", "mac_data", "=", "json", ".", "loads", "(", "base64", ".", "b64decode", "(", "data", ")", ")", "mac", "=", "compute_mac", "(", "token", ",", "mac_data", "[", "'serialized_data'", "]", ")", "if", "(", "mac", "!=", "mac_data", "[", "'mac'", "]", ")", ":", "raise", "InvalidMacError", "(", "(", "'invalid MAC; expect=%s, actual=%s'", "%", "(", "mac_data", "[", "'mac'", "]", ",", "mac", ")", ")", ")", "return", "json", ".", "loads", "(", "mac_data", "[", "'serialized_data'", "]", ")", "except", ":", "raise", "InvalidMacError", "(", "'invalid MAC; data appeared to be corrupted'", ")", "else", ":", "return", "data" ]
verify data integrity by ensuring mac is valid .
train
false
4,278
@require_POST @login_required def edit_priority(request, pk): source = get_object_or_404(Source, pk=pk) if (not can_edit_priority(request.user, source.subproject.project)): raise PermissionDenied() form = PriorityForm(request.POST) if form.is_valid(): source.priority = form.cleaned_data['priority'] source.save() else: messages.error(request, _('Failed to change a priority!')) return redirect(request.POST.get('next', source.get_absolute_url()))
[ "@", "require_POST", "@", "login_required", "def", "edit_priority", "(", "request", ",", "pk", ")", ":", "source", "=", "get_object_or_404", "(", "Source", ",", "pk", "=", "pk", ")", "if", "(", "not", "can_edit_priority", "(", "request", ".", "user", ",", "source", ".", "subproject", ".", "project", ")", ")", ":", "raise", "PermissionDenied", "(", ")", "form", "=", "PriorityForm", "(", "request", ".", "POST", ")", "if", "form", ".", "is_valid", "(", ")", ":", "source", ".", "priority", "=", "form", ".", "cleaned_data", "[", "'priority'", "]", "source", ".", "save", "(", ")", "else", ":", "messages", ".", "error", "(", "request", ",", "_", "(", "'Failed to change a priority!'", ")", ")", "return", "redirect", "(", "request", ".", "POST", ".", "get", "(", "'next'", ",", "source", ".", "get_absolute_url", "(", ")", ")", ")" ]
change source string priority .
train
false
4,279
@addon_view @non_atomic_requests def sources_series(request, addon, group, start, end, format): date_range = check_series_params_or_404(group, start, end, format) check_stats_permission(request, addon) series = get_series(DownloadCount, source='sources', addon=addon.id, date__range=date_range) if (format == 'csv'): (series, fields) = csv_fields(series) return render_csv(request, addon, series, (['date', 'count'] + list(fields))) elif (format == 'json'): return render_json(request, addon, series)
[ "@", "addon_view", "@", "non_atomic_requests", "def", "sources_series", "(", "request", ",", "addon", ",", "group", ",", "start", ",", "end", ",", "format", ")", ":", "date_range", "=", "check_series_params_or_404", "(", "group", ",", "start", ",", "end", ",", "format", ")", "check_stats_permission", "(", "request", ",", "addon", ")", "series", "=", "get_series", "(", "DownloadCount", ",", "source", "=", "'sources'", ",", "addon", "=", "addon", ".", "id", ",", "date__range", "=", "date_range", ")", "if", "(", "format", "==", "'csv'", ")", ":", "(", "series", ",", "fields", ")", "=", "csv_fields", "(", "series", ")", "return", "render_csv", "(", "request", ",", "addon", ",", "series", ",", "(", "[", "'date'", ",", "'count'", "]", "+", "list", "(", "fields", ")", ")", ")", "elif", "(", "format", "==", "'json'", ")", ":", "return", "render_json", "(", "request", ",", "addon", ",", "series", ")" ]
generate download source breakdown .
train
false
4,280
def bdist_wininst_arch(pyver, arch): if os.path.exists('build'): shutil.rmtree('build') _bdist_wininst(pyver, SITECFG[arch])
[ "def", "bdist_wininst_arch", "(", "pyver", ",", "arch", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "'build'", ")", ":", "shutil", ".", "rmtree", "(", "'build'", ")", "_bdist_wininst", "(", "pyver", ",", "SITECFG", "[", "arch", "]", ")" ]
arch specific wininst build .
train
false
4,283
def _get_record(xapi, rectype, uuid): return getattr(xapi, rectype).get_record(uuid)
[ "def", "_get_record", "(", "xapi", ",", "rectype", ",", "uuid", ")", ":", "return", "getattr", "(", "xapi", ",", "rectype", ")", ".", "get_record", "(", "uuid", ")" ]
gets the record object of the given record type for a given uuid .
train
false
4,285
def _report_not_exist(source_dir, path, blade): depender = _find_dir_depender(source_dir, blade) if depender: console.error_exit(('//%s not found, required by %s, exit...' % (path, depender))) else: console.error_exit(('//%s not found, exit...' % path))
[ "def", "_report_not_exist", "(", "source_dir", ",", "path", ",", "blade", ")", ":", "depender", "=", "_find_dir_depender", "(", "source_dir", ",", "blade", ")", "if", "depender", ":", "console", ".", "error_exit", "(", "(", "'//%s not found, required by %s, exit...'", "%", "(", "path", ",", "depender", ")", ")", ")", "else", ":", "console", ".", "error_exit", "(", "(", "'//%s not found, exit...'", "%", "path", ")", ")" ]
report dir or build file does not exist .
train
false
4,286
def setup_workers(num_cpus, outdir, server_socket, verbose=True, error_profile=None): DENOISE_WORKER = 'denoiser_worker.py' workers = [] client_sockets = [] tmpname = ''.join(sample(list(lowercase), 8)) (host, port) = server_socket.getsockname() for i in range(num_cpus): name = (outdir + ('/%sworker%d' % (tmpname, i))) workers.append(name) cmd = ('%s -f %s -s %s -p %s' % (DENOISE_WORKER, name, host, port)) if verbose: cmd += ' -v' if error_profile: cmd += (' -e %s' % error_profile) submit_jobs([cmd], tmpname) (client_socket, client_address) = server_socket.accept() client_sockets.append((client_socket, client_address)) return (workers, client_sockets)
[ "def", "setup_workers", "(", "num_cpus", ",", "outdir", ",", "server_socket", ",", "verbose", "=", "True", ",", "error_profile", "=", "None", ")", ":", "DENOISE_WORKER", "=", "'denoiser_worker.py'", "workers", "=", "[", "]", "client_sockets", "=", "[", "]", "tmpname", "=", "''", ".", "join", "(", "sample", "(", "list", "(", "lowercase", ")", ",", "8", ")", ")", "(", "host", ",", "port", ")", "=", "server_socket", ".", "getsockname", "(", ")", "for", "i", "in", "range", "(", "num_cpus", ")", ":", "name", "=", "(", "outdir", "+", "(", "'/%sworker%d'", "%", "(", "tmpname", ",", "i", ")", ")", ")", "workers", ".", "append", "(", "name", ")", "cmd", "=", "(", "'%s -f %s -s %s -p %s'", "%", "(", "DENOISE_WORKER", ",", "name", ",", "host", ",", "port", ")", ")", "if", "verbose", ":", "cmd", "+=", "' -v'", "if", "error_profile", ":", "cmd", "+=", "(", "' -e %s'", "%", "error_profile", ")", "submit_jobs", "(", "[", "cmd", "]", ",", "tmpname", ")", "(", "client_socket", ",", "client_address", ")", "=", "server_socket", ".", "accept", "(", ")", "client_sockets", ".", "append", "(", "(", "client_socket", ",", "client_address", ")", ")", "return", "(", "workers", ",", "client_sockets", ")" ]
start workers waiting for data .
train
false
4,287
def _paint_path(fill, stroke): if stroke: if fill: return Op.fill_stroke else: return Op.stroke elif fill: return Op.fill else: return Op.endpath
[ "def", "_paint_path", "(", "fill", ",", "stroke", ")", ":", "if", "stroke", ":", "if", "fill", ":", "return", "Op", ".", "fill_stroke", "else", ":", "return", "Op", ".", "stroke", "elif", "fill", ":", "return", "Op", ".", "fill", "else", ":", "return", "Op", ".", "endpath" ]
return the pdf operator to paint a path: fill fills the path with the fill color; stroke strokes the outline of the path with the line color .
train
false
4,288
def getIndex(form, pos='noun'): def trySubstitutions(trySubstitutions, form, substitutions, lookup=1, dictionary=dictionaryFor(pos)): if (lookup and dictionary.has_key(form)): return dictionary[form] elif substitutions: (old, new) = substitutions[0] substitute = string.replace(form, old, new) if ((substitute != form) and dictionary.has_key(substitute)): return dictionary[substitute] return (trySubstitutions(trySubstitutions, form, substitutions[1:], lookup=0) or ((substitute != form) and trySubstitutions(trySubstitutions, substitute, substitutions[1:]))) return trySubstitutions(trySubstitutions, form, GET_INDEX_SUBSTITUTIONS)
[ "def", "getIndex", "(", "form", ",", "pos", "=", "'noun'", ")", ":", "def", "trySubstitutions", "(", "trySubstitutions", ",", "form", ",", "substitutions", ",", "lookup", "=", "1", ",", "dictionary", "=", "dictionaryFor", "(", "pos", ")", ")", ":", "if", "(", "lookup", "and", "dictionary", ".", "has_key", "(", "form", ")", ")", ":", "return", "dictionary", "[", "form", "]", "elif", "substitutions", ":", "(", "old", ",", "new", ")", "=", "substitutions", "[", "0", "]", "substitute", "=", "string", ".", "replace", "(", "form", ",", "old", ",", "new", ")", "if", "(", "(", "substitute", "!=", "form", ")", "and", "dictionary", ".", "has_key", "(", "substitute", ")", ")", ":", "return", "dictionary", "[", "substitute", "]", "return", "(", "trySubstitutions", "(", "trySubstitutions", ",", "form", ",", "substitutions", "[", "1", ":", "]", ",", "lookup", "=", "0", ")", "or", "(", "(", "substitute", "!=", "form", ")", "and", "trySubstitutions", "(", "trySubstitutions", ",", "substitute", ",", "substitutions", "[", "1", ":", "]", ")", ")", ")", "return", "trySubstitutions", "(", "trySubstitutions", ",", "form", ",", "GET_INDEX_SUBSTITUTIONS", ")" ]
search for _form_ in the index file corresponding to _pos_ .
train
false
4,289
def highlight_string(source, unit): if (unit is None): return [] highlights = [] for check in CHECKS: if (not CHECKS[check].target): continue highlights += CHECKS[check].check_highlight(source, unit) highlights.sort(key=(lambda x: x[0])) for hl_idx in range(0, len(highlights)): if (hl_idx >= len(highlights)): break elref = highlights[hl_idx] for hl_idx_next in range((hl_idx + 1), len(highlights)): if (hl_idx_next >= len(highlights)): break eltest = highlights[hl_idx_next] if ((eltest[0] >= elref[0]) and (eltest[0] < elref[1])): highlights.pop(hl_idx_next) elif (eltest[0] > elref[1]): break return highlights
[ "def", "highlight_string", "(", "source", ",", "unit", ")", ":", "if", "(", "unit", "is", "None", ")", ":", "return", "[", "]", "highlights", "=", "[", "]", "for", "check", "in", "CHECKS", ":", "if", "(", "not", "CHECKS", "[", "check", "]", ".", "target", ")", ":", "continue", "highlights", "+=", "CHECKS", "[", "check", "]", ".", "check_highlight", "(", "source", ",", "unit", ")", "highlights", ".", "sort", "(", "key", "=", "(", "lambda", "x", ":", "x", "[", "0", "]", ")", ")", "for", "hl_idx", "in", "range", "(", "0", ",", "len", "(", "highlights", ")", ")", ":", "if", "(", "hl_idx", ">=", "len", "(", "highlights", ")", ")", ":", "break", "elref", "=", "highlights", "[", "hl_idx", "]", "for", "hl_idx_next", "in", "range", "(", "(", "hl_idx", "+", "1", ")", ",", "len", "(", "highlights", ")", ")", ":", "if", "(", "hl_idx_next", ">=", "len", "(", "highlights", ")", ")", ":", "break", "eltest", "=", "highlights", "[", "hl_idx_next", "]", "if", "(", "(", "eltest", "[", "0", "]", ">=", "elref", "[", "0", "]", ")", "and", "(", "eltest", "[", "0", "]", "<", "elref", "[", "1", "]", ")", ")", ":", "highlights", ".", "pop", "(", "hl_idx_next", ")", "elif", "(", "eltest", "[", "0", "]", ">", "elref", "[", "1", "]", ")", ":", "break", "return", "highlights" ]
returns highlights for a string .
train
false
4,290
def quote_sheetname(sheetname): if ((not sheetname.isalnum()) and (not sheetname.startswith("'"))): sheetname = sheetname.replace("'", "''") sheetname = ("'%s'" % sheetname) return sheetname
[ "def", "quote_sheetname", "(", "sheetname", ")", ":", "if", "(", "(", "not", "sheetname", ".", "isalnum", "(", ")", ")", "and", "(", "not", "sheetname", ".", "startswith", "(", "\"'\"", ")", ")", ")", ":", "sheetname", "=", "sheetname", ".", "replace", "(", "\"'\"", ",", "\"''\"", ")", "sheetname", "=", "(", "\"'%s'\"", "%", "sheetname", ")", "return", "sheetname" ]
convert a worksheet name to a quoted name if it contains spaces or special characters .
train
false
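A minimal usage sketch covering the three cases the function distinguishes.

quote_sheetname('Sheet1')       # 'Sheet1' (alphanumeric, left unquoted)
quote_sheetname('Sales Data')   # "'Sales Data'"
quote_sheetname("It's")         # "'It''s'" (embedded apostrophe doubled)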
4,291
def colormaps(): return sorted(cm.cmap_d)
[ "def", "colormaps", "(", ")", ":", "return", "sorted", "(", "cm", ".", "cmap_d", ")" ]
matplotlib provides the following colormaps .
train
false
4,292
def obj_to_ref(obj): try: ref = ('%s:%s' % (obj.__module__, get_callable_name(obj))) obj2 = ref_to_obj(ref) if (obj != obj2): raise ValueError except Exception: raise ValueError(('Cannot determine the reference to %r' % obj)) return ref
[ "def", "obj_to_ref", "(", "obj", ")", ":", "try", ":", "ref", "=", "(", "'%s:%s'", "%", "(", "obj", ".", "__module__", ",", "get_callable_name", "(", "obj", ")", ")", ")", "obj2", "=", "ref_to_obj", "(", "ref", ")", "if", "(", "obj", "!=", "obj2", ")", ":", "raise", "ValueError", "except", "Exception", ":", "raise", "ValueError", "(", "(", "'Cannot determine the reference to %r'", "%", "obj", ")", ")", "return", "ref" ]
returns the path to the given object .
train
false
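A minimal usage sketch; ref_to_obj and get_callable_name are the companion helpers the snippet itself calls, assumed importable alongside it.

import json

ref = obj_to_ref(json.loads)   # 'json:loads'
assert ref_to_obj(ref) is json.loads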
4,294
def _model_to_fit_params(model): fitparam_indices = list(range(len(model.param_names))) if (any(model.fixed.values()) or any(model.tied.values())): params = list(model.parameters) param_metrics = model._param_metrics for (idx, name) in list(enumerate(model.param_names))[::(-1)]: if (model.fixed[name] or model.tied[name]): slice_ = param_metrics[name][u'slice'] del params[slice_] del fitparam_indices[idx] return (np.array(params), fitparam_indices) else: return (model.parameters, fitparam_indices)
[ "def", "_model_to_fit_params", "(", "model", ")", ":", "fitparam_indices", "=", "list", "(", "range", "(", "len", "(", "model", ".", "param_names", ")", ")", ")", "if", "(", "any", "(", "model", ".", "fixed", ".", "values", "(", ")", ")", "or", "any", "(", "model", ".", "tied", ".", "values", "(", ")", ")", ")", ":", "params", "=", "list", "(", "model", ".", "parameters", ")", "param_metrics", "=", "model", ".", "_param_metrics", "for", "(", "idx", ",", "name", ")", "in", "list", "(", "enumerate", "(", "model", ".", "param_names", ")", ")", "[", ":", ":", "(", "-", "1", ")", "]", ":", "if", "(", "model", ".", "fixed", "[", "name", "]", "or", "model", ".", "tied", "[", "name", "]", ")", ":", "slice_", "=", "param_metrics", "[", "name", "]", "[", "u'slice'", "]", "del", "params", "[", "slice_", "]", "del", "fitparam_indices", "[", "idx", "]", "return", "(", "np", ".", "array", "(", "params", ")", ",", "fitparam_indices", ")", "else", ":", "return", "(", "model", ".", "parameters", ",", "fitparam_indices", ")" ]
convert a model instance's parameter array to an array that can be used with a fitter that doesn't natively support fixed or tied parameters .
train
false
4,296
def test_replace_update_column_via_setitem(): a = ([1, 2] * u.m) b = [3, 4] t = table.QTable([a, b], names=['a', 'b']) assert isinstance(t['a'], u.Quantity) ta = t['a'] t['a'] = (5 * u.m) assert np.all((t['a'] == ([5, 5] * u.m))) assert (t['a'] is ta) t['a'] = [5, 6] assert np.all((t['a'] == [5, 6])) assert isinstance(t['a'], table.Column) assert (t['a'] is not ta)
[ "def", "test_replace_update_column_via_setitem", "(", ")", ":", "a", "=", "(", "[", "1", ",", "2", "]", "*", "u", ".", "m", ")", "b", "=", "[", "3", ",", "4", "]", "t", "=", "table", ".", "QTable", "(", "[", "a", ",", "b", "]", ",", "names", "=", "[", "'a'", ",", "'b'", "]", ")", "assert", "isinstance", "(", "t", "[", "'a'", "]", ",", "u", ".", "Quantity", ")", "ta", "=", "t", "[", "'a'", "]", "t", "[", "'a'", "]", "=", "(", "5", "*", "u", ".", "m", ")", "assert", "np", ".", "all", "(", "(", "t", "[", "'a'", "]", "==", "(", "[", "5", ",", "5", "]", "*", "u", ".", "m", ")", ")", ")", "assert", "(", "t", "[", "'a'", "]", "is", "ta", ")", "t", "[", "'a'", "]", "=", "[", "5", ",", "6", "]", "assert", "np", ".", "all", "(", "(", "t", "[", "'a'", "]", "==", "[", "5", ",", "6", "]", ")", ")", "assert", "isinstance", "(", "t", "[", "'a'", "]", ",", "table", ".", "Column", ")", "assert", "(", "t", "[", "'a'", "]", "is", "not", "ta", ")" ]
test table update like t['a'] = value .
train
false
4,297
def floored_twelfth_of_a_360_day_year(date): timetuple = date.timetuple() year = timetuple.tm_year day_of_year = timetuple.tm_yday month0 = floor(((day_of_year / 360) * 12)) return ((((year - start_year) * 12) + month0) - start_month_0_indexed)
[ "def", "floored_twelfth_of_a_360_day_year", "(", "date", ")", ":", "timetuple", "=", "date", ".", "timetuple", "(", ")", "year", "=", "timetuple", ".", "tm_year", "day_of_year", "=", "timetuple", ".", "tm_yday", "month0", "=", "floor", "(", "(", "(", "day_of_year", "/", "360", ")", "*", "12", ")", ")", "return", "(", "(", "(", "(", "year", "-", "start_year", ")", "*", "12", ")", "+", "month0", ")", "-", "start_month_0_indexed", ")" ]
this function converts a date to a month number by flooring to the nearest 12th of a 360 day year .
train
false
4,298
@cache_permission def can_upload_dictionary(user, project): return check_permission(user, project, 'trans.upload_dictionary')
[ "@", "cache_permission", "def", "can_upload_dictionary", "(", "user", ",", "project", ")", ":", "return", "check_permission", "(", "user", ",", "project", ",", "'trans.upload_dictionary'", ")" ]
checks whether user can upload dictionary for given project .
train
false
4,299
def GetDebugger(): try: import pywin.debugger return pywin.debugger except ImportError: return None
[ "def", "GetDebugger", "(", ")", ":", "try", ":", "import", "pywin", ".", "debugger", "return", "pywin", ".", "debugger", "except", "ImportError", ":", "return", "None" ]
get the default python debugger .
train
false
4,300
def apply_trans(trans, pts, move=True): if isinstance(trans, dict): trans = trans['trans'] trans = np.asarray(trans) pts = np.asarray(pts) if (pts.size == 0): return pts.copy() out_pts = np.dot(pts, trans[:3, :3].T) if (move is True): transl = trans[:3, 3] if np.any((transl != 0)): out_pts += transl return out_pts
[ "def", "apply_trans", "(", "trans", ",", "pts", ",", "move", "=", "True", ")", ":", "if", "isinstance", "(", "trans", ",", "dict", ")", ":", "trans", "=", "trans", "[", "'trans'", "]", "trans", "=", "np", ".", "asarray", "(", "trans", ")", "pts", "=", "np", ".", "asarray", "(", "pts", ")", "if", "(", "pts", ".", "size", "==", "0", ")", ":", "return", "pts", ".", "copy", "(", ")", "out_pts", "=", "np", ".", "dot", "(", "pts", ",", "trans", "[", ":", "3", ",", ":", "3", "]", ".", "T", ")", "if", "(", "move", "is", "True", ")", ":", "transl", "=", "trans", "[", ":", "3", ",", "3", "]", "if", "np", ".", "any", "(", "(", "transl", "!=", "0", ")", ")", ":", "out_pts", "+=", "transl", "return", "out_pts" ]
apply a transform matrix to an array of points .
train
false
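A minimal usage sketch: a 4x4 matrix that is pure translation shifts every point by +1 along x.

import numpy as np

trans = np.eye(4)
trans[0, 3] = 1.0
pts = np.array([[0., 0., 0.], [1., 2., 3.]])
apply_trans(trans, pts)   # [[1., 0., 0.], [2., 2., 3.]]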
4,301
def pportOut(data): global dataReg port.DlPortWritePortUchar(baseAddress, data) dataReg = data
[ "def", "pportOut", "(", "data", ")", ":", "global", "dataReg", "port", ".", "DlPortWritePortUchar", "(", "baseAddress", ",", "data", ")", "dataReg", "=", "data" ]
data output function .
train
false
4,302
def list_frameworks(): sys.stdout.write(('Testable frameworks: %s\n\nNote that membership in this list means the framework can be tested with\nPyMongo, not necessarily that it is officially supported.\n' % ', '.join(sorted(FRAMEWORKS))))
[ "def", "list_frameworks", "(", ")", ":", "sys", ".", "stdout", ".", "write", "(", "(", "'Testable frameworks: %s\\n\\nNote that membership in this list means the framework can be tested with\\nPyMongo, not necessarily that it is officially supported.\\n'", "%", "', '", ".", "join", "(", "sorted", "(", "FRAMEWORKS", ")", ")", ")", ")" ]
tell the user what framework names are valid .
train
false
4,304
def pr_remove_from_role(role_id, pe_id): atable = current.s3db.pr_affiliation query = ((atable.role_id == role_id) & (atable.pe_id == pe_id)) affiliation = current.db(query).select(atable.id, limitby=(0, 1)).first() if (affiliation is not None): deleted_fk = {'role_id': role_id, 'pe_id': pe_id} data = {'deleted': True, 'role_id': None, 'pe_id': None, 'deleted_fk': json.dumps(deleted_fk)} affiliation.update_record(**data) pr_rebuild_path(pe_id, clear=True) return
[ "def", "pr_remove_from_role", "(", "role_id", ",", "pe_id", ")", ":", "atable", "=", "current", ".", "s3db", ".", "pr_affiliation", "query", "=", "(", "(", "atable", ".", "role_id", "==", "role_id", ")", "&", "(", "atable", ".", "pe_id", "==", "pe_id", ")", ")", "affiliation", "=", "current", ".", "db", "(", "query", ")", ".", "select", "(", "atable", ".", "id", ",", "limitby", "=", "(", "0", ",", "1", ")", ")", ".", "first", "(", ")", "if", "(", "affiliation", "is", "not", "None", ")", ":", "deleted_fk", "=", "{", "'role_id'", ":", "role_id", ",", "'pe_id'", ":", "pe_id", "}", "data", "=", "{", "'deleted'", ":", "True", ",", "'role_id'", ":", "None", ",", "'pe_id'", ":", "None", ",", "'deleted_fk'", ":", "json", ".", "dumps", "(", "deleted_fk", ")", "}", "affiliation", ".", "update_record", "(", "**", "data", ")", "pr_rebuild_path", "(", "pe_id", ",", "clear", "=", "True", ")", "return" ]
back-end method to remove a person entity from a role .
train
false
4,307
def sha_to_hex(sha): hexsha = binascii.hexlify(sha) assert (len(hexsha) == 40), ('Incorrect length of sha1 string: %d' % len(hexsha)) return hexsha
[ "def", "sha_to_hex", "(", "sha", ")", ":", "hexsha", "=", "binascii", ".", "hexlify", "(", "sha", ")", "assert", "(", "len", "(", "hexsha", ")", "==", "40", ")", ",", "(", "'Incorrect length of sha1 string: %d'", "%", "len", "(", "hexsha", ")", ")", "return", "hexsha" ]
takes a string and returns the hex of the sha within .
train
false
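A minimal usage sketch; on Python 3 binascii.hexlify returns bytes, so the result would be b'...' there.

import hashlib

raw = hashlib.sha1(b'hello').digest()   # 20 raw bytes
sha_to_hex(raw)   # 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'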
4,308
def assert_attribute_matches(output, path, attribute, expression): xml = xml_find(output, path) attribute_value = xml.attrib[attribute] if (re.match(expression, attribute_value) is None): errmsg = ("Expected attribute '%s' on element with path '%s' to match '%s', instead attribute value was '%s'." % (attribute, path, expression, attribute_value)) raise AssertionError(errmsg)
[ "def", "assert_attribute_matches", "(", "output", ",", "path", ",", "attribute", ",", "expression", ")", ":", "xml", "=", "xml_find", "(", "output", ",", "path", ")", "attribute_value", "=", "xml", ".", "attrib", "[", "attribute", "]", "if", "(", "re", ".", "match", "(", "expression", ",", "attribute_value", ")", "is", "None", ")", ":", "errmsg", "=", "(", "\"Expected attribute '%s' on element with path '%s' to match '%s', instead attribute value was '%s'.\"", "%", "(", "attribute", ",", "path", ",", "expression", ",", "attribute_value", ")", ")", "raise", "AssertionError", "(", "errmsg", ")" ]
asserts the specified attribute of the first element matching the specified path matches the specified regular expression .
train
false