id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
25,000
@pytest.mark.skipif('not HAS_BEAUTIFUL_SOUP') def test_backend_parsers(): for parser in ('lxml', 'xml', 'html.parser', 'html5lib'): try: table = Table.read('t/html2.html', format='ascii.html', htmldict={'parser': parser}, guess=False) except FeatureNotFound: if (parser == 'html.parser'): raise with pytest.raises(FeatureNotFound): Table.read('t/html2.html', format='ascii.html', htmldict={'parser': 'foo'}, guess=False)
[ "@", "pytest", ".", "mark", ".", "skipif", "(", "'not HAS_BEAUTIFUL_SOUP'", ")", "def", "test_backend_parsers", "(", ")", ":", "for", "parser", "in", "(", "'lxml'", ",", "'xml'", ",", "'html.parser'", ",", "'html5lib'", ")", ":", "try", ":", "table", "=", ...
make sure the user can specify which back-end parser to use and that an error is raised if the parser is invalid .
train
false
25,004
def set_cookie_data(storage, messages, invalid=False, encode_empty=False): encoded_data = storage._encode(messages, encode_empty=encode_empty) if invalid: encoded_data = encoded_data[1:] storage.request.COOKIES = {CookieStorage.cookie_name: encoded_data} if hasattr(storage, '_loaded_data'): del storage._loaded_data
[ "def", "set_cookie_data", "(", "storage", ",", "messages", ",", "invalid", "=", "False", ",", "encode_empty", "=", "False", ")", ":", "encoded_data", "=", "storage", ".", "_encode", "(", "messages", ",", "encode_empty", "=", "encode_empty", ")", "if", "inval...
set the encoded message cookie data on the request .
train
false
25,005
def compile_tree(): vstr = '.'.join(map(str, sys.version_info[:2])) stat = os.system(('python %s/lib/python%s/compileall.py .' % (sys.prefix, vstr))) if stat: msg = '*** ERROR: Some Python files in tree do NOT compile! ***\n' msg += 'See messages above for the actual file that produced it.\n' raise SystemExit(msg)
[ "def", "compile_tree", "(", ")", ":", "vstr", "=", "'.'", ".", "join", "(", "map", "(", "str", ",", "sys", ".", "version_info", "[", ":", "2", "]", ")", ")", "stat", "=", "os", ".", "system", "(", "(", "'python %s/lib/python%s/compileall.py .'", "%", ...
compile all python files below current directory .
train
false
25,006
def get_words_by_content(filename): f_in = join(TEST_DATA_PATH, filename) ext = splitext(f_in)[1] with open(f_in, 'r') as infile: content = infile.read() return get_words(content=content, extension=ext)
[ "def", "get_words_by_content", "(", "filename", ")", ":", "f_in", "=", "join", "(", "TEST_DATA_PATH", ",", "filename", ")", "ext", "=", "splitext", "(", "f_in", ")", "[", "1", "]", "with", "open", "(", "f_in", ",", "'r'", ")", "as", "infile", ":", "c...
test get_words from content in filename .
train
false
25,008
def execute_all_tasks(taskqueue, queue='default', handlers_map=None): tasks = taskqueue.GetTasks(queue) taskqueue.FlushQueue(queue) task_run_counts = collections.defaultdict((lambda : 0)) for task in tasks: retries = 0 while True: try: handler = execute_task(task, retries, handlers_map=handlers_map) task_run_counts[handler.__class__] += 1 break except: retries += 1 if (retries > 100): logging.debug('Task %s failed for too many times. Giving up.', task['name']) raise logging.debug('Task %s is being retried for the %s time', task['name'], retries) return task_run_counts
[ "def", "execute_all_tasks", "(", "taskqueue", ",", "queue", "=", "'default'", ",", "handlers_map", "=", "None", ")", ":", "tasks", "=", "taskqueue", ".", "GetTasks", "(", "queue", ")", "taskqueue", ".", "FlushQueue", "(", "queue", ")", "task_run_counts", "="...
run and remove all tasks in the taskqueue .
train
false
25,011
def _depth_first_search(set_tasks, current_task, visited): visited.add(current_task) if (current_task in set_tasks['still_pending_not_ext']): upstream_failure = False upstream_missing_dependency = False upstream_run_by_other_worker = False upstream_scheduling_error = False for task in current_task._requires(): if (task not in visited): _depth_first_search(set_tasks, task, visited) if ((task in set_tasks['ever_failed']) or (task in set_tasks['upstream_failure'])): set_tasks['upstream_failure'].add(current_task) upstream_failure = True if ((task in set_tasks['still_pending_ext']) or (task in set_tasks['upstream_missing_dependency'])): set_tasks['upstream_missing_dependency'].add(current_task) upstream_missing_dependency = True if ((task in set_tasks['run_by_other_worker']) or (task in set_tasks['upstream_run_by_other_worker'])): set_tasks['upstream_run_by_other_worker'].add(current_task) upstream_run_by_other_worker = True if (task in set_tasks['scheduling_error']): set_tasks['upstream_scheduling_error'].add(current_task) upstream_scheduling_error = True if ((not upstream_failure) and (not upstream_missing_dependency) and (not upstream_run_by_other_worker) and (not upstream_scheduling_error) and (current_task not in set_tasks['run_by_other_worker'])): set_tasks['not_run'].add(current_task)
[ "def", "_depth_first_search", "(", "set_tasks", ",", "current_task", ",", "visited", ")", ":", "visited", ".", "add", "(", "current_task", ")", "if", "(", "current_task", "in", "set_tasks", "[", "'still_pending_not_ext'", "]", ")", ":", "upstream_failure", "=", ...
this dfs checks why tasks are still pending .
train
true
25,012
def _gpa11iterator(handle): for inline in handle: if (inline[0] == '!'): continue inrec = inline.rstrip('\n').split(' DCTB ') if (len(inrec) == 1): continue inrec[2] = inrec[2].split('|') inrec[4] = inrec[4].split('|') inrec[6] = inrec[6].split('|') inrec[10] = inrec[10].split('|') (yield dict(zip(GPA11FIELDS, inrec)))
[ "def", "_gpa11iterator", "(", "handle", ")", ":", "for", "inline", "in", "handle", ":", "if", "(", "inline", "[", "0", "]", "==", "'!'", ")", ":", "continue", "inrec", "=", "inline", ".", "rstrip", "(", "'\\n'", ")", ".", "split", "(", "' DCTB '", ...
read gpa 1.1 format records .
train
false
25,013
def polygon_area(pr, pc): pr = np.asarray(pr) pc = np.asarray(pc) return (0.5 * np.abs(np.sum(((pc[:(-1)] * pr[1:]) - (pc[1:] * pr[:(-1)])))))
[ "def", "polygon_area", "(", "pr", ",", "pc", ")", ":", "pr", "=", "np", ".", "asarray", "(", "pr", ")", "pc", "=", "np", ".", "asarray", "(", "pc", ")", "return", "(", "0.5", "*", "np", ".", "abs", "(", "np", ".", "sum", "(", "(", "(", "pc"...
compute the area of a polygon .
train
false
25,014
def _filtered_gens(poly, symbol): gens = {g for g in poly.gens if (symbol in g.free_symbols)} for g in list(gens): ag = (1 / g) if ((g in gens) and (ag in gens)): if (ag.as_numer_denom()[1] is not S.One): g = ag gens.remove(g) return gens
[ "def", "_filtered_gens", "(", "poly", ",", "symbol", ")", ":", "gens", "=", "{", "g", "for", "g", "in", "poly", ".", "gens", "if", "(", "symbol", "in", "g", ".", "free_symbols", ")", "}", "for", "g", "in", "list", "(", "gens", ")", ":", "ag", "...
process the generators of poly .
train
false
25,015
def assert_no_element_by_xpath_selector(context, xpath, wait_time=MAX_WAIT_FOR_UNEXPECTED_ELEMENT): _assert_no_element_by(context, By.XPATH, xpath, wait_time)
[ "def", "assert_no_element_by_xpath_selector", "(", "context", ",", "xpath", ",", "wait_time", "=", "MAX_WAIT_FOR_UNEXPECTED_ELEMENT", ")", ":", "_assert_no_element_by", "(", "context", ",", "By", ".", "XPATH", ",", "xpath", ",", "wait_time", ")" ]
assert that no element is found .
train
false
25,016
def LineTextInCurrentBuffer(line_number): return vim.current.buffer[(line_number - 1)]
[ "def", "LineTextInCurrentBuffer", "(", "line_number", ")", ":", "return", "vim", ".", "current", ".", "buffer", "[", "(", "line_number", "-", "1", ")", "]" ]
returns the text on the 1-indexed line .
train
false
25,017
def test_cache_clear_deactivated(config_stub, tmpdir): config_stub.data = {'storage': {'cache-size': 1024}, 'general': {'private-browsing': True}} disk_cache = cache.DiskCache(str(tmpdir)) assert (disk_cache.clear() is None)
[ "def", "test_cache_clear_deactivated", "(", "config_stub", ",", "tmpdir", ")", ":", "config_stub", ".", "data", "=", "{", "'storage'", ":", "{", "'cache-size'", ":", "1024", "}", ",", "'general'", ":", "{", "'private-browsing'", ":", "True", "}", "}", "disk_...
test method clear() on deactivated cache .
train
false
25,018
@contextlib.contextmanager def _grpc_catch_rendezvous(): try: (yield) except exceptions.GrpcRendezvous as exc: error_code = exc.code() error_class = _GRPC_ERROR_MAPPING.get(error_code) if (error_class is None): raise else: raise error_class(exc.details())
[ "@", "contextlib", ".", "contextmanager", "def", "_grpc_catch_rendezvous", "(", ")", ":", "try", ":", "(", "yield", ")", "except", "exceptions", ".", "GrpcRendezvous", "as", "exc", ":", "error_code", "=", "exc", ".", "code", "(", ")", "error_class", "=", "...
re-map grpc exceptions that happen in context .
train
false
25,019
@api_versions.wraps('2.23') @utils.arg('server', metavar='<server>', help=_('Name or ID of server.')) @utils.arg('migration', metavar='<migration>', help=_('ID of migration.')) def do_server_migration_show(cs, args): server = _find_server(cs, args.server) migration = cs.server_migrations.get(server, args.migration) utils.print_dict(migration.to_dict())
[ "@", "api_versions", ".", "wraps", "(", "'2.23'", ")", "@", "utils", ".", "arg", "(", "'server'", ",", "metavar", "=", "'<server>'", ",", "help", "=", "_", "(", "'Name or ID of server.'", ")", ")", "@", "utils", ".", "arg", "(", "'migration'", ",", "me...
get the migration of specified server .
train
false
25,020
def whitespace_around_comma(logical_line): line = logical_line for m in WHITESPACE_AFTER_COMMA_REGEX.finditer(line): found = (m.start() + 1) if (' DCTB ' in m.group()): (yield (found, ("E242 tab after '%s'" % m.group()[0]))) else: (yield (found, ("E241 multiple spaces after '%s'" % m.group()[0])))
[ "def", "whitespace_around_comma", "(", "logical_line", ")", ":", "line", "=", "logical_line", "for", "m", "in", "WHITESPACE_AFTER_COMMA_REGEX", ".", "finditer", "(", "line", ")", ":", "found", "=", "(", "m", ".", "start", "(", ")", "+", "1", ")", "if", "...
avoid extraneous whitespace after a comma: flag a tab (e242) or multiple spaces (e241) following ',' .
train
true
25,021
def _parallel_predict_regression(estimators, estimators_features, X): return sum((estimator.predict(X[:, features]) for (estimator, features) in zip(estimators, estimators_features)))
[ "def", "_parallel_predict_regression", "(", "estimators", ",", "estimators_features", ",", "X", ")", ":", "return", "sum", "(", "(", "estimator", ".", "predict", "(", "X", "[", ":", ",", "features", "]", ")", "for", "(", "estimator", ",", "features", ")", ...
private function used to compute predictions within a job .
train
false
25,022
def copyfunc(f, name=None): try: return types.FunctionType(f.func_code, f.func_globals, (name or f.__name__), f.func_defaults, f.func_closure) except AttributeError: return types.FunctionType(f.__code__, f.__globals__, (name or f.__name__), f.__defaults__, f.__closure__)
[ "def", "copyfunc", "(", "f", ",", "name", "=", "None", ")", ":", "try", ":", "return", "types", ".", "FunctionType", "(", "f", ".", "func_code", ",", "f", ".", "func_globals", ",", "(", "name", "or", "f", ".", "__name__", ")", ",", "f", ".", "fun...
returns a deepcopy of a function .
train
false
25,023
@auth.before_request def check_rate_limiting(): if (not flaskbb_config['AUTH_RATELIMIT_ENABLED']): return None return limiter.check()
[ "@", "auth", ".", "before_request", "def", "check_rate_limiting", "(", ")", ":", "if", "(", "not", "flaskbb_config", "[", "'AUTH_RATELIMIT_ENABLED'", "]", ")", ":", "return", "None", "return", "limiter", ".", "check", "(", ")" ]
check the rate limits for each request for this blueprint .
train
false
25,024
def f_threshold_mway_rm(n_subjects, factor_levels, effects='A*B', pvalue=0.05): from scipy.stats import f (effect_picks, _) = _map_effects(len(factor_levels), effects) f_threshold = [] for (_, df1, df2) in _iter_contrasts(n_subjects, factor_levels, effect_picks): f_threshold.append(f(df1, df2).isf(pvalue)) return (f_threshold if (len(f_threshold) > 1) else f_threshold[0])
[ "def", "f_threshold_mway_rm", "(", "n_subjects", ",", "factor_levels", ",", "effects", "=", "'A*B'", ",", "pvalue", "=", "0.05", ")", ":", "from", "scipy", ".", "stats", "import", "f", "(", "effect_picks", ",", "_", ")", "=", "_map_effects", "(", "len", ...
compute f-value thresholds for a two-way anova .
train
false
25,027
def DEBUG(x): LOG_LEVEL('debug')
[ "def", "DEBUG", "(", "x", ")", ":", "LOG_LEVEL", "(", "'debug'", ")" ]
sets the logging verbosity to debug which displays much more information .
train
false
25,028
@pytest.mark.linux def test_misconfigured_user_dirs(request, httpbin, temp_basedir_env, tmpdir, quteproc_new): if request.config.webengine: pytest.skip('Downloads are not implemented with QtWebEngine yet') home = (tmpdir / 'home') home.ensure(dir=True) temp_basedir_env['HOME'] = str(home) assert (temp_basedir_env['XDG_CONFIG_HOME'] == (tmpdir / 'config')) ((tmpdir / 'config') / 'user-dirs.dirs').write('XDG_DOWNLOAD_DIR="relative"', ensure=True) quteproc_new.start(_base_args(request.config), env=temp_basedir_env) quteproc_new.set_setting('storage', 'prompt-download-directory', 'false') url = 'http://localhost:{port}/data/downloads/download.bin'.format(port=httpbin.port) quteproc_new.send_cmd(':download {}'.format(url)) line = quteproc_new.wait_for(loglevel=logging.ERROR, category='message', message='XDG_DOWNLOAD_DIR points to a relative path - please check your ~/.config/user-dirs.dirs. The download is saved in your home directory.') line.expected = True quteproc_new.wait_for(category='downloads', message='Download download.bin finished') assert (home / 'download.bin').exists()
[ "@", "pytest", ".", "mark", ".", "linux", "def", "test_misconfigured_user_dirs", "(", "request", ",", "httpbin", ",", "temp_basedir_env", ",", "tmpdir", ",", "quteproc_new", ")", ":", "if", "request", ".", "config", ".", "webengine", ":", "pytest", ".", "ski...
test downloads with a misconfigured xdg_download_dir .
train
false
25,029
def p_parameter_type_list_1(t): pass
[ "def", "p_parameter_type_list_1", "(", "t", ")", ":", "pass" ]
parameter_type_list : parameter_list .
train
false
25,030
def unpack_dbobj(item): _init_globals() try: obj = (item[3] and _TO_MODEL_MAP[item[1]].objects.get(id=item[3])) except ObjectDoesNotExist: return None except TypeError: if hasattr(item, 'pk'): return item return None return (((_TO_DATESTRING(obj) == item[2]) and obj) or None)
[ "def", "unpack_dbobj", "(", "item", ")", ":", "_init_globals", "(", ")", "try", ":", "obj", "=", "(", "item", "[", "3", "]", "and", "_TO_MODEL_MAP", "[", "item", "[", "1", "]", "]", ".", "objects", ".", "get", "(", "id", "=", "item", "[", "3", ...
check and convert internal representations back to django database models .
train
false
25,032
def parse_acl_v2(data): if (data is None): return None if (data is ''): return {} try: result = json.loads(data) return (result if (type(result) is dict) else None) except ValueError: return None
[ "def", "parse_acl_v2", "(", "data", ")", ":", "if", "(", "data", "is", "None", ")", ":", "return", "None", "if", "(", "data", "is", "''", ")", ":", "return", "{", "}", "try", ":", "result", "=", "json", ".", "loads", "(", "data", ")", "return", ...
parses a version-2 swift acl string and returns a dict of acl info .
train
false
25,033
def addCage(derivation, height, negatives, positives): copyShallow = derivation.elementNode.getCopyShallow() copyShallow.attributes['path'] = [Vector3(), Vector3(0.0, 0.0, height)] extrudeDerivation = extrude.ExtrudeDerivation(copyShallow) roundedExtendedRectangle = getRoundedExtendedRectangle(derivation.demiwidth, derivation.rectangleCenterX, 14) outsidePath = euclidean.getVector3Path(roundedExtendedRectangle) extrude.addPositives(extrudeDerivation, [outsidePath], positives) for bearingCenterX in derivation.bearingCenterXs: addNegativeSphere(derivation, negatives, bearingCenterX)
[ "def", "addCage", "(", "derivation", ",", "height", ",", "negatives", ",", "positives", ")", ":", "copyShallow", "=", "derivation", ".", "elementNode", ".", "getCopyShallow", "(", ")", "copyShallow", ".", "attributes", "[", "'path'", "]", "=", "[", "Vector3"...
add linear bearing cage .
train
false
25,034
def test_url_req_case_mismatch_file_index(script, data): Dinner = os.path.join(data.find_links3, 'dinner', 'Dinner-1.0.tar.gz') result = script.pip('install', '--index-url', data.find_links3, Dinner, 'requiredinner') egg_folder = ((script.site_packages / 'Dinner-1.0-py%s.egg-info') % pyversion) assert (egg_folder in result.files_created), str(result) egg_folder = ((script.site_packages / 'Dinner-2.0-py%s.egg-info') % pyversion) assert (egg_folder not in result.files_created), str(result)
[ "def", "test_url_req_case_mismatch_file_index", "(", "script", ",", "data", ")", ":", "Dinner", "=", "os", ".", "path", ".", "join", "(", "data", ".", "find_links3", ",", "'dinner'", ",", "'Dinner-1.0.tar.gz'", ")", "result", "=", "script", ".", "pip", "(", ...
tar ball url requirements .
train
false
25,035
@pytest.mark.parametrize('parallel', [True, False]) def test_fill_include_exclude_names(parallel, read_csv): text = '\nA, B, C\n, 1, 2\n3, , 4\n5, 5,\n' table = read_csv(text, fill_include_names=['A', 'B'], parallel=parallel) assert (table['A'][0] is ma.masked) assert (table['B'][1] is ma.masked) assert (table['C'][2] is not ma.masked) table = read_csv(text, fill_exclude_names=['A', 'B'], parallel=parallel) assert (table['C'][2] is ma.masked) assert (table['A'][0] is not ma.masked) assert (table['B'][1] is not ma.masked) table = read_csv(text, fill_include_names=['A', 'B'], fill_exclude_names=['B'], parallel=parallel) assert (table['A'][0] is ma.masked) assert (table['B'][1] is not ma.masked) assert (table['C'][2] is not ma.masked)
[ "@", "pytest", ".", "mark", ".", "parametrize", "(", "'parallel'", ",", "[", "True", ",", "False", "]", ")", "def", "test_fill_include_exclude_names", "(", "parallel", ",", "read_csv", ")", ":", "text", "=", "'\\nA, B, C\\n, 1, 2\\n3, , 4\\n5, 5,\\n'", "table", ...
fill_include_names and fill_exclude_names should filter missing/empty value handling in the same way that include_names and exclude_names filter output columns .
train
false
25,036
def word_tokenize(text, include_punc=True, *args, **kwargs): words = chain.from_iterable((_word_tokenizer.itokenize(sentence, include_punc=include_punc, *args, **kwargs) for sentence in sent_tokenize(text))) return words
[ "def", "word_tokenize", "(", "text", ",", "include_punc", "=", "True", ",", "*", "args", ",", "**", "kwargs", ")", ":", "words", "=", "chain", ".", "from_iterable", "(", "(", "_word_tokenizer", ".", "itokenize", "(", "sentence", ",", "include_punc", "=", ...
return a tokenized copy of *text* .
train
false
25,037
def get_text_from_si_or_is(self, elem, r_tag=(U_SSML12 + u'r'), t_tag=(U_SSML12 + u't')): accum = [] for child in elem: tag = child.tag if (tag == t_tag): t = cooked_text(self, child) if t: accum.append(t) elif (tag == r_tag): for tnode in child: if (tnode.tag == t_tag): t = cooked_text(self, tnode) if t: accum.append(t) return u''.join(accum)
[ "def", "get_text_from_si_or_is", "(", "self", ",", "elem", ",", "r_tag", "=", "(", "U_SSML12", "+", "u'r'", ")", ",", "t_tag", "=", "(", "U_SSML12", "+", "u't'", ")", ")", ":", "accum", "=", "[", "]", "for", "child", "in", "elem", ":", "tag", "=", ...
returns unescaped unicode .
train
false
25,038
def apply_noise(computation_graph, variables, level, seed=None): if (not seed): seed = config.default_seed rng = MRG_RandomStreams(seed) replace = {} for variable in variables: replace[variable] = (variable + rng.normal(variable.shape, std=level)) return computation_graph.replace(replace)
[ "def", "apply_noise", "(", "computation_graph", ",", "variables", ",", "level", ",", "seed", "=", "None", ")", ":", "if", "(", "not", "seed", ")", ":", "seed", "=", "config", ".", "default_seed", "rng", "=", "MRG_RandomStreams", "(", "seed", ")", "replac...
add gaussian noise to certain variable of a computation graph .
train
false
25,039
def report_change(project, path, old_content): resource = path_to_resource(project, path) if (resource is None): return for observer in list(project.observers): observer.resource_changed(resource) if project.pycore.automatic_soa: rope.base.pycore.perform_soa_on_changed_scopes(project, resource, old_content)
[ "def", "report_change", "(", "project", ",", "path", ",", "old_content", ")", ":", "resource", "=", "path_to_resource", "(", "project", ",", "path", ")", "if", "(", "resource", "is", "None", ")", ":", "return", "for", "observer", "in", "list", "(", "proj...
report that the contents of the file at path were changed ; the new contents of the file are retrieved by reading the file .
train
true
25,043
def _EscapeCommandLineArgumentForMSVS(s): def _Replace(match): return ((2 * match.group(1)) + '\\"') s = quote_replacer_regex.sub(_Replace, s) s = (('"' + s) + '"') return s
[ "def", "_EscapeCommandLineArgumentForMSVS", "(", "s", ")", ":", "def", "_Replace", "(", "match", ")", ":", "return", "(", "(", "2", "*", "match", ".", "group", "(", "1", ")", ")", "+", "'\\\\\"'", ")", "s", "=", "quote_replacer_regex", ".", "sub", "(",...
escapes a windows command-line argument .
train
false
25,044
def dup_apply_pairs(f, g, h, args, K): (n, m) = (len(f), len(g)) if (n != m): if (n > m): g = (([K.zero] * (n - m)) + g) else: f = (([K.zero] * (m - n)) + f) result = [] for (a, b) in zip(f, g): result.append(h(a, b, *args)) return dup_strip(result)
[ "def", "dup_apply_pairs", "(", "f", ",", "g", ",", "h", ",", "args", ",", "K", ")", ":", "(", "n", ",", "m", ")", "=", "(", "len", "(", "f", ")", ",", "len", "(", "g", ")", ")", "if", "(", "n", "!=", "m", ")", ":", "if", "(", "n", ">"...
apply h to pairs of coefficients of f and g .
train
false
25,045
def analysis(inputFile1='../../sounds/violin-B3.wav', window1='blackman', M1=1001, N1=1024, t1=(-100), minSineDur1=0.05, nH=60, minf01=200, maxf01=300, f0et1=10, harmDevSlope1=0.01, stocf=0.1, inputFile2='../../sounds/soprano-E4.wav', window2='blackman', M2=901, N2=1024, t2=(-100), minSineDur2=0.05, minf02=250, maxf02=500, f0et2=10, harmDevSlope2=0.01): Ns = 512 H = 128 (fs1, x1) = UF.wavread(inputFile1) (fs2, x2) = UF.wavread(inputFile2) w1 = get_window(window1, M1) w2 = get_window(window2, M2) (hfreq1, hmag1, hphase1, stocEnv1) = HPS.hpsModelAnal(x1, fs1, w1, N1, H, t1, nH, minf01, maxf01, f0et1, harmDevSlope1, minSineDur1, Ns, stocf) (hfreq2, hmag2, hphase2, stocEnv2) = HPS.hpsModelAnal(x2, fs2, w2, N2, H, t2, nH, minf02, maxf02, f0et2, harmDevSlope2, minSineDur2, Ns, stocf) plt.figure(figsize=(12, 9)) maxplotfreq = 15000.0 plt.subplot(2, 1, 1) numFrames = int(stocEnv1[:, 0].size) sizeEnv = int(stocEnv1[0, :].size) frmTime = ((H * np.arange(numFrames)) / float(fs1)) binFreq = (((0.5 * fs1) * np.arange(((sizeEnv * maxplotfreq) / (0.5 * fs1)))) / sizeEnv) plt.pcolormesh(frmTime, binFreq, np.transpose(stocEnv1[:, :(((sizeEnv * maxplotfreq) / (0.5 * fs1)) + 1)])) plt.autoscale(tight=True) if (hfreq1.shape[1] > 0): harms = np.copy(hfreq1) harms = (harms * np.less(harms, maxplotfreq)) harms[(harms == 0)] = np.nan numFrames = int(harms[:, 0].size) frmTime = ((H * np.arange(numFrames)) / float(fs1)) plt.plot(frmTime, harms, color='k', ms=3, alpha=1) plt.xlabel('time (sec)') plt.ylabel('frequency (Hz)') plt.autoscale(tight=True) plt.title('harmonics + stochastic spectrogram of sound 1') plt.subplot(2, 1, 2) numFrames = int(stocEnv2[:, 0].size) sizeEnv = int(stocEnv2[0, :].size) frmTime = ((H * np.arange(numFrames)) / float(fs2)) binFreq = (((0.5 * fs2) * np.arange(((sizeEnv * maxplotfreq) / (0.5 * fs2)))) / sizeEnv) plt.pcolormesh(frmTime, binFreq, np.transpose(stocEnv2[:, :(((sizeEnv * maxplotfreq) / (0.5 * fs2)) + 1)])) plt.autoscale(tight=True) if (hfreq2.shape[1] > 
0): harms = np.copy(hfreq2) harms = (harms * np.less(harms, maxplotfreq)) harms[(harms == 0)] = np.nan numFrames = int(harms[:, 0].size) frmTime = ((H * np.arange(numFrames)) / float(fs2)) plt.plot(frmTime, harms, color='k', ms=3, alpha=1) plt.xlabel('time (sec)') plt.ylabel('frequency (Hz)') plt.autoscale(tight=True) plt.title('harmonics + stochastic spectrogram of sound 2') plt.tight_layout() plt.show(block=False) return (inputFile1, fs1, hfreq1, hmag1, stocEnv1, inputFile2, hfreq2, hmag2, stocEnv2)
[ "def", "analysis", "(", "inputFile1", "=", "'../../sounds/violin-B3.wav'", ",", "window1", "=", "'blackman'", ",", "M1", "=", "1001", ",", "N1", "=", "1024", ",", "t1", "=", "(", "-", "100", ")", ",", "minSineDur1", "=", "0.05", ",", "nH", "=", "60", ...
analyze two sounds with the harmonic plus stochastic model and plot their harmonic and stochastic spectrograms .
train
false
25,046
def cache_get(key): packed = cache.get(_hashed_key(key)) if (packed is None): return None (value, refresh_time, refreshed) = packed if ((time() > refresh_time) and (not refreshed)): cache_set(key, value, settings.CACHE_SET_DELAY_SECONDS, True) return None return value
[ "def", "cache_get", "(", "key", ")", ":", "packed", "=", "cache", ".", "get", "(", "_hashed_key", "(", "key", ")", ")", "if", "(", "packed", "is", "None", ")", ":", "return", "None", "(", "value", ",", "refresh_time", ",", "refreshed", ")", "=", "p...
wrapper for cache .
train
true
25,047
def get_last_week_dashboard_stats(user_id): weekly_dashboard_stats = get_weekly_dashboard_stats(user_id) if weekly_dashboard_stats: return weekly_dashboard_stats[(-1)] else: return None
[ "def", "get_last_week_dashboard_stats", "(", "user_id", ")", ":", "weekly_dashboard_stats", "=", "get_weekly_dashboard_stats", "(", "user_id", ")", "if", "weekly_dashboard_stats", ":", "return", "weekly_dashboard_stats", "[", "(", "-", "1", ")", "]", "else", ":", "r...
gets last week's dashboard stats for a given user_id .
train
false
25,048
def test_crash_empty_prefix(lookup): results = lookup.lookup(u':Eevee') assert (results[0].object.name == u'Eevee')
[ "def", "test_crash_empty_prefix", "(", "lookup", ")", ":", "results", "=", "lookup", ".", "lookup", "(", "u':Eevee'", ")", "assert", "(", "results", "[", "0", "]", ".", "object", ".", "name", "==", "u'Eevee'", ")" ]
searching for :foo used to crash .
train
false
25,049
def shekel(individual, a, c): return (sum(((1.0 / (c[i] + sum((((individual[j] - aij) ** 2) for (j, aij) in enumerate(a[i]))))) for i in range(len(c)))),)
[ "def", "shekel", "(", "individual", ",", "a", ",", "c", ")", ":", "return", "(", "sum", "(", "(", "(", "1.0", "/", "(", "c", "[", "i", "]", "+", "sum", "(", "(", "(", "(", "individual", "[", "j", "]", "-", "aij", ")", "**", "2", ")", "for...
the shekel multimodal function can have any number of maxima .
train
false
25,050
def UNTRANS(p): global gTABLE_LATIN_850 if sabnzbd.WIN32: return p.encode('cp1252', 'replace').translate(gTABLE_LATIN_850) else: return p
[ "def", "UNTRANS", "(", "p", ")", ":", "global", "gTABLE_LATIN_850", "if", "sabnzbd", ".", "WIN32", ":", "return", "p", ".", "encode", "(", "'cp1252'", ",", "'replace'", ")", ".", "translate", "(", "gTABLE_LATIN_850", ")", "else", ":", "return", "p" ]
for windows: translate python's latin-1 to cp850 ; others: return original string .
train
false
25,052
def adjustments_from_deltas_with_sids(dense_dates, sparse_dates, column_idx, column_name, asset_idx, deltas): ad_series = deltas[AD_FIELD_NAME] adjustments = defaultdict(list) for (sid, per_sid) in deltas[column_name].iteritems(): idx = asset_idx[sid] for (kd, v) in per_sid.iteritems(): adjustments[dense_dates.searchsorted(kd)].extend(overwrite_from_dates(ad_series.loc[(kd, sid)], dense_dates, sparse_dates, (idx, idx), v)) return dict(adjustments)
[ "def", "adjustments_from_deltas_with_sids", "(", "dense_dates", ",", "sparse_dates", ",", "column_idx", ",", "column_name", ",", "asset_idx", ",", "deltas", ")", ":", "ad_series", "=", "deltas", "[", "AD_FIELD_NAME", "]", "adjustments", "=", "defaultdict", "(", "l...
collect all the adjustments that occur in a dataset that has a sid column .
train
false
25,054
def find_file_mismatch_nodes(): return [node for node in Node.find() if (set(node.files_versions.keys()) != set(node.files_current.keys()))]
[ "def", "find_file_mismatch_nodes", "(", ")", ":", "return", "[", "node", "for", "node", "in", "Node", ".", "find", "(", ")", "if", "(", "set", "(", "node", ".", "files_versions", ".", "keys", "(", ")", ")", "!=", "set", "(", "node", ".", "files_curre...
find nodes with inconsistent files_current and files_versions field keys .
train
false
25,055
def parse_ldap(pkt): payload = pkt[TCP].payload pkt_layer = util.get_layer_bytes(str(payload)) (usr, pswd) = (None, None) if (len(pkt_layer) > 0): if (pkt_layer[4] == '01'): (usr, pswd) = ('', '') usr_len = int(pkt_layer[11]) for idx in xrange(usr_len): usr += pkt_layer[(12 + idx)].decode('hex') pw_len = int(pkt_layer[(13 + usr_len)]) for idx in xrange(pw_len): pswd += pkt_layer[((14 + usr_len) + idx)].decode('hex') return (usr, pswd)
[ "def", "parse_ldap", "(", "pkt", ")", ":", "payload", "=", "pkt", "[", "TCP", "]", ".", "payload", "pkt_layer", "=", "util", ".", "get_layer_bytes", "(", "str", "(", "payload", ")", ")", "(", "usr", ",", "pswd", ")", "=", "(", "None", ",", "None", ...
parse ldap credentials; only supports simple (0) authentication right now .
train
false
25,058
def make_timeseries(start, end, dtypes, freq, partition_freq, seed=None): divisions = list(pd.DatetimeIndex(start=start, end=end, freq=partition_freq)) state_data = random_state_data((len(divisions) - 1), seed) name = ('make-timeseries-' + tokenize(start, end, dtypes, freq, partition_freq)) dsk = {(name, i): (make_timeseries_part, divisions[i], divisions[(i + 1)], dtypes, freq, state_data[i]) for i in range((len(divisions) - 1))} head = make_timeseries_part('2000', '2000', dtypes, '1H', state_data[0]) return DataFrame(dsk, name, head, divisions)
[ "def", "make_timeseries", "(", "start", ",", "end", ",", "dtypes", ",", "freq", ",", "partition_freq", ",", "seed", "=", "None", ")", ":", "divisions", "=", "list", "(", "pd", ".", "DatetimeIndex", "(", "start", "=", "start", ",", "end", "=", "end", ...
create timeseries dataframe with random data parameters start: datetime start of time series end: datetime end of time series dtypes: dict mapping of column names to types .
train
false
25,059
def tree_groups(N, num_groups): group_size = (N // num_groups) dx = num_groups dy = (N - (group_size * num_groups)) D = ((2 * dy) - dx) rv = [] for _ in range(num_groups): if (D < 0): rv.append(group_size) else: rv.append((group_size + 1)) D -= (2 * dx) D += (2 * dy) return rv
[ "def", "tree_groups", "(", "N", ",", "num_groups", ")", ":", "group_size", "=", "(", "N", "//", "num_groups", ")", "dx", "=", "num_groups", "dy", "=", "(", "N", "-", "(", "group_size", "*", "num_groups", ")", ")", "D", "=", "(", "(", "2", "*", "d...
split an integer n into evenly sized and spaced groups .
train
false
25,060
@receiver(models.signals.post_save, sender=CourseEnrollment) @receiver(models.signals.post_delete, sender=CourseEnrollment) def invalidate_enrollment_mode_cache(sender, instance, **kwargs): cache_key = CourseEnrollment.cache_key_name(instance.user.id, unicode(instance.course_id)) cache.delete(cache_key)
[ "@", "receiver", "(", "models", ".", "signals", ".", "post_save", ",", "sender", "=", "CourseEnrollment", ")", "@", "receiver", "(", "models", ".", "signals", ".", "post_delete", ",", "sender", "=", "CourseEnrollment", ")", "def", "invalidate_enrollment_mode_cac...
invalidate the cache of courseenrollment model .
train
false
25,061
def document_shared_examples(section, operation_model, example_prefix, shared_examples): container_section = section.add_new_section('shared-examples') container_section.style.new_paragraph() container_section.style.bold('Examples') documenter = SharedExampleDocumenter() for example in shared_examples: documenter.document_shared_example(example=example, section=container_section.add_new_section(example['id']), prefix=example_prefix, operation_model=operation_model)
[ "def", "document_shared_examples", "(", "section", ",", "operation_model", ",", "example_prefix", ",", "shared_examples", ")", ":", "container_section", "=", "section", ".", "add_new_section", "(", "'shared-examples'", ")", "container_section", ".", "style", ".", "new...
documents the shared examples .
train
false
25,062
def import_csr_file(csrfile, data): for (form, typ) in (('der', OpenSSL.crypto.FILETYPE_ASN1), ('pem', OpenSSL.crypto.FILETYPE_PEM)): try: domains = get_names_from_csr(data, typ) except OpenSSL.crypto.Error: logger.debug('CSR parse error (form=%s, typ=%s):', form, typ) logger.debug(traceback.format_exc()) continue return (typ, util.CSR(file=csrfile, data=data, form=form), domains) raise errors.Error('Failed to parse CSR file: {0}'.format(csrfile))
[ "def", "import_csr_file", "(", "csrfile", ",", "data", ")", ":", "for", "(", "form", ",", "typ", ")", "in", "(", "(", "'der'", ",", "OpenSSL", ".", "crypto", ".", "FILETYPE_ASN1", ")", ",", "(", "'pem'", ",", "OpenSSL", ".", "crypto", ".", "FILETYPE_...
import a csr file .
train
false
25,063
def retry_backoff_delay(attempt, scale_factor=1.0, jitter_max=0.0): exp = (float((2 ** attempt)) * float(scale_factor)) if (jitter_max == 0.0): return exp return (exp + (random.random() * jitter_max))
[ "def", "retry_backoff_delay", "(", "attempt", ",", "scale_factor", "=", "1.0", ",", "jitter_max", "=", "0.0", ")", ":", "exp", "=", "(", "float", "(", "(", "2", "**", "attempt", ")", ")", "*", "float", "(", "scale_factor", ")", ")", "if", "(", "jitte...
calculate an exponential backoff delay with jitter .
train
false
25,066
def hasFailed(release, size, provider='%'): release = prepareFailedName(release) failed_db_con = db.DBConnection('failed.db') sql_results = failed_db_con.select('SELECT release FROM failed WHERE release=? AND size=? AND provider LIKE ? LIMIT 1', [release, size, provider]) return (len(sql_results) > 0)
[ "def", "hasFailed", "(", "release", ",", "size", ",", "provider", "=", "'%'", ")", ":", "release", "=", "prepareFailedName", "(", "release", ")", "failed_db_con", "=", "db", ".", "DBConnection", "(", "'failed.db'", ")", "sql_results", "=", "failed_db_con", "...
returns true if a release has previously failed .
train
false
25,068
def check_and_call_extract_file(filepath, method_map, options_map, callback, keywords, comment_tags, strip_comment_tags, dirpath=None): filename = relpath(filepath, dirpath) for (pattern, method) in method_map: if (not pathmatch(pattern, filename)): continue options = {} for (opattern, odict) in options_map.items(): if pathmatch(opattern, filename): options = odict if callback: callback(filename, method, options) for message_tuple in extract_from_file(method, filepath, keywords=keywords, comment_tags=comment_tags, options=options, strip_comment_tags=strip_comment_tags): (yield ((filename,) + message_tuple)) break
[ "def", "check_and_call_extract_file", "(", "filepath", ",", "method_map", ",", "options_map", ",", "callback", ",", "keywords", ",", "comment_tags", ",", "strip_comment_tags", ",", "dirpath", "=", "None", ")", ":", "filename", "=", "relpath", "(", "filepath", ",...
checks if the given file matches an extraction method mapping .
train
false
25,070
def is_request_in_microsite(): return BACKEND.is_request_in_microsite()
[ "def", "is_request_in_microsite", "(", ")", ":", "return", "BACKEND", ".", "is_request_in_microsite", "(", ")" ]
this will return if current request is a request within a microsite .
train
false
25,071
def make_or_pipe(pipe): p1 = OrPipe(pipe) p2 = OrPipe(pipe) p1._partner = p2 p2._partner = p1 return (p1, p2)
[ "def", "make_or_pipe", "(", "pipe", ")", ":", "p1", "=", "OrPipe", "(", "pipe", ")", "p2", "=", "OrPipe", "(", "pipe", ")", "p1", ".", "_partner", "=", "p2", "p2", ".", "_partner", "=", "p1", "return", "(", "p1", ",", "p2", ")" ]
wraps a pipe into two pipe-like objects which are "or"d together to affect the real pipe .
train
true
25,072
def prepare_for_serialization(objects): if (isinstance(objects, list) and len(objects) and isinstance(objects[0], dict) and ('id' in objects[0])): objects = gather_unique_dicts(objects) return _prepare_data(objects)
[ "def", "prepare_for_serialization", "(", "objects", ")", ":", "if", "(", "isinstance", "(", "objects", ",", "list", ")", "and", "len", "(", "objects", ")", "and", "isinstance", "(", "objects", "[", "0", "]", ",", "dict", ")", "and", "(", "'id'", "in", ...
prepare python objects to be returned via rpc .
train
false
25,076
def min(x, axis=None, keepdims=False): return Min(axis, keepdims)(x)
[ "def", "min", "(", "x", ",", "axis", "=", "None", ",", "keepdims", "=", "False", ")", ":", "return", "Min", "(", "axis", ",", "keepdims", ")", "(", "x", ")" ]
returns minimum elements obtained by iterating over given axis .
train
false
25,077
def _string_concat(*strings): return u''.join([force_unicode(s) for s in strings])
[ "def", "_string_concat", "(", "*", "strings", ")", ":", "return", "u''", ".", "join", "(", "[", "force_unicode", "(", "s", ")", "for", "s", "in", "strings", "]", ")" ]
lazy variant of string concatenation .
train
false
25,079
def strhash(s, length, obfuscate=None): if obfuscate: hashedStr = b64c(sha512b64(s, obfuscate).lower()) else: hashedStr = re.sub(STRHASH_RE, '', s.lower())[:(length - 4)] while (len(hashedStr) < length): hashedStr += b64c(sha1b64(s)).lower() return hashedStr[:length]
[ "def", "strhash", "(", "s", ",", "length", ",", "obfuscate", "=", "None", ")", ":", "if", "obfuscate", ":", "hashedStr", "=", "b64c", "(", "sha512b64", "(", "s", ",", "obfuscate", ")", ".", "lower", "(", ")", ")", "else", ":", "hashedStr", "=", "re...
create a hash of .
train
false
25,080
def glDrawStringCenter(s): glRasterPos2f(0, 0) glBitmap(0, 0, 0, 0, ((- glGetStringSize(s)[0]) / 2), 0, None) for c in s: glutBitmapCharacter(OpenGL.GLUT.GLUT_BITMAP_HELVETICA_18, ord(c))
[ "def", "glDrawStringCenter", "(", "s", ")", ":", "glRasterPos2f", "(", "0", ",", "0", ")", "glBitmap", "(", "0", ",", "0", ",", "0", ",", "0", ",", "(", "(", "-", "glGetStringSize", "(", "s", ")", "[", "0", "]", ")", "/", "2", ")", ",", "0", ...
draw string on current draw pointer position .
train
false
25,082
def render_template_with_system_and_user_context(value, user, context=None, prefix=None): context = (context or {}) context[SYSTEM_SCOPE] = KeyValueLookup(prefix=prefix, scope=SYSTEM_SCOPE) context[USER_SCOPE] = UserKeyValueLookup(prefix=prefix, user=user, scope=USER_SCOPE) context[DATASTORE_PARENT_SCOPE] = {SYSTEM_SCOPE: KeyValueLookup(prefix=prefix, scope=FULL_SYSTEM_SCOPE), USER_SCOPE: UserKeyValueLookup(prefix=prefix, user=user, scope=FULL_USER_SCOPE)} rendered = render_template(value=value, context=context) return rendered
[ "def", "render_template_with_system_and_user_context", "(", "value", ",", "user", ",", "context", "=", "None", ",", "prefix", "=", "None", ")", ":", "context", "=", "(", "context", "or", "{", "}", ")", "context", "[", "SYSTEM_SCOPE", "]", "=", "KeyValueLooku...
render provided template with a default system context and user context for the provided user .
train
false
25,083
def url_params_from_lookup_dict(lookups): params = {} if (lookups and hasattr(lookups, u'items')): items = [] for (k, v) in lookups.items(): if callable(v): v = v() if isinstance(v, (tuple, list)): v = u','.join([str(x) for x in v]) elif isinstance(v, bool): v = (u'0', u'1')[v] else: v = six.text_type(v) items.append((k, v)) params.update(dict(items)) return params
[ "def", "url_params_from_lookup_dict", "(", "lookups", ")", ":", "params", "=", "{", "}", "if", "(", "lookups", "and", "hasattr", "(", "lookups", ",", "u'items'", ")", ")", ":", "items", "=", "[", "]", "for", "(", "k", ",", "v", ")", "in", "lookups", ...
converts the type of lookups specified in a foreignkey limit_choices_to attribute to a dictionary of query parameters .
train
false
25,084
def netstat(): if (__grains__['kernel'] == 'Linux'): return _netstat_linux() elif (__grains__['kernel'] in ('OpenBSD', 'FreeBSD', 'NetBSD')): return _netstat_bsd() elif (__grains__['kernel'] == 'SunOS'): return _netstat_sunos() raise CommandExecutionError('Not yet supported on this platform')
[ "def", "netstat", "(", ")", ":", "if", "(", "__grains__", "[", "'kernel'", "]", "==", "'Linux'", ")", ":", "return", "_netstat_linux", "(", ")", "elif", "(", "__grains__", "[", "'kernel'", "]", "in", "(", "'OpenBSD'", ",", "'FreeBSD'", ",", "'NetBSD'", ...
return information on open ports and states .
train
false
25,085
@api_view(['POST']) @permission_classes((AllowAny,)) def sig_check(request): issued_at = calendar.timegm(time.gmtime()) req = {'iss': settings.APP_PURCHASE_KEY, 'typ': settings.SIG_CHECK_TYP, 'aud': settings.APP_PURCHASE_AUD, 'iat': issued_at, 'exp': (issued_at + 3600), 'request': {}} return Response({'sig_check_jwt': sign_webpay_jwt(req)}, status=201)
[ "@", "api_view", "(", "[", "'POST'", "]", ")", "@", "permission_classes", "(", "(", "AllowAny", ",", ")", ")", "def", "sig_check", "(", "request", ")", ":", "issued_at", "=", "calendar", ".", "timegm", "(", "time", ".", "gmtime", "(", ")", ")", "req"...
returns a signed jwt to use for signature checking .
train
false
25,087
def brush_darker(brush, factor): grad = brush.gradient() if grad: return QBrush(gradient_darker(grad, factor)) else: brush = QBrush(brush) brush.setColor(brush.color().darker(factor)) return brush
[ "def", "brush_darker", "(", "brush", ",", "factor", ")", ":", "grad", "=", "brush", ".", "gradient", "(", ")", "if", "grad", ":", "return", "QBrush", "(", "gradient_darker", "(", "grad", ",", "factor", ")", ")", "else", ":", "brush", "=", "QBrush", "...
return a copy of the brush darkened by factor .
train
false
25,088
def _cast_none(x): if (isinstance(x, six.string_types) and (x == NONE_MAGIC_VALUE)): return None return x
[ "def", "_cast_none", "(", "x", ")", ":", "if", "(", "isinstance", "(", "x", ",", "six", ".", "string_types", ")", "and", "(", "x", "==", "NONE_MAGIC_VALUE", ")", ")", ":", "return", "None", "return", "x" ]
cast function which serializes special magic string value which indicate "none" to none type .
train
false
25,089
def unvouch_mozilla_alternate_emails(apps, schema_editor): pass
[ "def", "unvouch_mozilla_alternate_emails", "(", "apps", ",", "schema_editor", ")", ":", "pass" ]
vouches cannot be removed .
train
false
25,090
@cronjobs.register def update_l10n_contributor_metrics(day=None): if (day is None): day = date.today() first_of_month = date(day.year, day.month, 1) if (day.month == 1): previous_first_of_month = date((day.year - 1), 12, 1) else: previous_first_of_month = date(day.year, (day.month - 1), 1) for locale in settings.SUMO_LANGUAGES: for product in ([None] + list(Product.objects.filter(visible=True))): num = num_active_contributors(from_date=previous_first_of_month, to_date=first_of_month, locale=locale, product=product) WikiMetric.objects.create(code=L10N_ACTIVE_CONTRIBUTORS_CODE, locale=locale, product=product, date=previous_first_of_month, value=num)
[ "@", "cronjobs", ".", "register", "def", "update_l10n_contributor_metrics", "(", "day", "=", "None", ")", ":", "if", "(", "day", "is", "None", ")", ":", "day", "=", "date", ".", "today", "(", ")", "first_of_month", "=", "date", "(", "day", ".", "year",...
update the number of active contributors for each locale/product .
train
false
25,093
def LoadSingleDispatch(dispatch_info, open_fn=None): builder = yaml_object.ObjectBuilder(DispatchInfoExternal) handler = yaml_builder.BuilderHandler(builder) listener = yaml_listener.EventListener(handler) listener.Parse(dispatch_info) parsed_yaml = handler.GetResults() if (not parsed_yaml): return DispatchInfoExternal() if (len(parsed_yaml) > 1): raise MalformedDispatchConfigurationError('Multiple dispatch: sections in configuration.') return parsed_yaml[0]
[ "def", "LoadSingleDispatch", "(", "dispatch_info", ",", "open_fn", "=", "None", ")", ":", "builder", "=", "yaml_object", ".", "ObjectBuilder", "(", "DispatchInfoExternal", ")", "handler", "=", "yaml_builder", ".", "BuilderHandler", "(", "builder", ")", "listener",...
load a dispatch .
train
false
25,094
def user_create_onvalidation(form): if (form.request_vars.has_key('password_two') and (form.request_vars.password != form.request_vars.password_two)): form.errors.password = T("Password fields don't match") return True
[ "def", "user_create_onvalidation", "(", "form", ")", ":", "if", "(", "form", ".", "request_vars", ".", "has_key", "(", "'password_two'", ")", "and", "(", "form", ".", "request_vars", ".", "password", "!=", "form", ".", "request_vars", ".", "password_two", ")...
server-side check that password confirmation field is valid .
train
false
25,096
def require_cron_or_superadmin(handler): def _require_cron_or_superadmin(self, *args, **kwargs): if ((self.request.headers.get('X-AppEngine-Cron') is None) and (not self.is_super_admin)): raise self.UnauthorizedUserException('You do not have the credentials to access this page.') else: return handler(self, *args, **kwargs) return _require_cron_or_superadmin
[ "def", "require_cron_or_superadmin", "(", "handler", ")", ":", "def", "_require_cron_or_superadmin", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", ":", "if", "(", "(", "self", ".", "request", ".", "headers", ".", "get", "(", "'X-AppEngine-Cron'", ...
decorator to ensure that the handler is being called by cron or by a superadmin of the application .
train
false
25,097
def SSH(port=22, **kwargs): return rule(port, **kwargs)
[ "def", "SSH", "(", "port", "=", "22", ",", "**", "kwargs", ")", ":", "return", "rule", "(", "port", ",", "**", "kwargs", ")" ]
helper to build a firewall rule for ssh connections extra args will be passed to :py:func:~fabtools .
train
false
25,098
def _is_analytic(f, x): from sympy import Heaviside, Abs return (not any(((x in expr.free_symbols) for expr in f.atoms(Heaviside, Abs))))
[ "def", "_is_analytic", "(", "f", ",", "x", ")", ":", "from", "sympy", "import", "Heaviside", ",", "Abs", "return", "(", "not", "any", "(", "(", "(", "x", "in", "expr", ".", "free_symbols", ")", "for", "expr", "in", "f", ".", "atoms", "(", "Heavisid...
check if f(x) .
train
false
25,099
def plot_accuracy(x, y, x_legend): x = np.array(x) y = np.array(y) plt.title(('Classification accuracy as a function of %s' % x_legend)) plt.xlabel(('%s' % x_legend)) plt.ylabel('Accuracy') plt.grid(True) plt.plot(x, y)
[ "def", "plot_accuracy", "(", "x", ",", "y", ",", "x_legend", ")", ":", "x", "=", "np", ".", "array", "(", "x", ")", "y", "=", "np", ".", "array", "(", "y", ")", "plt", ".", "title", "(", "(", "'Classification accuracy as a function of %s'", "%", "x_l...
plot accuracy as a function of x .
train
false
25,100
def get_processor(format, mapping): try: obj_info = mapping[format] except KeyError: if (format is None): raise ValueError('Format required (lower case string)') elif (not isinstance(format, basestring)): raise TypeError('Need a string for the file format (lower case)') elif (format != format.lower()): raise ValueError(('Format string %r should be lower case' % format)) else: raise ValueError(('Unknown format %r. Supported formats are %r' % (format, "', '".join(mapping)))) (mod_name, obj_name) = obj_info mod = __import__(('Bio.SearchIO.%s' % mod_name), fromlist=['']) return getattr(mod, obj_name)
[ "def", "get_processor", "(", "format", ",", "mapping", ")", ":", "try", ":", "obj_info", "=", "mapping", "[", "format", "]", "except", "KeyError", ":", "if", "(", "format", "is", "None", ")", ":", "raise", "ValueError", "(", "'Format required (lower case str...
returns the object to process the given format according to the mapping .
train
false
25,101
def conjugate(matlist, K): return [conjugate_row(row, K) for row in matlist]
[ "def", "conjugate", "(", "matlist", ",", "K", ")", ":", "return", "[", "conjugate_row", "(", "row", ",", "K", ")", "for", "row", "in", "matlist", "]" ]
returns the conjugate of a matrix row-wise .
train
false
25,102
def construct_tmp_lun_name(lun_name): return ('%(src)s-%(ts)s' % {'src': lun_name, 'ts': int(time.time())})
[ "def", "construct_tmp_lun_name", "(", "lun_name", ")", ":", "return", "(", "'%(src)s-%(ts)s'", "%", "{", "'src'", ":", "lun_name", ",", "'ts'", ":", "int", "(", "time", ".", "time", "(", ")", ")", "}", ")" ]
constructs a time-based temporary lun name .
train
false
25,104
def bench_R10(): def srange(min, max, step): v = [min] while ((max - v[(-1)]).evalf() > 0): v.append((v[(-1)] + step)) return v[:(-1)] v = srange((- pi), pi, (sympify(1) / 10))
[ "def", "bench_R10", "(", ")", ":", "def", "srange", "(", "min", ",", "max", ",", "step", ")", ":", "v", "=", "[", "min", "]", "while", "(", "(", "max", "-", "v", "[", "(", "-", "1", ")", "]", ")", ".", "evalf", "(", ")", ">", "0", ")", ...
v = [-pi .
train
false
25,105
def check_modify_host(update_data): if ('status' in update_data): raise model_logic.ValidationError({'status': 'Host status can not be modified by the frontend.'})
[ "def", "check_modify_host", "(", "update_data", ")", ":", "if", "(", "'status'", "in", "update_data", ")", ":", "raise", "model_logic", ".", "ValidationError", "(", "{", "'status'", ":", "'Host status can not be modified by the frontend.'", "}", ")" ]
sanity check modify_host* requests .
train
false
25,106
@requires_duration def slide_out(clip, duration, side): (w, h) = clip.size t_s = (clip.duration - duration) pos_dict = {'left': (lambda t: (min(0, (w * (1 - ((t - ts) / duration)))), 'center')), 'right': (lambda t: (max(0, (w * (((t - ts) / duration) - 1))), 'center')), 'top': (lambda t: ('center', min(0, (h * (1 - ((t - ts) / duration)))))), 'bottom': (lambda t: ('center', max(0, (h * (((t - ts) / duration) - 1)))))} return clip.set_pos(pos_dict[side])
[ "@", "requires_duration", "def", "slide_out", "(", "clip", ",", "duration", ",", "side", ")", ":", "(", "w", ",", "h", ")", "=", "clip", ".", "size", "t_s", "=", "(", "clip", ".", "duration", "-", "duration", ")", "pos_dict", "=", "{", "'left'", ":...
makes the clip go away by one side of the screen .
train
false
25,108
def _load_file(filename): fp = open(filename, 'rb') source = (fp.read() + '\n') try: co = compile(source, filename, 'exec') except SyntaxError: print >>sys.stderr, '>>Syntax error in', filename raise fp.close() return co
[ "def", "_load_file", "(", "filename", ")", ":", "fp", "=", "open", "(", "filename", ",", "'rb'", ")", "source", "=", "(", "fp", ".", "read", "(", ")", "+", "'\\n'", ")", "try", ":", "co", "=", "compile", "(", "source", ",", "filename", ",", "'exe...
load a python source file and compile it to byte-code _load_module: code_object filename: name of file containing python source code code_object: code_object compiled from this source code this function does not write any file! .
train
false
25,109
def list_cert_bindings(site): ret = dict() sites = list_sites() if (site not in sites): _LOG.warning('Site not found: %s', site) return ret for binding in sites[site]['bindings']: if sites[site]['bindings'][binding]['certificatehash']: ret[binding] = sites[site]['bindings'][binding] if (not ret): _LOG.warning('No certificate bindings found for site: %s', site) return ret
[ "def", "list_cert_bindings", "(", "site", ")", ":", "ret", "=", "dict", "(", ")", "sites", "=", "list_sites", "(", ")", "if", "(", "site", "not", "in", "sites", ")", ":", "_LOG", ".", "warning", "(", "'Site not found: %s'", ",", "site", ")", "return", ...
list certificate bindings for an iis site .
train
true
25,110
def CDLTHRUSTING(barDs, count): return call_talib_with_ohlc(barDs, count, talib.CDLTHRUSTING)
[ "def", "CDLTHRUSTING", "(", "barDs", ",", "count", ")", ":", "return", "call_talib_with_ohlc", "(", "barDs", ",", "count", ",", "talib", ".", "CDLTHRUSTING", ")" ]
thrusting pattern .
train
false
25,111
def parse_sequence_example(serialized, image_feature, caption_feature): (context, sequence) = tf.parse_single_sequence_example(serialized, context_features={image_feature: tf.FixedLenFeature([], dtype=tf.string)}, sequence_features={caption_feature: tf.FixedLenSequenceFeature([], dtype=tf.int64)}) encoded_image = context[image_feature] caption = sequence[caption_feature] return (encoded_image, caption)
[ "def", "parse_sequence_example", "(", "serialized", ",", "image_feature", ",", "caption_feature", ")", ":", "(", "context", ",", "sequence", ")", "=", "tf", ".", "parse_single_sequence_example", "(", "serialized", ",", "context_features", "=", "{", "image_feature", ...
parses a tensorflow .
train
false
25,112
def new_zone(zone, restart=True): out = __mgmt(zone, 'zone', 'new') if restart: if (out == 'success'): return __firewall_cmd('--reload') return out
[ "def", "new_zone", "(", "zone", ",", "restart", "=", "True", ")", ":", "out", "=", "__mgmt", "(", "zone", ",", "'zone'", ",", "'new'", ")", "if", "restart", ":", "if", "(", "out", "==", "'success'", ")", ":", "return", "__firewall_cmd", "(", "'--relo...
add a new zone cli example: .
train
true
25,113
def build_year_spans(year_spans_str): spans = [] for elem in year_spans_str: spans.append(span_from_str(elem)) complete_year_spans(spans) return spans
[ "def", "build_year_spans", "(", "year_spans_str", ")", ":", "spans", "=", "[", "]", "for", "elem", "in", "year_spans_str", ":", "spans", ".", "append", "(", "span_from_str", "(", "elem", ")", ")", "complete_year_spans", "(", "spans", ")", "return", "spans" ]
build a chronologically ordered list of spans dict from unordered spans stringlist .
train
false
25,114
def get_single_text(field, text, **kwargs): for t in field.process_text(text, mode='query', **kwargs): return t
[ "def", "get_single_text", "(", "field", ",", "text", ",", "**", "kwargs", ")", ":", "for", "t", "in", "field", ".", "process_text", "(", "text", ",", "mode", "=", "'query'", ",", "**", "kwargs", ")", ":", "return", "t" ]
returns the first token from an analyzers output .
train
false
25,115
def forgiving_float(value): try: return float(value) except (ValueError, TypeError): return value
[ "def", "forgiving_float", "(", "value", ")", ":", "try", ":", "return", "float", "(", "value", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "return", "value" ]
try to convert value to a float .
train
false
25,116
def _get_representation_attrs(frame, units, kwargs): frame_attr_names = frame.representation_component_names.keys() repr_attr_classes = frame.representation.attr_classes.values() valid_kwargs = {} for (frame_attr_name, repr_attr_class, unit) in zip(frame_attr_names, repr_attr_classes, units): value = kwargs.pop(frame_attr_name, None) if (value is not None): valid_kwargs[frame_attr_name] = repr_attr_class(value, unit=unit) return valid_kwargs
[ "def", "_get_representation_attrs", "(", "frame", ",", "units", ",", "kwargs", ")", ":", "frame_attr_names", "=", "frame", ".", "representation_component_names", ".", "keys", "(", ")", "repr_attr_classes", "=", "frame", ".", "representation", ".", "attr_classes", ...
find instances of the "representation attributes" for specifying data for this frame .
train
false
25,118
def eventFromJSON(eventText): loaded = loads(eventText, object_hook=objectLoadHook) return loaded
[ "def", "eventFromJSON", "(", "eventText", ")", ":", "loaded", "=", "loads", "(", "eventText", ",", "object_hook", "=", "objectLoadHook", ")", "return", "loaded" ]
decode a log event from json .
train
false
25,119
@task @cmdopts([BOKCHOY_IMPORTS_DIR, BOKCHOY_IMPORTS_DIR_DEPR, PA11Y_FETCH_COURSE]) @timed def get_test_course(options): if options.get('imports_dir'): print colorize('green', '--imports-dir specified, skipping fetch of test course') return if (not options.get('should_fetch_course', False)): print colorize('green', '--skip-fetch specified, skipping fetch of test course') return options.imports_dir = DEMO_COURSE_IMPORT_DIR options.imports_dir.makedirs_p() zipped_course = (options.imports_dir + 'demo_course.tar.gz') msg = colorize('green', 'Fetching the test course from github...') print msg sh('wget {tar_gz_file} -O {zipped_course}'.format(tar_gz_file=DEMO_COURSE_TAR_GZ, zipped_course=zipped_course)) msg = colorize('green', 'Uncompressing the test course...') print msg sh('tar zxf {zipped_course} -C {courses_dir}'.format(zipped_course=zipped_course, courses_dir=options.imports_dir))
[ "@", "task", "@", "cmdopts", "(", "[", "BOKCHOY_IMPORTS_DIR", ",", "BOKCHOY_IMPORTS_DIR_DEPR", ",", "PA11Y_FETCH_COURSE", "]", ")", "@", "timed", "def", "get_test_course", "(", "options", ")", ":", "if", "options", ".", "get", "(", "'imports_dir'", ")", ":", ...
fetches the test course .
train
false
25,122
def master_config(path, env_var='SALT_MASTER_CONFIG', defaults=None, exit_on_config_errors=False): if (defaults is None): defaults = DEFAULT_MASTER_OPTS if (not os.environ.get(env_var, None)): salt_config_dir = os.environ.get('SALT_CONFIG_DIR', None) if salt_config_dir: env_config_file_path = os.path.join(salt_config_dir, 'master') if (salt_config_dir and os.path.isfile(env_config_file_path)): os.environ[env_var] = env_config_file_path overrides = load_config(path, env_var, DEFAULT_MASTER_OPTS['conf_file']) default_include = overrides.get('default_include', defaults['default_include']) include = overrides.get('include', []) overrides.update(include_config(default_include, path, verbose=False), exit_on_config_errors=exit_on_config_errors) overrides.update(include_config(include, path, verbose=True), exit_on_config_errors=exit_on_config_errors) opts = apply_master_config(overrides, defaults) _validate_opts(opts) if (opts.get('nodegroups') is None): opts['nodegroups'] = DEFAULT_MASTER_OPTS.get('nodegroups', {}) if ((opts.get('transport') == 'raet') and ('aes' in opts)): opts.pop('aes') apply_sdb(opts) return opts
[ "def", "master_config", "(", "path", ",", "env_var", "=", "'SALT_MASTER_CONFIG'", ",", "defaults", "=", "None", ",", "exit_on_config_errors", "=", "False", ")", ":", "if", "(", "defaults", "is", "None", ")", ":", "defaults", "=", "DEFAULT_MASTER_OPTS", "if", ...
return a masters configuration for the provided options and vm .
train
false
25,123
@parse_data @set_database def get_topic_update_nodes(parent=None, **kwargs): if parent: Parent = Item.alias() if (parent == 'root'): selector = Parent.parent.is_null() else: selector = (Parent.id == parent) values = Item.select(Item.title, Item.description, Item.available, Item.kind, Item.pk, Item.size_on_disk, Item.remote_size, Item.files_complete, Item.total_files, Item.id, Item.path, Item.youtube_id).join(Parent, on=(Item.parent == Parent.pk)).where((selector & (Item.total_files != 0))) return values
[ "@", "parse_data", "@", "set_database", "def", "get_topic_update_nodes", "(", "parent", "=", "None", ",", "**", "kwargs", ")", ":", "if", "parent", ":", "Parent", "=", "Item", ".", "alias", "(", ")", "if", "(", "parent", "==", "'root'", ")", ":", "sele...
convenience function for returning a set of topic nodes with limited fields for rendering the update topic tree .
train
false
25,124
def test_hsl_to_rgb_part_1(): assert (hsl_to_rgb((-360), 100, 50) == (255, 0, 0)) assert (hsl_to_rgb((-300), 100, 50) == (255, 255, 0)) assert (hsl_to_rgb((-240), 100, 50) == (0, 255, 0)) assert (hsl_to_rgb((-180), 100, 50) == (0, 255, 255)) assert (hsl_to_rgb((-120), 100, 50) == (0, 0, 255)) assert (hsl_to_rgb((-60), 100, 50) == (255, 0, 255))
[ "def", "test_hsl_to_rgb_part_1", "(", ")", ":", "assert", "(", "hsl_to_rgb", "(", "(", "-", "360", ")", ",", "100", ",", "50", ")", "==", "(", "255", ",", "0", ",", "0", ")", ")", "assert", "(", "hsl_to_rgb", "(", "(", "-", "300", ")", ",", "10...
test hsl to rgb color function .
train
false
25,125
@checker('.rst', severity=2) def check_suspicious_constructs(fn, lines): inprod = False for (lno, line) in enumerate(lines): if seems_directive_re.match(line): (yield ((lno + 1), 'comment seems to be intended as a directive')) if ('.. productionlist::' in line): inprod = True elif ((not inprod) and default_role_re.search(line)): (yield ((lno + 1), 'default role used')) elif (inprod and (not line.strip())): inprod = False
[ "@", "checker", "(", "'.rst'", ",", "severity", "=", "2", ")", "def", "check_suspicious_constructs", "(", "fn", ",", "lines", ")", ":", "inprod", "=", "False", "for", "(", "lno", ",", "line", ")", "in", "enumerate", "(", "lines", ")", ":", "if", "see...
check for suspicious rest constructs .
train
false
25,126
def handle_dependency(): platform_name = platform.system() try: if (platform_name == 'Windows'): install_talib_for_windows() elif (platform_name == 'Linux'): install_talib_for_linux() elif (platform_name == 'Darwin'): install_talib_for_Darwin() else: print 'Failed to install ta-lib!' except Exception as e: print 'Failed to install ta-lib!' print e dependencies = create_dependencies(platform_name) if dependencies: print 'pip install -r requirements.txt' print dependencies result = os.popen('pip install -r requirements.txt').readlines() util.printCommandResult(result)
[ "def", "handle_dependency", "(", ")", ":", "platform_name", "=", "platform", ".", "system", "(", ")", "try", ":", "if", "(", "platform_name", "==", "'Windows'", ")", ":", "install_talib_for_windows", "(", ")", "elif", "(", "platform_name", "==", "'Linux'", "...
docstring for fn .
train
false
25,132
def executors(opts, functions=None, context=None): return LazyLoader(_module_dirs(opts, 'executors', 'executor'), opts, tag='executor', pack={'__salt__': functions, '__context__': (context or {})})
[ "def", "executors", "(", "opts", ",", "functions", "=", "None", ",", "context", "=", "None", ")", ":", "return", "LazyLoader", "(", "_module_dirs", "(", "opts", ",", "'executors'", ",", "'executor'", ")", ",", "opts", ",", "tag", "=", "'executor'", ",", ...
returns the executor modules .
train
false
25,133
@pytest.mark.cmd @pytest.mark.django_db def test_revision_restore(capfd): call_command('revision', '--restore') (out, err) = capfd.readouterr() assert out.rstrip().isnumeric()
[ "@", "pytest", ".", "mark", ".", "cmd", "@", "pytest", ".", "mark", ".", "django_db", "def", "test_revision_restore", "(", "capfd", ")", ":", "call_command", "(", "'revision'", ",", "'--restore'", ")", "(", "out", ",", "err", ")", "=", "capfd", ".", "r...
restore redis revision from db .
train
false
25,136
def makeNumeric(s): if checkInt(s): return int(s) elif checkFloat(s): return float(s) else: return s
[ "def", "makeNumeric", "(", "s", ")", ":", "if", "checkInt", "(", "s", ")", ":", "return", "int", "(", "s", ")", "elif", "checkFloat", "(", "s", ")", ":", "return", "float", "(", "s", ")", "else", ":", "return", "s" ]
convert string to int or float if numeric .
train
false
25,138
def create_stores():
    """Register all known store modules and every scheme they support.

    Iterates ``CONF.known_stores``, instantiates each store class once,
    maps each of its URI schemes to its store/location classes, and
    registers that mapping with the ``location`` module.

    :returns: the number of distinct store classes registered
    :raises BackendException: if a store exposes no schemes
    """
    store_count = 0
    store_classes = set()  # store classes already registered (dedupe)
    for store_entry in CONF.known_stores:
        store_entry = store_entry.strip()
        if (not store_entry):
            # Skip blank/whitespace-only config entries.
            continue
        store_cls = _get_store_class(store_entry)
        store_instance = store_cls()
        schemes = store_instance.get_schemes()
        if (not schemes):
            # A store without schemes can never be addressed by a URI;
            # treat this as a fatal misconfiguration.
            raise BackendException(('Unable to register store %s. No schemes associated with it.' % store_cls))
        elif (store_cls not in store_classes):
            LOG.debug('Registering store %s with schemes %s', store_cls, schemes)
            store_classes.add(store_cls)
            scheme_map = {}
            # Map every scheme to this store class and its location class.
            for scheme in schemes:
                loc_cls = store_instance.get_store_location_class()
                scheme_map[scheme] = {'store_class': store_cls, 'location_class': loc_cls}
            location.register_scheme_map(scheme_map)
            store_count += 1
        else:
            # Same class reachable via two config entries; register once.
            LOG.debug('Store %s already registered', store_cls)
    return store_count
[ "def", "create_stores", "(", ")", ":", "store_count", "=", "0", "store_classes", "=", "set", "(", ")", "for", "store_entry", "in", "CONF", ".", "known_stores", ":", "store_entry", "=", "store_entry", ".", "strip", "(", ")", "if", "(", "not", "store_entry",...
registers all store modules and all schemes from the given config .
train
false
25,139
def cloud_query_magnetinfo(cookie, tokens, source_url, save_path):
    """Query metadata for a magnet link via the cloud-download service.

    ``source_url`` is the magnet URI and ``save_path`` the target folder;
    both are URI-encoded before being sent.  Returns the decoded JSON
    response, or None when the request fails.
    """
    request_url = ''.join([
        const.PAN_URL,
        'rest/2.0/services/cloud_dl?channel=chunlei&clienttype=0&web=1',
        '&bdstoken=',
        tokens['bdstoken'],
    ])
    payload = ''.join([
        'method=query_magnetinfo&app_id=250528',
        '&source_url=',
        encoder.encode_uri_component(source_url),
        '&save_path=',
        encoder.encode_uri_component(save_path),
        '&type=4',
    ])
    response = net.urlopen(
        request_url,
        headers={'Cookie': cookie.header_output()},
        data=payload.encode(),
    )
    if not response:
        return None
    return json.loads(response.data.decode())
[ "def", "cloud_query_magnetinfo", "(", "cookie", ",", "tokens", ",", "source_url", ",", "save_path", ")", ":", "url", "=", "''", ".", "join", "(", "[", "const", ".", "PAN_URL", ",", "'rest/2.0/services/cloud_dl?channel=chunlei&clienttype=0&web=1'", ",", "'&bdstoken='...
source_url - the URL of the magnet link .
train
true
25,140
def get_documenter(obj, parent):
    """Get an autodoc Documenter class suitable for documenting *obj*.

    *parent* is the object that owns *obj* (e.g. the module defining a
    function); it is used to build a fake parent documenter so candidate
    documenter classes can decide whether they can document *obj*.
    """
    from sphinx.ext.autodoc import AutoDirective, DataDocumenter, ModuleDocumenter
    if inspect.ismodule(obj):
        # Modules are always handled by ModuleDocumenter.
        return ModuleDocumenter
    # Build a documenter for the parent so can_document_member() has context;
    # recursion bottoms out because the second call passes parent=None.
    if (parent is not None):
        parent_doc_cls = get_documenter(parent, None)
    else:
        parent_doc_cls = ModuleDocumenter
    if hasattr(parent, '__name__'):
        parent_doc = parent_doc_cls(FakeDirective(), parent.__name__)
    else:
        parent_doc = parent_doc_cls(FakeDirective(), '')
    # Ask every registered documenter whether it can handle obj.
    classes = [cls for cls in AutoDirective._registry.values() if cls.can_document_member(obj, '', False, parent_doc)]
    if classes:
        # Highest-priority candidate wins.
        classes.sort(key=(lambda cls: cls.priority))
        return classes[(-1)]
    else:
        # No specific documenter claims obj: document it as plain data.
        return DataDocumenter
[ "def", "get_documenter", "(", "obj", ",", "parent", ")", ":", "from", "sphinx", ".", "ext", ".", "autodoc", "import", "AutoDirective", ",", "DataDocumenter", ",", "ModuleDocumenter", "if", "inspect", ".", "ismodule", "(", "obj", ")", ":", "return", "ModuleDo...
get an autodoc .
train
true
25,141
@register.as_tag
def blog_recent_posts(limit=5, tag=None, username=None, category=None):
    """Put a list of recently published blog posts into the template context.

    Optional filters: keyword ``tag`` and ``category`` (matched by title or
    slug) and author ``username``.  Returns at most ``limit`` posts, or an
    empty list when any given filter value does not exist.
    """
    blog_posts = BlogPost.objects.published().select_related(u'user')
    # Match a Keyword/BlogCategory by either its title or its slug.
    title_or_slug = (lambda s: (Q(title=s) | Q(slug=s)))
    if (tag is not None):
        try:
            tag = Keyword.objects.get(title_or_slug(tag))
            blog_posts = blog_posts.filter(keywords__keyword=tag)
        except Keyword.DoesNotExist:
            # Unknown tag: return no posts rather than an unfiltered list.
            return []
    if (category is not None):
        try:
            category = BlogCategory.objects.get(title_or_slug(category))
            blog_posts = blog_posts.filter(categories=category)
        except BlogCategory.DoesNotExist:
            return []
    if (username is not None):
        try:
            author = User.objects.get(username=username)
            blog_posts = blog_posts.filter(user=author)
        except User.DoesNotExist:
            return []
    # Evaluate the queryset here so the template receives a plain list.
    return list(blog_posts[:limit])
[ "@", "register", ".", "as_tag", "def", "blog_recent_posts", "(", "limit", "=", "5", ",", "tag", "=", "None", ",", "username", "=", "None", ",", "category", "=", "None", ")", ":", "blog_posts", "=", "BlogPost", ".", "objects", ".", "published", "(", ")"...
put a list of recently published blog posts into the template context .
train
true