Dataset schema (per-column value/length ranges):

  id_within_dataset     int64    1 to 55.5k
  snippet               string   lengths 19 to 14.2k
  tokens                list     lengths 6 to 1.63k
  nl                    string   lengths 6 to 352
  split_within_dataset  string   1 value
  is_duplicated         bool     2 classes
5,117
def ads_email(body, from_name=g.domain): return _ads_email(body, from_name, Email.Kind.ADS_ALERT)
queues an email to the sales team .
train
false
5,120
def _dummy_closure(x): return (lambda : x)
a dummy function allowing us to build cell objects .
train
false
5,121
def aggregate_host_get_all(context, aggregate_id): return IMPL.aggregate_host_get_all(context, aggregate_id)
get hosts for the specified aggregate .
train
false
5,123
def update_module_store_settings(module_store_setting, doc_store_settings=None, module_store_options=None, xml_store_options=None, default_store=None, mappings=None):
    for store in module_store_setting['default']['OPTIONS']['stores']:
        if (store['NAME'] == 'xml'):
            (xml_store_options and store['OPTIONS'].update(xml_store_options))
        else:
            (module_store_options and store['OPTIONS'].update(module_store_options))
            (doc_store_settings and store['DOC_STORE_CONFIG'].update(doc_store_settings))
    if default_store:
        mixed_stores = get_mixed_stores(module_store_setting)
        for store in mixed_stores:
            if (store['NAME'] == default_store):
                mixed_stores.remove(store)
                mixed_stores.insert(0, store)
                return
        raise Exception('Could not find setting for requested default store: {}'.format(default_store))
    if (mappings and ('mappings' in module_store_setting['default']['OPTIONS'])):
        module_store_setting['default']['OPTIONS']['mappings'] = mappings
updates the settings for each store defined in the given module_store_setting settings with the given doc store configuration and options .
train
false
5,124
def simplify_presentation(C):
    rels = C._reidemeister_relators
    rels_arr = _simplification_technique_1(rels)
    group = C._schreier_free_group
    C._reidemeister_relators = [group.dtype(tuple(r)).identity_cyclic_reduction() for r in rels_arr if r]
simplifies the reidemeister relators of the presentation ; relies upon _simplification_technique_1 for its functioning .
train
false
5,125
def yticks(*args, **kwargs):
    ax = gca()
    if (len(args) == 0):
        locs = ax.get_yticks()
        labels = ax.get_yticklabels()
    elif (len(args) == 1):
        locs = ax.set_yticks(args[0])
        labels = ax.get_yticklabels()
    elif (len(args) == 2):
        locs = ax.set_yticks(args[0])
        labels = ax.set_yticklabels(args[1], **kwargs)
    else:
        raise TypeError('Illegal number of arguments to yticks')
    if len(kwargs):
        for l in labels:
            l.update(kwargs)
    draw_if_interactive()
    return (locs, silent_list('Text yticklabel', labels))
get or set the tick locations and labels of the current *y*-axis .
train
false
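For reference, a short usage sketch of the three calling conventions the function handles, via the standard matplotlib pyplot API:

import matplotlib.pyplot as plt

plt.plot(range(4))
locs, labels = plt.yticks()        # no args: query current locations and labels
plt.yticks([0, 1, 2, 3])           # one arg: set the tick locations
plt.yticks([0, 1, 2, 3], ['a', 'b', 'c', 'd'], rotation=45)  # two args: set both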
5,126
def device_memset(dst, val, size, stream=0):
    varargs = []
    if stream:
        assert isinstance(stream, Stream)
        fn = driver.cuMemsetD8Async
        varargs.append(stream.handle)
    else:
        fn = driver.cuMemsetD8
    fn(device_pointer(dst), val, size, *varargs)
memset on the device .
train
false
5,127
def render_variable(env, raw, cookiecutter_dict):
    if (raw is None):
        return None
    elif isinstance(raw, dict):
        return {render_variable(env, k, cookiecutter_dict): render_variable(env, v, cookiecutter_dict)
                for (k, v) in raw.items()}
    elif isinstance(raw, list):
        return [render_variable(env, v, cookiecutter_dict) for v in raw]
    elif (not isinstance(raw, basestring)):
        raw = str(raw)
    template = env.from_string(raw)
    rendered_template = template.render(cookiecutter=cookiecutter_dict)
    return rendered_template
renders a template variable against the cookiecutter context inside the prompting , recursing into dicts and lists .
train
true
5,128
def convert_to_seconds(varnames): return preprocess_args(cvsecs, varnames)
converts the specified variables to seconds .
train
false
5,129
def addPathIndexSecondSegment(gridPixel, pathIndexTable, pixelTable, segmentSecondPixel):
    for yStep in xrange(gridPixel[1], (segmentSecondPixel[1] + 1)):
        if getKeyIsInPixelTableAddValue((gridPixel[0], yStep), pathIndexTable, pixelTable):
            return
add the path index of the closest segment found toward the second segment .
train
false
5,131
def TestInit():
    global INIT_RAN
    if (stats.STATS is None):
        stats.STATS = stats.StatsCollector()
    flags.FLAGS.config = config_lib.Resource().Filter('install_data/etc/grr-server.yaml')
    flags.FLAGS.secondary_configs = [config_lib.Resource().Filter('test_data/grr_test.yaml@grr-response-test')]
    extra_test_config = config_lib.CONFIG['Test.additional_test_config']
    if os.path.exists(extra_test_config):
        flags.FLAGS.secondary_configs.append(extra_test_config)
    config_lib.CONFIG.AddContext('Test Context', 'Context applied when we run tests.')
    config_lib.SetPlatformArchContext()
    config_lib.ParseConfigCommandLine()
    if (not INIT_RAN):
        log.ServerLoggingStartupInit()
        registry.TestInit()
    INIT_RAN = True
only used in tests and will rerun all the hooks to create a clean state .
train
false
5,132
def power_ztost_prop(low, upp, nobs, p_alt, alpha=0.05, dist='norm', variance_prop=None, discrete=True, continuity=0, critval_continuity=0):
    mean_low = low
    var_low = (std_prop(low, nobs) ** 2)
    mean_upp = upp
    var_upp = (std_prop(upp, nobs) ** 2)
    mean_alt = p_alt
    var_alt = (std_prop(p_alt, nobs) ** 2)
    if (variance_prop is not None):
        var_low = var_upp = (std_prop(variance_prop, nobs) ** 2)
    power = _power_ztost(mean_low, var_low, mean_upp, var_upp, mean_alt, var_alt,
                         alpha=alpha, discrete=discrete, dist=dist, nobs=nobs,
                         continuity=continuity, critval_continuity=critval_continuity)
    return (np.maximum(power[0], 0), power[1:])
power of a proportions equivalence test based on the normal distribution .
train
false
5,133
def list_journals(config):
    sep = u'\n'
    journal_list = sep.join(config[u'journals'])
    return journal_list
list the journals specified in the configuration file .
train
false
5,134
def ProtosToIndexDefinitions(protos): return [ProtoToIndexDefinition(definition) for definition in protos]
transform multiple index protocol buffers to index definitions .
train
false
5,138
def _margeff_cov_params_dummy(model, cov_margins, params, exog, dummy_ind, method, J):
    for i in dummy_ind:
        exog0 = exog.copy()
        exog1 = exog.copy()
        exog0[:, i] = 0
        exog1[:, i] = 1
        dfdb0 = model._derivative_predict(params, exog0, method)
        dfdb1 = model._derivative_predict(params, exog1, method)
        dfdb = (dfdb1 - dfdb0)
        if (dfdb.ndim >= 2):
            dfdb = dfdb.mean(0)
        if (J > 1):
            K = (dfdb.shape[1] // (J - 1))
            cov_margins[i::K, :] = dfdb
        else:
            cov_margins[i, :] = dfdb
    return cov_margins
returns the jacobian for discrete regressors for use in margeff_cov_params .
train
false
5,139
@then('the command output should not contain log records from categories')
def step_command_output_should_not_contain_log_records_from_categories(context):
    assert context.table, 'REQUIRE: context.table'
    context.table.require_column('category')
    record_schema = context.log_record_row_schema
    LogRecordTable.annotate_with_row_schema(context.table, record_schema)
    step_command_output_should_not_contain_log_records(context)
    context.table.remove_columns(['level', 'message'])
verifies that the command output does not contain log records from the provided log categories .
train
true
5,141
def check_32(arch, osarch=None):
    if (osarch is None):
        osarch = get_osarch()
    return all(((x in ARCHES_32) for x in (osarch, arch)))
returns true if both the os arch and the passed arch are 32-bit .
train
true
5,142
def _chown(path, uid, gid):
    os.chown(path, uid, gid)
    for item in os.listdir(path):
        item_path = os.path.join(path, item)
        if os.path.isfile(item_path):
            os.chown(item_path, uid, gid)
        elif os.path.isdir(item_path):
            os.chown(item_path, uid, gid)
            _chown(item_path, uid, gid)
change file ownership recursively .
train
false
5,143
def binary_crossentropy(output, target): return (- ((target * tensor.log(output)) + ((1.0 - target) * tensor.log((1.0 - output)))))
computes the binary cross-entropy between predictions and targets .
train
false
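The snippet applies the usual elementwise formula; a plain-Python sketch of the same computation on scalars (the original operates on Theano-style tensors):

from math import log

def binary_crossentropy_scalar(output, target):
    # -(t*log(o) + (1-t)*log(1-o)), assuming 0 < output < 1
    return -((target * log(output)) + ((1.0 - target) * log(1.0 - output)))

binary_crossentropy_scalar(0.9, 1.0)  # ~0.105, confident and correct: low loss
binary_crossentropy_scalar(0.1, 1.0)  # ~2.303, confident and wrong: high loss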
5,144
def cov_cluster_2groups(results, group, group2=None, use_correction=True):
    if (group2 is None):
        if ((group.ndim != 2) or (group.shape[1] != 2)):
            raise ValueError(('if group2 is not given, then groups needs to be ' +
                              'an array with two columns'))
        group0 = group[:, 0]
        group1 = group[:, 1]
    else:
        group0 = group
        group1 = group2
        group = (group0, group1)
    cov0 = cov_cluster(results, group0, use_correction=use_correction)
    cov1 = cov_cluster(results, group1, use_correction=use_correction)
    group_intersection = Group(group)
    cov01 = cov_cluster(results, group_intersection.group_int, use_correction=use_correction)
    cov_both = ((cov0 + cov1) - cov01)
    return (cov_both, cov0, cov1)
cluster robust covariance matrix for two groups/clusters , given a regression result instance .
train
false
5,145
def test_get_replay_file_name():
    exp_replay_file_name = os.path.join('foo', 'bar.json')
    assert (replay.get_file_name('foo', 'bar') == exp_replay_file_name)
make sure that replay.get_file_name builds the expected json file path .
train
false
5,146
def extract_metadata_column(sample_ids, metadata, category):
    col_ix = metadata[1].index(category)
    map_sample_ids = zip(*metadata[0])[0]
    category_labels = []
    for (i, sample_id) in enumerate(sample_ids):
        if (sample_id in map_sample_ids):
            row_ix = map_sample_ids.index(sample_id)
            entry = metadata[0][row_ix][col_ix]
            category_labels.append(entry)
    return category_labels
extracts values from the given metadata column .
train
false
5,147
def get_equivalent_release_groups(release_group):
    for equivalent_release_group in equivalent_release_groups:
        if (release_group in equivalent_release_group):
            return equivalent_release_group
    return {release_group}
get all the equivalents of the given release group .
train
false
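A usage sketch; the equivalence table here is hypothetical (the defining module supplies its own equivalent_release_groups sets):

equivalent_release_groups = [{'LOL', 'DIMENSION'}, {'ASAP', 'IMMERSE'}]

get_equivalent_release_groups('LOL')   # -> {'LOL', 'DIMENSION'}
get_equivalent_release_groups('YIFY')  # -> {'YIFY'}: no known equivalents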
5,148
def show_reference_template(request, template):
    try:
        context = {'disable_courseware_js': True, 'uses_pattern_library': True}
        context.update(request.GET.dict())
        return render_to_response(template, context)
    except TopLevelLookupException:
        return HttpResponseNotFound("Couldn't find template {template}".format(template=template))
shows the specified template as an html page .
train
false
5,149
def delivery_pipeline(registry, xml_parent, data):
    pipeline = XML.SubElement(xml_parent, 'se.diabol.jenkins.pipeline.PipelineProperty')
    pipeline.set('plugin', 'delivery-pipeline-plugin')
    mapping = [('stage', 'stageName', ''),
               ('task', 'taskName', ''),
               ('description', 'descriptionTemplate', '')]
    helpers.convert_mapping_to_xml(pipeline, data, mapping, fail_required=True)
yaml: delivery-pipeline . configures the delivery-pipeline-plugin job property with a stage name , task name and description template .
train
false
5,150
def HeatMap(data, x=None, y=None, values=None, stat='count', xgrid=False, ygrid=False, hover_tool=True, hover_text=None, **kw):
    kw['x'] = x
    kw['y'] = y
    kw['values'] = values
    kw['stat'] = stat
    chart = create_and_build(HeatMapBuilder, data, xgrid=xgrid, ygrid=ygrid, **kw)
    if hover_tool:
        tooltip = build_agg_tooltip(hover_text=hover_text, aggregated_col=values, agg_text=stat)
        chart.add_tooltips([tooltip])
    return chart
represent 3 dimensions in a heatmap chart using x , y and values .
train
false
5,151
def test_rotate():
    assert (rotate('#000', 45) == '#000')
    assert (rotate('#fff', 45) == '#fff')
    assert (rotate('#811', 45) == '#886a11')
    assert (rotate('#8a8', 360) == '#8a8')
    assert (rotate('#8a8', 0) == '#8a8')
    assert (rotate('#8a8', (-360)) == '#8a8')
test color rotation function .
train
false
5,153
def LoadChecksFromDirs(dir_paths, overwrite_if_exists=True):
    loaded = []
    for dir_path in dir_paths:
        cfg_files = glob.glob(os.path.join(dir_path, '*.yaml'))
        loaded.extend(LoadChecksFromFiles(cfg_files, overwrite_if_exists))
    return loaded
load checks from all yaml files in the specified directories .
train
true
5,155
def dir_tree_find(tree, kind):
    nodes = []
    if isinstance(tree, list):
        for t in tree:
            nodes += dir_tree_find(t, kind)
    else:
        if (tree['block'] == kind):
            nodes.append(tree)
        for child in tree['children']:
            nodes += dir_tree_find(child, kind)
    return nodes
find nodes of the given kind from a directory tree structure .
train
false
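A toy traversal, assuming each node is a dict with 'block' and 'children' keys as the code implies (the block values here are illustrative):

tree = {'block': 'dir', 'children': [
    {'block': 'raw', 'children': []},
    {'block': 'dir', 'children': [{'block': 'raw', 'children': []}]},
]}
len(dir_tree_find(tree, 'raw'))  # -> 2: both 'raw' nodes are collected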
5,156
def cpuThrottle(value):
    delay = (1e-05 * (value ** 2))
    time.sleep(delay)
throttles the cpu by sleeping , for lower cpu consumption .
train
false
5,157
def comparable_version(version_string):
    comparable = parse_version_failsafe(version_string)
    if (not comparable):
        if (version_string == LATEST_VERBOSE_NAME):
            comparable = Version('99999.0')
        elif (version_string == STABLE_VERBOSE_NAME):
            comparable = Version('9999.0')
        else:
            comparable = Version('0.01')
    return comparable
turns a version string into a comparable version object ; this can be used as key argument to sorted .
train
false
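A sorting sketch, assuming parse_version_failsafe returns a falsy value for the non-numeric names (which is what the fallback branches imply):

names = ['0.9', 'stable', '1.0', 'latest']
sorted(names, key=comparable_version)
# -> ['0.9', '1.0', 'stable', 'latest']: 'stable' sorts as 9999.0, 'latest' as 99999.0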
5,158
def inplace_swap_column(X, m, n):
    if (m < 0):
        m += X.shape[1]
    if (n < 0):
        n += X.shape[1]
    if isinstance(X, sp.csc_matrix):
        return inplace_swap_row_csr(X, m, n)
    elif isinstance(X, sp.csr_matrix):
        return inplace_swap_row_csc(X, m, n)
    else:
        _raise_typeerror(X)
swaps two columns of a csc/csr matrix in-place .
train
false
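A usage sketch with scipy.sparse; the swap mutates X in place:

import numpy as np
from scipy import sparse as sp

X = sp.csr_matrix(np.arange(6).reshape(2, 3))
inplace_swap_column(X, 0, 2)
X.toarray()
# array([[2, 1, 0],
#        [5, 4, 3]])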
5,160
def test_importorskip_module_level(testdir):
    testdir.makepyfile('\n    import pytest\n    foobarbaz = pytest.importorskip("foobarbaz")\n\n    def test_foo():\n        pass\n    ')
    result = testdir.runpytest()
    result.stdout.fnmatch_lines(['*collected 0 items / 1 skipped*'])
importorskip must be able to skip entire modules when used at module level .
train
false
5,163
@salt.utils.decorators.depends(True)
def booldependsTrue():
    return True
cli example: .
train
false
5,164
def readKeys(keydir):
    for filename in os.listdir(keydir):
        if filename.startswith('.'):
            continue
        (basename, ext) = os.path.splitext(filename)
        if (ext != '.pub'):
            continue
        if (not isSafeUsername(basename)):
            log.warn('Unsafe SSH username in keyfile: %r', filename)
            continue
        path = os.path.join(keydir, filename)
        f = file(path)
        for line in f:
            line = line.rstrip('\n')
            yield (basename, line)
        f.close()
read ssh public keys from keydir/* .
train
false
5,165
def get_scene_numbering(indexer_id, indexer, season, episode, fallback_to_xem=True):
    if ((indexer_id is None) or (season is None) or (episode is None)):
        return (season, episode)
    showObj = Show.find(sickbeard.showList, int(indexer_id))
    if (showObj and (not showObj.is_scene)):
        return (season, episode)
    result = find_scene_numbering(int(indexer_id), int(indexer), season, episode)
    if result:
        return result
    else:
        if fallback_to_xem:
            xem_result = find_xem_numbering(int(indexer_id), int(indexer), season, episode)
            if xem_result:
                return xem_result
        return (season, episode)
returns a tuple ( season , episode ) with the scene numbering if present , falling back to the xem numbering , otherwise the original numbering .
train
false
5,166
def set_loader(fxn):
    @functools.wraps(fxn)
    def set_loader_wrapper(self, *args, **kwargs):
        warnings.warn('The import system now takes care of this automatically.',
                      DeprecationWarning, stacklevel=2)
        module = fxn(self, *args, **kwargs)
        if (getattr(module, '__loader__', None) is None):
            module.__loader__ = self
        return module
    return set_loader_wrapper
set __loader__ on the returned module .
train
true
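A usage sketch; the decorator targets a loader's load_module method, and the stub loader below is illustrative only:

import types

class StubLoader(object):
    @set_loader
    def load_module(self, fullname):
        # return a bare module; the wrapper fills in __loader__
        return types.ModuleType(fullname)

mod = StubLoader().load_module('demo')
mod.__loader__  # -> the StubLoader instance (a DeprecationWarning is emitted)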
5,172
def chuid(name, uid):
    if (not isinstance(uid, int)):
        raise SaltInvocationError('uid must be an integer')
    pre_info = info(name)
    if (not pre_info):
        raise CommandExecutionError("User '{0}' does not exist".format(name))
    if (uid == pre_info['uid']):
        return True
    _dscl(['/Users/{0}'.format(name), 'UniqueID', pre_info['uid'], uid], ctype='change')
    time.sleep(1)
    return (info(name).get('uid') == uid)
change the uid for a named user .
train
true
5,173
def ValidateAccessAndSubjects(requested_access, subjects):
    if (not requested_access):
        raise access_control.UnauthorizedAccess(('Must specify requested access type for %s' % subjects))
    for s in requested_access:
        if (s not in 'rwq'):
            raise ValueError(('Invalid access requested for %s: %s' % (subjects, requested_access)))
    if (('q' in requested_access) and ('r' not in requested_access)):
        raise access_control.UnauthorizedAccess(
            ('Invalid access request: query permissions require read permissions for %s' % subjects),
            requested_access=requested_access)
    return True
does basic requested access validation .
train
true
5,174
def format_function(name, function, docstring=None):
    template = '.. py:function:: ckan.plugins.toolkit.{function}{args}\n\n{docstring}\n\n'
    argstring = inspect.formatargspec(*inspect.getargspec(function))
    docstring = (docstring or inspect.getdoc(function))
    if (docstring is None):
        docstring = ''
    else:
        docstring = '\n'.join([(' ' + line) for line in docstring.split('\n')])
    return template.format(function=name, args=argstring, docstring=docstring)
return a sphinx .. py:function:: directive for the given function .
train
false
5,176
def _transpose_iterables(fields, values):
    if isinstance(values, dict):
        transposed = dict([(field, defaultdict(list)) for field in fields])
        for (key, tuples) in list(values.items()):
            for kvals in tuples:
                for (idx, val) in enumerate(kvals):
                    if (val is not None):
                        transposed[fields[idx]][key].append(val)
        return list(transposed.items())
    else:
        return list(zip(fields, [[v for v in list(transpose) if (v is not None)]
                                 for transpose in zip(*values)]))
converts the given fields and tuple values into standardized ( field , values ) pairs .
train
false
5,177
def test_predict1():
    sp = SequencePattern()
    ts2s = TFLearnSeq2Seq(sp, verbose=1)
    wfn = ('test_%s' % ts2s.canonical_weights_fn(0))
    print(('using weights filename %s' % wfn))
    tf.reset_default_graph()
    (prediction, y) = ts2s.predict(Xin=range(10), weights_input_fn=wfn)
    assert (len(prediction) == 10)
test simple predictions using weights just produced .
train
false
5,180
def brokenTen(value):
    if (val < 10):
        return True
    else:
        return False
incorrect implementation of the ten function .
train
false
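The bug is the point of this sample: the body references the undefined name val and raises NameError. For contrast, a corrected version that uses its parameter:

def fixedTen(value):
    # compares the parameter itself, so no NameError is raised
    return value < 10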
5,182
@register.inclusion_tag('horizon/_nav_list.html', takes_context=True)
def horizon_main_nav(context):
    if ('request' not in context):
        return {}
    current_dashboard = context['request'].horizon.get('dashboard', None)
    dashboards = []
    for dash in Horizon.get_dashboards():
        if dash.can_access(context):
            if (callable(dash.nav) and dash.nav(context)):
                dashboards.append(dash)
            elif dash.nav:
                dashboards.append(dash)
    return {'components': dashboards,
            'user': context['request'].user,
            'current': current_dashboard,
            'request': context['request']}
generates top-level dashboard navigation entries .
train
false
5,183
def roots_legendre(n, mu=False):
    m = int(n)
    if ((n < 1) or (n != m)):
        raise ValueError('n must be a positive integer.')
    mu0 = 2.0
    an_func = (lambda k: (0.0 * k))
    bn_func = (lambda k: (k * np.sqrt((1.0 / (((4 * k) * k) - 1)))))
    f = cephes.eval_legendre
    df = (lambda n, x: (((((- n) * x) * cephes.eval_legendre(n, x)) +
                         (n * cephes.eval_legendre((n - 1), x))) / (1 - (x ** 2))))
    return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
gauss-legendre quadrature .
train
false
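A quadrature sketch via the public scipy.special entry point; five Gauss-Legendre nodes integrate low-order polynomials on [-1, 1] exactly:

import numpy as np
from scipy.special import roots_legendre

x, w = roots_legendre(5)  # nodes and weights on [-1, 1]
np.sum(w * x**2)          # ~0.6667, the exact value of the integral of x^2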
5,185
def get_elements_count(N, N1, W2):
    thresh = 0.9
    max_count = 0
    total_counts = 0.0
    for i in xrange(N):
        w = W2[:, i]
        wa = np.abs(w)
        total = wa.sum()
        s = np.asarray(sorted(wa))
        count = 1
        while (s[(-count):].sum() < (thresh * total)):
            count += 1
        if (count > max_count):
            max_count = count
        total_counts += count
    ave = (total_counts / float(N))
    print('average needed filters', ave)
    count = max_count
    print('It takes', count, 'of', N1, 'elements to account for ',
          (thresh * 100.0), '\\% of the weight in at least one filter')
    lim = 10
    if (count > lim):
        count = lim
        print('Only displaying ', count, ' elements though.')
    if (count > N1):
        count = N1
    return count
retrieve the number of elements to show .
train
false
5,186
def column_stack(tup):
    if any(((not isinstance(a, cupy.ndarray)) for a in tup)):
        raise TypeError('Only cupy arrays can be column stacked')
    lst = list(tup)
    for (i, a) in enumerate(lst):
        if (a.ndim == 1):
            a = a[:, cupy.newaxis]
            lst[i] = a
        elif (a.ndim != 2):
            raise ValueError('Only 1 or 2 dimensional arrays can be column stacked')
    return concatenate(lst, axis=1)
stacks 1-d and 2-d arrays as columns into a 2-d array .
train
false
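A usage sketch with CuPy arrays; 1-d inputs become columns of the result:

import cupy

a = cupy.array([1, 2, 3])
b = cupy.array([4, 5, 6])
cupy.column_stack((a, b)).shape  # -> (3, 2)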
5,187
def xsrf_required(method):
    def xsrf_required_decorator(self):
        expected_token = get_xsrf_token()
        actual_token = self.request.get('xsrf_token')
        if (actual_token != expected_token):
            self.response.set_status(403, 'Invalid XSRF token')
            self.response.out.write((('<h1>Invalid XSRF token</h1>\n' +
                                      '<p>Please reload the form page</n>\n') +
                                     (' ' * 512)))
        else:
            method(self)
    return xsrf_required_decorator
decorator to protect post() handlers against xsrf attacks .
train
false
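A usage sketch on an App Engine webapp-style POST handler; the handler class is illustrative:

class FormHandler(webapp.RequestHandler):
    @xsrf_required
    def post(self):
        # reached only when the submitted xsrf_token matches get_xsrf_token()
        self.response.out.write('ok')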
5,189
def is_oozie_enabled(): return (len([app for app in appmanager.DESKTOP_MODULES if (app.name == 'oozie')]) > 0)
returns whether the oozie app is installed ; oozie needs to be available as it is the backend .
train
false
5,190
def get_metaschemas(*args, **kwargs):
    count = request.args.get('count', 100)
    include = request.args.get('include', 'latest')
    meta_schemas = []
    if (include == 'latest'):
        schema_names = list(MetaSchema.objects.all().values_list('name', flat=True).distinct())
        for name in schema_names:
            meta_schema_set = MetaSchema.find((Q('name', 'eq', name) & Q('schema_version', 'eq', 2)))
            meta_schemas = (meta_schemas + [s for s in meta_schema_set])
    else:
        meta_schemas = MetaSchema.find()
    meta_schemas = [schema for schema in meta_schemas if (schema.name in ACTIVE_META_SCHEMAS)]
    meta_schemas.sort(key=(lambda a: ACTIVE_META_SCHEMAS.index(a.name)))
    return ({'meta_schemas': [serialize_meta_schema(ms) for ms in meta_schemas[:count]]}, http.OK)
list metaschemas with which a draft registration may be created .
train
false
5,191
def add_home_page(bootinfo, docs):
    if (frappe.session.user == u'Guest'):
        return
    home_page = frappe.db.get_default(u'desktop:home_page')
    if (home_page == u'setup-wizard'):
        bootinfo.setup_wizard_requires = frappe.get_hooks(u'setup_wizard_requires')
    try:
        page = frappe.desk.desk_page.get(home_page)
    except (frappe.DoesNotExistError, frappe.PermissionError):
        if frappe.message_log:
            frappe.message_log.pop()
        page = frappe.desk.desk_page.get(u'desktop')
    bootinfo[u'home_page'] = page.name
    docs.append(page)
load home page .
train
false
5,193
def test_pushd_appends_current_dir_to_stack_if_empty():
    mox = Mox()
    old_os = io.os
    io.os = mox.CreateMockAnything()

    class MyFs(io.FileSystem):
        stack = []

        @classmethod
        def current_dir(cls):
            return 'should be current dir'

    io.os.chdir('somewhere')
    mox.ReplayAll()
    try:
        assert (len(MyFs.stack) is 0)
        MyFs.pushd('somewhere')
        assert (len(MyFs.stack) is 2)
        assert_equals(MyFs.stack, ['should be current dir', 'somewhere'])
        mox.VerifyAll()
    finally:
        io.os = old_os
default behaviour of pushd() is adding the current dir to the stack .
train
false
5,194
def _vector_or_scalar(x, type='row'):
    if isinstance(x, (list, tuple)):
        x = np.array(x)
    if isinstance(x, np.ndarray):
        assert (x.ndim == 1)
        if (type == 'column'):
            x = x[:, None]
    return x
convert an object to either a scalar or a row or column vector .
train
true
5,195
def get_tag_commit():
    try:
        return check_output(['git', 'describe', '--tags'], stderr=STDOUT,
                            cwd=os.path.dirname(os.path.abspath(__file__)))
    except CalledProcessError as e:
        logger.error('Error calling git: "{}" \n output: "{}"'.format(e, e.output))
        return None
    except OSError as e:
        logger.error('Could not call git, is it installed? error msg: "{}"'.format(e))
        return None
returns a string of the form tag-(commits since tag)-(7 digit commit id) , as produced by git describe --tags .
train
false
5,196
def is_missing(data, missing_value): if (is_float(data) and isnan(missing_value)): return isnan(data) elif (is_datetime(data) and isnat(missing_value)): return isnat(data) return (data == missing_value)
[ "def", "is_missing", "(", "data", ",", "missing_value", ")", ":", "if", "(", "is_float", "(", "data", ")", "and", "isnan", "(", "missing_value", ")", ")", ":", "return", "isnan", "(", "data", ")", "elif", "(", "is_datetime", "(", "data", ")", "and", "isnat", "(", "missing_value", ")", ")", ":", "return", "isnat", "(", "data", ")", "return", "(", "data", "==", "missing_value", ")" ]
generic is_missing function that handles nan and nat .
train
true
5,197
def success_installation(domains): z_util(interfaces.IDisplay).notification('Congratulations! You have successfully enabled {0}{1}{1}You should test your configuration at:{1}{2}'.format(_gen_https_names(domains), os.linesep, os.linesep.join(_gen_ssl_lab_urls(domains))), pause=False)
[ "def", "success_installation", "(", "domains", ")", ":", "z_util", "(", "interfaces", ".", "IDisplay", ")", ".", "notification", "(", "'Congratulations! You have successfully enabled {0}{1}{1}You should test your configuration at:{1}{2}'", ".", "format", "(", "_gen_https_names", "(", "domains", ")", ",", "os", ".", "linesep", ",", "os", ".", "linesep", ".", "join", "(", "_gen_ssl_lab_urls", "(", "domains", ")", ")", ")", ",", "pause", "=", "False", ")" ]
display a box confirming the installation of https .
train
false
5,198
def test_smote_fit_single_class(): smote = SMOTETomek(random_state=RND_SEED) y_single_class = np.zeros((X.shape[0],)) assert_warns(UserWarning, smote.fit, X, y_single_class)
[ "def", "test_smote_fit_single_class", "(", ")", ":", "smote", "=", "SMOTETomek", "(", "random_state", "=", "RND_SEED", ")", "y_single_class", "=", "np", ".", "zeros", "(", "(", "X", ".", "shape", "[", "0", "]", ",", ")", ")", "assert_warns", "(", "UserWarning", ",", "smote", ".", "fit", ",", "X", ",", "y_single_class", ")" ]
test that a UserWarning is raised when fitting with a single class .
train
false
5,199
def anonymous_name(id): import socket, random name = ('%s_%s_%s_%s' % (id, socket.gethostname(), os.getpid(), random.randint(0, sys.maxsize))) name = name.replace('.', '_') name = name.replace('-', '_') return name.replace(':', '_')
[ "def", "anonymous_name", "(", "id", ")", ":", "import", "socket", ",", "random", "name", "=", "(", "'%s_%s_%s_%s'", "%", "(", "id", ",", "socket", ".", "gethostname", "(", ")", ",", "os", ".", "getpid", "(", ")", ",", "random", ".", "randint", "(", "0", ",", "sys", ".", "maxsize", ")", ")", ")", "name", "=", "name", ".", "replace", "(", "'.'", ",", "'_'", ")", "name", "=", "name", ".", "replace", "(", "'-'", ",", "'_'", ")", "return", "name", ".", "replace", "(", "':'", ",", "'_'", ")" ]
generate a ros-legal anonymous name .
train
false
5,200
def translate_longopt(opt): return string.translate(opt, longopt_xlate)
[ "def", "translate_longopt", "(", "opt", ")", ":", "return", "string", ".", "translate", "(", "opt", ",", "longopt_xlate", ")" ]
convert a long option name to a valid python identifier by changing "-" to "_" .
train
false
5,201
def get_repository_version(pear_output): lines = pear_output.split('\n') for line in lines: if ('Latest ' in line): return line.rsplit(None, 1)[(-1)].strip() return None
[ "def", "get_repository_version", "(", "pear_output", ")", ":", "lines", "=", "pear_output", ".", "split", "(", "'\\n'", ")", "for", "line", "in", "lines", ":", "if", "(", "'Latest '", "in", "line", ")", ":", "return", "line", ".", "rsplit", "(", "None", ",", "1", ")", "[", "(", "-", "1", ")", "]", ".", "strip", "(", ")", "return", "None" ]
take pear remote-info output and get the latest version .
train
false
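A minimal sketch of get_repository_version above; the sample pear remote-info output is illustrative, not captured from a real run:

    sample_output = ('Package details:\n'
                     '================\n'
                     'Latest      1.9.5\n'
                     'Installed   1.9.4\n')
    # scans line by line for 'Latest ' and returns the last
    # whitespace-separated token on that matching line
    assert get_repository_version(sample_output) == '1.9.5'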
5,203
def stringToLong(s): result = 0L for byte in s: result = ((256 * result) + ord(byte)) return result
[ "def", "stringToLong", "(", "s", ")", ":", "result", "=", "0", "L", "for", "byte", "in", "s", ":", "result", "=", "(", "(", "256", "*", "result", ")", "+", "ord", "(", "byte", ")", ")", "return", "result" ]
convert digest to long .
train
false
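A quick check of the big-endian accumulation in stringToLong above (a sketch; Python 2 is assumed, matching the 0L literal):

    import binascii
    # 'ab' -> 97*256 + 98
    assert stringToLong('ab') == 24930
    # equivalent to reading the bytes as one big-endian hex number
    assert stringToLong('ab') == int(binascii.hexlify('ab'), 16)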
5,204
def _compose(f, g): def _exec(x): ret = g(x) if (ret is not None): ret = HtmlPageRegion(ret.htmlpage, remove_tags(ret.text_content)) return f(ret) return None return _exec
[ "def", "_compose", "(", "f", ",", "g", ")", ":", "def", "_exec", "(", "x", ")", ":", "ret", "=", "g", "(", "x", ")", "if", "(", "ret", "is", "not", "None", ")", ":", "ret", "=", "HtmlPageRegion", "(", "ret", ".", "htmlpage", ",", "remove_tags", "(", "ret", ".", "text_content", ")", ")", "return", "f", "(", "ret", ")", "return", "None", "return", "_exec" ]
given unary functions f and g, return their composition: apply g, strip markup tags from a non-None result, then apply f .
train
false
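The wrapped types in _compose above come from scrapely; a stripped-down analogue of the same pattern, with str.strip standing in for remove_tags and the names here being hypothetical:

    def compose_plain(f, g):
        # g runs first; a non-None result is post-processed, then fed to f
        def _exec(x):
            ret = g(x)
            if ret is not None:
                return f(ret.strip())
            return None
        return _exec

    shout = compose_plain(str.upper, lambda s: '  hello  ')
    assert shout('anything') == 'HELLO'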
5,205
def visit_snippet(self, node): lang = self.highlightlang linenos = (node.rawsource.count('\n') >= (self.highlightlinenothreshold - 1)) fname = node['filename'] highlight_args = node.get('highlight_args', {}) if ('language' in node): lang = node['language'] highlight_args['force'] = True if ('linenos' in node): linenos = node['linenos'] def warner(msg): self.builder.warn(msg, (self.builder.current_docname, node.line)) highlighted = self.highlighter.highlight_block(node.rawsource, lang, warn=warner, linenos=linenos, **highlight_args) starttag = self.starttag(node, 'div', suffix='', CLASS=('highlight-%s' % lang)) self.body.append(starttag) self.body.append(('<div class="snippet-filename">%s</div>\n' % (fname,))) self.body.append(highlighted) self.body.append('</div>\n') raise nodes.SkipNode
[ "def", "visit_snippet", "(", "self", ",", "node", ")", ":", "lang", "=", "self", ".", "highlightlang", "linenos", "=", "(", "node", ".", "rawsource", ".", "count", "(", "'\\n'", ")", ">=", "(", "self", ".", "highlightlinenothreshold", "-", "1", ")", ")", "fname", "=", "node", "[", "'filename'", "]", "highlight_args", "=", "node", ".", "get", "(", "'highlight_args'", ",", "{", "}", ")", "if", "(", "'language'", "in", "node", ")", ":", "lang", "=", "node", "[", "'language'", "]", "highlight_args", "[", "'force'", "]", "=", "True", "if", "(", "'linenos'", "in", "node", ")", ":", "linenos", "=", "node", "[", "'linenos'", "]", "def", "warner", "(", "msg", ")", ":", "self", ".", "builder", ".", "warn", "(", "msg", ",", "(", "self", ".", "builder", ".", "current_docname", ",", "node", ".", "line", ")", ")", "highlighted", "=", "self", ".", "highlighter", ".", "highlight_block", "(", "node", ".", "rawsource", ",", "lang", ",", "warn", "=", "warner", ",", "linenos", "=", "linenos", ",", "**", "highlight_args", ")", "starttag", "=", "self", ".", "starttag", "(", "node", ",", "'div'", ",", "suffix", "=", "''", ",", "CLASS", "=", "(", "'highlight-%s'", "%", "lang", ")", ")", "self", ".", "body", ".", "append", "(", "starttag", ")", "self", ".", "body", ".", "append", "(", "(", "'<div class=\"snippet-filename\">%s</div>\\n'", "%", "(", "fname", ",", ")", ")", ")", "self", ".", "body", ".", "append", "(", "highlighted", ")", "self", ".", "body", ".", "append", "(", "'</div>\\n'", ")", "raise", "nodes", ".", "SkipNode" ]
html document generator visit handler .
train
true
5,206
def _get_param(param, param_dict, default=None): result = param_dict.get(param, default) if ((param in ['max-keys']) and result): return long(result) return result
[ "def", "_get_param", "(", "param", ",", "param_dict", ",", "default", "=", "None", ")", ":", "result", "=", "param_dict", ".", "get", "(", "param", ",", "default", ")", "if", "(", "(", "param", "in", "[", "'max-keys'", "]", ")", "and", "result", ")", ":", "return", "long", "(", "result", ")", "return", "result" ]
gets a parameter value from request query parameters .
train
false
5,208
def any2unicode(text, encoding='utf8', errors='strict'): if isinstance(text, unicode): return text return unicode(text, encoding, errors=errors)
[ "def", "any2unicode", "(", "text", ",", "encoding", "=", "'utf8'", ",", "errors", "=", "'strict'", ")", ":", "if", "isinstance", "(", "text", ",", "unicode", ")", ":", "return", "text", "return", "unicode", "(", "text", ",", "encoding", ",", "errors", "=", "errors", ")" ]
convert a string (bytestring in encoding or unicode) to unicode .
train
true
5,210
def task_log_begin_task(context, task_name, period_beginning, period_ending, host, task_items=None, message=None): return IMPL.task_log_begin_task(context, task_name, period_beginning, period_ending, host, task_items, message)
[ "def", "task_log_begin_task", "(", "context", ",", "task_name", ",", "period_beginning", ",", "period_ending", ",", "host", ",", "task_items", "=", "None", ",", "message", "=", "None", ")", ":", "return", "IMPL", ".", "task_log_begin_task", "(", "context", ",", "task_name", ",", "period_beginning", ",", "period_ending", ",", "host", ",", "task_items", ",", "message", ")" ]
mark a task as started for a given host/time period .
train
false
5,211
def translate_tmpl(prefix, lng): src = open((EMAIL_DIR + ('/%s-en.tmpl' % prefix)), 'r') data = src.read().decode('utf-8') src.close() data = _(data).encode('utf-8') fp = open(('email/%s-%s.tmpl' % (prefix, lng)), 'wb') if (not ((-1) < data.find('UTF-8') < 30)): fp.write('#encoding UTF-8\n') fp.write(data) fp.close()
[ "def", "translate_tmpl", "(", "prefix", ",", "lng", ")", ":", "src", "=", "open", "(", "(", "EMAIL_DIR", "+", "(", "'/%s-en.tmpl'", "%", "prefix", ")", ")", ",", "'r'", ")", "data", "=", "src", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "src", ".", "close", "(", ")", "data", "=", "_", "(", "data", ")", ".", "encode", "(", "'utf-8'", ")", "fp", "=", "open", "(", "(", "'email/%s-%s.tmpl'", "%", "(", "prefix", ",", "lng", ")", ")", ",", "'wb'", ")", "if", "(", "not", "(", "(", "-", "1", ")", "<", "data", ".", "find", "(", "'UTF-8'", ")", "<", "30", ")", ")", ":", "fp", ".", "write", "(", "'#encoding UTF-8\\n'", ")", "fp", ".", "write", "(", "data", ")", "fp", ".", "close", "(", ")" ]
translate template prefix into language lng .
train
false
5,212
def make_dirs(path): try: os.makedirs(path) except OSError as ex: if (ex.errno != errno.EEXIST): raise
[ "def", "make_dirs", "(", "path", ")", ":", "try", ":", "os", ".", "makedirs", "(", "path", ")", "except", "OSError", "as", "ex", ":", "if", "(", "ex", ".", "errno", "!=", "errno", ".", "EEXIST", ")", ":", "raise" ]
an idempotent version of os.makedirs() that ignores an already-existing directory .
train
false
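A sketch of the idempotence of make_dirs above — a repeated call must not raise, while other OS errors still propagate:

    import os
    import tempfile
    target = os.path.join(tempfile.mkdtemp(), 'a', 'b')
    make_dirs(target)
    make_dirs(target)  # second call hits EEXIST, which is swallowed
    assert os.path.isdir(target)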
5,213
def random_symbols(expr): try: return list(expr.atoms(RandomSymbol)) except AttributeError: return []
[ "def", "random_symbols", "(", "expr", ")", ":", "try", ":", "return", "list", "(", "expr", ".", "atoms", "(", "RandomSymbol", ")", ")", "except", "AttributeError", ":", "return", "[", "]" ]
returns all randomsymbols within a sympy expression .
train
false
5,214
def calc_moments(timeseries_file, moment): timeseries = np.genfromtxt(timeseries_file) m2 = stats.moment(timeseries, 2, axis=0) m3 = stats.moment(timeseries, moment, axis=0) zero = (m2 == 0) return np.where(zero, 0, (m3 / (m2 ** (moment / 2.0))))
[ "def", "calc_moments", "(", "timeseries_file", ",", "moment", ")", ":", "timeseries", "=", "np", ".", "genfromtxt", "(", "timeseries_file", ")", "m2", "=", "stats", ".", "moment", "(", "timeseries", ",", "2", ",", "axis", "=", "0", ")", "m3", "=", "stats", ".", "moment", "(", "timeseries", ",", "moment", ",", "axis", "=", "0", ")", "zero", "=", "(", "m2", "==", "0", ")", "return", "np", ".", "where", "(", "zero", ",", "0", ",", "(", "m3", "/", "(", "m2", "**", "(", "moment", "/", "2.0", ")", ")", ")", ")" ]
returns nth moment of timeseries .
train
false
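With moment=3, calc_moments above yields the biased skewness, since m3 / m2**1.5 is what scipy.stats.skew computes by default; a sketch using an in-memory array instead of a file:

    import numpy as np
    from scipy import stats
    ts = np.random.RandomState(0).normal(size=(200, 3))
    m2 = stats.moment(ts, 2, axis=0)
    m3 = stats.moment(ts, 3, axis=0)
    np.testing.assert_allclose(m3 / m2 ** 1.5, stats.skew(ts, axis=0))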
5,215
def test_time_as_index(): evoked = read_evokeds(fname, condition=0).crop((-0.1), 0.1) assert_array_equal(evoked.time_as_index([(-0.1), 0.1], use_rounding=True), [0, (len(evoked.times) - 1)])
[ "def", "test_time_as_index", "(", ")", ":", "evoked", "=", "read_evokeds", "(", "fname", ",", "condition", "=", "0", ")", ".", "crop", "(", "(", "-", "0.1", ")", ",", "0.1", ")", "assert_array_equal", "(", "evoked", ".", "time_as_index", "(", "[", "(", "-", "0.1", ")", ",", "0.1", "]", ",", "use_rounding", "=", "True", ")", ",", "[", "0", ",", "(", "len", "(", "evoked", ".", "times", ")", "-", "1", ")", "]", ")" ]
test time as index .
train
false
5,217
def massage_permissions(document): read_perms = document.list_permissions(perm='read') write_perms = document.list_permissions(perm='write') return {'perms': {'read': {'users': [{'id': perm_user.id, 'username': perm_user.username} for perm_user in read_perms.users.all()], 'groups': [{'id': perm_group.id, 'name': perm_group.name} for perm_group in read_perms.groups.all()]}, 'write': {'users': [{'id': perm_user.id, 'username': perm_user.username} for perm_user in write_perms.users.all()], 'groups': [{'id': perm_group.id, 'name': perm_group.name} for perm_group in write_perms.groups.all()]}}}
[ "def", "massage_permissions", "(", "document", ")", ":", "read_perms", "=", "document", ".", "list_permissions", "(", "perm", "=", "'read'", ")", "write_perms", "=", "document", ".", "list_permissions", "(", "perm", "=", "'write'", ")", "return", "{", "'perms'", ":", "{", "'read'", ":", "{", "'users'", ":", "[", "{", "'id'", ":", "perm_user", ".", "id", ",", "'username'", ":", "perm_user", ".", "username", "}", "for", "perm_user", "in", "read_perms", ".", "users", ".", "all", "(", ")", "]", ",", "'groups'", ":", "[", "{", "'id'", ":", "perm_group", ".", "id", ",", "'name'", ":", "perm_group", ".", "name", "}", "for", "perm_group", "in", "read_perms", ".", "groups", ".", "all", "(", ")", "]", "}", ",", "'write'", ":", "{", "'users'", ":", "[", "{", "'id'", ":", "perm_user", ".", "id", ",", "'username'", ":", "perm_user", ".", "username", "}", "for", "perm_user", "in", "write_perms", ".", "users", ".", "all", "(", ")", "]", ",", "'groups'", ":", "[", "{", "'id'", ":", "perm_group", ".", "id", ",", "'name'", ":", "perm_group", ".", "name", "}", "for", "perm_group", "in", "write_perms", ".", "groups", ".", "all", "(", ")", "]", "}", "}", "}" ]
returns the permissions for a given document as a dictionary .
train
false
5,218
def move_to_gpu(data): if (str(data.dtype) in tensor.basic.complex_dtypes): return False if ((data.ndim == 0) and (str(data.dtype) in tensor.basic.discrete_dtypes)): return False return True
[ "def", "move_to_gpu", "(", "data", ")", ":", "if", "(", "str", "(", "data", ".", "dtype", ")", "in", "tensor", ".", "basic", ".", "complex_dtypes", ")", ":", "return", "False", "if", "(", "(", "data", ".", "ndim", "==", "0", ")", "and", "(", "str", "(", "data", ".", "dtype", ")", "in", "tensor", ".", "basic", ".", "discrete_dtypes", ")", ")", ":", "return", "False", "return", "True" ]
do we want to move this computation to the gpu? currently complex values and discrete scalars are kept on the cpu .
train
false
5,219
@pytest.mark.svn def test_freeze_svn(script, tmpdir): checkout_path = _create_test_package(script, vcs='svn') script.run('python', 'setup.py', 'develop', cwd=checkout_path, expect_stderr=True) result = script.pip('freeze', expect_stderr=True) expected = textwrap.dedent(' ...-e svn+...#egg=version_pkg\n ...') _check_output(result.stdout, expected)
[ "@", "pytest", ".", "mark", ".", "svn", "def", "test_freeze_svn", "(", "script", ",", "tmpdir", ")", ":", "checkout_path", "=", "_create_test_package", "(", "script", ",", "vcs", "=", "'svn'", ")", "script", ".", "run", "(", "'python'", ",", "'setup.py'", ",", "'develop'", ",", "cwd", "=", "checkout_path", ",", "expect_stderr", "=", "True", ")", "result", "=", "script", ".", "pip", "(", "'freeze'", ",", "expect_stderr", "=", "True", ")", "expected", "=", "textwrap", ".", "dedent", "(", "' ...-e svn+...#egg=version_pkg\\n ...'", ")", "_check_output", "(", "result", ".", "stdout", ",", "expected", ")" ]
test freezing a svn checkout .
train
false
5,224
def get_text_box(text, fs): return (fs, text_len(len(text), fs))
[ "def", "get_text_box", "(", "text", ",", "fs", ")", ":", "return", "(", "fs", ",", "text_len", "(", "len", "(", "text", ")", ",", "fs", ")", ")" ]
approximation of text bounds .
train
false
5,225
@require_context @pick_context_manager_writer def ec2_volume_create(context, volume_uuid, id=None): ec2_volume_ref = models.VolumeIdMapping() ec2_volume_ref.update({'uuid': volume_uuid}) if (id is not None): ec2_volume_ref.update({'id': id}) ec2_volume_ref.save(context.session) return ec2_volume_ref
[ "@", "require_context", "@", "pick_context_manager_writer", "def", "ec2_volume_create", "(", "context", ",", "volume_uuid", ",", "id", "=", "None", ")", ":", "ec2_volume_ref", "=", "models", ".", "VolumeIdMapping", "(", ")", "ec2_volume_ref", ".", "update", "(", "{", "'uuid'", ":", "volume_uuid", "}", ")", "if", "(", "id", "is", "not", "None", ")", ":", "ec2_volume_ref", ".", "update", "(", "{", "'id'", ":", "id", "}", ")", "ec2_volume_ref", ".", "save", "(", "context", ".", "session", ")", "return", "ec2_volume_ref" ]
create ec2 compatible volume by provided uuid .
train
false
5,226
def geometric_conj_af(a, f): (a, f) = map(sympify, (a, f)) return (- geometric_conj_ab(a, (- f)))
[ "def", "geometric_conj_af", "(", "a", ",", "f", ")", ":", "(", "a", ",", "f", ")", "=", "map", "(", "sympify", ",", "(", "a", ",", "f", ")", ")", "return", "(", "-", "geometric_conj_ab", "(", "a", ",", "(", "-", "f", ")", ")", ")" ]
conjugation relation for geometrical beams under paraxial conditions .
train
false
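geometric_conj_af above encodes the thin-lens relation 1/a + 1/b = 1/f, giving b = a*f/(a - f); a quick sympy check of that relation (a sketch, independent of the function itself):

    from sympy import symbols, solve
    a, b, f = symbols('a b f', positive=True)
    conj = solve(1/a + 1/b - 1/f, b)[0]   # -> a*f/(a - f)
    assert conj.subs({a: 30, f: 10}) == 15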
5,227
def humanize_path(path): return path.replace('.', '/')
[ "def", "humanize_path", "(", "path", ")", ":", "return", "path", ".", "replace", "(", "'.'", ",", "'/'", ")" ]
convert a python dotted path to a directory-like one .
train
false
5,228
def query_metadata(metadata_url, headers=None, expect_json=False): (result, info) = fetch_url(module, metadata_url, headers=headers) if (info['status'] != 200): raise OpenShiftFactsMetadataUnavailableError('Metadata unavailable') if expect_json: return module.from_json(to_native(result.read())) else: return [to_native(line.strip()) for line in result.readlines()]
[ "def", "query_metadata", "(", "metadata_url", ",", "headers", "=", "None", ",", "expect_json", "=", "False", ")", ":", "(", "result", ",", "info", ")", "=", "fetch_url", "(", "module", ",", "metadata_url", ",", "headers", "=", "headers", ")", "if", "(", "info", "[", "'status'", "]", "!=", "200", ")", ":", "raise", "OpenShiftFactsMetadataUnavailableError", "(", "'Metadata unavailable'", ")", "if", "expect_json", ":", "return", "module", ".", "from_json", "(", "to_native", "(", "result", ".", "read", "(", ")", ")", ")", "else", ":", "return", "[", "to_native", "(", "line", ".", "strip", "(", ")", ")", "for", "line", "in", "result", ".", "readlines", "(", ")", "]" ]
return metadata from the provided metadata_url . args: metadata_url (the metadata url) ; headers (headers to set for the metadata request) ; expect_json (whether the metadata_url returns json) . returns: dict or list with the metadata request result .
train
false
5,229
@context.quietfunc @with_device def logcat(stream=False): if stream: return process(['logcat']) else: return process(['logcat', '-d']).recvall()
[ "@", "context", ".", "quietfunc", "@", "with_device", "def", "logcat", "(", "stream", "=", "False", ")", ":", "if", "stream", ":", "return", "process", "(", "[", "'logcat'", "]", ")", "else", ":", "return", "process", "(", "[", "'logcat'", ",", "'-d'", "]", ")", ".", "recvall", "(", ")" ]
reads the system log file .
train
false
5,230
def compareFunctionName(first, second): first = getConvertedName(first) second = getConvertedName(second) if (first < second): return (-1) return (first < second)
[ "def", "compareFunctionName", "(", "first", ",", "second", ")", ":", "first", "=", "getConvertedName", "(", "first", ")", "second", "=", "getConvertedName", "(", "second", ")", "if", "(", "first", "<", "second", ")", ":", "return", "(", "-", "1", ")", "return", "(", "first", "<", "second", ")" ]
compare the function names .
train
false
5,231
def outputFromPythonScript(script, *args): with open(devnull, 'rb') as nullInput: with open(devnull, 'wb') as nullError: process = Popen(([executable, script.path] + list(args)), stdout=PIPE, stderr=nullError, stdin=nullInput) stdout = process.communicate()[0] return stdout
[ "def", "outputFromPythonScript", "(", "script", ",", "*", "args", ")", ":", "with", "open", "(", "devnull", ",", "'rb'", ")", "as", "nullInput", ":", "with", "open", "(", "devnull", ",", "'wb'", ")", "as", "nullError", ":", "process", "=", "Popen", "(", "(", "[", "executable", ",", "script", ".", "path", "]", "+", "list", "(", "args", ")", ")", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "nullError", ",", "stdin", "=", "nullInput", ")", "stdout", "=", "process", ".", "communicate", "(", ")", "[", "0", "]", "return", "stdout" ]
synchronously run a python script .
train
false
5,233
def _lookup_first(dictionary, key): value = dictionary[key] if (type(value) == list): return value[0] else: return value
[ "def", "_lookup_first", "(", "dictionary", ",", "key", ")", ":", "value", "=", "dictionary", "[", "key", "]", "if", "(", "type", "(", "value", ")", "==", "list", ")", ":", "return", "value", "[", "0", "]", "else", ":", "return", "value" ]
look up the first value for the given key; if the value is a list, return its first element .
train
true
5,235
@contextmanager def redirected_stdio(conn): orig_stdin = conn.modules.sys.stdin orig_stdout = conn.modules.sys.stdout orig_stderr = conn.modules.sys.stderr try: conn.modules.sys.stdin = restricted(sys.stdin, ['softspace', 'write', 'readline', 'encoding', 'close']) conn.modules.sys.stdout = restricted(sys.stdout, ['softspace', 'write', 'readline', 'encoding', 'close', 'flush']) conn.modules.sys.stderr = restricted(sys.stderr, ['softspace', 'write', 'readline', 'encoding', 'close', 'flush']) (yield) finally: conn.modules.sys.stdin = orig_stdin conn.modules.sys.stdout = orig_stdout conn.modules.sys.stderr = orig_stderr
[ "@", "contextmanager", "def", "redirected_stdio", "(", "conn", ")", ":", "orig_stdin", "=", "conn", ".", "modules", ".", "sys", ".", "stdin", "orig_stdout", "=", "conn", ".", "modules", ".", "sys", ".", "stdout", "orig_stderr", "=", "conn", ".", "modules", ".", "sys", ".", "stderr", "try", ":", "conn", ".", "modules", ".", "sys", ".", "stdin", "=", "restricted", "(", "sys", ".", "stdin", ",", "[", "'softspace'", ",", "'write'", ",", "'readline'", ",", "'encoding'", ",", "'close'", "]", ")", "conn", ".", "modules", ".", "sys", ".", "stdout", "=", "restricted", "(", "sys", ".", "stdout", ",", "[", "'softspace'", ",", "'write'", ",", "'readline'", ",", "'encoding'", ",", "'close'", ",", "'flush'", "]", ")", "conn", ".", "modules", ".", "sys", ".", "stderr", "=", "restricted", "(", "sys", ".", "stderr", ",", "[", "'softspace'", ",", "'write'", ",", "'readline'", ",", "'encoding'", ",", "'close'", ",", "'flush'", "]", ")", "(", "yield", ")", "finally", ":", "conn", ".", "modules", ".", "sys", ".", "stdin", "=", "orig_stdin", "conn", ".", "modules", ".", "sys", ".", "stdout", "=", "orig_stdout", "conn", ".", "modules", ".", "sys", ".", "stderr", "=", "orig_stderr" ]
redirects the other party's stdin, stdout and stderr to the local party's streams, restoring them on exit .
train
false
5,236
def parseConfig(configHandle): if isinstance(configHandle, dict): config_dict = configHandle dirpath = '.' else: (scheme, host, path, p, q, f) = urlparse(configHandle) if (scheme == ''): scheme = 'file' path = realpath(path) if (scheme == 'file'): with open(path) as file: config_dict = json_load(file) else: config_dict = json_load(urlopen(configHandle)) dirpath = ('%s://%s%s' % (scheme, host, (dirname(path).rstrip('/') + '/'))) return Config.buildConfiguration(config_dict, dirpath)
[ "def", "parseConfig", "(", "configHandle", ")", ":", "if", "isinstance", "(", "configHandle", ",", "dict", ")", ":", "config_dict", "=", "configHandle", "dirpath", "=", "'.'", "else", ":", "(", "scheme", ",", "host", ",", "path", ",", "p", ",", "q", ",", "f", ")", "=", "urlparse", "(", "configHandle", ")", "if", "(", "scheme", "==", "''", ")", ":", "scheme", "=", "'file'", "path", "=", "realpath", "(", "path", ")", "if", "(", "scheme", "==", "'file'", ")", ":", "with", "open", "(", "path", ")", "as", "file", ":", "config_dict", "=", "json_load", "(", "file", ")", "else", ":", "config_dict", "=", "json_load", "(", "urlopen", "(", "configHandle", ")", ")", "dirpath", "=", "(", "'%s://%s%s'", "%", "(", "scheme", ",", "host", ",", "(", "dirname", "(", "path", ")", ".", "rstrip", "(", "'/'", ")", "+", "'/'", ")", ")", ")", "return", "Config", ".", "buildConfiguration", "(", "config_dict", ",", "dirpath", ")" ]
parse a configuration file and return a configuration object .
train
false
5,238
def Lop(f, wrt, eval_points, consider_constant=None, disconnected_inputs='raise'): if (type(eval_points) not in (list, tuple)): eval_points = [eval_points] using_list = isinstance(wrt, list) using_tuple = isinstance(wrt, tuple) if (not isinstance(f, (list, tuple))): f = [f] f = list(f) grads = list(eval_points) if (not isinstance(wrt, (list, tuple))): wrt = [wrt] assert (len(f) == len(grads)) known = OrderedDict(izip(f, grads)) ret = grad(cost=None, known_grads=known, consider_constant=consider_constant, wrt=wrt, disconnected_inputs=disconnected_inputs) return format_as(using_list, using_tuple, ret)
[ "def", "Lop", "(", "f", ",", "wrt", ",", "eval_points", ",", "consider_constant", "=", "None", ",", "disconnected_inputs", "=", "'raise'", ")", ":", "if", "(", "type", "(", "eval_points", ")", "not", "in", "(", "list", ",", "tuple", ")", ")", ":", "eval_points", "=", "[", "eval_points", "]", "using_list", "=", "isinstance", "(", "wrt", ",", "list", ")", "using_tuple", "=", "isinstance", "(", "wrt", ",", "tuple", ")", "if", "(", "not", "isinstance", "(", "f", ",", "(", "list", ",", "tuple", ")", ")", ")", ":", "f", "=", "[", "f", "]", "f", "=", "list", "(", "f", ")", "grads", "=", "list", "(", "eval_points", ")", "if", "(", "not", "isinstance", "(", "wrt", ",", "(", "list", ",", "tuple", ")", ")", ")", ":", "wrt", "=", "[", "wrt", "]", "assert", "(", "len", "(", "f", ")", "==", "len", "(", "grads", ")", ")", "known", "=", "OrderedDict", "(", "izip", "(", "f", ",", "grads", ")", ")", "ret", "=", "grad", "(", "cost", "=", "None", ",", "known_grads", "=", "known", ",", "consider_constant", "=", "consider_constant", ",", "wrt", "=", "wrt", ",", "disconnected_inputs", "=", "disconnected_inputs", ")", "return", "format_as", "(", "using_list", ",", "using_tuple", ",", "ret", ")" ]
computes the L operation on f with respect to wrt, evaluated at the points given in eval_points .
train
false
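Lop above is Theano's L-operator: it returns the vector-Jacobian product v^T (df/dwrt) without materializing the Jacobian. A hedged usage sketch through the public theano.gradient.Lop wrapper:

    import theano
    import theano.tensor as T
    x = T.dvector('x')
    v = T.dvector('v')
    y = x ** 2                              # Jacobian is diag(2*x)
    vJ = theano.gradient.Lop(y, x, v)
    fn = theano.function([x, v], vJ)
    print(fn([1.0, 2.0], [1.0, 1.0]))       # -> [2., 4.]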
5,239
def download_workflow(url): filename = url.split(u'/')[(-1)] if ((not url.endswith(u'.alfredworkflow')) or (not filename.endswith(u'.alfredworkflow'))): raise ValueError(u'Attachment `{0}` not a workflow'.format(filename)) local_path = os.path.join(tempfile.gettempdir(), filename) wf().logger.debug(u'Downloading updated workflow from `{0}` to `{1}` ...'.format(url, local_path)) response = web.get(url) with open(local_path, u'wb') as output: output.write(response.content) return local_path
[ "def", "download_workflow", "(", "url", ")", ":", "filename", "=", "url", ".", "split", "(", "u'/'", ")", "[", "(", "-", "1", ")", "]", "if", "(", "(", "not", "url", ".", "endswith", "(", "u'.alfredworkflow'", ")", ")", "or", "(", "not", "filename", ".", "endswith", "(", "u'.alfredworkflow'", ")", ")", ")", ":", "raise", "ValueError", "(", "u'Attachment `{0}` not a workflow'", ".", "format", "(", "filename", ")", ")", "local_path", "=", "os", ".", "path", ".", "join", "(", "tempfile", ".", "gettempdir", "(", ")", ",", "filename", ")", "wf", "(", ")", ".", "logger", ".", "debug", "(", "u'Downloading updated workflow from `{0}` to `{1}` ...'", ".", "format", "(", "url", ",", "local_path", ")", ")", "response", "=", "web", ".", "get", "(", "url", ")", "with", "open", "(", "local_path", ",", "u'wb'", ")", "as", "output", ":", "output", ".", "write", "(", "response", ".", "content", ")", "return", "local_path" ]
download workflow at url to a local temporary file .
train
false
5,240
def decode_string(v, encoding='utf-8'): if isinstance(encoding, basestring): encoding = (((encoding,),) + (('windows-1252',), ('utf-8', 'ignore'))) if isinstance(v, str): for e in encoding: try: return v.decode(*e) except: pass return v return unicode(v)
[ "def", "decode_string", "(", "v", ",", "encoding", "=", "'utf-8'", ")", ":", "if", "isinstance", "(", "encoding", ",", "basestring", ")", ":", "encoding", "=", "(", "(", "(", "encoding", ",", ")", ",", ")", "+", "(", "(", "'windows-1252'", ",", ")", ",", "(", "'utf-8'", ",", "'ignore'", ")", ")", ")", "if", "isinstance", "(", "v", ",", "str", ")", ":", "for", "e", "in", "encoding", ":", "try", ":", "return", "v", ".", "decode", "(", "*", "e", ")", "except", ":", "pass", "return", "v", "return", "unicode", "(", "v", ")" ]
returns the given value as a unicode string .
train
true
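A sketch of the fallback chain in decode_string above (Python 2 semantics, matching the basestring/unicode names): strict utf-8 first, then windows-1252, then utf-8 ignoring errors:

    assert decode_string('caf\xc3\xa9') == u'caf\xe9'   # valid utf-8
    assert decode_string('caf\xe9') == u'caf\xe9'       # falls back to windows-1252
    assert decode_string(42) == u'42'                   # non-str goes through unicode()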
5,242
@requires_application() def test_spectrogram(): n_fft = 256 n_freqs = ((n_fft // 2) + 1) size = (100, n_freqs) with TestingCanvas(size=size) as c: np.random.seed(67853498) data = np.random.normal(size=(n_fft * 100)) spec = Spectrogram(data, n_fft=n_fft, step=n_fft, window=None, color_scale='linear', cmap='grays') c.draw_visual(spec) assert_image_approved('screenshot', 'visuals/spectrogram.png') freqs = spec.freqs assert (len(freqs) == n_freqs) assert (freqs[0] == 0) assert (freqs[(-1)] == 0.5)
[ "@", "requires_application", "(", ")", "def", "test_spectrogram", "(", ")", ":", "n_fft", "=", "256", "n_freqs", "=", "(", "(", "n_fft", "//", "2", ")", "+", "1", ")", "size", "=", "(", "100", ",", "n_freqs", ")", "with", "TestingCanvas", "(", "size", "=", "size", ")", "as", "c", ":", "np", ".", "random", ".", "seed", "(", "67853498", ")", "data", "=", "np", ".", "random", ".", "normal", "(", "size", "=", "(", "n_fft", "*", "100", ")", ")", "spec", "=", "Spectrogram", "(", "data", ",", "n_fft", "=", "n_fft", ",", "step", "=", "n_fft", ",", "window", "=", "None", ",", "color_scale", "=", "'linear'", ",", "cmap", "=", "'grays'", ")", "c", ".", "draw_visual", "(", "spec", ")", "assert_image_approved", "(", "'screenshot'", ",", "'visuals/spectrogram.png'", ")", "freqs", "=", "spec", ".", "freqs", "assert", "(", "len", "(", "freqs", ")", "==", "n_freqs", ")", "assert", "(", "freqs", "[", "0", "]", "==", "0", ")", "assert", "(", "freqs", "[", "(", "-", "1", ")", "]", "==", "0.5", ")" ]
test spectrogram visual .
train
false
5,244
def add_params_to_uri(uri, params): (sch, net, path, par, query, fra) = urlparse(uri) query = add_params_to_qs(query, params) return urlunparse((sch, net, path, par, query, fra))
[ "def", "add_params_to_uri", "(", "uri", ",", "params", ")", ":", "(", "sch", ",", "net", ",", "path", ",", "par", ",", "query", ",", "fra", ")", "=", "urlparse", "(", "uri", ")", "query", "=", "add_params_to_qs", "(", "query", ",", "params", ")", "return", "urlunparse", "(", "(", "sch", ",", "net", ",", "path", ",", "par", ",", "query", ",", "fra", ")", ")" ]
add a list of two-tuples to the uri query components .
train
true
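add_params_to_qs in add_params_to_uri above is oauthlib's query-string helper; a self-contained sketch of the same split/augment/reassemble technique using only the standard library (the helper name here is hypothetical):

    try:
        from urllib.parse import urlparse, urlunparse, parse_qsl, urlencode
    except ImportError:  # Python 2
        from urlparse import urlparse, urlunparse, parse_qsl
        from urllib import urlencode

    def add_params_to_uri_stdlib(uri, params):
        sch, net, path, par, query, fra = urlparse(uri)
        query = urlencode(parse_qsl(query) + list(params))
        return urlunparse((sch, net, path, par, query, fra))

    assert (add_params_to_uri_stdlib('https://example.com/p?a=1', [('b', '2')])
            == 'https://example.com/p?a=1&b=2')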
5,246
def _finish_backtrace(sequenceA, sequenceB, ali_seqA, ali_seqB, row, col, gap_char): if row: ali_seqA += sequenceA[(row - 1)::(-1)] if col: ali_seqB += sequenceB[(col - 1)::(-1)] if (row > col): ali_seqB += (gap_char * (len(ali_seqA) - len(ali_seqB))) elif (col > row): ali_seqA += (gap_char * (len(ali_seqB) - len(ali_seqA))) return (ali_seqA, ali_seqB)
[ "def", "_finish_backtrace", "(", "sequenceA", ",", "sequenceB", ",", "ali_seqA", ",", "ali_seqB", ",", "row", ",", "col", ",", "gap_char", ")", ":", "if", "row", ":", "ali_seqA", "+=", "sequenceA", "[", "(", "row", "-", "1", ")", ":", ":", "(", "-", "1", ")", "]", "if", "col", ":", "ali_seqB", "+=", "sequenceB", "[", "(", "col", "-", "1", ")", ":", ":", "(", "-", "1", ")", "]", "if", "(", "row", ">", "col", ")", ":", "ali_seqB", "+=", "(", "gap_char", "*", "(", "len", "(", "ali_seqA", ")", "-", "len", "(", "ali_seqB", ")", ")", ")", "elif", "(", "col", ">", "row", ")", ":", "ali_seqA", "+=", "(", "gap_char", "*", "(", "len", "(", "ali_seqB", ")", "-", "len", "(", "ali_seqA", ")", ")", ")", "return", "(", "ali_seqA", ",", "ali_seqB", ")" ]
add remaining sequences and fill with gaps if necessary .
train
false
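A worked example of the leftover handling in _finish_backtrace above (a sketch; the slices prepend the remaining prefixes reversed, since the caller builds alignments back-to-front):

    # two unconsumed characters remain in sequenceA, none in sequenceB
    seqA, seqB = _finish_backtrace('ACGT', 'ACGT', '', '',
                                   row=2, col=0, gap_char='-')
    assert seqA == 'CA'   # sequenceA[1::-1], the first two chars reversed
    assert seqB == '--'   # padded with gaps to equal length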
5,247
def is_local_device(my_ips, my_port, dev_ip, dev_port): candidate_ips = [] if ((not is_valid_ip(dev_ip)) and is_valid_hostname(dev_ip)): try: addrinfo = socket.getaddrinfo(dev_ip, dev_port) for addr in addrinfo: family = addr[0] dev_ip = addr[4][0] if (family == socket.AF_INET6): dev_ip = expand_ipv6(dev_ip) candidate_ips.append(dev_ip) except socket.gaierror: return False else: if is_valid_ipv6(dev_ip): dev_ip = expand_ipv6(dev_ip) candidate_ips = [dev_ip] for dev_ip in candidate_ips: if ((dev_ip in my_ips) and ((my_port is None) or (dev_port == my_port))): return True return False
[ "def", "is_local_device", "(", "my_ips", ",", "my_port", ",", "dev_ip", ",", "dev_port", ")", ":", "candidate_ips", "=", "[", "]", "if", "(", "(", "not", "is_valid_ip", "(", "dev_ip", ")", ")", "and", "is_valid_hostname", "(", "dev_ip", ")", ")", ":", "try", ":", "addrinfo", "=", "socket", ".", "getaddrinfo", "(", "dev_ip", ",", "dev_port", ")", "for", "addr", "in", "addrinfo", ":", "family", "=", "addr", "[", "0", "]", "dev_ip", "=", "addr", "[", "4", "]", "[", "0", "]", "if", "(", "family", "==", "socket", ".", "AF_INET6", ")", ":", "dev_ip", "=", "expand_ipv6", "(", "dev_ip", ")", "candidate_ips", ".", "append", "(", "dev_ip", ")", "except", "socket", ".", "gaierror", ":", "return", "False", "else", ":", "if", "is_valid_ipv6", "(", "dev_ip", ")", ":", "dev_ip", "=", "expand_ipv6", "(", "dev_ip", ")", "candidate_ips", "=", "[", "dev_ip", "]", "for", "dev_ip", "in", "candidate_ips", ":", "if", "(", "(", "dev_ip", "in", "my_ips", ")", "and", "(", "(", "my_port", "is", "None", ")", "or", "(", "dev_port", "==", "my_port", ")", ")", ")", ":", "return", "True", "return", "False" ]
return true if the provided dev_ip and dev_port are among the ip addresses specified in my_ips and my_port respectively .
train
false
5,251
def detect_snappy(contents): try: import snappy return snappy.isValidCompressed(contents) except: logging.exception('failed to detect snappy') return False
[ "def", "detect_snappy", "(", "contents", ")", ":", "try", ":", "import", "snappy", "return", "snappy", ".", "isValidCompressed", "(", "contents", ")", "except", ":", "logging", ".", "exception", "(", "'failed to detect snappy'", ")", "return", "False" ]
a small helper which checks whether the given contents are snappy-compressed .
train
false
5,252
def get_currency_name(currency, locale=LC_NUMERIC): return Locale.parse(locale).currencies.get(currency, currency)
[ "def", "get_currency_name", "(", "currency", ",", "locale", "=", "LC_NUMERIC", ")", ":", "return", "Locale", ".", "parse", "(", "locale", ")", ".", "currencies", ".", "get", "(", "currency", ",", "currency", ")" ]
return the name used by the locale for the specified currency .
train
false
5,253
def reducer(*tokens): def decorator(func): if (not hasattr(func, 'reducers')): func.reducers = [] func.reducers.append(list(tokens)) return func return decorator
[ "def", "reducer", "(", "*", "tokens", ")", ":", "def", "decorator", "(", "func", ")", ":", "if", "(", "not", "hasattr", "(", "func", ",", "'reducers'", ")", ")", ":", "func", ".", "reducers", "=", "[", "]", "func", ".", "reducers", ".", "append", "(", "list", "(", "tokens", ")", ")", "return", "func", "return", "decorator" ]
decorator for reduction methods .
train
false
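A quick sketch of the reducer decorator above: each application appends its token list to a reducers attribute, so stacked decorators accumulate bottom-up:

    @reducer('a', 'b')
    @reducer('c')
    def handle(*tokens):
        pass

    assert handle.reducers == [['c'], ['a', 'b']]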
5,254
def col_download_all(cols_selected): submissions = [] for sid in cols_selected: id = Source.query.filter((Source.filesystem_id == sid)).one().id submissions += Submission.query.filter((Submission.source_id == id)).all() return download('all', submissions)
[ "def", "col_download_all", "(", "cols_selected", ")", ":", "submissions", "=", "[", "]", "for", "sid", "in", "cols_selected", ":", "id", "=", "Source", ".", "query", ".", "filter", "(", "(", "Source", ".", "filesystem_id", "==", "sid", ")", ")", ".", "one", "(", ")", ".", "id", "submissions", "+=", "Submission", ".", "query", ".", "filter", "(", "(", "Submission", ".", "source_id", "==", "id", ")", ")", ".", "all", "(", ")", "return", "download", "(", "'all'", ",", "submissions", ")" ]
download all submissions from all selected sources .
train
false
5,255
def filename(): fname = u'' for __ in xrange(random.randint(10, 30)): fname += random.choice(NAME_CHARS_W_UNICODE) fname += random.choice(('.jpg', '.pdf', '.png', '.txt')) return fname
[ "def", "filename", "(", ")", ":", "fname", "=", "u''", "for", "__", "in", "xrange", "(", "random", ".", "randint", "(", "10", ",", "30", ")", ")", ":", "fname", "+=", "random", ".", "choice", "(", "NAME_CHARS_W_UNICODE", ")", "fname", "+=", "random", ".", "choice", "(", "(", "'.jpg'", ",", "'.pdf'", ",", "'.png'", ",", "'.txt'", ")", ")", "return", "fname" ]
generate a random filename with a random extension .
train
false
5,256
def _db_connection_type(db_connection): db_string = db_connection.split(':')[0].split('+')[0] return db_string.lower()
[ "def", "_db_connection_type", "(", "db_connection", ")", ":", "db_string", "=", "db_connection", ".", "split", "(", "':'", ")", "[", "0", "]", ".", "split", "(", "'+'", ")", "[", "0", "]", "return", "db_string", ".", "lower", "(", ")" ]
returns a lowercase symbol for the db type .
train
false
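A few illustrative SQLAlchemy-style connection strings for _db_connection_type above (values are examples):

    assert _db_connection_type('mysql+pymysql://user:pw@host/db') == 'mysql'
    assert _db_connection_type('postgresql://host/db') == 'postgresql'
    assert _db_connection_type('sqlite:///tmp/x.db') == 'sqlite'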
5,257
def init_list(doctype): doc = frappe.get_meta(doctype) make_boilerplate(u'controller_list.js', doc) make_boilerplate(u'controller_list.html', doc)
[ "def", "init_list", "(", "doctype", ")", ":", "doc", "=", "frappe", ".", "get_meta", "(", "doctype", ")", "make_boilerplate", "(", "u'controller_list.js'", ",", "doc", ")", "make_boilerplate", "(", "u'controller_list.html'", ",", "doc", ")" ]
make boilerplate list views .
train
false
5,258
def p_trailer(p): p[0] = ('CALL', p[2])
[ "def", "p_trailer", "(", "p", ")", ":", "p", "[", "0", "]", "=", "(", "'CALL'", ",", "p", "[", "2", "]", ")" ]
trailer : lpar arglist rpar .
train
false