id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
19,970
def bayesian_blocks(t, x=None, sigma=None, fitness=u'events', **kwargs):
    """Compute an optimal segmentation of data with Scargle's Bayesian Blocks.

    ``fitness`` may be one of the strings 'events', 'regular_events' or
    'measures', a FitnessFunc subclass, or an already-built FitnessFunc
    instance.  Extra keyword arguments are forwarded to the fitness-function
    constructor (only when a class is given).
    """
    known_fitness = {u'events': Events,
                     u'regular_events': RegularEvents,
                     u'measures': PointMeasures}
    # Map a known name to its class; anything else passes through unchanged.
    resolved = known_fitness.get(fitness, fitness)
    if (type(resolved) is type) and issubclass(resolved, FitnessFunc):
        fitfunc = resolved(**kwargs)
    elif isinstance(resolved, FitnessFunc):
        fitfunc = resolved
    else:
        raise ValueError(u'fitness parameter not understood')
    return fitfunc.fit(t, x, sigma)
[ "def", "bayesian_blocks", "(", "t", ",", "x", "=", "None", ",", "sigma", "=", "None", ",", "fitness", "=", "u'events'", ",", "**", "kwargs", ")", ":", "FITNESS_DICT", "=", "{", "u'events'", ":", "Events", ",", "u'regular_events'", ":", "RegularEvents", ",", "u'measures'", ":", "PointMeasures", "}", "fitness", "=", "FITNESS_DICT", ".", "get", "(", "fitness", ",", "fitness", ")", "if", "(", "(", "type", "(", "fitness", ")", "is", "type", ")", "and", "issubclass", "(", "fitness", ",", "FitnessFunc", ")", ")", ":", "fitfunc", "=", "fitness", "(", "**", "kwargs", ")", "elif", "isinstance", "(", "fitness", ",", "FitnessFunc", ")", ":", "fitfunc", "=", "fitness", "else", ":", "raise", "ValueError", "(", "u'fitness parameter not understood'", ")", "return", "fitfunc", ".", "fit", "(", "t", ",", "x", ",", "sigma", ")" ]
compute optimal segmentation of data with Scargle's Bayesian Blocks. This is a flexible implementation of the Bayesian Blocks algorithm described in Scargle 2012 [1]_ .
train
false
19,973
def agent_check_deregister(consul_url=None, checkid=None):
    """Deregister a health check from the local Consul agent.

    Returns a dict with a boolean ``res`` flag and a human-readable
    ``message``.  Raises SaltInvocationError when ``checkid`` is missing.
    """
    ret = {}
    if not consul_url:
        consul_url = _get_config()
    if not consul_url:
        # No URL given and none configured: fail gracefully.
        log.error('No Consul URL found.')
        ret['message'] = 'No Consul URL found.'
        ret['res'] = False
        return ret
    if not checkid:
        raise SaltInvocationError('Required argument "checkid" is missing.')
    function = 'agent/check/deregister/{0}'.format(checkid)
    res = _query(consul_url=consul_url, function=function, method='GET')
    if res['res']:
        ret['res'] = True
        ret['message'] = 'Check {0} removed from agent.'.format(checkid)
        return ret
    ret['res'] = False
    ret['message'] = 'Unable to remove check from agent.'
    return ret
[ "def", "agent_check_deregister", "(", "consul_url", "=", "None", ",", "checkid", "=", "None", ")", ":", "ret", "=", "{", "}", "if", "(", "not", "consul_url", ")", ":", "consul_url", "=", "_get_config", "(", ")", "if", "(", "not", "consul_url", ")", ":", "log", ".", "error", "(", "'No Consul URL found.'", ")", "ret", "[", "'message'", "]", "=", "'No Consul URL found.'", "ret", "[", "'res'", "]", "=", "False", "return", "ret", "if", "(", "not", "checkid", ")", ":", "raise", "SaltInvocationError", "(", "'Required argument \"checkid\" is missing.'", ")", "function", "=", "'agent/check/deregister/{0}'", ".", "format", "(", "checkid", ")", "res", "=", "_query", "(", "consul_url", "=", "consul_url", ",", "function", "=", "function", ",", "method", "=", "'GET'", ")", "if", "res", "[", "'res'", "]", ":", "ret", "[", "'res'", "]", "=", "True", "ret", "[", "'message'", "]", "=", "'Check {0} removed from agent.'", ".", "format", "(", "checkid", ")", "else", ":", "ret", "[", "'res'", "]", "=", "False", "ret", "[", "'message'", "]", "=", "'Unable to remove check from agent.'", "return", "ret" ]
the agent will take care of deregistering the check from the catalog .
train
true
19,974
@pytest.mark.cmd
def test_pootle_init_db_sqlite(capfd, tmpdir):
    """``pootle init --db=sqlite`` should report that a config file was created."""
    conf_path = tmpdir.join('pootle.conf')
    cmdline = ['pootle', 'init', '--db=sqlite', '--config=%s' % conf_path]
    call(cmdline)
    out, err = capfd.readouterr()
    assert 'Configuration file created' in out
[ "@", "pytest", ".", "mark", ".", "cmd", "def", "test_pootle_init_db_sqlite", "(", "capfd", ",", "tmpdir", ")", ":", "test_conf_file", "=", "tmpdir", ".", "join", "(", "'pootle.conf'", ")", "call", "(", "[", "'pootle'", ",", "'init'", ",", "'--db=sqlite'", ",", "(", "'--config=%s'", "%", "test_conf_file", ")", "]", ")", "(", "out", ",", "err", ")", "=", "capfd", ".", "readouterr", "(", ")", "assert", "(", "'Configuration file created'", "in", "out", ")" ]
pootle init --help .
train
false
19,975
def _expand_users(device_users, common_users): expected_users = deepcopy(common_users) expected_users.update(device_users) return expected_users
[ "def", "_expand_users", "(", "device_users", ",", "common_users", ")", ":", "expected_users", "=", "deepcopy", "(", "common_users", ")", "expected_users", ".", "update", "(", "device_users", ")", "return", "expected_users" ]
creates a longer list of accepted users on the device .
train
true
19,977
def _copy_volume(*args, **kwargs): pass
[ "def", "_copy_volume", "(", "*", "args", ",", "**", "kwargs", ")", ":", "pass" ]
return without doing anything .
train
false
19,978
def write_stored_checksum(target):
    """Write a sha1 checksum to disk for a file in _base.

    :param target: path of the image file to checksum
    """
    # BUG FIX: open in binary mode.  The target is a disk image (binary
    # data); hashing the text-decoded stream gives a wrong digest and can
    # raise on bytes that are invalid in the platform encoding.
    with open(target, 'rb') as img_file:
        checksum = utils.hash_file(img_file)
        write_stored_info(target, field='sha1', value=checksum)
[ "def", "write_stored_checksum", "(", "target", ")", ":", "with", "open", "(", "target", ",", "'r'", ")", "as", "img_file", ":", "checksum", "=", "utils", ".", "hash_file", "(", "img_file", ")", "write_stored_info", "(", "target", ",", "field", "=", "'sha1'", ",", "value", "=", "checksum", ")" ]
write a checksum to disk for a file in _base .
train
false
19,980
def _update_context_with_user_info(context, user, user_certificate):
    """Update the context dictionary with user-related display values.

    Mutates ``context`` in place; returns None.
    """
    fullname = user.profile.name
    context.update({
        'username': user.username,
        'course_mode': user_certificate.mode,
        'accomplishment_user_id': user.id,
        'accomplishment_copy_name': fullname,
        'accomplishment_copy_username': user.username,
        'accomplishment_more_title': _("More Information About {user_name}'s Certificate:").format(user_name=fullname),
        'accomplishment_banner_opening': _('{fullname}, you earned a certificate!').format(fullname=fullname),
        'accomplishment_banner_congrats': _('Congratulations! This page summarizes what you accomplished. Show it off to family, friends, and colleagues in your social and professional networks.'),
        'accomplishment_copy_more_about': _("More about {fullname}'s accomplishment").format(fullname=fullname),
    })
[ "def", "_update_context_with_user_info", "(", "context", ",", "user", ",", "user_certificate", ")", ":", "user_fullname", "=", "user", ".", "profile", ".", "name", "context", "[", "'username'", "]", "=", "user", ".", "username", "context", "[", "'course_mode'", "]", "=", "user_certificate", ".", "mode", "context", "[", "'accomplishment_user_id'", "]", "=", "user", ".", "id", "context", "[", "'accomplishment_copy_name'", "]", "=", "user_fullname", "context", "[", "'accomplishment_copy_username'", "]", "=", "user", ".", "username", "context", "[", "'accomplishment_more_title'", "]", "=", "_", "(", "\"More Information About {user_name}'s Certificate:\"", ")", ".", "format", "(", "user_name", "=", "user_fullname", ")", "context", "[", "'accomplishment_banner_opening'", "]", "=", "_", "(", "'{fullname}, you earned a certificate!'", ")", ".", "format", "(", "fullname", "=", "user_fullname", ")", "context", "[", "'accomplishment_banner_congrats'", "]", "=", "_", "(", "'Congratulations! This page summarizes what you accomplished. Show it off to family, friends, and colleagues in your social and professional networks.'", ")", "context", "[", "'accomplishment_copy_more_about'", "]", "=", "_", "(", "\"More about {fullname}'s accomplishment\"", ")", ".", "format", "(", "fullname", "=", "user_fullname", ")" ]
updates context dictionary with user related info .
train
false
19,981
def render_injected(http_resp, extra_html):
    """render_injected -> HttpResponse

    Inject ``extra_html`` into the content of ``http_resp`` just before the
    closing body tag.  Non-HTML responses, and responses without a closing
    body tag, are returned unchanged.

    ``extra_html`` may be a string, an object exposing an ``html``
    attribute, or a callable returning the HTML fragment.
    """
    assert isinstance(http_resp, HttpResponse)
    # Only touch HTML responses.
    if ('text/html' not in http_resp.get('content-type', '')):
        return http_resp
    markers = ('</body>', '</BODY>')
    content = http_resp.content
    for marker in markers:
        pos = content.rfind(marker)
        if (pos != (-1)):
            break
    else:
        # No closing body tag found: nothing to inject before.
        return http_resp
    # Unwrap: prefer an .html attribute, then call it if it is callable.
    if hasattr(extra_html, 'html'):
        extra_html = extra_html.html
    if callable(extra_html):
        extra_html = extra_html()
    http_resp.content = ''.join((content[:pos], extra_html, content[pos:]))
    return http_resp
[ "def", "render_injected", "(", "http_resp", ",", "extra_html", ")", ":", "assert", "isinstance", "(", "http_resp", ",", "HttpResponse", ")", "if", "(", "'text/html'", "not", "in", "http_resp", ".", "get", "(", "'content-type'", ",", "''", ")", ")", ":", "return", "http_resp", "markers", "=", "(", "'</body>'", ",", "'</BODY>'", ")", "content", "=", "http_resp", ".", "content", "for", "marker", "in", "markers", ":", "pos", "=", "content", ".", "rfind", "(", "marker", ")", "if", "(", "pos", "!=", "(", "-", "1", ")", ")", ":", "break", "else", ":", "return", "http_resp", "if", "hasattr", "(", "extra_html", ",", "'html'", ")", ":", "extra_html", "=", "extra_html", ".", "html", "if", "callable", "(", "extra_html", ")", ":", "extra_html", "=", "extra_html", "(", ")", "http_resp", ".", "content", "=", "''", ".", "join", "(", "(", "content", "[", ":", "pos", "]", ",", "extra_html", ",", "content", "[", "pos", ":", "]", ")", ")", "return", "http_resp" ]
render_injected -> httpresponse inject the extra html into the content of the http_resp .
train
false
19,982
def cidr_broadcast(cidr):
    """Return the broadcast address associated with a CIDR address, as a string."""
    network = netaddr.IPNetwork(cidr)
    return str(network.broadcast)
[ "def", "cidr_broadcast", "(", "cidr", ")", ":", "ips", "=", "netaddr", ".", "IPNetwork", "(", "cidr", ")", "return", "str", "(", "ips", ".", "broadcast", ")" ]
get the broadcast address associated with a cidr address .
train
false
19,987
def test_dict_key_completion_bytes():
    """Test handling of bytes in dict key completion."""
    ip = get_ipython()
    complete = ip.Completer.complete
    # NOTE(review): the assertions below expect a b'abd' key, but the dict
    # here is built with str keys only -- looks like a lost ``b`` prefix in
    # this copy; confirm against the upstream IPython test.
    ip.user_ns['d'] = {'abc': None, 'abd': None}
    (_, matches) = complete(line_buffer='d[')
    nt.assert_in("'abc'", matches)
    nt.assert_in("b'abd'", matches)
    if False:
        # Deliberately disabled: matching a bare ``b`` prefix to bytes keys
        # is not currently implemented.
        (_, matches) = complete(line_buffer='d[b')
        nt.assert_in("b'abd'", matches)
        nt.assert_not_in("b'abc'", matches)
    # Completion after an opened bytes literal should only offer bytes keys.
    (_, matches) = complete(line_buffer="d[b'")
    nt.assert_in('abd', matches)
    nt.assert_not_in('abc', matches)
    (_, matches) = complete(line_buffer="d[B'")
    nt.assert_in('abd', matches)
    nt.assert_not_in('abc', matches)
    # Completion after an opened str literal should only offer str keys.
    (_, matches) = complete(line_buffer="d['")
    nt.assert_in('abc', matches)
    nt.assert_not_in('abd', matches)
[ "def", "test_dict_key_completion_bytes", "(", ")", ":", "ip", "=", "get_ipython", "(", ")", "complete", "=", "ip", ".", "Completer", ".", "complete", "ip", ".", "user_ns", "[", "'d'", "]", "=", "{", "'abc'", ":", "None", ",", "'abd'", ":", "None", "}", "(", "_", ",", "matches", ")", "=", "complete", "(", "line_buffer", "=", "'d['", ")", "nt", ".", "assert_in", "(", "\"'abc'\"", ",", "matches", ")", "nt", ".", "assert_in", "(", "\"b'abd'\"", ",", "matches", ")", "if", "False", ":", "(", "_", ",", "matches", ")", "=", "complete", "(", "line_buffer", "=", "'d[b'", ")", "nt", ".", "assert_in", "(", "\"b'abd'\"", ",", "matches", ")", "nt", ".", "assert_not_in", "(", "\"b'abc'\"", ",", "matches", ")", "(", "_", ",", "matches", ")", "=", "complete", "(", "line_buffer", "=", "\"d[b'\"", ")", "nt", ".", "assert_in", "(", "'abd'", ",", "matches", ")", "nt", ".", "assert_not_in", "(", "'abc'", ",", "matches", ")", "(", "_", ",", "matches", ")", "=", "complete", "(", "line_buffer", "=", "\"d[B'\"", ")", "nt", ".", "assert_in", "(", "'abd'", ",", "matches", ")", "nt", ".", "assert_not_in", "(", "'abc'", ",", "matches", ")", "(", "_", ",", "matches", ")", "=", "complete", "(", "line_buffer", "=", "\"d['\"", ")", "nt", ".", "assert_in", "(", "'abc'", ",", "matches", ")", "nt", ".", "assert_not_in", "(", "'abd'", ",", "matches", ")" ]
test handling of bytes in dict key completion .
train
false
19,988
def smbios_tables(attrs=None, where=None):
    """Return smbios_tables information from osquery.

    ``attrs`` restricts the returned columns and ``where`` filters rows;
    both are passed straight through to the osquery helper.
    """
    return _osquery_cmd(table='smbios_tables', attrs=attrs, where=where)
[ "def", "smbios_tables", "(", "attrs", "=", "None", ",", "where", "=", "None", ")", ":", "return", "_osquery_cmd", "(", "table", "=", "'smbios_tables'", ",", "attrs", "=", "attrs", ",", "where", "=", "where", ")" ]
return smbios_tables information from osquery cli example: .
train
false
19,989
def natsort(string):
    """Natural-sort key: split ``string`` into alternating text and int chunks.

    Digit runs become ints so that e.g. 'file2' sorts before 'file10'.
    """
    chunks = re.split('(\\d+)', string)
    return [int(chunk) if chunk.isdigit() else chunk for chunk in chunks]
[ "def", "natsort", "(", "string", ")", ":", "return", "[", "(", "int", "(", "s", ")", "if", "s", ".", "isdigit", "(", ")", "else", "s", ")", "for", "s", "in", "re", ".", "split", "(", "'(\\\\d+)'", ",", "string", ")", "]" ]
split a string into alternating text and integer chunks for natural sorting .
train
true
19,990
def execute_concurrent(session, statements_and_parameters, concurrency=100, raise_on_first_error=True, results_generator=False):
    """Execute a sequence of (statement, parameters) tuples concurrently.

    Returns a list of results, or a generator of results when
    ``results_generator`` is True.  An empty input yields an empty list.
    Raises ValueError when ``concurrency`` is not positive.
    """
    if concurrency <= 0:
        raise ValueError('concurrency must be greater than 0')
    if not statements_and_parameters:
        return []
    # Pick the executor flavor up front; both share the same execute API.
    if results_generator:
        executor = ConcurrentExecutorGenResults(session, statements_and_parameters)
    else:
        executor = ConcurrentExecutorListResults(session, statements_and_parameters)
    return executor.execute(concurrency, raise_on_first_error)
[ "def", "execute_concurrent", "(", "session", ",", "statements_and_parameters", ",", "concurrency", "=", "100", ",", "raise_on_first_error", "=", "True", ",", "results_generator", "=", "False", ")", ":", "if", "(", "concurrency", "<=", "0", ")", ":", "raise", "ValueError", "(", "'concurrency must be greater than 0'", ")", "if", "(", "not", "statements_and_parameters", ")", ":", "return", "[", "]", "executor", "=", "(", "ConcurrentExecutorGenResults", "(", "session", ",", "statements_and_parameters", ")", "if", "results_generator", "else", "ConcurrentExecutorListResults", "(", "session", ",", "statements_and_parameters", ")", ")", "return", "executor", ".", "execute", "(", "concurrency", ",", "raise_on_first_error", ")" ]
executes a sequence of tuples concurrently .
train
true
19,991
def install_plugin(site, plugin_name, output_dir=None, show_install_notes=False):
    """Install a new plugin via Nikola's bundled "plugin" command.

    Returns True on success, False when the installer command is missing
    or its execution reports failure.
    """
    LOGGER.notice(u"Installing plugin '{0}'".format(plugin_name))
    # Let the "plugin" command plugin do the heavy lifting.
    plugin_installer_info = site.plugin_manager.getPluginByName(u'plugin', u'Command')
    if (plugin_installer_info is None):
        LOGGER.error(u'Internal error: cannot find the "plugin" plugin which is supposed to come with Nikola!')
        return False
    if (not plugin_installer_info.is_activated):
        # The command plugin may have been disabled in configuration;
        # activate it and point it at this site before use.
        site.plugin_manager.activatePluginByName(plugin_installer_info.name)
        plugin_installer_info.plugin_object.set_site(site)
    plugin_installer = plugin_installer_info.plugin_object
    # Build the option dict the command expects, starting from its defaults.
    options = {}
    for option in plugin_installer.cmd_options:
        options[option[u'name']] = option[u'default']
    options[u'install'] = plugin_name
    options[u'output_dir'] = output_dir
    options[u'show_install_notes'] = show_install_notes
    # A positive return code from the command means the install failed.
    if (plugin_installer.execute(options=options) > 0):
        return False
    # Pick up the newly installed plugin and refresh compiler extensions.
    site.plugin_manager.collectPlugins()
    site.compiler_extensions = site._activate_plugins_of_category(u'CompilerExtension')
    return True
[ "def", "install_plugin", "(", "site", ",", "plugin_name", ",", "output_dir", "=", "None", ",", "show_install_notes", "=", "False", ")", ":", "LOGGER", ".", "notice", "(", "u\"Installing plugin '{0}'\"", ".", "format", "(", "plugin_name", ")", ")", "plugin_installer_info", "=", "site", ".", "plugin_manager", ".", "getPluginByName", "(", "u'plugin'", ",", "u'Command'", ")", "if", "(", "plugin_installer_info", "is", "None", ")", ":", "LOGGER", ".", "error", "(", "u'Internal error: cannot find the \"plugin\" plugin which is supposed to come with Nikola!'", ")", "return", "False", "if", "(", "not", "plugin_installer_info", ".", "is_activated", ")", ":", "site", ".", "plugin_manager", ".", "activatePluginByName", "(", "plugin_installer_info", ".", "name", ")", "plugin_installer_info", ".", "plugin_object", ".", "set_site", "(", "site", ")", "plugin_installer", "=", "plugin_installer_info", ".", "plugin_object", "options", "=", "{", "}", "for", "option", "in", "plugin_installer", ".", "cmd_options", ":", "options", "[", "option", "[", "u'name'", "]", "]", "=", "option", "[", "u'default'", "]", "options", "[", "u'install'", "]", "=", "plugin_name", "options", "[", "u'output_dir'", "]", "=", "output_dir", "options", "[", "u'show_install_notes'", "]", "=", "show_install_notes", "if", "(", "plugin_installer", ".", "execute", "(", "options", "=", "options", ")", ">", "0", ")", ":", "return", "False", "site", ".", "plugin_manager", ".", "collectPlugins", "(", ")", "site", ".", "compiler_extensions", "=", "site", ".", "_activate_plugins_of_category", "(", "u'CompilerExtension'", ")", "return", "True" ]
installs a new plugin .
train
false
19,992
def is_valid_course_key(key):
    """Return True when ``key`` is a CourseKey or parses into one."""
    try:
        parsed = CourseKey.from_string(key)
    except InvalidKeyError:
        # Not parseable; the key may already be a CourseKey instance.
        parsed = key
    return isinstance(parsed, CourseKey)
[ "def", "is_valid_course_key", "(", "key", ")", ":", "try", ":", "course_key", "=", "CourseKey", ".", "from_string", "(", "key", ")", "except", "InvalidKeyError", ":", "course_key", "=", "key", "return", "isinstance", "(", "course_key", ",", "CourseKey", ")" ]
validates course key .
train
false
19,993
def NormalizeAndTypeCheck(arg, types):
    """Normalize and type-check the given argument.

    Args:
      arg: a single instance of one of ``types``, or an iterable of such
        instances (plain strings are rejected as iterables).
      types: a type, or a list/tuple of types, that ``arg`` may contain.

    Returns:
      A ``(list, multiple)`` tuple: the argument normalized to a list, and a
      bool that is True when the caller passed an iterable rather than a
      single value.

    Raises:
      datastore_errors.BadArgumentError: if arg (or a member of it) is not
        of an allowed type, or is not iterable.
    """
    if (not isinstance(types, (list, tuple))):
        types = (types,)
    # list/tuple themselves may not be member types: they mark iterables.
    assert ((list not in types) and (tuple not in types))
    if isinstance(arg, types):
        return ([arg], False)
    else:
        # Strings are iterable but must be treated as scalars; reject them.
        if isinstance(arg, basestring):
            raise datastore_errors.BadArgumentError(('Expected an instance or iterable of %s; received %s (a %s).' % (types, arg, typename(arg))))
        try:
            arg_list = list(arg)
        except TypeError:
            # Not iterable at all.
            raise datastore_errors.BadArgumentError(('Expected an instance or iterable of %s; received %s (a %s).' % (types, arg, typename(arg))))
        # Every member of the iterable must be one of the allowed types.
        for val in arg_list:
            if (not isinstance(val, types)):
                raise datastore_errors.BadArgumentError(('Expected one of %s; received %s (a %s).' % (types, val, typename(val))))
        return (arg_list, True)
[ "def", "NormalizeAndTypeCheck", "(", "arg", ",", "types", ")", ":", "if", "(", "not", "isinstance", "(", "types", ",", "(", "list", ",", "tuple", ")", ")", ")", ":", "types", "=", "(", "types", ",", ")", "assert", "(", "(", "list", "not", "in", "types", ")", "and", "(", "tuple", "not", "in", "types", ")", ")", "if", "isinstance", "(", "arg", ",", "types", ")", ":", "return", "(", "[", "arg", "]", ",", "False", ")", "else", ":", "if", "isinstance", "(", "arg", ",", "basestring", ")", ":", "raise", "datastore_errors", ".", "BadArgumentError", "(", "(", "'Expected an instance or iterable of %s; received %s (a %s).'", "%", "(", "types", ",", "arg", ",", "typename", "(", "arg", ")", ")", ")", ")", "try", ":", "arg_list", "=", "list", "(", "arg", ")", "except", "TypeError", ":", "raise", "datastore_errors", ".", "BadArgumentError", "(", "(", "'Expected an instance or iterable of %s; received %s (a %s).'", "%", "(", "types", ",", "arg", ",", "typename", "(", "arg", ")", ")", ")", ")", "for", "val", "in", "arg_list", ":", "if", "(", "not", "isinstance", "(", "val", ",", "types", ")", ")", ":", "raise", "datastore_errors", ".", "BadArgumentError", "(", "(", "'Expected one of %s; received %s (a %s).'", "%", "(", "types", ",", "val", ",", "typename", "(", "val", ")", ")", ")", ")", "return", "(", "arg_list", ",", "True", ")" ]
normalizes and type checks the given argument .
train
false
19,994
def getClosestDistanceIndexToPoint(point, loop):
    """Return a euclidean.DistanceIndex for the loop vertex nearest ``point``.

    Distances are measured in the plane (after dropAxis).  Returns None
    when ``loop`` is empty.
    """
    # Large sentinel matching this codebase's "no distance yet" convention.
    smallestDistance = 9.876543219876543e+17
    closestDistanceIndex = None
    planarPoint = point.dropAxis()
    for vertexIndex, vertex in enumerate(loop):
        vertexDistance = abs(planarPoint - vertex.dropAxis())
        if vertexDistance < smallestDistance:
            smallestDistance = vertexDistance
            closestDistanceIndex = euclidean.DistanceIndex(vertexDistance, vertexIndex)
    return closestDistanceIndex
[ "def", "getClosestDistanceIndexToPoint", "(", "point", ",", "loop", ")", ":", "smallestDistance", "=", "9.876543219876543e+17", "closestDistanceIndex", "=", "None", "pointComplex", "=", "point", ".", "dropAxis", "(", ")", "for", "(", "otherPointIndex", ",", "otherPoint", ")", "in", "enumerate", "(", "loop", ")", ":", "distance", "=", "abs", "(", "(", "pointComplex", "-", "otherPoint", ".", "dropAxis", "(", ")", ")", ")", "if", "(", "distance", "<", "smallestDistance", ")", ":", "smallestDistance", "=", "distance", "closestDistanceIndex", "=", "euclidean", ".", "DistanceIndex", "(", "distance", ",", "otherPointIndex", ")", "return", "closestDistanceIndex" ]
get the distance squared to the closest point of the loop and index of that point .
train
false
19,995
def _chpi_objective(x, coil_dev_rrs, coil_head_rrs):
    """Helper objective function: sum of squared position residuals.

    ``x`` packs a quaternion rotation (x[:3]) and a translation term
    (x[3:], stored scaled by 10 -- presumably to balance optimizer step
    sizes; TODO confirm against the caller).
    """
    # Rotate device-frame coil positions by the quaternion in x[:3].
    d = np.dot(coil_dev_rrs, quat_to_rot(x[:3]).T)
    # Apply the (de-scaled) translation.
    d += (x[3:] / 10.0)
    # Residuals against the head-frame coil positions, squared and summed.
    d -= coil_head_rrs
    d *= d
    return d.sum()
[ "def", "_chpi_objective", "(", "x", ",", "coil_dev_rrs", ",", "coil_head_rrs", ")", ":", "d", "=", "np", ".", "dot", "(", "coil_dev_rrs", ",", "quat_to_rot", "(", "x", "[", ":", "3", "]", ")", ".", "T", ")", "d", "+=", "(", "x", "[", "3", ":", "]", "/", "10.0", ")", "d", "-=", "coil_head_rrs", "d", "*=", "d", "return", "d", ".", "sum", "(", ")" ]
helper objective function .
train
false
19,996
def conditional_escape(text):
    """Similar to escape(), but skips strings already marked safe (SafeData)."""
    return text if isinstance(text, SafeData) else escape(text)
[ "def", "conditional_escape", "(", "text", ")", ":", "if", "isinstance", "(", "text", ",", "SafeData", ")", ":", "return", "text", "else", ":", "return", "escape", "(", "text", ")" ]
similar to escape() .
train
false
19,998
def ensure_wgs84(point):
    """Return a clone of ``point`` whose data is in the WGS-84 SRID.

    Validates the input with ensure_point first.  A point carrying no
    SRID is assumed to already be WGS-84 and is simply tagged; any other
    SRID is transformed.  The original point is never mutated.
    """
    ensure_point(point)
    cloned = point.clone()
    if not cloned.srid:
        cloned.set_srid(WGS_84_SRID)
    elif cloned.srid != WGS_84_SRID:
        cloned.transform(WGS_84_SRID)
    return cloned
[ "def", "ensure_wgs84", "(", "point", ")", ":", "ensure_point", "(", "point", ")", "new_point", "=", "point", ".", "clone", "(", ")", "if", "(", "not", "new_point", ".", "srid", ")", ":", "new_point", ".", "set_srid", "(", "WGS_84_SRID", ")", "elif", "(", "new_point", ".", "srid", "!=", "WGS_84_SRID", ")", ":", "new_point", ".", "transform", "(", "WGS_84_SRID", ")", "return", "new_point" ]
ensures the point passed in is a geos point & returns that points data is in the wgs-84 spatial reference .
train
false
19,999
def require_api_access(view_func):
    """Decorator: redirect to the API request page unless the user has API access.

    The wrapped view's first positional argument (after the view object)
    must expose a ``.user`` attribute.
    """
    @wraps(view_func)
    def wrapped_view(view_obj, *args, **kwargs):
        'Wrapper for the view function.'
        if not ApiAccessRequest.has_api_access(args[0].user):
            return redirect(reverse('api_admin:api-request'))
        return view_func(view_obj, *args, **kwargs)
    return wrapped_view
[ "def", "require_api_access", "(", "view_func", ")", ":", "@", "wraps", "(", "view_func", ")", "def", "wrapped_view", "(", "view_obj", ",", "*", "args", ",", "**", "kwargs", ")", ":", "if", "ApiAccessRequest", ".", "has_api_access", "(", "args", "[", "0", "]", ".", "user", ")", ":", "return", "view_func", "(", "view_obj", ",", "*", "args", ",", "**", "kwargs", ")", "return", "redirect", "(", "reverse", "(", "'api_admin:api-request'", ")", ")", "return", "wrapped_view" ]
if the requesting user does not have api access .
train
false
20,002
def CreateTransactionOptions(**kwds):
    """Create a configuration object for use in configuring transactions.

    All keyword arguments are forwarded to
    datastore_rpc.TransactionOptions unchanged.
    """
    return datastore_rpc.TransactionOptions(**kwds)
[ "def", "CreateTransactionOptions", "(", "**", "kwds", ")", ":", "return", "datastore_rpc", ".", "TransactionOptions", "(", "**", "kwds", ")" ]
create a configuration object for use in configuring transactions .
train
false
20,003
def _compute_precision_cholesky(covariances, covariance_type): estimate_precision_error_message = 'Fitting the mixture model failed because some components have ill-defined empirical covariance (for instance caused by singleton or collapsed samples). Try to decrease the number of components, or increase reg_covar.' if (covariance_type in 'full'): (n_components, n_features, _) = covariances.shape precisions_chol = np.empty((n_components, n_features, n_features)) for (k, covariance) in enumerate(covariances): try: cov_chol = linalg.cholesky(covariance, lower=True) except linalg.LinAlgError: raise ValueError(estimate_precision_error_message) precisions_chol[k] = linalg.solve_triangular(cov_chol, np.eye(n_features), lower=True).T elif (covariance_type == 'tied'): (_, n_features) = covariances.shape try: cov_chol = linalg.cholesky(covariances, lower=True) except linalg.LinAlgError: raise ValueError(estimate_precision_error_message) precisions_chol = linalg.solve_triangular(cov_chol, np.eye(n_features), lower=True).T else: if np.any(np.less_equal(covariances, 0.0)): raise ValueError(estimate_precision_error_message) precisions_chol = (1.0 / np.sqrt(covariances)) return precisions_chol
[ "def", "_compute_precision_cholesky", "(", "covariances", ",", "covariance_type", ")", ":", "estimate_precision_error_message", "=", "'Fitting the mixture model failed because some components have ill-defined empirical covariance (for instance caused by singleton or collapsed samples). Try to decrease the number of components, or increase reg_covar.'", "if", "(", "covariance_type", "in", "'full'", ")", ":", "(", "n_components", ",", "n_features", ",", "_", ")", "=", "covariances", ".", "shape", "precisions_chol", "=", "np", ".", "empty", "(", "(", "n_components", ",", "n_features", ",", "n_features", ")", ")", "for", "(", "k", ",", "covariance", ")", "in", "enumerate", "(", "covariances", ")", ":", "try", ":", "cov_chol", "=", "linalg", ".", "cholesky", "(", "covariance", ",", "lower", "=", "True", ")", "except", "linalg", ".", "LinAlgError", ":", "raise", "ValueError", "(", "estimate_precision_error_message", ")", "precisions_chol", "[", "k", "]", "=", "linalg", ".", "solve_triangular", "(", "cov_chol", ",", "np", ".", "eye", "(", "n_features", ")", ",", "lower", "=", "True", ")", ".", "T", "elif", "(", "covariance_type", "==", "'tied'", ")", ":", "(", "_", ",", "n_features", ")", "=", "covariances", ".", "shape", "try", ":", "cov_chol", "=", "linalg", ".", "cholesky", "(", "covariances", ",", "lower", "=", "True", ")", "except", "linalg", ".", "LinAlgError", ":", "raise", "ValueError", "(", "estimate_precision_error_message", ")", "precisions_chol", "=", "linalg", ".", "solve_triangular", "(", "cov_chol", ",", "np", ".", "eye", "(", "n_features", ")", ",", "lower", "=", "True", ")", ".", "T", "else", ":", "if", "np", ".", "any", "(", "np", ".", "less_equal", "(", "covariances", ",", "0.0", ")", ")", ":", "raise", "ValueError", "(", "estimate_precision_error_message", ")", "precisions_chol", "=", "(", "1.0", "/", "np", ".", "sqrt", "(", "covariances", ")", ")", "return", "precisions_chol" ]
compute the cholesky decomposition of the precisions .
train
false
20,004
def getLargestLoop(loops):
    """Get the loop with the largest absolute area; [] when loops is empty."""
    largestLoop = []
    # Large negative sentinel matching this codebase's convention.
    largestArea = -987654321.0
    for candidate in loops:
        candidateArea = abs(getAreaLoopAbsolute(candidate))
        if candidateArea > largestArea:
            largestArea = candidateArea
            largestLoop = candidate
    return largestLoop
[ "def", "getLargestLoop", "(", "loops", ")", ":", "largestArea", "=", "(", "-", "987654321.0", ")", "largestLoop", "=", "[", "]", "for", "loop", "in", "loops", ":", "loopArea", "=", "abs", "(", "getAreaLoopAbsolute", "(", "loop", ")", ")", "if", "(", "loopArea", ">", "largestArea", ")", ":", "largestArea", "=", "loopArea", "largestLoop", "=", "loop", "return", "largestLoop" ]
get largest loop from loops .
train
false
20,005
def check_greenthread_spawns(logical_line, physical_line, filename):
    """N340 hacking check: flag greenthread/eventlet spawn usage.

    Yields (offset, message) tuples for flagged lines; files implementing
    or testing the wrappers are exempt.
    """
    msg = 'N340: Use nova.utils.%(spawn)s() rather than greenthread.%(spawn)s() and eventlet.%(spawn)s()'
    exempt = ('nova/utils.py' in filename) or ('nova/tests/' in filename)
    if exempt:
        return
    match = re.match(spawn_re, logical_line)
    if match:
        yield 0, msg % {'spawn': match.group('spawn_part')}
[ "def", "check_greenthread_spawns", "(", "logical_line", ",", "physical_line", ",", "filename", ")", ":", "msg", "=", "'N340: Use nova.utils.%(spawn)s() rather than greenthread.%(spawn)s() and eventlet.%(spawn)s()'", "if", "(", "(", "'nova/utils.py'", "in", "filename", ")", "or", "(", "'nova/tests/'", "in", "filename", ")", ")", ":", "return", "match", "=", "re", ".", "match", "(", "spawn_re", ",", "logical_line", ")", "if", "match", ":", "(", "yield", "(", "0", ",", "(", "msg", "%", "{", "'spawn'", ":", "match", ".", "group", "(", "'spawn_part'", ")", "}", ")", ")", ")" ]
check for use of greenthread .
train
false
20,006
def direct_delete_container_entry(container_ring, account_name, container_name, object_name, headers=None):
    """Delete an object listing entry directly on every primary container node.

    Spawns one greenthread per primary node and blocks until all finish.
    """
    part, nodes = container_ring.get_nodes(account_name, container_name)
    pool = GreenPool()
    for node in nodes:
        pool.spawn_n(direct_delete_container_object, node, part,
                     account_name, container_name, object_name,
                     headers=headers)
    pool.waitall()
[ "def", "direct_delete_container_entry", "(", "container_ring", ",", "account_name", ",", "container_name", ",", "object_name", ",", "headers", "=", "None", ")", ":", "pool", "=", "GreenPool", "(", ")", "(", "part", ",", "nodes", ")", "=", "container_ring", ".", "get_nodes", "(", "account_name", ",", "container_name", ")", "for", "node", "in", "nodes", ":", "pool", ".", "spawn_n", "(", "direct_delete_container_object", ",", "node", ",", "part", ",", "account_name", ",", "container_name", ",", "object_name", ",", "headers", "=", "headers", ")", "pool", ".", "waitall", "(", ")" ]
talk directly to the primary container servers to delete a particular object listing .
train
false
20,008
def build_ffi_for_binding(module_name, module_prefix, modules, libraries=[], extra_compile_args=[], extra_link_args=[]):
    """Assemble a cffi FFI object from a list of binding submodules.

    Each name in ``modules`` is imported as ``module_prefix + name`` and
    must define TYPES, MACROS, FUNCTIONS, INCLUDES and CUSTOMIZATIONS
    strings, which are concatenated into the cdef and verify sources.

    NOTE(review): the mutable list defaults are only read here, never
    mutated, so they are safe in practice.
    """
    types = []
    includes = []
    functions = []
    macros = []
    customizations = []
    for name in modules:
        full_name = module_prefix + name
        __import__(full_name)
        module = sys.modules[full_name]
        types.append(module.TYPES)
        macros.append(module.MACROS)
        functions.append(module.FUNCTIONS)
        includes.append(module.INCLUDES)
        customizations.append(module.CUSTOMIZATIONS)
    verify_source = '\n'.join(includes + functions + customizations)
    cdef_source = '\n'.join(types + functions + macros)
    ffi = build_ffi(module_name, cdef_source=cdef_source,
                    verify_source=verify_source, libraries=libraries,
                    extra_compile_args=extra_compile_args,
                    extra_link_args=extra_link_args)
    return ffi
[ "def", "build_ffi_for_binding", "(", "module_name", ",", "module_prefix", ",", "modules", ",", "libraries", "=", "[", "]", ",", "extra_compile_args", "=", "[", "]", ",", "extra_link_args", "=", "[", "]", ")", ":", "types", "=", "[", "]", "includes", "=", "[", "]", "functions", "=", "[", "]", "macros", "=", "[", "]", "customizations", "=", "[", "]", "for", "name", "in", "modules", ":", "__import__", "(", "(", "module_prefix", "+", "name", ")", ")", "module", "=", "sys", ".", "modules", "[", "(", "module_prefix", "+", "name", ")", "]", "types", ".", "append", "(", "module", ".", "TYPES", ")", "macros", ".", "append", "(", "module", ".", "MACROS", ")", "functions", ".", "append", "(", "module", ".", "FUNCTIONS", ")", "includes", ".", "append", "(", "module", ".", "INCLUDES", ")", "customizations", ".", "append", "(", "module", ".", "CUSTOMIZATIONS", ")", "verify_source", "=", "'\\n'", ".", "join", "(", "(", "(", "includes", "+", "functions", ")", "+", "customizations", ")", ")", "ffi", "=", "build_ffi", "(", "module_name", ",", "cdef_source", "=", "'\\n'", ".", "join", "(", "(", "(", "types", "+", "functions", ")", "+", "macros", ")", ")", ",", "verify_source", "=", "verify_source", ",", "libraries", "=", "libraries", ",", "extra_compile_args", "=", "extra_compile_args", ",", "extra_link_args", "=", "extra_link_args", ")", "return", "ffi" ]
modules listed in modules should have the following attributes: * includes: a string containing c includes .
train
false
20,009
def test_double_start(qtbot, proc, py_proc):
    """Starting a GUI process a second time should raise ValueError."""
    with qtbot.waitSignal(proc.started, timeout=10000):
        cmdline = py_proc('import time; time.sleep(10)')
        proc.start(*cmdline)
    with pytest.raises(ValueError):
        proc.start('', [])
[ "def", "test_double_start", "(", "qtbot", ",", "proc", ",", "py_proc", ")", ":", "with", "qtbot", ".", "waitSignal", "(", "proc", ".", "started", ",", "timeout", "=", "10000", ")", ":", "argv", "=", "py_proc", "(", "'import time; time.sleep(10)'", ")", "proc", ".", "start", "(", "*", "argv", ")", "with", "pytest", ".", "raises", "(", "ValueError", ")", ":", "proc", ".", "start", "(", "''", ",", "[", "]", ")" ]
test starting a guiprocess twice .
train
false
20,011
def textListToColors(names): Dnames = np.zeros((len(names), len(names))) for i in range(len(names)): for j in range(len(names)): Dnames[(i, j)] = (1 - ((2.0 * levenshtein(names[i], names[j])) / float(len((names[i] + names[j]))))) pca = sklearn.decomposition.PCA(n_components=1) pca.fit(Dnames) textToColor = pca.transform(Dnames) textToColor = ((255 * (textToColor - textToColor.min())) / (textToColor.max() - textToColor.min())) textmaps = generateColorMap() colors = [textmaps[int(c)] for c in textToColor] return colors
[ "def", "textListToColors", "(", "names", ")", ":", "Dnames", "=", "np", ".", "zeros", "(", "(", "len", "(", "names", ")", ",", "len", "(", "names", ")", ")", ")", "for", "i", "in", "range", "(", "len", "(", "names", ")", ")", ":", "for", "j", "in", "range", "(", "len", "(", "names", ")", ")", ":", "Dnames", "[", "(", "i", ",", "j", ")", "]", "=", "(", "1", "-", "(", "(", "2.0", "*", "levenshtein", "(", "names", "[", "i", "]", ",", "names", "[", "j", "]", ")", ")", "/", "float", "(", "len", "(", "(", "names", "[", "i", "]", "+", "names", "[", "j", "]", ")", ")", ")", ")", ")", "pca", "=", "sklearn", ".", "decomposition", ".", "PCA", "(", "n_components", "=", "1", ")", "pca", ".", "fit", "(", "Dnames", ")", "textToColor", "=", "pca", ".", "transform", "(", "Dnames", ")", "textToColor", "=", "(", "(", "255", "*", "(", "textToColor", "-", "textToColor", ".", "min", "(", ")", ")", ")", "/", "(", "textToColor", ".", "max", "(", ")", "-", "textToColor", ".", "min", "(", ")", ")", ")", "textmaps", "=", "generateColorMap", "(", ")", "colors", "=", "[", "textmaps", "[", "int", "(", "c", ")", "]", "for", "c", "in", "textToColor", "]", "return", "colors" ]
generates a list of colors based on a list of names .
train
true
20,012
def display_name_with_default(block): return (block.display_name if (block.display_name is not None) else url_name_for_block(block).replace('_', ' '))
[ "def", "display_name_with_default", "(", "block", ")", ":", "return", "(", "block", ".", "display_name", "if", "(", "block", ".", "display_name", "is", "not", "None", ")", "else", "url_name_for_block", "(", "block", ")", ".", "replace", "(", "'_'", ",", "' '", ")", ")" ]
calculates the display name for a block .
train
false
20,013
def _check_ranges(start, end, use_range_set, use_range, range_header): if ((end is not None) and (start is None)): raise ValueError('May not specify end value without start.') use_indexes = (start is not None) if use_indexes: if (end is not None): if (start > end): raise ValueError('start must be < end.') elif (start < 0): raise ValueError('end cannot be set if start < 0.') range_indexes = _serialize_range(start, end) if (use_range_set and use_range and use_indexes): if (range_header != range_indexes): raise ValueError(('May not provide non-equivalent range indexes and range headers: (header) %s != (indexes) %s' % (range_header, range_indexes))) if (use_range and (range_header is not None)): return range_header elif use_indexes: return range_indexes else: return None
[ "def", "_check_ranges", "(", "start", ",", "end", ",", "use_range_set", ",", "use_range", ",", "range_header", ")", ":", "if", "(", "(", "end", "is", "not", "None", ")", "and", "(", "start", "is", "None", ")", ")", ":", "raise", "ValueError", "(", "'May not specify end value without start.'", ")", "use_indexes", "=", "(", "start", "is", "not", "None", ")", "if", "use_indexes", ":", "if", "(", "end", "is", "not", "None", ")", ":", "if", "(", "start", ">", "end", ")", ":", "raise", "ValueError", "(", "'start must be < end.'", ")", "elif", "(", "start", "<", "0", ")", ":", "raise", "ValueError", "(", "'end cannot be set if start < 0.'", ")", "range_indexes", "=", "_serialize_range", "(", "start", ",", "end", ")", "if", "(", "use_range_set", "and", "use_range", "and", "use_indexes", ")", ":", "if", "(", "range_header", "!=", "range_indexes", ")", ":", "raise", "ValueError", "(", "(", "'May not provide non-equivalent range indexes and range headers: (header) %s != (indexes) %s'", "%", "(", "range_header", ",", "range_indexes", ")", ")", ")", "if", "(", "use_range", "and", "(", "range_header", "is", "not", "None", ")", ")", ":", "return", "range_header", "elif", "use_indexes", ":", "return", "range_indexes", "else", ":", "return", "None" ]
set the range header .
train
false
20,015
def nodename(name, hostname): return NODENAME_SEP.join((name, hostname))
[ "def", "nodename", "(", "name", ",", "hostname", ")", ":", "return", "NODENAME_SEP", ".", "join", "(", "(", "name", ",", "hostname", ")", ")" ]
create node name from name/hostname pair .
train
false
20,016
def Pcolor(xs, ys, zs, pcolor=True, contour=False, **options): _Underride(options, linewidth=3, cmap=matplotlib.cm.Blues) (X, Y) = np.meshgrid(xs, ys) Z = zs x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False) axes = pyplot.gca() axes.xaxis.set_major_formatter(x_formatter) if pcolor: pyplot.pcolormesh(X, Y, Z, **options) if contour: cs = pyplot.contour(X, Y, Z, **options) pyplot.clabel(cs, inline=1, fontsize=10)
[ "def", "Pcolor", "(", "xs", ",", "ys", ",", "zs", ",", "pcolor", "=", "True", ",", "contour", "=", "False", ",", "**", "options", ")", ":", "_Underride", "(", "options", ",", "linewidth", "=", "3", ",", "cmap", "=", "matplotlib", ".", "cm", ".", "Blues", ")", "(", "X", ",", "Y", ")", "=", "np", ".", "meshgrid", "(", "xs", ",", "ys", ")", "Z", "=", "zs", "x_formatter", "=", "matplotlib", ".", "ticker", ".", "ScalarFormatter", "(", "useOffset", "=", "False", ")", "axes", "=", "pyplot", ".", "gca", "(", ")", "axes", ".", "xaxis", ".", "set_major_formatter", "(", "x_formatter", ")", "if", "pcolor", ":", "pyplot", ".", "pcolormesh", "(", "X", ",", "Y", ",", "Z", ",", "**", "options", ")", "if", "contour", ":", "cs", "=", "pyplot", ".", "contour", "(", "X", ",", "Y", ",", "Z", ",", "**", "options", ")", "pyplot", ".", "clabel", "(", "cs", ",", "inline", "=", "1", ",", "fontsize", "=", "10", ")" ]
makes a pseudocolor plot .
train
false
20,018
@frappe.whitelist() def toggle_like(doctype, name, add=False): _toggle_like(doctype, name, add)
[ "@", "frappe", ".", "whitelist", "(", ")", "def", "toggle_like", "(", "doctype", ",", "name", ",", "add", "=", "False", ")", ":", "_toggle_like", "(", "doctype", ",", "name", ",", "add", ")" ]
adds / removes the current user in the __liked_by property of the given document .
train
false
20,019
def computeRawAnomalyScore(activeColumns, prevPredictedColumns): nActiveColumns = len(activeColumns) if (nActiveColumns > 0): score = numpy.in1d(activeColumns, prevPredictedColumns).sum() score = ((nActiveColumns - score) / float(nActiveColumns)) else: score = 0.0 return score
[ "def", "computeRawAnomalyScore", "(", "activeColumns", ",", "prevPredictedColumns", ")", ":", "nActiveColumns", "=", "len", "(", "activeColumns", ")", "if", "(", "nActiveColumns", ">", "0", ")", ":", "score", "=", "numpy", ".", "in1d", "(", "activeColumns", ",", "prevPredictedColumns", ")", ".", "sum", "(", ")", "score", "=", "(", "(", "nActiveColumns", "-", "score", ")", "/", "float", "(", "nActiveColumns", ")", ")", "else", ":", "score", "=", "0.0", "return", "score" ]
computes the raw anomaly score .
train
true
20,020
def _iterable_nodes(graph_in): nodes = nx.topological_sort(graph_in) inodes = [node for node in nodes if (node.iterables is not None)] inodes_no_src = [node for node in inodes if (not node.itersource)] inodes_src = [node for node in inodes if node.itersource] inodes_no_src.reverse() return (inodes_no_src + inodes_src)
[ "def", "_iterable_nodes", "(", "graph_in", ")", ":", "nodes", "=", "nx", ".", "topological_sort", "(", "graph_in", ")", "inodes", "=", "[", "node", "for", "node", "in", "nodes", "if", "(", "node", ".", "iterables", "is", "not", "None", ")", "]", "inodes_no_src", "=", "[", "node", "for", "node", "in", "inodes", "if", "(", "not", "node", ".", "itersource", ")", "]", "inodes_src", "=", "[", "node", "for", "node", "in", "inodes", "if", "node", ".", "itersource", "]", "inodes_no_src", ".", "reverse", "(", ")", "return", "(", "inodes_no_src", "+", "inodes_src", ")" ]
returns the iterable nodes in the given graph and their join dependencies .
train
false
20,021
def _get_sort_keys(sort_keys, mapping): if isinstance(sort_keys, six.string_types): sort_keys = [sort_keys] return [mapping[key] for key in (sort_keys or []) if (key in mapping)]
[ "def", "_get_sort_keys", "(", "sort_keys", ",", "mapping", ")", ":", "if", "isinstance", "(", "sort_keys", ",", "six", ".", "string_types", ")", ":", "sort_keys", "=", "[", "sort_keys", "]", "return", "[", "mapping", "[", "key", "]", "for", "key", "in", "(", "sort_keys", "or", "[", "]", ")", "if", "(", "key", "in", "mapping", ")", "]" ]
returns an array containing only whitelisted keys .
train
false
20,022
def get_win_drives(): assert NT drives = [] bitmask = windll.kernel32.GetLogicalDrives() for letter in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ': if ((bitmask & 1) and (win32file.GetDriveType(('%s:\\' % letter)) in DRIVES)): drives.append(letter) bitmask >>= 1 return drives
[ "def", "get_win_drives", "(", ")", ":", "assert", "NT", "drives", "=", "[", "]", "bitmask", "=", "windll", ".", "kernel32", ".", "GetLogicalDrives", "(", ")", "for", "letter", "in", "'ABCDEFGHIJKLMNOPQRSTUVWXYZ'", ":", "if", "(", "(", "bitmask", "&", "1", ")", "and", "(", "win32file", ".", "GetDriveType", "(", "(", "'%s:\\\\'", "%", "letter", ")", ")", "in", "DRIVES", ")", ")", ":", "drives", ".", "append", "(", "letter", ")", "bitmask", ">>=", "1", "return", "drives" ]
return list of detected drives .
train
false
20,023
def _validate_thumbnail_currentness(image_path, thumbnail_path): if ((not os.path.exists(thumbnail_path)) or (image_path == thumbnail_path)): return try: if (os.path.getmtime(image_path) > os.path.getmtime(thumbnail_path)): os.remove(thumbnail_path) except: pass
[ "def", "_validate_thumbnail_currentness", "(", "image_path", ",", "thumbnail_path", ")", ":", "if", "(", "(", "not", "os", ".", "path", ".", "exists", "(", "thumbnail_path", ")", ")", "or", "(", "image_path", "==", "thumbnail_path", ")", ")", ":", "return", "try", ":", "if", "(", "os", ".", "path", ".", "getmtime", "(", "image_path", ")", ">", "os", ".", "path", ".", "getmtime", "(", "thumbnail_path", ")", ")", ":", "os", ".", "remove", "(", "thumbnail_path", ")", "except", ":", "pass" ]
remove the thumbnail if it is outdated .
train
false
20,024
def test_sextractor_units(): table = ascii.read('t/sextractor2.dat', Reader=ascii.SExtractor, guess=False) expected_units = [None, Unit('pix'), Unit('pix'), Unit('mag'), Unit('mag'), None, Unit('pix**2'), Unit('m**(-6)'), Unit('mag * arcsec**(-2)')] expected_descrs = ['Running object number', 'Windowed position estimate along x', 'Windowed position estimate along y', 'Kron-like elliptical aperture magnitude', 'RMS error for AUTO magnitude', 'Extraction flags', None, 'Barycenter position along MAMA x axis', 'Peak surface brightness above background'] for (i, colname) in enumerate(table.colnames): assert (table[colname].unit == expected_units[i]) assert (table[colname].description == expected_descrs[i])
[ "def", "test_sextractor_units", "(", ")", ":", "table", "=", "ascii", ".", "read", "(", "'t/sextractor2.dat'", ",", "Reader", "=", "ascii", ".", "SExtractor", ",", "guess", "=", "False", ")", "expected_units", "=", "[", "None", ",", "Unit", "(", "'pix'", ")", ",", "Unit", "(", "'pix'", ")", ",", "Unit", "(", "'mag'", ")", ",", "Unit", "(", "'mag'", ")", ",", "None", ",", "Unit", "(", "'pix**2'", ")", ",", "Unit", "(", "'m**(-6)'", ")", ",", "Unit", "(", "'mag * arcsec**(-2)'", ")", "]", "expected_descrs", "=", "[", "'Running object number'", ",", "'Windowed position estimate along x'", ",", "'Windowed position estimate along y'", ",", "'Kron-like elliptical aperture magnitude'", ",", "'RMS error for AUTO magnitude'", ",", "'Extraction flags'", ",", "None", ",", "'Barycenter position along MAMA x axis'", ",", "'Peak surface brightness above background'", "]", "for", "(", "i", ",", "colname", ")", "in", "enumerate", "(", "table", ".", "colnames", ")", ":", "assert", "(", "table", "[", "colname", "]", ".", "unit", "==", "expected_units", "[", "i", "]", ")", "assert", "(", "table", "[", "colname", "]", ".", "description", "==", "expected_descrs", "[", "i", "]", ")" ]
make sure that the sextractor reader correctly inputs descriptions and units .
train
false
20,026
def needs_reboot(): pythoncom.CoInitialize() obj_sys = win32com.client.Dispatch('Microsoft.Update.SystemInfo') return salt.utils.is_true(obj_sys.RebootRequired)
[ "def", "needs_reboot", "(", ")", ":", "pythoncom", ".", "CoInitialize", "(", ")", "obj_sys", "=", "win32com", ".", "client", ".", "Dispatch", "(", "'Microsoft.Update.SystemInfo'", ")", "return", "salt", ".", "utils", ".", "is_true", "(", "obj_sys", ".", "RebootRequired", ")" ]
determines if the system needs to be rebooted .
train
false
20,027
def idct(x, type=2, n=None, axis=(-1), norm=None, overwrite_x=False): if ((type == 1) and (norm is not None)): raise NotImplementedError('Orthonormalization not yet supported for IDCT-I') _TP = {1: 1, 2: 3, 3: 2} return _dct(x, _TP[type], n, axis, normalize=norm, overwrite_x=overwrite_x)
[ "def", "idct", "(", "x", ",", "type", "=", "2", ",", "n", "=", "None", ",", "axis", "=", "(", "-", "1", ")", ",", "norm", "=", "None", ",", "overwrite_x", "=", "False", ")", ":", "if", "(", "(", "type", "==", "1", ")", "and", "(", "norm", "is", "not", "None", ")", ")", ":", "raise", "NotImplementedError", "(", "'Orthonormalization not yet supported for IDCT-I'", ")", "_TP", "=", "{", "1", ":", "1", ",", "2", ":", "3", ",", "3", ":", "2", "}", "return", "_dct", "(", "x", ",", "_TP", "[", "type", "]", ",", "n", ",", "axis", ",", "normalize", "=", "norm", ",", "overwrite_x", "=", "overwrite_x", ")" ]
return the inverse discrete cosine transform of an arbitrary type sequence .
train
false
20,028
def encode_to_s3(string, errors='replace'): if (type(string) != unicode): return str(string) try: return string.encode('UTF-8', errors) except UnicodeEncodeError: raise UnicodeEncodeError(('Conversion from unicode failed: %r' % string))
[ "def", "encode_to_s3", "(", "string", ",", "errors", "=", "'replace'", ")", ":", "if", "(", "type", "(", "string", ")", "!=", "unicode", ")", ":", "return", "str", "(", "string", ")", "try", ":", "return", "string", ".", "encode", "(", "'UTF-8'", ",", "errors", ")", "except", "UnicodeEncodeError", ":", "raise", "UnicodeEncodeError", "(", "(", "'Conversion from unicode failed: %r'", "%", "string", ")", ")" ]
convert unicode to s3 utf-8 string .
train
false
20,029
def dns(): if (salt.utils.is_windows() or ('proxyminion' in __opts__)): return {} resolv = salt.utils.dns.parse_resolv() for key in ('nameservers', 'ip4_nameservers', 'ip6_nameservers', 'sortlist'): if (key in resolv): resolv[key] = [str(i) for i in resolv[key]] return ({'dns': resolv} if resolv else {})
[ "def", "dns", "(", ")", ":", "if", "(", "salt", ".", "utils", ".", "is_windows", "(", ")", "or", "(", "'proxyminion'", "in", "__opts__", ")", ")", ":", "return", "{", "}", "resolv", "=", "salt", ".", "utils", ".", "dns", ".", "parse_resolv", "(", ")", "for", "key", "in", "(", "'nameservers'", ",", "'ip4_nameservers'", ",", "'ip6_nameservers'", ",", "'sortlist'", ")", ":", "if", "(", "key", "in", "resolv", ")", ":", "resolv", "[", "key", "]", "=", "[", "str", "(", "i", ")", "for", "i", "in", "resolv", "[", "key", "]", "]", "return", "(", "{", "'dns'", ":", "resolv", "}", "if", "resolv", "else", "{", "}", ")" ]
parse the resolver configuration file .
train
false
20,030
def encipher_vigenere(msg, key, symbols=None): (msg, key, A) = _prep(msg, key, symbols) map = {c: i for (i, c) in enumerate(A)} key = [map[c] for c in key] N = len(map) k = len(key) rv = [] for (i, m) in enumerate(msg): rv.append(A[((map[m] + key[(i % k)]) % N)]) rv = ''.join(rv) return rv
[ "def", "encipher_vigenere", "(", "msg", ",", "key", ",", "symbols", "=", "None", ")", ":", "(", "msg", ",", "key", ",", "A", ")", "=", "_prep", "(", "msg", ",", "key", ",", "symbols", ")", "map", "=", "{", "c", ":", "i", "for", "(", "i", ",", "c", ")", "in", "enumerate", "(", "A", ")", "}", "key", "=", "[", "map", "[", "c", "]", "for", "c", "in", "key", "]", "N", "=", "len", "(", "map", ")", "k", "=", "len", "(", "key", ")", "rv", "=", "[", "]", "for", "(", "i", ",", "m", ")", "in", "enumerate", "(", "msg", ")", ":", "rv", ".", "append", "(", "A", "[", "(", "(", "map", "[", "m", "]", "+", "key", "[", "(", "i", "%", "k", ")", "]", ")", "%", "N", ")", "]", ")", "rv", "=", "''", ".", "join", "(", "rv", ")", "return", "rv" ]
performs the vigenère cipher encryption on plaintext msg .
train
false
20,031
def makeService(config): if config['esmtp']: rmType = relaymanager.SmartHostESMTPRelayingManager smtpFactory = config.service.getESMTPFactory else: rmType = relaymanager.SmartHostSMTPRelayingManager smtpFactory = config.service.getSMTPFactory if config['relay']: dir = config['relay'] if (not os.path.isdir(dir)): os.mkdir(dir) config.service.setQueue(relaymanager.Queue(dir)) default = relay.DomainQueuer(config.service) manager = rmType(config.service.queue) if config['esmtp']: manager.fArgs += (None, None) manager.fArgs += (config['hostname'],) helper = relaymanager.RelayStateHelper(manager, 1) helper.setServiceParent(config.service) config.service.domains.setDefaultDomain(default) if config['pop3']: f = config.service.getPOP3Factory() for endpoint in config['pop3']: svc = internet.StreamServerEndpointService(endpoint, f) svc.setServiceParent(config.service) if config['smtp']: f = smtpFactory() if config['hostname']: f.domain = config['hostname'] f.fArgs = (f.domain,) if config['esmtp']: f.fArgs = ((None, None) + f.fArgs) for endpoint in config['smtp']: svc = internet.StreamServerEndpointService(endpoint, f) svc.setServiceParent(config.service) return config.service
[ "def", "makeService", "(", "config", ")", ":", "if", "config", "[", "'esmtp'", "]", ":", "rmType", "=", "relaymanager", ".", "SmartHostESMTPRelayingManager", "smtpFactory", "=", "config", ".", "service", ".", "getESMTPFactory", "else", ":", "rmType", "=", "relaymanager", ".", "SmartHostSMTPRelayingManager", "smtpFactory", "=", "config", ".", "service", ".", "getSMTPFactory", "if", "config", "[", "'relay'", "]", ":", "dir", "=", "config", "[", "'relay'", "]", "if", "(", "not", "os", ".", "path", ".", "isdir", "(", "dir", ")", ")", ":", "os", ".", "mkdir", "(", "dir", ")", "config", ".", "service", ".", "setQueue", "(", "relaymanager", ".", "Queue", "(", "dir", ")", ")", "default", "=", "relay", ".", "DomainQueuer", "(", "config", ".", "service", ")", "manager", "=", "rmType", "(", "config", ".", "service", ".", "queue", ")", "if", "config", "[", "'esmtp'", "]", ":", "manager", ".", "fArgs", "+=", "(", "None", ",", "None", ")", "manager", ".", "fArgs", "+=", "(", "config", "[", "'hostname'", "]", ",", ")", "helper", "=", "relaymanager", ".", "RelayStateHelper", "(", "manager", ",", "1", ")", "helper", ".", "setServiceParent", "(", "config", ".", "service", ")", "config", ".", "service", ".", "domains", ".", "setDefaultDomain", "(", "default", ")", "if", "config", "[", "'pop3'", "]", ":", "f", "=", "config", ".", "service", ".", "getPOP3Factory", "(", ")", "for", "endpoint", "in", "config", "[", "'pop3'", "]", ":", "svc", "=", "internet", ".", "StreamServerEndpointService", "(", "endpoint", ",", "f", ")", "svc", ".", "setServiceParent", "(", "config", ".", "service", ")", "if", "config", "[", "'smtp'", "]", ":", "f", "=", "smtpFactory", "(", ")", "if", "config", "[", "'hostname'", "]", ":", "f", ".", "domain", "=", "config", "[", "'hostname'", "]", "f", ".", "fArgs", "=", "(", "f", ".", "domain", ",", ")", "if", "config", "[", "'esmtp'", "]", ":", "f", ".", "fArgs", "=", "(", "(", "None", ",", "None", ")", "+", "f", ".", "fArgs", ")", "for", "endpoint", "in", 
"config", "[", "'smtp'", "]", ":", "svc", "=", "internet", ".", "StreamServerEndpointService", "(", "endpoint", ",", "f", ")", "svc", ".", "setServiceParent", "(", "config", ".", "service", ")", "return", "config", ".", "service" ]
return a service that will be attached to the application .
train
false
20,032
def perform_commit(request, obj): return execute_locked(request, obj, _('All pending translations were committed.'), obj.commit_pending, request)
[ "def", "perform_commit", "(", "request", ",", "obj", ")", ":", "return", "execute_locked", "(", "request", ",", "obj", ",", "_", "(", "'All pending translations were committed.'", ")", ",", "obj", ".", "commit_pending", ",", "request", ")" ]
helper function to do the repository commit .
train
false
20,033
@track_state_change(light.ENTITY_ID_ALL_LIGHTS, STATE_ON, STATE_OFF) def all_lights_off(hass, entity_id, old_state, new_state): if (not TARGET_ID): return if core.is_on(hass, TARGET_ID): _LOGGER.info('All lights have been turned off, turning it off') core.turn_off(hass, TARGET_ID)
[ "@", "track_state_change", "(", "light", ".", "ENTITY_ID_ALL_LIGHTS", ",", "STATE_ON", ",", "STATE_OFF", ")", "def", "all_lights_off", "(", "hass", ",", "entity_id", ",", "old_state", ",", "new_state", ")", ":", "if", "(", "not", "TARGET_ID", ")", ":", "return", "if", "core", ".", "is_on", "(", "hass", ",", "TARGET_ID", ")", ":", "_LOGGER", ".", "info", "(", "'All lights have been turned off, turning it off'", ")", "core", ".", "turn_off", "(", "hass", ",", "TARGET_ID", ")" ]
if all lights turn off .
train
false
20,034
def test_batch_normalized_mlp_construction(): mlp = BatchNormalizedMLP([Tanh(), Tanh()], [5, 7, 9]) assert all((isinstance(a, Sequence) for a in mlp.activations)) assert all((isinstance(a.children[0], BatchNormalization) for a in mlp.activations)) assert all((isinstance(a.children[1], Tanh) for a in mlp.activations))
[ "def", "test_batch_normalized_mlp_construction", "(", ")", ":", "mlp", "=", "BatchNormalizedMLP", "(", "[", "Tanh", "(", ")", ",", "Tanh", "(", ")", "]", ",", "[", "5", ",", "7", ",", "9", "]", ")", "assert", "all", "(", "(", "isinstance", "(", "a", ",", "Sequence", ")", "for", "a", "in", "mlp", ".", "activations", ")", ")", "assert", "all", "(", "(", "isinstance", "(", "a", ".", "children", "[", "0", "]", ",", "BatchNormalization", ")", "for", "a", "in", "mlp", ".", "activations", ")", ")", "assert", "all", "(", "(", "isinstance", "(", "a", ".", "children", "[", "1", "]", ",", "Tanh", ")", "for", "a", "in", "mlp", ".", "activations", ")", ")" ]
test that batchnormalizedmlp performs construction correctly .
train
false
20,035
def connectMsToNet(Facility_presence=0, ConnectedSubaddress_presence=0, UserUser_presence=0, SsVersionIndicator_presence=0): a = TpPd(pd=3) b = MessageType(mesType=7) packet = (a / b) if (Facility_presence is 1): c = FacilityHdr(ieiF=28, eightBitF=0) packet = (packet / c) if (ConnectedSubaddress_presence is 1): d = ConnectedSubaddressHdr(ieiCS=77, eightBitCS=0) packet = (packet / d) if (UserUser_presence is 1): e = UserUserHdr(ieiUU=127, eightBitUU=0) packet = (packet / e) if (SsVersionIndicator_presence is 1): f = SsVersionIndicatorHdr(ieiSVI=127, eightBitSVI=0) packet = (packet / f) return packet
[ "def", "connectMsToNet", "(", "Facility_presence", "=", "0", ",", "ConnectedSubaddress_presence", "=", "0", ",", "UserUser_presence", "=", "0", ",", "SsVersionIndicator_presence", "=", "0", ")", ":", "a", "=", "TpPd", "(", "pd", "=", "3", ")", "b", "=", "MessageType", "(", "mesType", "=", "7", ")", "packet", "=", "(", "a", "/", "b", ")", "if", "(", "Facility_presence", "is", "1", ")", ":", "c", "=", "FacilityHdr", "(", "ieiF", "=", "28", ",", "eightBitF", "=", "0", ")", "packet", "=", "(", "packet", "/", "c", ")", "if", "(", "ConnectedSubaddress_presence", "is", "1", ")", ":", "d", "=", "ConnectedSubaddressHdr", "(", "ieiCS", "=", "77", ",", "eightBitCS", "=", "0", ")", "packet", "=", "(", "packet", "/", "d", ")", "if", "(", "UserUser_presence", "is", "1", ")", ":", "e", "=", "UserUserHdr", "(", "ieiUU", "=", "127", ",", "eightBitUU", "=", "0", ")", "packet", "=", "(", "packet", "/", "e", ")", "if", "(", "SsVersionIndicator_presence", "is", "1", ")", ":", "f", "=", "SsVersionIndicatorHdr", "(", "ieiSVI", "=", "127", ",", "eightBitSVI", "=", "0", ")", "packet", "=", "(", "packet", "/", "f", ")", "return", "packet" ]
connect section 9 .
train
true
20,036
def add_cell_to_compute_node(compute_node, cell_name): compute_proxy = ComputeNodeProxy(compute_node, cell_name) return compute_proxy
[ "def", "add_cell_to_compute_node", "(", "compute_node", ",", "cell_name", ")", ":", "compute_proxy", "=", "ComputeNodeProxy", "(", "compute_node", ",", "cell_name", ")", "return", "compute_proxy" ]
fix compute_node attributes that should be unique .
train
false
20,037
@docstring.dedent_interpd def specgram(x, NFFT=None, Fs=None, detrend=None, window=None, noverlap=None, pad_to=None, sides=None, scale_by_freq=None, mode=None): if (noverlap is None): noverlap = 128 if (NFFT is None): NFFT = 256 if (len(x) <= NFFT): warnings.warn((u'Only one segment is calculated since parameter NFFT ' + (u'(=%d) >= signal length (=%d).' % (NFFT, len(x))))) (spec, freqs, t) = _spectral_helper(x=x, y=None, NFFT=NFFT, Fs=Fs, detrend_func=detrend, window=window, noverlap=noverlap, pad_to=pad_to, sides=sides, scale_by_freq=scale_by_freq, mode=mode) if (mode != u'complex'): spec = spec.real return (spec, freqs, t)
[ "@", "docstring", ".", "dedent_interpd", "def", "specgram", "(", "x", ",", "NFFT", "=", "None", ",", "Fs", "=", "None", ",", "detrend", "=", "None", ",", "window", "=", "None", ",", "noverlap", "=", "None", ",", "pad_to", "=", "None", ",", "sides", "=", "None", ",", "scale_by_freq", "=", "None", ",", "mode", "=", "None", ")", ":", "if", "(", "noverlap", "is", "None", ")", ":", "noverlap", "=", "128", "if", "(", "NFFT", "is", "None", ")", ":", "NFFT", "=", "256", "if", "(", "len", "(", "x", ")", "<=", "NFFT", ")", ":", "warnings", ".", "warn", "(", "(", "u'Only one segment is calculated since parameter NFFT '", "+", "(", "u'(=%d) >= signal length (=%d).'", "%", "(", "NFFT", ",", "len", "(", "x", ")", ")", ")", ")", ")", "(", "spec", ",", "freqs", ",", "t", ")", "=", "_spectral_helper", "(", "x", "=", "x", ",", "y", "=", "None", ",", "NFFT", "=", "NFFT", ",", "Fs", "=", "Fs", ",", "detrend_func", "=", "detrend", ",", "window", "=", "window", ",", "noverlap", "=", "noverlap", ",", "pad_to", "=", "pad_to", ",", "sides", "=", "sides", ",", "scale_by_freq", "=", "scale_by_freq", ",", "mode", "=", "mode", ")", "if", "(", "mode", "!=", "u'complex'", ")", ":", "spec", "=", "spec", ".", "real", "return", "(", "spec", ",", "freqs", ",", "t", ")" ]
compute a spectrogram of data in *x* .
train
false
20,038
def test_evaluation_error(): max_rating = 5.0 min_rating = 1.0 y_real = np.array([0.0, 1.0, 0.0, 2.0, 3.0]) y_pred = np.array([0.0, 1.0, 0.0, 2.0, 3.0]) (mae, nmae, rmse) = evaluation_error(y_real, y_pred, max_rating, min_rating) assert_equals(mae, 0.0) assert_equals(nmae, 0.0) assert_equals(rmse, 0.0)
[ "def", "test_evaluation_error", "(", ")", ":", "max_rating", "=", "5.0", "min_rating", "=", "1.0", "y_real", "=", "np", ".", "array", "(", "[", "0.0", ",", "1.0", ",", "0.0", ",", "2.0", ",", "3.0", "]", ")", "y_pred", "=", "np", ".", "array", "(", "[", "0.0", ",", "1.0", ",", "0.0", ",", "2.0", ",", "3.0", "]", ")", "(", "mae", ",", "nmae", ",", "rmse", ")", "=", "evaluation_error", "(", "y_real", ",", "y_pred", ",", "max_rating", ",", "min_rating", ")", "assert_equals", "(", "mae", ",", "0.0", ")", "assert_equals", "(", "nmae", ",", "0.0", ")", "assert_equals", "(", "rmse", ",", "0.0", ")" ]
check the error evaluation .
train
false
20,039
@mock_ec2 def test_igw_attach(): conn = boto.connect_vpc(u'the_key', u'the_secret') igw = conn.create_internet_gateway() vpc = conn.create_vpc(VPC_CIDR) with assert_raises(JSONResponseError) as ex: conn.attach_internet_gateway(igw.id, vpc.id, dry_run=True) ex.exception.reason.should.equal(u'DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal(u'An error occurred (DryRunOperation) when calling the AttachInternetGateway operation: Request would have succeeded, but DryRun flag is set') conn.attach_internet_gateway(igw.id, vpc.id) igw = conn.get_all_internet_gateways()[0] igw.attachments[0].vpc_id.should.be.equal(vpc.id)
[ "@", "mock_ec2", "def", "test_igw_attach", "(", ")", ":", "conn", "=", "boto", ".", "connect_vpc", "(", "u'the_key'", ",", "u'the_secret'", ")", "igw", "=", "conn", ".", "create_internet_gateway", "(", ")", "vpc", "=", "conn", ".", "create_vpc", "(", "VPC_CIDR", ")", "with", "assert_raises", "(", "JSONResponseError", ")", "as", "ex", ":", "conn", ".", "attach_internet_gateway", "(", "igw", ".", "id", ",", "vpc", ".", "id", ",", "dry_run", "=", "True", ")", "ex", ".", "exception", ".", "reason", ".", "should", ".", "equal", "(", "u'DryRunOperation'", ")", "ex", ".", "exception", ".", "status", ".", "should", ".", "equal", "(", "400", ")", "ex", ".", "exception", ".", "message", ".", "should", ".", "equal", "(", "u'An error occurred (DryRunOperation) when calling the AttachInternetGateway operation: Request would have succeeded, but DryRun flag is set'", ")", "conn", ".", "attach_internet_gateway", "(", "igw", ".", "id", ",", "vpc", ".", "id", ")", "igw", "=", "conn", ".", "get_all_internet_gateways", "(", ")", "[", "0", "]", "igw", ".", "attachments", "[", "0", "]", ".", "vpc_id", ".", "should", ".", "be", ".", "equal", "(", "vpc", ".", "id", ")" ]
internet gateway attach .
train
false
20,041
def register_logger(app): if app.debug: return handler = logging.StreamHandler() handler.setLevel(logging.ERROR) app.logger.addHandler(handler)
[ "def", "register_logger", "(", "app", ")", ":", "if", "app", ".", "debug", ":", "return", "handler", "=", "logging", ".", "StreamHandler", "(", ")", "handler", ".", "setLevel", "(", "logging", ".", "ERROR", ")", "app", ".", "logger", ".", "addHandler", "(", "handler", ")" ]
track the logger for production mode .
train
false
20,043
def repartition_npartitions(df, npartitions): npartitions_ratio = (df.npartitions / npartitions) new_partitions_boundaries = [int((new_partition_index * npartitions_ratio)) for new_partition_index in range((npartitions + 1))] new_name = ('repartition-%d-%s' % (npartitions, tokenize(df))) dsk = {} for new_partition_index in range(npartitions): value = (pd.concat, [(df._name, old_partition_index) for old_partition_index in range(new_partitions_boundaries[new_partition_index], new_partitions_boundaries[(new_partition_index + 1)])]) dsk[(new_name, new_partition_index)] = value divisions = [df.divisions[new_partition_index] for new_partition_index in new_partitions_boundaries] return DataFrame(merge(df.dask, dsk), new_name, df._meta, divisions)
[ "def", "repartition_npartitions", "(", "df", ",", "npartitions", ")", ":", "npartitions_ratio", "=", "(", "df", ".", "npartitions", "/", "npartitions", ")", "new_partitions_boundaries", "=", "[", "int", "(", "(", "new_partition_index", "*", "npartitions_ratio", ")", ")", "for", "new_partition_index", "in", "range", "(", "(", "npartitions", "+", "1", ")", ")", "]", "new_name", "=", "(", "'repartition-%d-%s'", "%", "(", "npartitions", ",", "tokenize", "(", "df", ")", ")", ")", "dsk", "=", "{", "}", "for", "new_partition_index", "in", "range", "(", "npartitions", ")", ":", "value", "=", "(", "pd", ".", "concat", ",", "[", "(", "df", ".", "_name", ",", "old_partition_index", ")", "for", "old_partition_index", "in", "range", "(", "new_partitions_boundaries", "[", "new_partition_index", "]", ",", "new_partitions_boundaries", "[", "(", "new_partition_index", "+", "1", ")", "]", ")", "]", ")", "dsk", "[", "(", "new_name", ",", "new_partition_index", ")", "]", "=", "value", "divisions", "=", "[", "df", ".", "divisions", "[", "new_partition_index", "]", "for", "new_partition_index", "in", "new_partitions_boundaries", "]", "return", "DataFrame", "(", "merge", "(", "df", ".", "dask", ",", "dsk", ")", ",", "new_name", ",", "df", ".", "_meta", ",", "divisions", ")" ]
repartition dataframe to a smaller number of partitions .
train
false
20,045
def raw_post_view(request): if (request.method == 'POST'): root = parseString(request.body) first_book = root.firstChild.firstChild (title, author) = [n.firstChild.nodeValue for n in first_book.childNodes] t = Template('{{ title }} - {{ author }}', name='Book template') c = Context({'title': title, 'author': author}) else: t = Template('GET request.', name='Book GET template') c = Context() return HttpResponse(t.render(c))
[ "def", "raw_post_view", "(", "request", ")", ":", "if", "(", "request", ".", "method", "==", "'POST'", ")", ":", "root", "=", "parseString", "(", "request", ".", "body", ")", "first_book", "=", "root", ".", "firstChild", ".", "firstChild", "(", "title", ",", "author", ")", "=", "[", "n", ".", "firstChild", ".", "nodeValue", "for", "n", "in", "first_book", ".", "childNodes", "]", "t", "=", "Template", "(", "'{{ title }} - {{ author }}'", ",", "name", "=", "'Book template'", ")", "c", "=", "Context", "(", "{", "'title'", ":", "title", ",", "'author'", ":", "author", "}", ")", "else", ":", "t", "=", "Template", "(", "'GET request.'", ",", "name", "=", "'Book GET template'", ")", "c", "=", "Context", "(", ")", "return", "HttpResponse", "(", "t", ".", "render", "(", "c", ")", ")" ]
a view which expects raw xml to be posted and returns content extracted from the xml .
train
false
20,046
def line_at_cursor(cell, cursor_pos=0): offset = 0 lines = cell.splitlines(True) for line in lines: next_offset = (offset + len(line)) if (next_offset >= cursor_pos): break offset = next_offset else: line = '' return (line, offset)
[ "def", "line_at_cursor", "(", "cell", ",", "cursor_pos", "=", "0", ")", ":", "offset", "=", "0", "lines", "=", "cell", ".", "splitlines", "(", "True", ")", "for", "line", "in", "lines", ":", "next_offset", "=", "(", "offset", "+", "len", "(", "line", ")", ")", "if", "(", "next_offset", ">=", "cursor_pos", ")", ":", "break", "offset", "=", "next_offset", "else", ":", "line", "=", "''", "return", "(", "line", ",", "offset", ")" ]
return the line in a cell at a given cursor position used for calling line-based apis that dont support multi-line input .
train
false
20,047
def test_decimal(): assert (hug.types.decimal('1.1') == Decimal('1.1')) assert (hug.types.decimal('1') == Decimal('1')) assert (hug.types.decimal(1.1) == Decimal(1.1)) with pytest.raises(ValueError): hug.types.decimal('bacon')
[ "def", "test_decimal", "(", ")", ":", "assert", "(", "hug", ".", "types", ".", "decimal", "(", "'1.1'", ")", "==", "Decimal", "(", "'1.1'", ")", ")", "assert", "(", "hug", ".", "types", ".", "decimal", "(", "'1'", ")", "==", "Decimal", "(", "'1'", ")", ")", "assert", "(", "hug", ".", "types", ".", "decimal", "(", "1.1", ")", "==", "Decimal", "(", "1.1", ")", ")", "with", "pytest", ".", "raises", "(", "ValueError", ")", ":", "hug", ".", "types", ".", "decimal", "(", "'bacon'", ")" ]
tests to ensure the decimal type correctly allows decimal values .
train
false
20,048
def create_const(value, size, sparse=False): if (size == (1, 1)): op_type = lo.SCALAR_CONST if (not np.isscalar(value)): value = value[(0, 0)] elif sparse: op_type = lo.SPARSE_CONST else: op_type = lo.DENSE_CONST return lo.LinOp(op_type, size, [], value)
[ "def", "create_const", "(", "value", ",", "size", ",", "sparse", "=", "False", ")", ":", "if", "(", "size", "==", "(", "1", ",", "1", ")", ")", ":", "op_type", "=", "lo", ".", "SCALAR_CONST", "if", "(", "not", "np", ".", "isscalar", "(", "value", ")", ")", ":", "value", "=", "value", "[", "(", "0", ",", "0", ")", "]", "elif", "sparse", ":", "op_type", "=", "lo", ".", "SPARSE_CONST", "else", ":", "op_type", "=", "lo", ".", "DENSE_CONST", "return", "lo", ".", "LinOp", "(", "op_type", ",", "size", ",", "[", "]", ",", "value", ")" ]
wraps a constant .
train
false
20,049
def test_angle_with_cds_units_enabled(): from ...units import cds from ..angle_utilities import _AngleParser del _AngleParser._parser with cds.enable(): Angle(u'5d') del _AngleParser._parser Angle(u'5d')
[ "def", "test_angle_with_cds_units_enabled", "(", ")", ":", "from", "...", "units", "import", "cds", "from", ".", ".", "angle_utilities", "import", "_AngleParser", "del", "_AngleParser", ".", "_parser", "with", "cds", ".", "enable", "(", ")", ":", "Angle", "(", "u'5d'", ")", "del", "_AngleParser", ".", "_parser", "Angle", "(", "u'5d'", ")" ]
regression test for #5350 especially the example in URL#issuecomment-248770151 .
train
false
20,050
def s3_plugin_initialize(event_handlers): awscli_initialize(event_handlers)
[ "def", "s3_plugin_initialize", "(", "event_handlers", ")", ":", "awscli_initialize", "(", "event_handlers", ")" ]
this is a wrapper to make the plugin built-in to the cli as opposed to specifiying it in the configuration file .
train
false
20,051
def get_doc_module(module, doctype, name): module_name = u'{app}.{module}.{doctype}.{name}.{name}'.format(app=frappe.local.module_app[scrub(module)], doctype=scrub(doctype), module=scrub(module), name=scrub(name)) return frappe.get_module(module_name)
[ "def", "get_doc_module", "(", "module", ",", "doctype", ",", "name", ")", ":", "module_name", "=", "u'{app}.{module}.{doctype}.{name}.{name}'", ".", "format", "(", "app", "=", "frappe", ".", "local", ".", "module_app", "[", "scrub", "(", "module", ")", "]", ",", "doctype", "=", "scrub", "(", "doctype", ")", ",", "module", "=", "scrub", "(", "module", ")", ",", "name", "=", "scrub", "(", "name", ")", ")", "return", "frappe", ".", "get_module", "(", "module_name", ")" ]
get custom module for given document .
train
false
20,052
def _encoded(value, encoding='utf-8'): value_type = type(value) if (value_type != str): if (value_type == six.binary_type): value = value.decode(encoding) elif (value_type == six.text_type): value = value.encode(encoding) return value
[ "def", "_encoded", "(", "value", ",", "encoding", "=", "'utf-8'", ")", ":", "value_type", "=", "type", "(", "value", ")", "if", "(", "value_type", "!=", "str", ")", ":", "if", "(", "value_type", "==", "six", ".", "binary_type", ")", ":", "value", "=", "value", ".", "decode", "(", "encoding", ")", "elif", "(", "value_type", "==", "six", ".", "text_type", ")", ":", "value", "=", "value", ".", "encode", "(", "encoding", ")", "return", "value" ]
make sure the value is of type str in both py2 and py3 .
train
false
20,053
def quote_unescape(value, lf='&mjf-lf;', quot='&mjf-quot;'): return value.replace(lf, '\n').replace(quot, '"')
[ "def", "quote_unescape", "(", "value", ",", "lf", "=", "'&mjf-lf;'", ",", "quot", "=", "'&mjf-quot;'", ")", ":", "return", "value", ".", "replace", "(", "lf", ",", "'\\n'", ")", ".", "replace", "(", "quot", ",", "'\"'", ")" ]
unescape a string escaped by quote_escape .
train
false
20,054
def make_dir_struct(tag, base, out_base): lbase = len(base) pathsep = os.path.sep lpathsep = len(pathsep) out = [] for (dirpath, dirnames, filenames) in os.walk(base): dp_eff = dirpath[lbase:] if dp_eff.startswith(pathsep): dp_eff = dp_eff[lpathsep:] out_path = pjoin(out_base, dp_eff) pfiles = [pjoin(dirpath, f) for f in filenames] out.append((out_path, pfiles)) return out
[ "def", "make_dir_struct", "(", "tag", ",", "base", ",", "out_base", ")", ":", "lbase", "=", "len", "(", "base", ")", "pathsep", "=", "os", ".", "path", ".", "sep", "lpathsep", "=", "len", "(", "pathsep", ")", "out", "=", "[", "]", "for", "(", "dirpath", ",", "dirnames", ",", "filenames", ")", "in", "os", ".", "walk", "(", "base", ")", ":", "dp_eff", "=", "dirpath", "[", "lbase", ":", "]", "if", "dp_eff", ".", "startswith", "(", "pathsep", ")", ":", "dp_eff", "=", "dp_eff", "[", "lpathsep", ":", "]", "out_path", "=", "pjoin", "(", "out_base", ",", "dp_eff", ")", "pfiles", "=", "[", "pjoin", "(", "dirpath", ",", "f", ")", "for", "f", "in", "filenames", "]", "out", ".", "append", "(", "(", "out_path", ",", "pfiles", ")", ")", "return", "out" ]
make the directory structure of all files below a starting dir .
train
false
20,055
def request_configuration(network, hass, config): configurator = get_component('configurator') if ('ecobee' in _CONFIGURING): configurator.notify_errors(_CONFIGURING['ecobee'], 'Failed to register, please try again.') return def ecobee_configuration_callback(callback_data): 'The actions to do when our configuration callback is called.' network.request_tokens() network.update() setup_ecobee(hass, network, config) _CONFIGURING['ecobee'] = configurator.request_config(hass, 'Ecobee', ecobee_configuration_callback, description=('Please authorize this app at https://www.ecobee.com/consumerportal/index.html with pin code: ' + network.pin), description_image='/static/images/config_ecobee_thermostat.png', submit_caption='I have authorized the app.')
[ "def", "request_configuration", "(", "network", ",", "hass", ",", "config", ")", ":", "configurator", "=", "get_component", "(", "'configurator'", ")", "if", "(", "'ecobee'", "in", "_CONFIGURING", ")", ":", "configurator", ".", "notify_errors", "(", "_CONFIGURING", "[", "'ecobee'", "]", ",", "'Failed to register, please try again.'", ")", "return", "def", "ecobee_configuration_callback", "(", "callback_data", ")", ":", "network", ".", "request_tokens", "(", ")", "network", ".", "update", "(", ")", "setup_ecobee", "(", "hass", ",", "network", ",", "config", ")", "_CONFIGURING", "[", "'ecobee'", "]", "=", "configurator", ".", "request_config", "(", "hass", ",", "'Ecobee'", ",", "ecobee_configuration_callback", ",", "description", "=", "(", "'Please authorize this app at https://www.ecobee.com/consumerportal/index.html with pin code: '", "+", "network", ".", "pin", ")", ",", "description_image", "=", "'/static/images/config_ecobee_thermostat.png'", ",", "submit_caption", "=", "'I have authorized the app.'", ")" ]
request configuration steps from the user .
train
false
20,056
def report_total_messages_stats(sect, stats, previous_stats): lines = ['type', 'number', 'previous', 'difference'] lines += checkers.table_lines_from_stats(stats, previous_stats, ('convention', 'refactor', 'warning', 'error')) sect.append(ureports.Table(children=lines, cols=4, rheaders=1))
[ "def", "report_total_messages_stats", "(", "sect", ",", "stats", ",", "previous_stats", ")", ":", "lines", "=", "[", "'type'", ",", "'number'", ",", "'previous'", ",", "'difference'", "]", "lines", "+=", "checkers", ".", "table_lines_from_stats", "(", "stats", ",", "previous_stats", ",", "(", "'convention'", ",", "'refactor'", ",", "'warning'", ",", "'error'", ")", ")", "sect", ".", "append", "(", "ureports", ".", "Table", "(", "children", "=", "lines", ",", "cols", "=", "4", ",", "rheaders", "=", "1", ")", ")" ]
make total errors / warnings report .
train
true
20,058
def parse_docstring_info(text): q = '"""|\'\'\'' p = '({})\\s+([^\\n]+)\\s+\\=+\\s+(.*?)(\\1)'.format(q) m = re.search(p, text, re.S) if m: comment = m.group(3).replace('\n', ' ') first_sentence = comment[:(comment.find('.') + 1)] return {'docstring': m.group(0), 'title': m.group(2), 'description': m.group(3), 'first_sentence': first_sentence} else: return {'error': 'Did not find docstring with title at top of file.'}
[ "def", "parse_docstring_info", "(", "text", ")", ":", "q", "=", "'\"\"\"|\\'\\'\\''", "p", "=", "'({})\\\\s+([^\\\\n]+)\\\\s+\\\\=+\\\\s+(.*?)(\\\\1)'", ".", "format", "(", "q", ")", "m", "=", "re", ".", "search", "(", "p", ",", "text", ",", "re", ".", "S", ")", "if", "m", ":", "comment", "=", "m", ".", "group", "(", "3", ")", ".", "replace", "(", "'\\n'", ",", "' '", ")", "first_sentence", "=", "comment", "[", ":", "(", "comment", ".", "find", "(", "'.'", ")", "+", "1", ")", "]", "return", "{", "'docstring'", ":", "m", ".", "group", "(", "0", ")", ",", "'title'", ":", "m", ".", "group", "(", "2", ")", ",", "'description'", ":", "m", ".", "group", "(", "3", ")", ",", "'first_sentence'", ":", "first_sentence", "}", "else", ":", "return", "{", "'error'", ":", "'Did not find docstring with title at top of file.'", "}" ]
parse docstring from text and return an info dict .
train
false
20,060
def _inherited_permissions(object_uri, permission): (resource_name, plural) = _resource_endpoint(object_uri) try: object_perms_tree = PERMISSIONS_INHERITANCE_TREE[resource_name] except KeyError: return [] attributes_permission = (('%s:attributes' % permission) if (not plural) else permission) inherited_perms = object_perms_tree.get(attributes_permission, object_perms_tree[permission]) granters = set() for (related_resource_name, implicit_permissions) in inherited_perms.items(): for permission in implicit_permissions: related_uri = _relative_object_uri(related_resource_name, object_uri) granters.add((related_uri, permission)) return sorted(granters, key=(lambda uri_perm: len(uri_perm[0])), reverse=True)
[ "def", "_inherited_permissions", "(", "object_uri", ",", "permission", ")", ":", "(", "resource_name", ",", "plural", ")", "=", "_resource_endpoint", "(", "object_uri", ")", "try", ":", "object_perms_tree", "=", "PERMISSIONS_INHERITANCE_TREE", "[", "resource_name", "]", "except", "KeyError", ":", "return", "[", "]", "attributes_permission", "=", "(", "(", "'%s:attributes'", "%", "permission", ")", "if", "(", "not", "plural", ")", "else", "permission", ")", "inherited_perms", "=", "object_perms_tree", ".", "get", "(", "attributes_permission", ",", "object_perms_tree", "[", "permission", "]", ")", "granters", "=", "set", "(", ")", "for", "(", "related_resource_name", ",", "implicit_permissions", ")", "in", "inherited_perms", ".", "items", "(", ")", ":", "for", "permission", "in", "implicit_permissions", ":", "related_uri", "=", "_relative_object_uri", "(", "related_resource_name", ",", "object_uri", ")", "granters", ".", "add", "(", "(", "related_uri", ",", "permission", ")", ")", "return", "sorted", "(", "granters", ",", "key", "=", "(", "lambda", "uri_perm", ":", "len", "(", "uri_perm", "[", "0", "]", ")", ")", ",", "reverse", "=", "True", ")" ]
build the list of all permissions that can grant access to the given object uri and permission .
train
false
20,061
def file_reader(fp, chunk_size=CHUNK_SIZE): while 1: data = fp.read(chunk_size) if (not data): break (yield data) fp.close()
[ "def", "file_reader", "(", "fp", ",", "chunk_size", "=", "CHUNK_SIZE", ")", ":", "while", "1", ":", "data", "=", "fp", ".", "read", "(", "chunk_size", ")", "if", "(", "not", "data", ")", ":", "break", "(", "yield", "data", ")", "fp", ".", "close", "(", ")" ]
this generator yields the open fileobject in chunks .
train
false
20,062
def get_json_from_feed(user_feed): json = [] for entry in user_feed.entry: json.append({'given_name': entry.name.given_name, 'family_name': entry.name.family_name, 'username': entry.login.user_name, 'admin': entry.login.admin}) return simplejson.dumps(json)
[ "def", "get_json_from_feed", "(", "user_feed", ")", ":", "json", "=", "[", "]", "for", "entry", "in", "user_feed", ".", "entry", ":", "json", ".", "append", "(", "{", "'given_name'", ":", "entry", ".", "name", ".", "given_name", ",", "'family_name'", ":", "entry", ".", "name", ".", "family_name", ",", "'username'", ":", "entry", ".", "login", ".", "user_name", ",", "'admin'", ":", "entry", ".", "login", ".", "admin", "}", ")", "return", "simplejson", ".", "dumps", "(", "json", ")" ]
constructs and returns a json object from the given feed object args: user_feed: a gdata .
train
false
20,064
def cov_hc2(results): h = np.diag(np.dot(results.model.exog, np.dot(results.normalized_cov_params, results.model.exog.T))) het_scale = ((results.resid ** 2) / (1 - h)) cov_hc2_ = _HCCM(results, het_scale) return cov_hc2_
[ "def", "cov_hc2", "(", "results", ")", ":", "h", "=", "np", ".", "diag", "(", "np", ".", "dot", "(", "results", ".", "model", ".", "exog", ",", "np", ".", "dot", "(", "results", ".", "normalized_cov_params", ",", "results", ".", "model", ".", "exog", ".", "T", ")", ")", ")", "het_scale", "=", "(", "(", "results", ".", "resid", "**", "2", ")", "/", "(", "1", "-", "h", ")", ")", "cov_hc2_", "=", "_HCCM", "(", "results", ",", "het_scale", ")", "return", "cov_hc2_" ]
see statsmodels .
train
false
20,066
def usersearch_id(user, channel_id, term): query = generate_search_qs(term) aliases = dict(views='viewCount') if config.USER_ORDER.get: query['order'] = aliases.get(config.USER_ORDER.get, config.USER_ORDER.get) query['channelId'] = channel_id termuser = tuple([((c.y + x) + c.w) for x in (term, user)]) if term: msg = 'Results for {1}{3}{0} (by {2}{4}{0})' progtext = ('%s by %s' % termuser) failmsg = ('No matching results for %s (by %s)' % termuser) else: msg = 'Video uploads by {2}{4}{0}' progtext = termuser[1] if config.SEARCH_MUSIC: failmsg = ("User %s not found or has no videos in the Music category.\nUse 'set search_music False' to show results not in the Music category." % termuser[1]) else: failmsg = ('User %s not found or has no videos.' % termuser[1]) msg = str(msg).format(c.w, c.y, c.y, term, user) _search(progtext, query, msg, failmsg)
[ "def", "usersearch_id", "(", "user", ",", "channel_id", ",", "term", ")", ":", "query", "=", "generate_search_qs", "(", "term", ")", "aliases", "=", "dict", "(", "views", "=", "'viewCount'", ")", "if", "config", ".", "USER_ORDER", ".", "get", ":", "query", "[", "'order'", "]", "=", "aliases", ".", "get", "(", "config", ".", "USER_ORDER", ".", "get", ",", "config", ".", "USER_ORDER", ".", "get", ")", "query", "[", "'channelId'", "]", "=", "channel_id", "termuser", "=", "tuple", "(", "[", "(", "(", "c", ".", "y", "+", "x", ")", "+", "c", ".", "w", ")", "for", "x", "in", "(", "term", ",", "user", ")", "]", ")", "if", "term", ":", "msg", "=", "'Results for {1}{3}{0} (by {2}{4}{0})'", "progtext", "=", "(", "'%s by %s'", "%", "termuser", ")", "failmsg", "=", "(", "'No matching results for %s (by %s)'", "%", "termuser", ")", "else", ":", "msg", "=", "'Video uploads by {2}{4}{0}'", "progtext", "=", "termuser", "[", "1", "]", "if", "config", ".", "SEARCH_MUSIC", ":", "failmsg", "=", "(", "\"User %s not found or has no videos in the Music category.\\nUse 'set search_music False' to show results not in the Music category.\"", "%", "termuser", "[", "1", "]", ")", "else", ":", "failmsg", "=", "(", "'User %s not found or has no videos.'", "%", "termuser", "[", "1", "]", ")", "msg", "=", "str", "(", "msg", ")", ".", "format", "(", "c", ".", "w", ",", "c", ".", "y", ",", "c", ".", "y", ",", "term", ",", "user", ")", "_search", "(", "progtext", ",", "query", ",", "msg", ",", "failmsg", ")" ]
performs a search within a users uploads for an optional search term with the user identified by its id .
train
false
20,067
def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None): global _defaultproxy _defaultproxy = (proxytype, addr, port, rdns, username, password)
[ "def", "setdefaultproxy", "(", "proxytype", "=", "None", ",", "addr", "=", "None", ",", "port", "=", "None", ",", "rdns", "=", "True", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "global", "_defaultproxy", "_defaultproxy", "=", "(", "proxytype", ",", "addr", ",", "port", ",", "rdns", ",", "username", ",", "password", ")" ]
setdefaultproxy sets a default proxy which all further socksocket objects will use .
train
false
20,068
@pytest.mark.network def test_search_exit_status_code_when_finds_no_package(script): result = script.pip('search', 'nonexistentpackage', expect_error=True) assert (result.returncode == NO_MATCHES_FOUND), result.returncode
[ "@", "pytest", ".", "mark", ".", "network", "def", "test_search_exit_status_code_when_finds_no_package", "(", "script", ")", ":", "result", "=", "script", ".", "pip", "(", "'search'", ",", "'nonexistentpackage'", ",", "expect_error", "=", "True", ")", "assert", "(", "result", ".", "returncode", "==", "NO_MATCHES_FOUND", ")", ",", "result", ".", "returncode" ]
test search exit status code for no matches .
train
false
20,069
def command_list(command, command_args=[], **kwds): command_parts = _docker_prefix(**kwds) command_parts.append(command) command_parts.extend(command_args) return command_parts
[ "def", "command_list", "(", "command", ",", "command_args", "=", "[", "]", ",", "**", "kwds", ")", ":", "command_parts", "=", "_docker_prefix", "(", "**", "kwds", ")", "command_parts", ".", "append", "(", "command", ")", "command_parts", ".", "extend", "(", "command_args", ")", "return", "command_parts" ]
return docker command as an argv list .
train
false
20,070
def volumes_tagged(name, tag_maps, authoritative=False, region=None, key=None, keyid=None, profile=None): ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} args = {'tag_maps': tag_maps, 'authoritative': authoritative, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} if __opts__['test']: args['dry_run'] = True r = __salt__['boto_ec2.set_volumes_tags'](**args) if r.get('changes'): ret['comment'] = 'The following changes would be applied: {0}'.format(r) return ret r = __salt__['boto_ec2.set_volumes_tags'](**args) if (r['success'] is True): ret['comment'] = 'Tags applied.' ret['changes'] = r['changes'] else: ret['comment'] = 'Error updating requested volume tags.' ret['result'] = False return ret
[ "def", "volumes_tagged", "(", "name", ",", "tag_maps", ",", "authoritative", "=", "False", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "args", "=", "{", "'tag_maps'", ":", "tag_maps", ",", "'authoritative'", ":", "authoritative", ",", "'region'", ":", "region", ",", "'key'", ":", "key", ",", "'keyid'", ":", "keyid", ",", "'profile'", ":", "profile", "}", "if", "__opts__", "[", "'test'", "]", ":", "args", "[", "'dry_run'", "]", "=", "True", "r", "=", "__salt__", "[", "'boto_ec2.set_volumes_tags'", "]", "(", "**", "args", ")", "if", "r", ".", "get", "(", "'changes'", ")", ":", "ret", "[", "'comment'", "]", "=", "'The following changes would be applied: {0}'", ".", "format", "(", "r", ")", "return", "ret", "r", "=", "__salt__", "[", "'boto_ec2.set_volumes_tags'", "]", "(", "**", "args", ")", "if", "(", "r", "[", "'success'", "]", "is", "True", ")", ":", "ret", "[", "'comment'", "]", "=", "'Tags applied.'", "ret", "[", "'changes'", "]", "=", "r", "[", "'changes'", "]", "else", ":", "ret", "[", "'comment'", "]", "=", "'Error updating requested volume tags.'", "ret", "[", "'result'", "]", "=", "False", "return", "ret" ]
ensure ec2 volume(s) matching the given filters have the defined tags .
train
true
20,072
def invalid_headers_reason(headers): if (headers is None): return 'Headers dictionary was None.' if (not isinstance(headers, dict)): return 'Invalid type for headers. Should be a dictionary.' for (k, v) in headers.iteritems(): if (not isinstance(k, basestring)): return 'Header names should be strings.' if (not isinstance(v, basestring)): return 'Header values should be strings.' if (not is_ascii(k)): return 'Header name should be an ASCII string.' if (k.strip() not in HEADER_WHITELIST): return ('Header "%s" is not allowed.' % k.strip())
[ "def", "invalid_headers_reason", "(", "headers", ")", ":", "if", "(", "headers", "is", "None", ")", ":", "return", "'Headers dictionary was None.'", "if", "(", "not", "isinstance", "(", "headers", ",", "dict", ")", ")", ":", "return", "'Invalid type for headers. Should be a dictionary.'", "for", "(", "k", ",", "v", ")", "in", "headers", ".", "iteritems", "(", ")", ":", "if", "(", "not", "isinstance", "(", "k", ",", "basestring", ")", ")", ":", "return", "'Header names should be strings.'", "if", "(", "not", "isinstance", "(", "v", ",", "basestring", ")", ")", ":", "return", "'Header values should be strings.'", "if", "(", "not", "is_ascii", "(", "k", ")", ")", ":", "return", "'Header name should be an ASCII string.'", "if", "(", "k", ".", "strip", "(", ")", "not", "in", "HEADER_WHITELIST", ")", ":", "return", "(", "'Header \"%s\" is not allowed.'", "%", "k", ".", "strip", "(", ")", ")" ]
determine reason why headers is invalid .
train
false
20,073
def xvnc(registry, xml_parent, data): xwrapper = XML.SubElement(xml_parent, 'hudson.plugins.xvnc.Xvnc') xwrapper.set('plugin', 'xvnc') mapping = [('screenshot', 'takeScreenshot', False), ('xauthority', 'useXauthority', True)] convert_mapping_to_xml(xwrapper, data, mapping, fail_required=True)
[ "def", "xvnc", "(", "registry", ",", "xml_parent", ",", "data", ")", ":", "xwrapper", "=", "XML", ".", "SubElement", "(", "xml_parent", ",", "'hudson.plugins.xvnc.Xvnc'", ")", "xwrapper", ".", "set", "(", "'plugin'", ",", "'xvnc'", ")", "mapping", "=", "[", "(", "'screenshot'", ",", "'takeScreenshot'", ",", "False", ")", ",", "(", "'xauthority'", ",", "'useXauthority'", ",", "True", ")", "]", "convert_mapping_to_xml", "(", "xwrapper", ",", "data", ",", "mapping", ",", "fail_required", "=", "True", ")" ]
yaml: xvnc enable xvnc during the build .
train
false
20,074
def getSnippetsByFileName(fileName, functionName): fileText = archive.getFileText(fileName) snippets = [] functionStart = functionName[:(functionName.find('(') + 1)] tokenEnd = getTokenEnd(0, fileText, functionStart) while (tokenEnd != (-1)): snippet = Snippet(tokenEnd, fileText) snippets.append(snippet) tokenEnd = getTokenEnd(snippet.characterIndex, fileText, functionStart) return snippets
[ "def", "getSnippetsByFileName", "(", "fileName", ",", "functionName", ")", ":", "fileText", "=", "archive", ".", "getFileText", "(", "fileName", ")", "snippets", "=", "[", "]", "functionStart", "=", "functionName", "[", ":", "(", "functionName", ".", "find", "(", "'('", ")", "+", "1", ")", "]", "tokenEnd", "=", "getTokenEnd", "(", "0", ",", "fileText", ",", "functionStart", ")", "while", "(", "tokenEnd", "!=", "(", "-", "1", ")", ")", ":", "snippet", "=", "Snippet", "(", "tokenEnd", ",", "fileText", ")", "snippets", ".", "append", "(", "snippet", ")", "tokenEnd", "=", "getTokenEnd", "(", "snippet", ".", "characterIndex", ",", "fileText", ",", "functionStart", ")", "return", "snippets" ]
get the function signature snippets by the file name .
train
false
20,075
def detect_avro(contents): return (contents[:3] == 'Obj')
[ "def", "detect_avro", "(", "contents", ")", ":", "return", "(", "contents", "[", ":", "3", "]", "==", "'Obj'", ")" ]
this is a silly small function which checks to see if the file is avro .
train
false
20,076
def convert_method_list_to_integer(methods): method_map = construct_method_map_from_config() method_ints = [] for method in methods: for (k, v) in method_map.items(): if (v == method): method_ints.append(k) return sum(method_ints)
[ "def", "convert_method_list_to_integer", "(", "methods", ")", ":", "method_map", "=", "construct_method_map_from_config", "(", ")", "method_ints", "=", "[", "]", "for", "method", "in", "methods", ":", "for", "(", "k", ",", "v", ")", "in", "method_map", ".", "items", "(", ")", ":", "if", "(", "v", "==", "method", ")", ":", "method_ints", ".", "append", "(", "k", ")", "return", "sum", "(", "method_ints", ")" ]
convert the method type(s) to an integer .
train
false
20,077
def loads_with_persistent_ids(str, env): file = BytesIO(str) unpickler = pickle.Unpickler(file) unpickler.persistent_load = partial(_persistent_load, env=env) return unpickler.load()
[ "def", "loads_with_persistent_ids", "(", "str", ",", "env", ")", ":", "file", "=", "BytesIO", "(", "str", ")", "unpickler", "=", "pickle", ".", "Unpickler", "(", "file", ")", "unpickler", ".", "persistent_load", "=", "partial", "(", "_persistent_load", ",", "env", "=", "env", ")", "return", "unpickler", ".", "load", "(", ")" ]
performs a pickle loads on the given string .
train
false
20,078
def s3_remove_last_record_id(tablename=None): session = current.session if tablename: if ((RCVARS in session) and (tablename in session[RCVARS])): del session[RCVARS][tablename] elif (RCVARS in session): del session[RCVARS] return True
[ "def", "s3_remove_last_record_id", "(", "tablename", "=", "None", ")", ":", "session", "=", "current", ".", "session", "if", "tablename", ":", "if", "(", "(", "RCVARS", "in", "session", ")", "and", "(", "tablename", "in", "session", "[", "RCVARS", "]", ")", ")", ":", "del", "session", "[", "RCVARS", "]", "[", "tablename", "]", "elif", "(", "RCVARS", "in", "session", ")", ":", "del", "session", "[", "RCVARS", "]", "return", "True" ]
clears one or all last record ids stored in a session .
train
false
20,079
def _pair_grad_sensors_from_ch_names(ch_names): pairs = defaultdict(list) for (i, name) in enumerate(ch_names): if name.startswith('MEG'): if name.endswith(('2', '3')): key = name[(-4):(-1)] pairs[key].append(i) pairs = [p for p in pairs.values() if (len(p) == 2)] grad_chs = sum(pairs, []) return grad_chs
[ "def", "_pair_grad_sensors_from_ch_names", "(", "ch_names", ")", ":", "pairs", "=", "defaultdict", "(", "list", ")", "for", "(", "i", ",", "name", ")", "in", "enumerate", "(", "ch_names", ")", ":", "if", "name", ".", "startswith", "(", "'MEG'", ")", ":", "if", "name", ".", "endswith", "(", "(", "'2'", ",", "'3'", ")", ")", ":", "key", "=", "name", "[", "(", "-", "4", ")", ":", "(", "-", "1", ")", "]", "pairs", "[", "key", "]", ".", "append", "(", "i", ")", "pairs", "=", "[", "p", "for", "p", "in", "pairs", ".", "values", "(", ")", "if", "(", "len", "(", "p", ")", "==", "2", ")", "]", "grad_chs", "=", "sum", "(", "pairs", ",", "[", "]", ")", "return", "grad_chs" ]
find the indexes for pairing grad channels .
train
false
20,080
def dup_rr_lcm(f, g, K): (fc, f) = dup_primitive(f, K) (gc, g) = dup_primitive(g, K) c = K.lcm(fc, gc) h = dup_quo(dup_mul(f, g, K), dup_gcd(f, g, K), K) return dup_mul_ground(h, c, K)
[ "def", "dup_rr_lcm", "(", "f", ",", "g", ",", "K", ")", ":", "(", "fc", ",", "f", ")", "=", "dup_primitive", "(", "f", ",", "K", ")", "(", "gc", ",", "g", ")", "=", "dup_primitive", "(", "g", ",", "K", ")", "c", "=", "K", ".", "lcm", "(", "fc", ",", "gc", ")", "h", "=", "dup_quo", "(", "dup_mul", "(", "f", ",", "g", ",", "K", ")", ",", "dup_gcd", "(", "f", ",", "g", ",", "K", ")", ",", "K", ")", "return", "dup_mul_ground", "(", "h", ",", "c", ",", "K", ")" ]
computes polynomial lcm over a ring in k[x] .
train
false
20,081
def get_manifest(app_dir, toosl_dir, typ, binary): try: dat = read_manifest(app_dir, toosl_dir, typ, binary).replace('\n', '') try: print '[INFO] Parsing AndroidManifest.xml' manifest = minidom.parseString(dat) except: PrintException('[ERROR] Pasrsing AndroidManifest.xml') manifest = minidom.parseString('<?xml version="1.0" encoding="utf-8"?><manifest xmlns:android="http://schemas.android.com/apk/res/android" android:versionCode="Failed" android:versionName="Failed" package="Failed" platformBuildVersionCode="Failed" platformBuildVersionName="Failed XML Parsing" ></manifest>') print '[WARNING] Using Fake XML to continue the Analysis' return manifest except: PrintException('[ERROR] Parsing Manifest file')
[ "def", "get_manifest", "(", "app_dir", ",", "toosl_dir", ",", "typ", ",", "binary", ")", ":", "try", ":", "dat", "=", "read_manifest", "(", "app_dir", ",", "toosl_dir", ",", "typ", ",", "binary", ")", ".", "replace", "(", "'\\n'", ",", "''", ")", "try", ":", "print", "'[INFO] Parsing AndroidManifest.xml'", "manifest", "=", "minidom", ".", "parseString", "(", "dat", ")", "except", ":", "PrintException", "(", "'[ERROR] Pasrsing AndroidManifest.xml'", ")", "manifest", "=", "minidom", ".", "parseString", "(", "'<?xml version=\"1.0\" encoding=\"utf-8\"?><manifest xmlns:android=\"http://schemas.android.com/apk/res/android\" android:versionCode=\"Failed\" android:versionName=\"Failed\" package=\"Failed\" platformBuildVersionCode=\"Failed\" platformBuildVersionName=\"Failed XML Parsing\" ></manifest>'", ")", "print", "'[WARNING] Using Fake XML to continue the Analysis'", "return", "manifest", "except", ":", "PrintException", "(", "'[ERROR] Parsing Manifest file'", ")" ]
get the manifest file .
train
false
20,084
def is_tuple(n): return (type(n) == tuple)
[ "def", "is_tuple", "(", "n", ")", ":", "return", "(", "type", "(", "n", ")", "==", "tuple", ")" ]
check that the value is a tuple of values .
train
false
20,085
def create_module_file(package, module, opts): text = format_heading(1, ('%s Module' % module)) text += format_heading(2, (':mod:`%s` Module' % module)) text += format_directive(module, package) write_file(makename(package, module), text, opts)
[ "def", "create_module_file", "(", "package", ",", "module", ",", "opts", ")", ":", "text", "=", "format_heading", "(", "1", ",", "(", "'%s Module'", "%", "module", ")", ")", "text", "+=", "format_heading", "(", "2", ",", "(", "':mod:`%s` Module'", "%", "module", ")", ")", "text", "+=", "format_directive", "(", "module", ",", "package", ")", "write_file", "(", "makename", "(", "package", ",", "module", ")", ",", "text", ",", "opts", ")" ]
build the text of the file and write the file .
train
true
20,086
def masked_conv_coupling(input_, mask_in, dim, name, use_batch_norm=True, train=True, weight_norm=True, reverse=False, residual_blocks=5, bottleneck=False, use_aff=True, use_width=1.0, use_height=1.0, mask_channel=0.0, skip=True): if use_aff: return masked_conv_aff_coupling(input_=input_, mask_in=mask_in, dim=dim, name=name, use_batch_norm=use_batch_norm, train=train, weight_norm=weight_norm, reverse=reverse, residual_blocks=residual_blocks, bottleneck=bottleneck, use_width=use_width, use_height=use_height, mask_channel=mask_channel, skip=skip) else: return masked_conv_add_coupling(input_=input_, mask_in=mask_in, dim=dim, name=name, use_batch_norm=use_batch_norm, train=train, weight_norm=weight_norm, reverse=reverse, residual_blocks=residual_blocks, bottleneck=bottleneck, use_width=use_width, use_height=use_height, mask_channel=mask_channel, skip=skip)
[ "def", "masked_conv_coupling", "(", "input_", ",", "mask_in", ",", "dim", ",", "name", ",", "use_batch_norm", "=", "True", ",", "train", "=", "True", ",", "weight_norm", "=", "True", ",", "reverse", "=", "False", ",", "residual_blocks", "=", "5", ",", "bottleneck", "=", "False", ",", "use_aff", "=", "True", ",", "use_width", "=", "1.0", ",", "use_height", "=", "1.0", ",", "mask_channel", "=", "0.0", ",", "skip", "=", "True", ")", ":", "if", "use_aff", ":", "return", "masked_conv_aff_coupling", "(", "input_", "=", "input_", ",", "mask_in", "=", "mask_in", ",", "dim", "=", "dim", ",", "name", "=", "name", ",", "use_batch_norm", "=", "use_batch_norm", ",", "train", "=", "train", ",", "weight_norm", "=", "weight_norm", ",", "reverse", "=", "reverse", ",", "residual_blocks", "=", "residual_blocks", ",", "bottleneck", "=", "bottleneck", ",", "use_width", "=", "use_width", ",", "use_height", "=", "use_height", ",", "mask_channel", "=", "mask_channel", ",", "skip", "=", "skip", ")", "else", ":", "return", "masked_conv_add_coupling", "(", "input_", "=", "input_", ",", "mask_in", "=", "mask_in", ",", "dim", "=", "dim", ",", "name", "=", "name", ",", "use_batch_norm", "=", "use_batch_norm", ",", "train", "=", "train", ",", "weight_norm", "=", "weight_norm", ",", "reverse", "=", "reverse", ",", "residual_blocks", "=", "residual_blocks", ",", "bottleneck", "=", "bottleneck", ",", "use_width", "=", "use_width", ",", "use_height", "=", "use_height", ",", "mask_channel", "=", "mask_channel", ",", "skip", "=", "skip", ")" ]
coupling with masked convolution .
train
false
20,088
def xor(buf, key): encrypted = [] for (i, cr) in enumerate(buf): k = key[(i % len(key))] encrypted += [(cr ^ k)] return bytes(encrypted)
[ "def", "xor", "(", "buf", ",", "key", ")", ":", "encrypted", "=", "[", "]", "for", "(", "i", ",", "cr", ")", "in", "enumerate", "(", "buf", ")", ":", "k", "=", "key", "[", "(", "i", "%", "len", "(", "key", ")", ")", "]", "encrypted", "+=", "[", "(", "cr", "^", "k", ")", "]", "return", "bytes", "(", "encrypted", ")" ]
xor two strings .
train
false
20,089
def test_poster_attribute(): tags = ['video'] attrs = {'video': ['poster']} test = '<video poster="javascript:alert(1)"></video>' expect = '<video></video>' eq_(expect, clean(test, tags=tags, attributes=attrs)) ok = '<video poster="/foo.png"></video>' eq_(ok, clean(ok, tags=tags, attributes=attrs))
[ "def", "test_poster_attribute", "(", ")", ":", "tags", "=", "[", "'video'", "]", "attrs", "=", "{", "'video'", ":", "[", "'poster'", "]", "}", "test", "=", "'<video poster=\"javascript:alert(1)\"></video>'", "expect", "=", "'<video></video>'", "eq_", "(", "expect", ",", "clean", "(", "test", ",", "tags", "=", "tags", ",", "attributes", "=", "attrs", ")", ")", "ok", "=", "'<video poster=\"/foo.png\"></video>'", "eq_", "(", "ok", ",", "clean", "(", "ok", ",", "tags", "=", "tags", ",", "attributes", "=", "attrs", ")", ")" ]
poster attributes should not allow javascript .
train
false
20,090
def do_trim(value): return soft_unicode(value).strip()
[ "def", "do_trim", "(", "value", ")", ":", "return", "soft_unicode", "(", "value", ")", ".", "strip", "(", ")" ]
strip leading and trailing whitespace .
train
false
20,093
def local_run(): return (('SERVER_SOFTWARE' not in os.environ) or os.environ['SERVER_SOFTWARE'].startswith('Development'))
[ "def", "local_run", "(", ")", ":", "return", "(", "(", "'SERVER_SOFTWARE'", "not", "in", "os", ".", "environ", ")", "or", "os", ".", "environ", "[", "'SERVER_SOFTWARE'", "]", ".", "startswith", "(", "'Development'", ")", ")" ]
whether we should hit gcs dev appserver stub .
train
false
20,094
def _format_generated_config_header(): now = time.strftime('%Y-%m-%d %H:%M:%S') return __SALT_GENERATED_CONFIG_HEADER.format(now)
[ "def", "_format_generated_config_header", "(", ")", ":", "now", "=", "time", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S'", ")", "return", "__SALT_GENERATED_CONFIG_HEADER", ".", "format", "(", "now", ")" ]
formats a header .
train
false
20,095
def go_data(ofile): return itertools.dropwhile((lambda x: (not r_datameta.match(x))), ofile)
[ "def", "go_data", "(", "ofile", ")", ":", "return", "itertools", ".", "dropwhile", "(", "(", "lambda", "x", ":", "(", "not", "r_datameta", ".", "match", "(", "x", ")", ")", ")", ",", "ofile", ")" ]
skip header .
train
false
20,096
def RemoveScriptMaps(vd_params, options): (parent, name) = vd_params.split_path() target_dir = GetObject(FindPath(options, vd_params.Server, parent)) installed_maps = list(target_dir.ScriptMaps) for _map in map(str, vd_params.ScriptMaps): if (_map in installed_maps): installed_maps.remove(_map) target_dir.ScriptMaps = installed_maps target_dir.SetInfo()
[ "def", "RemoveScriptMaps", "(", "vd_params", ",", "options", ")", ":", "(", "parent", ",", "name", ")", "=", "vd_params", ".", "split_path", "(", ")", "target_dir", "=", "GetObject", "(", "FindPath", "(", "options", ",", "vd_params", ".", "Server", ",", "parent", ")", ")", "installed_maps", "=", "list", "(", "target_dir", ".", "ScriptMaps", ")", "for", "_map", "in", "map", "(", "str", ",", "vd_params", ".", "ScriptMaps", ")", ":", "if", "(", "_map", "in", "installed_maps", ")", ":", "installed_maps", ".", "remove", "(", "_map", ")", "target_dir", ".", "ScriptMaps", "=", "installed_maps", "target_dir", ".", "SetInfo", "(", ")" ]
remove script maps from the already installed virtual directory .
train
false
20,097
def _validate_backend_language(backend, language): langs = _lang_lookup.get(backend.upper(), False) if (not langs): raise ValueError(('Unrecognized backend: ' + backend)) if (language.upper() not in langs): raise ValueError('Backend {0} and language {1} are incompatible'.format(backend, language))
[ "def", "_validate_backend_language", "(", "backend", ",", "language", ")", ":", "langs", "=", "_lang_lookup", ".", "get", "(", "backend", ".", "upper", "(", ")", ",", "False", ")", "if", "(", "not", "langs", ")", ":", "raise", "ValueError", "(", "(", "'Unrecognized backend: '", "+", "backend", ")", ")", "if", "(", "language", ".", "upper", "(", ")", "not", "in", "langs", ")", ":", "raise", "ValueError", "(", "'Backend {0} and language {1} are incompatible'", ".", "format", "(", "backend", ",", "language", ")", ")" ]
throws error if backend and language are incompatible .
train
false
20,098
def GetSubmoduleName(fullname): return fullname.rsplit('.', 1)[(-1)]
[ "def", "GetSubmoduleName", "(", "fullname", ")", ":", "return", "fullname", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "(", "-", "1", ")", "]" ]
determines the leaf submodule name of a full module name .
train
false