id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
14,485
def get_archive_formats():
    """Return a sorted list of (name, description) pairs for the
    registered archive formats."""
    available = []
    for name, registry in _ARCHIVE_FORMATS.items():
        # registry[2] holds the human-readable description of the format.
        available.append((name, registry[2]))
    available.sort()
    return available
[ "def", "get_archive_formats", "(", ")", ":", "formats", "=", "[", "(", "name", ",", "registry", "[", "2", "]", ")", "for", "(", "name", ",", "registry", ")", "in", "_ARCHIVE_FORMATS", ".", "items", "(", ")", "]", "formats", ".", "sort", "(", ")", "return", "formats" ]
returns a list of supported formats for archiving and unarchiving .
train
true
14,487
@treeio_login_required
@handle_response_format
def opportunity_delete(request, opportunity_id, response_format='html'):
    """Delete (or trash) an Opportunity after a confirmation form.

    Requires write permission on the opportunity or admin rights on
    'treeio.sales'. POSTing 'delete' removes the record — with 'trash'
    also set it is soft-deleted instead — and 'cancel' returns to the
    detail view. A GET renders the confirmation page.
    """
    opportunity = get_object_or_404(Opportunity, pk=opportunity_id)
    if ((not request.user.profile.has_permission(opportunity, mode='w')) and (not request.user.profile.is_admin('treeio.sales'))):
        return user_denied(request, "You don't have access to this Sale Status", response_format)
    if request.POST:
        if ('delete' in request.POST):
            if ('trash' in request.POST):
                # Soft delete: flag as trashed so the record can be restored.
                opportunity.trash = True
                opportunity.save()
            else:
                opportunity.delete()
            return HttpResponseRedirect(reverse('sales_opportunity_index'))
        elif ('cancel' in request.POST):
            return HttpResponseRedirect(reverse('sales_opportunity_view', args=[opportunity.id]))
    # Fall through: render the delete-confirmation page.
    all_opportunities = Object.filter_by_request(request, Opportunity.objects)
    return render_to_response('sales/opportunity_delete',
                              {'opportunity': opportunity,
                               'opportunities': all_opportunities},
                              context_instance=RequestContext(request),
                              response_format=response_format)
[ "@", "treeio_login_required", "@", "handle_response_format", "def", "opportunity_delete", "(", "request", ",", "opportunity_id", ",", "response_format", "=", "'html'", ")", ":", "opportunity", "=", "get_object_or_404", "(", "Opportunity", ",", "pk", "=", "opportunity_id", ")", "if", "(", "(", "not", "request", ".", "user", ".", "profile", ".", "has_permission", "(", "opportunity", ",", "mode", "=", "'w'", ")", ")", "and", "(", "not", "request", ".", "user", ".", "profile", ".", "is_admin", "(", "'treeio.sales'", ")", ")", ")", ":", "return", "user_denied", "(", "request", ",", "\"You don't have access to this Sale Status\"", ",", "response_format", ")", "if", "request", ".", "POST", ":", "if", "(", "'delete'", "in", "request", ".", "POST", ")", ":", "if", "(", "'trash'", "in", "request", ".", "POST", ")", ":", "opportunity", ".", "trash", "=", "True", "opportunity", ".", "save", "(", ")", "else", ":", "opportunity", ".", "delete", "(", ")", "return", "HttpResponseRedirect", "(", "reverse", "(", "'sales_opportunity_index'", ")", ")", "elif", "(", "'cancel'", "in", "request", ".", "POST", ")", ":", "return", "HttpResponseRedirect", "(", "reverse", "(", "'sales_opportunity_view'", ",", "args", "=", "[", "opportunity", ".", "id", "]", ")", ")", "all_opportunities", "=", "Object", ".", "filter_by_request", "(", "request", ",", "Opportunity", ".", "objects", ")", "return", "render_to_response", "(", "'sales/opportunity_delete'", ",", "{", "'opportunity'", ":", "opportunity", ",", "'opportunities'", ":", "all_opportunities", "}", ",", "context_instance", "=", "RequestContext", "(", "request", ")", ",", "response_format", "=", "response_format", ")" ]
opportunity delete .
train
false
14,488
def stirling(n, k, d=None, kind=2, signed=False):
    """Return Stirling numbers of the first or second kind.

    With ``d`` set, returns the reduced Stirling number of the second
    kind; with ``signed=True``, returns the signed Stirling number of
    the first kind. Otherwise ``kind`` selects between the two.

    Raises ValueError for negative ``n`` or an unrecognized ``kind``.
    """
    n = as_int(n)
    k = as_int(k)
    if n < 0:
        raise ValueError('n must be nonnegative')
    if k > n:
        return S.Zero
    if d:
        # Reduced Stirling number: both arguments shifted by d - 1.
        return _stirling2((n - d) + 1, (k - d) + 1)
    elif signed:
        # Signed first-kind numbers alternate sign with n - k.
        return ((-1) ** (n - k)) * _stirling1(n, k)
    if kind == 1:
        return _stirling1(n, k)
    elif kind == 2:
        return _stirling2(n, k)
    else:
        # BUG FIX: the message previously interpolated k instead of kind,
        # reporting the wrong offending value.
        raise ValueError('kind must be 1 or 2, not %s' % kind)
[ "def", "stirling", "(", "n", ",", "k", ",", "d", "=", "None", ",", "kind", "=", "2", ",", "signed", "=", "False", ")", ":", "n", "=", "as_int", "(", "n", ")", "k", "=", "as_int", "(", "k", ")", "if", "(", "n", "<", "0", ")", ":", "raise", "ValueError", "(", "'n must be nonnegative'", ")", "if", "(", "k", ">", "n", ")", ":", "return", "S", ".", "Zero", "if", "d", ":", "return", "_stirling2", "(", "(", "(", "n", "-", "d", ")", "+", "1", ")", ",", "(", "(", "k", "-", "d", ")", "+", "1", ")", ")", "elif", "signed", ":", "return", "(", "(", "(", "-", "1", ")", "**", "(", "n", "-", "k", ")", ")", "*", "_stirling1", "(", "n", ",", "k", ")", ")", "if", "(", "kind", "==", "1", ")", ":", "return", "_stirling1", "(", "n", ",", "k", ")", "elif", "(", "kind", "==", "2", ")", ":", "return", "_stirling2", "(", "n", ",", "k", ")", "else", ":", "raise", "ValueError", "(", "(", "'kind must be 1 or 2, not %s'", "%", "k", ")", ")" ]
return stirling numbers of the first or second kind .
train
false
14,489
def distance_metrics():
    """Return the mapping of valid metric names usable with
    pairwise_distances."""
    return PAIRWISE_DISTANCE_FUNCTIONS
[ "def", "distance_metrics", "(", ")", ":", "return", "PAIRWISE_DISTANCE_FUNCTIONS" ]
valid metrics for pairwise_distances .
train
false
14,490
def dup_laguerre(n, alpha, K):
    """Low-level implementation of Laguerre polynomials over domain K,
    built by the standard three-term recurrence."""
    polys = [[K.zero], [K.one]]
    for i in range(1, n + 1):
        # L_i = ((2i - 1 + alpha - x) / i) * L_{i-1} - ((i - 1 + alpha) / i) * L_{i-2}
        first = dup_mul(polys[-1],
                        [(-K.one) / i, (alpha / i) + (K((2 * i) - 1) / i)],
                        K)
        second = dup_mul_ground(polys[-2], (alpha / i) + (K(i - 1) / i), K)
        polys.append(dup_sub(first, second, K))
    return polys[-1]
[ "def", "dup_laguerre", "(", "n", ",", "alpha", ",", "K", ")", ":", "seq", "=", "[", "[", "K", ".", "zero", "]", ",", "[", "K", ".", "one", "]", "]", "for", "i", "in", "range", "(", "1", ",", "(", "n", "+", "1", ")", ")", ":", "a", "=", "dup_mul", "(", "seq", "[", "(", "-", "1", ")", "]", ",", "[", "(", "(", "-", "K", ".", "one", ")", "/", "i", ")", ",", "(", "(", "alpha", "/", "i", ")", "+", "(", "K", "(", "(", "(", "2", "*", "i", ")", "-", "1", ")", ")", "/", "i", ")", ")", "]", ",", "K", ")", "b", "=", "dup_mul_ground", "(", "seq", "[", "(", "-", "2", ")", "]", ",", "(", "(", "alpha", "/", "i", ")", "+", "(", "K", "(", "(", "i", "-", "1", ")", ")", "/", "i", ")", ")", ",", "K", ")", "seq", ".", "append", "(", "dup_sub", "(", "a", ",", "b", ",", "K", ")", ")", "return", "seq", "[", "(", "-", "1", ")", "]" ]
low-level implementation of laguerre polynomials .
train
false
14,491
def roots_linear(f):
    """Return a one-element list with the root of the linear polynomial f."""
    root = (-f.nth(0)) / f.nth(1)
    domain = f.get_domain()
    if not domain.is_Numerical:
        # Symbolic domain: tidy the expression before returning it.
        root = factor(root) if domain.is_Composite else simplify(root)
    return [root]
[ "def", "roots_linear", "(", "f", ")", ":", "r", "=", "(", "(", "-", "f", ".", "nth", "(", "0", ")", ")", "/", "f", ".", "nth", "(", "1", ")", ")", "dom", "=", "f", ".", "get_domain", "(", ")", "if", "(", "not", "dom", ".", "is_Numerical", ")", ":", "if", "dom", ".", "is_Composite", ":", "r", "=", "factor", "(", "r", ")", "else", ":", "r", "=", "simplify", "(", "r", ")", "return", "[", "r", "]" ]
returns a list of roots of a linear polynomial .
train
false
14,492
def test_category_delete_with_forum(forum):
    """Deleting a category must also remove the forums it contains."""
    forum.category.delete()

    # The in-memory objects survive the delete call itself.
    assert forum is not None
    assert forum.category is not None

    # Neither record should remain in the database afterwards.
    category = Category.query.filter_by(id=forum.category.id).first()
    forum = Forum.query.filter_by(id=forum.id).first()
    assert forum is None
    assert category is None
[ "def", "test_category_delete_with_forum", "(", "forum", ")", ":", "forum", ".", "category", ".", "delete", "(", ")", "assert", "(", "forum", "is", "not", "None", ")", "assert", "(", "forum", ".", "category", "is", "not", "None", ")", "category", "=", "Category", ".", "query", ".", "filter_by", "(", "id", "=", "forum", ".", "category", ".", "id", ")", ".", "first", "(", ")", "forum", "=", "Forum", ".", "query", ".", "filter_by", "(", "id", "=", "forum", ".", "id", ")", ".", "first", "(", ")", "assert", "(", "forum", "is", "None", ")", "assert", "(", "category", "is", "None", ")" ]
when deleting a category , its forums should be deleted as well .
train
false
14,493
def test_quantity_iterability():
    """Regression test for issue #878: a scalar Quantity obtained by
    iterating an array Quantity must itself not be iterable."""
    array_quantity = [15.0, 17.0] * u.m
    assert isiterable(array_quantity)

    first = six.next(iter(array_quantity))
    assert first == (15.0 * u.m)
    assert not isiterable(first)
    pytest.raises(TypeError, iter, first)
[ "def", "test_quantity_iterability", "(", ")", ":", "q1", "=", "(", "[", "15.0", ",", "17.0", "]", "*", "u", ".", "m", ")", "assert", "isiterable", "(", "q1", ")", "q2", "=", "six", ".", "next", "(", "iter", "(", "q1", ")", ")", "assert", "(", "q2", "==", "(", "15.0", "*", "u", ".", "m", ")", ")", "assert", "(", "not", "isiterable", "(", "q2", ")", ")", "pytest", ".", "raises", "(", "TypeError", ",", "iter", ",", "q2", ")" ]
regression test for issue #878 .
train
false
14,497
def test_iht_bad_ratio():
    """Check that fit() raises ValueError for every kind of invalid ratio.

    Improvement: the original repeated the same three lines four times;
    the cases are now data-driven, preserving construction/fit order.
    """
    # Negative float, out-of-range float, arbitrary string, and a list
    # are all invalid ratio values.
    for ratio in ((-1.0), 100.0, 'rnd', [0.5, 0.5]):
        iht = InstanceHardnessThreshold(ratio=ratio, random_state=RND_SEED)
        assert_raises(ValueError, iht.fit, X, Y)
[ "def", "test_iht_bad_ratio", "(", ")", ":", "ratio", "=", "(", "-", "1.0", ")", "iht", "=", "InstanceHardnessThreshold", "(", "ratio", "=", "ratio", ",", "random_state", "=", "RND_SEED", ")", "assert_raises", "(", "ValueError", ",", "iht", ".", "fit", ",", "X", ",", "Y", ")", "ratio", "=", "100.0", "iht", "=", "InstanceHardnessThreshold", "(", "ratio", "=", "ratio", ",", "random_state", "=", "RND_SEED", ")", "assert_raises", "(", "ValueError", ",", "iht", ".", "fit", ",", "X", ",", "Y", ")", "ratio", "=", "'rnd'", "iht", "=", "InstanceHardnessThreshold", "(", "ratio", "=", "ratio", ",", "random_state", "=", "RND_SEED", ")", "assert_raises", "(", "ValueError", ",", "iht", ".", "fit", ",", "X", ",", "Y", ")", "ratio", "=", "[", "0.5", ",", "0.5", "]", "iht", "=", "InstanceHardnessThreshold", "(", "ratio", "=", "ratio", ",", "random_state", "=", "RND_SEED", ")", "assert_raises", "(", "ValueError", ",", "iht", ".", "fit", ",", "X", ",", "Y", ")" ]
test either if an error is raised with a wrong decimal value for the ratio .
train
false
14,498
def makeFastConsumerSession():
    """Create a consumer session backed by a deliberately small DH prime
    so the test suite runs quickly."""
    small_prime_dh = DiffieHellman(100389557, 2)
    return DiffieHellmanSHA1ConsumerSession(small_prime_dh)
[ "def", "makeFastConsumerSession", "(", ")", ":", "dh", "=", "DiffieHellman", "(", "100389557", ",", "2", ")", "return", "DiffieHellmanSHA1ConsumerSession", "(", "dh", ")" ]
create custom dh object so tests run quickly .
train
false
14,499
def api_factory(cluster_id, **kwargs):
    """Factory for IBlockDeviceAPI used in tests; asserts that the
    expected keyword arguments were forwarded before returning the
    shared dummy API object."""
    expected = {'custom': u'arguments!'}
    if kwargs != expected:
        raise AssertionError("Didn't get correct arguments passed in")
    return DUMMY_API
[ "def", "api_factory", "(", "cluster_id", ",", "**", "kwargs", ")", ":", "if", "(", "kwargs", "!=", "{", "'custom'", ":", "u'arguments!'", "}", ")", ":", "raise", "AssertionError", "(", "\"Didn't get correct arguments passed in\"", ")", "return", "DUMMY_API" ]
factory for iblockdeviceapi .
train
false
14,500
def get_sdist_tarball(src_root):
    """Return the file name of the installer built by the sdist command."""
    return 'scipy-%s.zip' % get_scipy_version(src_root)
[ "def", "get_sdist_tarball", "(", "src_root", ")", ":", "name", "=", "(", "'scipy-%s.zip'", "%", "get_scipy_version", "(", "src_root", ")", ")", "return", "name" ]
return the name of the installer built by sdist command .
train
false
14,501
def twitter_inbox():
    """RESTful CRUD controller for the Twitter inbox.

    Only inbound tweets are listed; the view is read-only (editing and
    inserting disabled) and requires an authenticated user.
    """
    if (not auth.s3_logged_in()):
        session.error = T('Requires Login!')
        redirect(URL(c='default', f='user', args='login'))
    tablename = 'msg_twitter'
    table = s3db.msg_twitter
    # Restrict the list to inbound tweets; the flag is then constant,
    # so hide it from the rendered table.
    s3.filter = (table.inbound == True)
    table.inbound.readable = False
    s3.crud_strings[tablename] = Storage(title_display=T('Tweet Details'),
                                         title_list=T('Twitter InBox'),
                                         label_list_button=T('View Twitter InBox'),
                                         label_delete_button=T('Delete Tweet'),
                                         msg_record_deleted=T('Tweet deleted'),
                                         msg_list_empty=T('No Tweets currently in InBox'))
    s3db.configure(tablename,
                   editable=False,
                   insertable=False,
                   list_fields=['id', 'date', 'from_address', 'body'])
    return s3_rest_controller(module, 'twitter')
[ "def", "twitter_inbox", "(", ")", ":", "if", "(", "not", "auth", ".", "s3_logged_in", "(", ")", ")", ":", "session", ".", "error", "=", "T", "(", "'Requires Login!'", ")", "redirect", "(", "URL", "(", "c", "=", "'default'", ",", "f", "=", "'user'", ",", "args", "=", "'login'", ")", ")", "tablename", "=", "'msg_twitter'", "table", "=", "s3db", ".", "msg_twitter", "s3", ".", "filter", "=", "(", "table", ".", "inbound", "==", "True", ")", "table", ".", "inbound", ".", "readable", "=", "False", "s3", ".", "crud_strings", "[", "tablename", "]", "=", "Storage", "(", "title_display", "=", "T", "(", "'Tweet Details'", ")", ",", "title_list", "=", "T", "(", "'Twitter InBox'", ")", ",", "label_list_button", "=", "T", "(", "'View Twitter InBox'", ")", ",", "label_delete_button", "=", "T", "(", "'Delete Tweet'", ")", ",", "msg_record_deleted", "=", "T", "(", "'Tweet deleted'", ")", ",", "msg_list_empty", "=", "T", "(", "'No Tweets currently in InBox'", ")", ")", "s3db", ".", "configure", "(", "tablename", ",", "editable", "=", "False", ",", "insertable", "=", "False", ",", "list_fields", "=", "[", "'id'", ",", "'date'", ",", "'from_address'", ",", "'body'", "]", ")", "return", "s3_rest_controller", "(", "module", ",", "'twitter'", ")" ]
restful crud controller for the twitter inbox - all inbound tweets are visible here .
train
false
14,502
@EmbedCoverArtPlugin.listen('album_imported')
def album_imported(lib, album):
    """Automatically embed cover art into freshly imported albums when
    the embedart auto option is enabled."""
    if album.artpath and config['embedart']['auto']:
        maxwidth = config['embedart']['maxwidth'].get(int)
        embed_album(album, maxwidth, True)
[ "@", "EmbedCoverArtPlugin", ".", "listen", "(", "'album_imported'", ")", "def", "album_imported", "(", "lib", ",", "album", ")", ":", "if", "(", "album", ".", "artpath", "and", "config", "[", "'embedart'", "]", "[", "'auto'", "]", ")", ":", "embed_album", "(", "album", ",", "config", "[", "'embedart'", "]", "[", "'maxwidth'", "]", ".", "get", "(", "int", ")", ",", "True", ")" ]
automatically embed art into imported albums .
train
false
14,504
def this_week_day(base_date, weekday):
    """Return the date of the given *weekday* on or after *base_date*.

    If that weekday has already passed this week, delegates to
    next_week_day() and returns next week's occurrence instead.
    """
    today_index = base_date.weekday()
    if today_index > weekday:
        # Target day already passed; roll over into next week.
        return next_week_day(base_date, weekday)
    # NOTE(review): the anchor is day_of_week + 1 days back, so the scan
    # starts from base_date itself — confirm intended week boundary.
    week_anchor = base_date - timedelta(days=today_index + 1)
    candidate = week_anchor + timedelta(days=1)
    while candidate.weekday() != weekday:
        candidate = candidate + timedelta(days=1)
    return candidate
[ "def", "this_week_day", "(", "base_date", ",", "weekday", ")", ":", "day_of_week", "=", "base_date", ".", "weekday", "(", ")", "if", "(", "day_of_week", ">", "weekday", ")", ":", "return", "next_week_day", "(", "base_date", ",", "weekday", ")", "start_of_this_week", "=", "(", "base_date", "-", "timedelta", "(", "days", "=", "(", "day_of_week", "+", "1", ")", ")", ")", "day", "=", "(", "start_of_this_week", "+", "timedelta", "(", "days", "=", "1", ")", ")", "while", "(", "day", ".", "weekday", "(", ")", "!=", "weekday", ")", ":", "day", "=", "(", "day", "+", "timedelta", "(", "days", "=", "1", ")", ")", "return", "day" ]
return the date of the given weekday within the current week .
train
true
14,505
def recognize_derivative(a, d, DE, z=None):
    """Test whether a/d can be the derivative of a rational function.

    Computes the squarefree factorization of the denominator d and, for
    each squarefree factor s, a Laurent series of the remainder r at s;
    returns False as soon as gcd(d, H[-1]) is not d itself, else True.

    NOTE(review): higher-level semantics inferred from structure —
    confirm against the Risch-algorithm docs of the enclosing module.
    """
    flag = True
    (a, d) = a.cancel(d, include=True)
    (q, r) = a.div(d)
    (Np, Sp) = splitfactor_sqf(d, DE, coefficientD=True, z=z)
    j = 1
    for (s, i) in Sp:
        # H[-1] is the last coefficient of the Laurent expansion of r at s.
        (delta_a, delta_d, H) = laurent_series(r, d, s, j, DE)
        g = gcd(d, H[(-1)]).as_poly()
        if (g is not d):
            flag = False
            break
        j = (j + 1)
    return flag
[ "def", "recognize_derivative", "(", "a", ",", "d", ",", "DE", ",", "z", "=", "None", ")", ":", "flag", "=", "True", "(", "a", ",", "d", ")", "=", "a", ".", "cancel", "(", "d", ",", "include", "=", "True", ")", "(", "q", ",", "r", ")", "=", "a", ".", "div", "(", "d", ")", "(", "Np", ",", "Sp", ")", "=", "splitfactor_sqf", "(", "d", ",", "DE", ",", "coefficientD", "=", "True", ",", "z", "=", "z", ")", "j", "=", "1", "for", "(", "s", ",", "i", ")", "in", "Sp", ":", "(", "delta_a", ",", "delta_d", ",", "H", ")", "=", "laurent_series", "(", "r", ",", "d", ",", "s", ",", "j", ",", "DE", ")", "g", "=", "gcd", "(", "d", ",", "H", "[", "(", "-", "1", ")", "]", ")", ".", "as_poly", "(", ")", "if", "(", "g", "is", "not", "d", ")", ":", "flag", "=", "False", "break", "j", "=", "(", "j", "+", "1", ")", "return", "flag" ]
compute the squarefree factorization of the denominator of f and for each di the polynomial h in k[x] .
train
false
14,506
def _createStructFormat():
    """Build a dict {endianness: {size_in_bytes: struct format}} used by
    str2long() to convert raw data to a positive integer."""
    table = {BIG_ENDIAN: {}, LITTLE_ENDIAN: {}}
    for code in 'BHILQ':
        try:
            nbytes = calcsize(code)
        except struct_error:
            # Format code unsupported on this platform: skip it.
            continue
        table[BIG_ENDIAN][nbytes] = '>%s' % code
        table[LITTLE_ENDIAN][nbytes] = '<%s' % code
    return table
[ "def", "_createStructFormat", "(", ")", ":", "format", "=", "{", "BIG_ENDIAN", ":", "{", "}", ",", "LITTLE_ENDIAN", ":", "{", "}", "}", "for", "struct_format", "in", "'BHILQ'", ":", "try", ":", "size", "=", "calcsize", "(", "struct_format", ")", "format", "[", "BIG_ENDIAN", "]", "[", "size", "]", "=", "(", "'>%s'", "%", "struct_format", ")", "format", "[", "LITTLE_ENDIAN", "]", "[", "size", "]", "=", "(", "'<%s'", "%", "struct_format", ")", "except", "struct_error", ":", "pass", "return", "format" ]
create a dictionary mapping endianness and size => struct format , used by str2long() to convert raw data to a positive integer .
train
false
14,507
def register_mode(name, mode):
    """Register *mode* under *name* so it can be referred to by name later.

    Raises ValueError if the name is already taken.
    """
    if name in predefined_modes:
        raise ValueError('Mode name already taken: %s' % name)
    predefined_modes[name] = mode
[ "def", "register_mode", "(", "name", ",", "mode", ")", ":", "if", "(", "name", "in", "predefined_modes", ")", ":", "raise", "ValueError", "(", "(", "'Mode name already taken: %s'", "%", "name", ")", ")", "predefined_modes", "[", "name", "]", "=", "mode" ]
add a mode which can be referred to by name in function .
train
false
14,508
def update_collection_status_in_search(collection_id):
    """Update the status field of a collection in the search index.

    Private collections are removed from the index entirely; all others
    have their search document patched with the current rights info.
    """
    rights = rights_manager.get_collection_rights(collection_id)
    if rights.status == rights_manager.ACTIVITY_STATUS_PRIVATE:
        delete_documents_from_search_index([collection_id])
        return
    patch_collection_search_document(
        rights.id, _collection_rights_to_search_dict(rights))
[ "def", "update_collection_status_in_search", "(", "collection_id", ")", ":", "rights", "=", "rights_manager", ".", "get_collection_rights", "(", "collection_id", ")", "if", "(", "rights", ".", "status", "==", "rights_manager", ".", "ACTIVITY_STATUS_PRIVATE", ")", ":", "delete_documents_from_search_index", "(", "[", "collection_id", "]", ")", "else", ":", "patch_collection_search_document", "(", "rights", ".", "id", ",", "_collection_rights_to_search_dict", "(", "rights", ")", ")" ]
updates the status field of a collection in the search index .
train
false
14,509
def uniform_scaling(shape=None, factor=1.0, dtype=tf.float32, seed=None):
    """Uniform-scaling initializer.

    With a concrete *shape*, samples uniformly from
    [-sqrt(3 / fan_in) * factor, +sqrt(3 / fan_in) * factor]; otherwise
    defers to TensorFlow's uniform_unit_scaling_initializer.
    """
    if not shape:
        return tf.uniform_unit_scaling_initializer(seed=seed, dtype=dtype)
    # fan_in is the product of all but the last dimension.
    input_size = 1.0
    for dim in shape[:-1]:
        input_size *= float(dim)
    max_val = math.sqrt(3 / input_size) * factor
    return tf.random_ops.random_uniform(shape, -max_val, max_val,
                                        dtype, seed=seed)
[ "def", "uniform_scaling", "(", "shape", "=", "None", ",", "factor", "=", "1.0", ",", "dtype", "=", "tf", ".", "float32", ",", "seed", "=", "None", ")", ":", "if", "shape", ":", "input_size", "=", "1.0", "for", "dim", "in", "shape", "[", ":", "(", "-", "1", ")", "]", ":", "input_size", "*=", "float", "(", "dim", ")", "max_val", "=", "(", "math", ".", "sqrt", "(", "(", "3", "/", "input_size", ")", ")", "*", "factor", ")", "return", "tf", ".", "random_ops", ".", "random_uniform", "(", "shape", ",", "(", "-", "max_val", ")", ",", "max_val", ",", "dtype", ",", "seed", "=", "seed", ")", "else", ":", "return", "tf", ".", "uniform_unit_scaling_initializer", "(", "seed", "=", "seed", ",", "dtype", "=", "dtype", ")" ]
uniform scaling .
train
false
14,510
@requires_badges_enabled
def course_group_check(user, course_key):
    """Award a badge when *user* has completed every course in a configured group.

    A group slug is awarded only if the group contains course_key and the
    user holds a passing certificate for each course key in the group;
    badges already earned by the user are not re-awarded.
    """
    from certificates.models import CertificateStatuses
    config = CourseEventBadgesConfiguration.current().course_group_settings
    awards = []
    for (slug, keys) in config.items():
        if (course_key in keys):
            certs = user.generatedcertificate_set.filter(status__in=CertificateStatuses.PASSED_STATUSES, course_id__in=keys)
            # One passing certificate per course in the group is required.
            if (len(certs) == len(keys)):
                awards.append(slug)
    for slug in awards:
        # create=False: a missing badge class yields None and is skipped.
        badge_class = BadgeClass.get_badge_class(slug=slug, issuing_component='openedx__course', create=False)
        if (badge_class and (not badge_class.get_for_user(user))):
            badge_class.award(user)
[ "@", "requires_badges_enabled", "def", "course_group_check", "(", "user", ",", "course_key", ")", ":", "from", "certificates", ".", "models", "import", "CertificateStatuses", "config", "=", "CourseEventBadgesConfiguration", ".", "current", "(", ")", ".", "course_group_settings", "awards", "=", "[", "]", "for", "(", "slug", ",", "keys", ")", "in", "config", ".", "items", "(", ")", ":", "if", "(", "course_key", "in", "keys", ")", ":", "certs", "=", "user", ".", "generatedcertificate_set", ".", "filter", "(", "status__in", "=", "CertificateStatuses", ".", "PASSED_STATUSES", ",", "course_id__in", "=", "keys", ")", "if", "(", "len", "(", "certs", ")", "==", "len", "(", "keys", ")", ")", ":", "awards", ".", "append", "(", "slug", ")", "for", "slug", "in", "awards", ":", "badge_class", "=", "BadgeClass", ".", "get_badge_class", "(", "slug", "=", "slug", ",", "issuing_component", "=", "'openedx__course'", ",", "create", "=", "False", ")", "if", "(", "badge_class", "and", "(", "not", "badge_class", ".", "get_for_user", "(", "user", ")", ")", ")", ":", "badge_class", ".", "award", "(", "user", ")" ]
awards a badge if a user has completed every course in a defined set .
train
false
14,512
def get_top_level_actions():
    """Return a generator of all top-level registered action instances.

    NOTE: _populate_defaults() runs eagerly at call time; only the
    iteration over the registered ids is lazy (returning a generator
    expression rather than being a generator function is deliberate).
    """
    _populate_defaults()
    return (_all_actions[action_id] for action_id in _top_level_ids)
[ "def", "get_top_level_actions", "(", ")", ":", "_populate_defaults", "(", ")", "return", "(", "_all_actions", "[", "action_id", "]", "for", "action_id", "in", "_top_level_ids", ")" ]
return a generator of all top-level registered action instances .
train
false
14,514
def format_unit(value, measurement_unit, length='long', format=None, locale=LC_NUMERIC):
    """Format *value* together with its measurement unit for *locale*.

    Raises UnknownUnitError when the unit is not known for the locale.
    Falls back to "<value> <unit name>" when no CLDR plural pattern
    exists for the value's plural form.
    """
    locale = Locale.parse(locale)
    q_unit = _find_unit_pattern(measurement_unit, locale=locale)
    if not q_unit:
        raise UnknownUnitError(unit=measurement_unit, locale=locale)
    unit_patterns = locale._data['unit_patterns'][q_unit].get(length, {})

    if isinstance(value, string_types):
        # Pre-formatted value: use as-is and assume the singular form.
        formatted_value = value
        plural_form = 'one'
    else:
        formatted_value = format_decimal(value, format, locale)
        plural_form = locale.plural_form(value)

    if plural_form in unit_patterns:
        return unit_patterns[plural_form].format(formatted_value)

    # No plural pattern: compose value and unit name manually.
    unit_name = get_unit_name(measurement_unit, length=length, locale=locale)
    return '%s %s' % (formatted_value, unit_name or measurement_unit)
[ "def", "format_unit", "(", "value", ",", "measurement_unit", ",", "length", "=", "'long'", ",", "format", "=", "None", ",", "locale", "=", "LC_NUMERIC", ")", ":", "locale", "=", "Locale", ".", "parse", "(", "locale", ")", "q_unit", "=", "_find_unit_pattern", "(", "measurement_unit", ",", "locale", "=", "locale", ")", "if", "(", "not", "q_unit", ")", ":", "raise", "UnknownUnitError", "(", "unit", "=", "measurement_unit", ",", "locale", "=", "locale", ")", "unit_patterns", "=", "locale", ".", "_data", "[", "'unit_patterns'", "]", "[", "q_unit", "]", ".", "get", "(", "length", ",", "{", "}", ")", "if", "isinstance", "(", "value", ",", "string_types", ")", ":", "formatted_value", "=", "value", "plural_form", "=", "'one'", "else", ":", "formatted_value", "=", "format_decimal", "(", "value", ",", "format", ",", "locale", ")", "plural_form", "=", "locale", ".", "plural_form", "(", "value", ")", "if", "(", "plural_form", "in", "unit_patterns", ")", ":", "return", "unit_patterns", "[", "plural_form", "]", ".", "format", "(", "formatted_value", ")", "return", "(", "'%s %s'", "%", "(", "formatted_value", ",", "(", "get_unit_name", "(", "measurement_unit", ",", "length", "=", "length", ",", "locale", "=", "locale", ")", "or", "measurement_unit", ")", ")", ")" ]
format a value of a given unit .
train
false
14,516
def get_random_string(length=12, allowed_chars=u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
    """Return a securely generated random string.

    When os.urandom is unavailable (using_sysrandom is False), the PRNG
    is reseeded from its own state, the current time and SECRET_KEY to
    make its output harder to predict.
    """
    if not using_sysrandom:
        seed_material = u'%s%s%s' % (random.getstate(), time.time(),
                                     settings.SECRET_KEY)
        random.seed(hashlib.sha256(seed_material.encode(u'utf-8')).digest())
    return u''.join([random.choice(allowed_chars) for i in range(length)])
[ "def", "get_random_string", "(", "length", "=", "12", ",", "allowed_chars", "=", "u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'", ")", ":", "if", "(", "not", "using_sysrandom", ")", ":", "random", ".", "seed", "(", "hashlib", ".", "sha256", "(", "(", "u'%s%s%s'", "%", "(", "random", ".", "getstate", "(", ")", ",", "time", ".", "time", "(", ")", ",", "settings", ".", "SECRET_KEY", ")", ")", ".", "encode", "(", "u'utf-8'", ")", ")", ".", "digest", "(", ")", ")", "return", "u''", ".", "join", "(", "[", "random", ".", "choice", "(", "allowed_chars", ")", "for", "i", "in", "range", "(", "length", ")", "]", ")" ]
returns a securely generated random string .
train
true
14,517
def _name_estimators(estimators): names = [type(estimator).__name__.lower() for estimator in estimators] namecount = defaultdict(int) for (est, name) in zip(estimators, names): namecount[name] += 1 for (k, v) in list(six.iteritems(namecount)): if (v == 1): del namecount[k] for i in reversed(range(len(estimators))): name = names[i] if (name in namecount): names[i] += ('-%d' % namecount[name]) namecount[name] -= 1 return list(zip(names, estimators))
[ "def", "_name_estimators", "(", "estimators", ")", ":", "names", "=", "[", "type", "(", "estimator", ")", ".", "__name__", ".", "lower", "(", ")", "for", "estimator", "in", "estimators", "]", "namecount", "=", "defaultdict", "(", "int", ")", "for", "(", "est", ",", "name", ")", "in", "zip", "(", "estimators", ",", "names", ")", ":", "namecount", "[", "name", "]", "+=", "1", "for", "(", "k", ",", "v", ")", "in", "list", "(", "six", ".", "iteritems", "(", "namecount", ")", ")", ":", "if", "(", "v", "==", "1", ")", ":", "del", "namecount", "[", "k", "]", "for", "i", "in", "reversed", "(", "range", "(", "len", "(", "estimators", ")", ")", ")", ":", "name", "=", "names", "[", "i", "]", "if", "(", "name", "in", "namecount", ")", ":", "names", "[", "i", "]", "+=", "(", "'-%d'", "%", "namecount", "[", "name", "]", ")", "namecount", "[", "name", "]", "-=", "1", "return", "list", "(", "zip", "(", "names", ",", "estimators", ")", ")" ]
generate names for estimators .
train
false
14,518
def print_files_information_pep8():
    """Report PEP8 status of project files relative to the whitelist.

    Prints files with infractions that are missing from the whitelist
    (must be fixed or whitelisted), then clean files still on the
    whitelist (can be removed from it).
    """
    must_fix = []
    can_unlist = []
    checker = StyleGuide(quiet=True)
    for path in list_files('.py'):
        infractions = checker.input_file(path)
        rel_path = os.path.relpath(path, pylearn2.__path__[0])
        whitelisted = rel_path in whitelist_pep8
        if infractions > 0:
            if not whitelisted:
                must_fix.append(path)
        elif whitelisted:
            can_unlist.append(path)
    print('Files that must be corrected or added to whitelist:')
    for offending in must_fix:
        print(offending)
    print('Files that can be removed from whitelist:')
    for clean in can_unlist:
        print(clean)
[ "def", "print_files_information_pep8", "(", ")", ":", "infracting_files", "=", "[", "]", "non_infracting_files", "=", "[", "]", "pep8_checker", "=", "StyleGuide", "(", "quiet", "=", "True", ")", "for", "path", "in", "list_files", "(", "'.py'", ")", ":", "number_of_infractions", "=", "pep8_checker", ".", "input_file", "(", "path", ")", "rel_path", "=", "os", ".", "path", ".", "relpath", "(", "path", ",", "pylearn2", ".", "__path__", "[", "0", "]", ")", "if", "(", "number_of_infractions", ">", "0", ")", ":", "if", "(", "rel_path", "not", "in", "whitelist_pep8", ")", ":", "infracting_files", ".", "append", "(", "path", ")", "elif", "(", "rel_path", "in", "whitelist_pep8", ")", ":", "non_infracting_files", ".", "append", "(", "path", ")", "print", "(", "'Files that must be corrected or added to whitelist:'", ")", "for", "file", "in", "infracting_files", ":", "print", "(", "file", ")", "print", "(", "'Files that can be removed from whitelist:'", ")", "for", "file", "in", "non_infracting_files", ":", "print", "(", "file", ")" ]
print the list of files which can be removed from the whitelist and the list of files which do not respect pep8 formatting that arent in the whitelist .
train
false
14,519
def measure_partial_oneshot(qubit, bits, format='sympy'):
    """Perform a single-shot partial measurement on the given qubits.

    Draws one random number and returns the first outcome whose
    cumulative probability reaches it. Only the 'sympy' matrix format
    is supported.

    NOTE(review): if no outcome's cumulative probability reaches the
    random draw (should not happen for a normalized state), this
    implicitly returns None — same as the original.
    """
    import random
    matrix = qubit_to_matrix(qubit, format)
    if format != 'sympy':
        raise NotImplementedError(
            "This function can't handle non-sympy matrix formats yet")
    matrix = matrix.normalized()
    possible_outcomes = _get_possible_outcomes(matrix, bits)
    threshold = random.random()
    cumulative = 0
    for outcome in possible_outcomes:
        # (outcome.H * outcome)[0] is the outcome's probability weight.
        cumulative += (outcome.H * outcome)[0]
        if cumulative >= threshold:
            return matrix_to_qubit(outcome.normalized())
[ "def", "measure_partial_oneshot", "(", "qubit", ",", "bits", ",", "format", "=", "'sympy'", ")", ":", "import", "random", "m", "=", "qubit_to_matrix", "(", "qubit", ",", "format", ")", "if", "(", "format", "==", "'sympy'", ")", ":", "m", "=", "m", ".", "normalized", "(", ")", "possible_outcomes", "=", "_get_possible_outcomes", "(", "m", ",", "bits", ")", "random_number", "=", "random", ".", "random", "(", ")", "total_prob", "=", "0", "for", "outcome", "in", "possible_outcomes", ":", "total_prob", "+=", "(", "outcome", ".", "H", "*", "outcome", ")", "[", "0", "]", "if", "(", "total_prob", ">=", "random_number", ")", ":", "return", "matrix_to_qubit", "(", "outcome", ".", "normalized", "(", ")", ")", "else", ":", "raise", "NotImplementedError", "(", "\"This function can't handle non-sympy matrix formats yet\"", ")" ]
perform a partial oneshot measurement on the specified qubits .
train
false
14,520
def test_cp20603():
    """Regression test: walking and joining paths must not raise ValueError.

    BUG FIX: os.walk yields (dirpath, dirnames, filenames); the original
    unpacked them as (root, files, dirs), so it joined directory names
    while believing they were file names.
    """
    import os
    for root, dirs, files in os.walk(''):
        for f in files:
            temp = os.path.join(root, f)
[ "def", "test_cp20603", "(", ")", ":", "import", "os", "for", "(", "root", ",", "files", ",", "dirs", ")", "in", "os", ".", "walk", "(", "''", ")", ":", "for", "f", "in", "files", ":", "temp", "=", "os", ".", "path", ".", "join", "(", "root", ",", "f", ")" ]
just ensure this does not throw a valueerror .
train
false
14,521
@app.route('/account/<subscription_id>/resourcegroups/<resource_group_name>/virtualnetworks/<network_name>')
@auth.require_login
def virtual_network_view(subscription_id, resource_group_name, network_name):
    """Render the details page for a single virtual network."""
    creds = _get_credentials()
    details = models.get_virtual_network_details(
        subscription_id, creds, resource_group_name, network_name)
    return render_template(
        'virtual_network.html',
        title=network_name,
        year=datetime.now().year,
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        model=details)
[ "@", "app", ".", "route", "(", "'/account/<subscription_id>/resourcegroups/<resource_group_name>/virtualnetworks/<network_name>'", ")", "@", "auth", ".", "require_login", "def", "virtual_network_view", "(", "subscription_id", ",", "resource_group_name", ",", "network_name", ")", ":", "creds", "=", "_get_credentials", "(", ")", "model", "=", "models", ".", "get_virtual_network_details", "(", "subscription_id", ",", "creds", ",", "resource_group_name", ",", "network_name", ")", "return", "render_template", "(", "'virtual_network.html'", ",", "title", "=", "network_name", ",", "year", "=", "datetime", ".", "now", "(", ")", ".", "year", ",", "subscription_id", "=", "subscription_id", ",", "resource_group_name", "=", "resource_group_name", ",", "model", "=", "model", ")" ]
renders the vm details .
train
false
14,526
def iter_transport_opts(opts): transports = set() for (transport, opts_overrides) in six.iteritems(opts.get('transport_opts', {})): t_opts = dict(opts) t_opts.update(opts_overrides) t_opts['transport'] = transport transports.add(transport) (yield (transport, t_opts)) if (opts['transport'] not in transports): (yield (opts['transport'], opts))
[ "def", "iter_transport_opts", "(", "opts", ")", ":", "transports", "=", "set", "(", ")", "for", "(", "transport", ",", "opts_overrides", ")", "in", "six", ".", "iteritems", "(", "opts", ".", "get", "(", "'transport_opts'", ",", "{", "}", ")", ")", ":", "t_opts", "=", "dict", "(", "opts", ")", "t_opts", ".", "update", "(", "opts_overrides", ")", "t_opts", "[", "'transport'", "]", "=", "transport", "transports", ".", "add", "(", "transport", ")", "(", "yield", "(", "transport", ",", "t_opts", ")", ")", "if", "(", "opts", "[", "'transport'", "]", "not", "in", "transports", ")", ":", "(", "yield", "(", "opts", "[", "'transport'", "]", ",", "opts", ")", ")" ]
yield transport .
train
true
14,528
def aicc_sigma(sigma2, nobs, df_modelwc, islog=False): if (not islog): sigma2 = np.log(sigma2) return (sigma2 + (aicc(0, nobs, df_modelwc) / nobs))
[ "def", "aicc_sigma", "(", "sigma2", ",", "nobs", ",", "df_modelwc", ",", "islog", "=", "False", ")", ":", "if", "(", "not", "islog", ")", ":", "sigma2", "=", "np", ".", "log", "(", "sigma2", ")", "return", "(", "sigma2", "+", "(", "aicc", "(", "0", ",", "nobs", ",", "df_modelwc", ")", "/", "nobs", ")", ")" ]
akaike information criterion with small sample correction parameters sigma2 : float estimate of the residual variance or determinant of sigma_hat in the multivariate case .
train
false
14,529
def regular_seeds(ar_shape, n_points, dtype=int): grid = regular_grid(ar_shape, n_points) seed_img = np.zeros(ar_shape, dtype=dtype) seed_img[grid] = (1 + np.reshape(np.arange(seed_img[grid].size), seed_img[grid].shape)) return seed_img
[ "def", "regular_seeds", "(", "ar_shape", ",", "n_points", ",", "dtype", "=", "int", ")", ":", "grid", "=", "regular_grid", "(", "ar_shape", ",", "n_points", ")", "seed_img", "=", "np", ".", "zeros", "(", "ar_shape", ",", "dtype", "=", "dtype", ")", "seed_img", "[", "grid", "]", "=", "(", "1", "+", "np", ".", "reshape", "(", "np", ".", "arange", "(", "seed_img", "[", "grid", "]", ".", "size", ")", ",", "seed_img", "[", "grid", "]", ".", "shape", ")", ")", "return", "seed_img" ]
return an image with ~n_points regularly-spaced nonzero pixels .
train
false
14,530
def request_user_has_rule_action_permission(request, action_ref): if (not cfg.CONF.rbac.enable): return True user_db = get_user_db_from_request(request=request) action_db = action_utils.get_action_by_ref(ref=action_ref) action_resolver = resolvers.get_resolver_for_resource_type(ResourceType.ACTION) has_action_permission = action_resolver.user_has_resource_db_permission(user_db=user_db, resource_db=action_db, permission_type=PermissionType.ACTION_EXECUTE) if has_action_permission: return True return False
[ "def", "request_user_has_rule_action_permission", "(", "request", ",", "action_ref", ")", ":", "if", "(", "not", "cfg", ".", "CONF", ".", "rbac", ".", "enable", ")", ":", "return", "True", "user_db", "=", "get_user_db_from_request", "(", "request", "=", "request", ")", "action_db", "=", "action_utils", ".", "get_action_by_ref", "(", "ref", "=", "action_ref", ")", "action_resolver", "=", "resolvers", ".", "get_resolver_for_resource_type", "(", "ResourceType", ".", "ACTION", ")", "has_action_permission", "=", "action_resolver", ".", "user_has_resource_db_permission", "(", "user_db", "=", "user_db", ",", "resource_db", "=", "action_db", ",", "permission_type", "=", "PermissionType", ".", "ACTION_EXECUTE", ")", "if", "has_action_permission", ":", "return", "True", "return", "False" ]
check that the currently logged-in has necessary permissions on the action used / referenced inside the rule .
train
false
14,531
def utcnow_ts(): return calendar.timegm(utcnow().timetuple())
[ "def", "utcnow_ts", "(", ")", ":", "return", "calendar", ".", "timegm", "(", "utcnow", "(", ")", ".", "timetuple", "(", ")", ")" ]
timestamp version of our utcnow function .
train
false
14,532
def list_migrations(apps, database=DEFAULT_DB_ALIAS, **options): from south.models import MigrationHistory applied_migrations = MigrationHistory.objects.filter(app_name__in=[app.app_label() for app in apps]) if (database != DEFAULT_DB_ALIAS): applied_migrations = applied_migrations.using(database) applied_migrations_lookup = dict(((('%s.%s' % (mi.app_name, mi.migration)), mi) for mi in applied_migrations)) print() for app in apps: print((' ' + app.app_label())) for migration in app: full_name = ((migration.app_label() + '.') + migration.name()) if (full_name in applied_migrations_lookup): applied_migration = applied_migrations_lookup[full_name] print(format_migration_list_item(migration.name(), applied=applied_migration.applied, **options)) else: print(format_migration_list_item(migration.name(), applied=False, **options)) print()
[ "def", "list_migrations", "(", "apps", ",", "database", "=", "DEFAULT_DB_ALIAS", ",", "**", "options", ")", ":", "from", "south", ".", "models", "import", "MigrationHistory", "applied_migrations", "=", "MigrationHistory", ".", "objects", ".", "filter", "(", "app_name__in", "=", "[", "app", ".", "app_label", "(", ")", "for", "app", "in", "apps", "]", ")", "if", "(", "database", "!=", "DEFAULT_DB_ALIAS", ")", ":", "applied_migrations", "=", "applied_migrations", ".", "using", "(", "database", ")", "applied_migrations_lookup", "=", "dict", "(", "(", "(", "(", "'%s.%s'", "%", "(", "mi", ".", "app_name", ",", "mi", ".", "migration", ")", ")", ",", "mi", ")", "for", "mi", "in", "applied_migrations", ")", ")", "print", "(", ")", "for", "app", "in", "apps", ":", "print", "(", "(", "' '", "+", "app", ".", "app_label", "(", ")", ")", ")", "for", "migration", "in", "app", ":", "full_name", "=", "(", "(", "migration", ".", "app_label", "(", ")", "+", "'.'", ")", "+", "migration", ".", "name", "(", ")", ")", "if", "(", "full_name", "in", "applied_migrations_lookup", ")", ":", "applied_migration", "=", "applied_migrations_lookup", "[", "full_name", "]", "print", "(", "format_migration_list_item", "(", "migration", ".", "name", "(", ")", ",", "applied", "=", "applied_migration", ".", "applied", ",", "**", "options", ")", ")", "else", ":", "print", "(", "format_migration_list_item", "(", "migration", ".", "name", "(", ")", ",", "applied", "=", "False", ",", "**", "options", ")", ")", "print", "(", ")" ]
prints a list of all available migrations .
train
false
14,536
@treeio_login_required @handle_response_format def lead_view(request, lead_id, response_format='html'): profile = request.user.profile lead = get_object_or_404(Lead, pk=lead_id) if ((not profile.has_permission(lead)) and (not profile.is_admin('treeio.sales'))): return user_denied(request, message="You don't have access to this Lead") form = _do_update_record(profile, request, lead) if form.is_valid(): record = form.save() record.set_user_from_request(request) lead = record.object return render_to_response('sales/lead_view', {'lead': lead, 'form': form}, context_instance=RequestContext(request), response_format=response_format)
[ "@", "treeio_login_required", "@", "handle_response_format", "def", "lead_view", "(", "request", ",", "lead_id", ",", "response_format", "=", "'html'", ")", ":", "profile", "=", "request", ".", "user", ".", "profile", "lead", "=", "get_object_or_404", "(", "Lead", ",", "pk", "=", "lead_id", ")", "if", "(", "(", "not", "profile", ".", "has_permission", "(", "lead", ")", ")", "and", "(", "not", "profile", ".", "is_admin", "(", "'treeio.sales'", ")", ")", ")", ":", "return", "user_denied", "(", "request", ",", "message", "=", "\"You don't have access to this Lead\"", ")", "form", "=", "_do_update_record", "(", "profile", ",", "request", ",", "lead", ")", "if", "form", ".", "is_valid", "(", ")", ":", "record", "=", "form", ".", "save", "(", ")", "record", ".", "set_user_from_request", "(", "request", ")", "lead", "=", "record", ".", "object", "return", "render_to_response", "(", "'sales/lead_view'", ",", "{", "'lead'", ":", "lead", ",", "'form'", ":", "form", "}", ",", "context_instance", "=", "RequestContext", "(", "request", ")", ",", "response_format", "=", "response_format", ")" ]
queue view .
train
false
14,538
def _translate_volume_detail_view(context, vol, image_id=None): d = _translate_volume_summary_view(context, vol, image_id) return d
[ "def", "_translate_volume_detail_view", "(", "context", ",", "vol", ",", "image_id", "=", "None", ")", ":", "d", "=", "_translate_volume_summary_view", "(", "context", ",", "vol", ",", "image_id", ")", "return", "d" ]
maps keys for volumes details view .
train
false
14,540
def _remove_gens(base, strong_gens, basic_orbits=None, strong_gens_distr=None): from sympy.combinatorics.perm_groups import _orbit base_len = len(base) degree = strong_gens[0].size if (strong_gens_distr is None): strong_gens_distr = _distribute_gens_by_base(base, strong_gens) temp = strong_gens_distr[:] if (basic_orbits is None): basic_orbits = [] for i in range(base_len): basic_orbit = _orbit(degree, strong_gens_distr[i], base[i]) basic_orbits.append(basic_orbit) strong_gens_distr.append([]) res = strong_gens[:] for i in range((base_len - 1), (-1), (-1)): gens_copy = strong_gens_distr[i][:] for gen in strong_gens_distr[i]: if (gen not in strong_gens_distr[(i + 1)]): temp_gens = gens_copy[:] temp_gens.remove(gen) if (temp_gens == []): continue temp_orbit = _orbit(degree, temp_gens, base[i]) if (temp_orbit == basic_orbits[i]): gens_copy.remove(gen) res.remove(gen) return res
[ "def", "_remove_gens", "(", "base", ",", "strong_gens", ",", "basic_orbits", "=", "None", ",", "strong_gens_distr", "=", "None", ")", ":", "from", "sympy", ".", "combinatorics", ".", "perm_groups", "import", "_orbit", "base_len", "=", "len", "(", "base", ")", "degree", "=", "strong_gens", "[", "0", "]", ".", "size", "if", "(", "strong_gens_distr", "is", "None", ")", ":", "strong_gens_distr", "=", "_distribute_gens_by_base", "(", "base", ",", "strong_gens", ")", "temp", "=", "strong_gens_distr", "[", ":", "]", "if", "(", "basic_orbits", "is", "None", ")", ":", "basic_orbits", "=", "[", "]", "for", "i", "in", "range", "(", "base_len", ")", ":", "basic_orbit", "=", "_orbit", "(", "degree", ",", "strong_gens_distr", "[", "i", "]", ",", "base", "[", "i", "]", ")", "basic_orbits", ".", "append", "(", "basic_orbit", ")", "strong_gens_distr", ".", "append", "(", "[", "]", ")", "res", "=", "strong_gens", "[", ":", "]", "for", "i", "in", "range", "(", "(", "base_len", "-", "1", ")", ",", "(", "-", "1", ")", ",", "(", "-", "1", ")", ")", ":", "gens_copy", "=", "strong_gens_distr", "[", "i", "]", "[", ":", "]", "for", "gen", "in", "strong_gens_distr", "[", "i", "]", ":", "if", "(", "gen", "not", "in", "strong_gens_distr", "[", "(", "i", "+", "1", ")", "]", ")", ":", "temp_gens", "=", "gens_copy", "[", ":", "]", "temp_gens", ".", "remove", "(", "gen", ")", "if", "(", "temp_gens", "==", "[", "]", ")", ":", "continue", "temp_orbit", "=", "_orbit", "(", "degree", ",", "temp_gens", ",", "base", "[", "i", "]", ")", "if", "(", "temp_orbit", "==", "basic_orbits", "[", "i", "]", ")", ":", "gens_copy", ".", "remove", "(", "gen", ")", "res", ".", "remove", "(", "gen", ")", "return", "res" ]
remove redundant generators from a strong generating set .
train
false
14,541
def verify_credit_card(number): return CreditCard(number).verify()
[ "def", "verify_credit_card", "(", "number", ")", ":", "return", "CreditCard", "(", "number", ")", ".", "verify", "(", ")" ]
returns the card type for given card number or none if invalid .
train
false
14,543
def check_is_advsvc(context): init() credentials = context.to_policy_values() if (ADVSVC_CTX_POLICY not in _ENFORCER.rules): return False return _ENFORCER.enforce(ADVSVC_CTX_POLICY, credentials, credentials)
[ "def", "check_is_advsvc", "(", "context", ")", ":", "init", "(", ")", "credentials", "=", "context", ".", "to_policy_values", "(", ")", "if", "(", "ADVSVC_CTX_POLICY", "not", "in", "_ENFORCER", ".", "rules", ")", ":", "return", "False", "return", "_ENFORCER", ".", "enforce", "(", "ADVSVC_CTX_POLICY", ",", "credentials", ",", "credentials", ")" ]
verify context has advsvc rights according to policy settings .
train
false
14,544
def nova_no_cr(physical_line): pos = physical_line.find('\r') if ((pos != (-1)) and (pos == (len(physical_line) - 2))): return (pos, 'N901: Windows style line endings not allowed in code')
[ "def", "nova_no_cr", "(", "physical_line", ")", ":", "pos", "=", "physical_line", ".", "find", "(", "'\\r'", ")", "if", "(", "(", "pos", "!=", "(", "-", "1", ")", ")", "and", "(", "pos", "==", "(", "len", "(", "physical_line", ")", "-", "2", ")", ")", ")", ":", "return", "(", "pos", ",", "'N901: Windows style line endings not allowed in code'", ")" ]
check that we only use newlines not carriage returns .
train
false
14,545
def non_dominated_sort(iterable, key=(lambda x: x), allowequality=True): items = set(iterable) fronts = [] while items: front = non_dominated_front(items, key, allowequality) items -= front fronts.append(front) return fronts
[ "def", "non_dominated_sort", "(", "iterable", ",", "key", "=", "(", "lambda", "x", ":", "x", ")", ",", "allowequality", "=", "True", ")", ":", "items", "=", "set", "(", "iterable", ")", "fronts", "=", "[", "]", "while", "items", ":", "front", "=", "non_dominated_front", "(", "items", ",", "key", ",", "allowequality", ")", "items", "-=", "front", "fronts", ".", "append", "(", "front", ")", "return", "fronts" ]
return a list that is sorted in a non-dominating fashion .
train
false
14,547
def dup_exquo_ground(f, c, K): if (not c): raise ZeroDivisionError('polynomial division') if (not f): return f return [K.exquo(cf, c) for cf in f]
[ "def", "dup_exquo_ground", "(", "f", ",", "c", ",", "K", ")", ":", "if", "(", "not", "c", ")", ":", "raise", "ZeroDivisionError", "(", "'polynomial division'", ")", "if", "(", "not", "f", ")", ":", "return", "f", "return", "[", "K", ".", "exquo", "(", "cf", ",", "c", ")", "for", "cf", "in", "f", "]" ]
exact quotient by a constant in k[x] .
train
false
14,548
def delete_container(url, token, container, http_conn=None, response_dict=None, service_token=None, query_string=None, headers=None): if http_conn: (parsed, conn) = http_conn else: (parsed, conn) = http_connection(url) path = ('%s/%s' % (parsed.path, quote(container))) if headers: headers = dict(headers) else: headers = {} headers['X-Auth-Token'] = token if service_token: headers['X-Service-Token'] = service_token if query_string: path += ('?' + query_string.lstrip('?')) method = 'DELETE' conn.request(method, path, '', headers) resp = conn.getresponse() body = resp.read() http_log((('%s%s' % (url.replace(parsed.path, ''), path)), method), {'headers': headers}, resp, body) store_response(resp, response_dict) if ((resp.status < 200) or (resp.status >= 300)): raise ClientException.from_response(resp, 'Container DELETE failed', body)
[ "def", "delete_container", "(", "url", ",", "token", ",", "container", ",", "http_conn", "=", "None", ",", "response_dict", "=", "None", ",", "service_token", "=", "None", ",", "query_string", "=", "None", ",", "headers", "=", "None", ")", ":", "if", "http_conn", ":", "(", "parsed", ",", "conn", ")", "=", "http_conn", "else", ":", "(", "parsed", ",", "conn", ")", "=", "http_connection", "(", "url", ")", "path", "=", "(", "'%s/%s'", "%", "(", "parsed", ".", "path", ",", "quote", "(", "container", ")", ")", ")", "if", "headers", ":", "headers", "=", "dict", "(", "headers", ")", "else", ":", "headers", "=", "{", "}", "headers", "[", "'X-Auth-Token'", "]", "=", "token", "if", "service_token", ":", "headers", "[", "'X-Service-Token'", "]", "=", "service_token", "if", "query_string", ":", "path", "+=", "(", "'?'", "+", "query_string", ".", "lstrip", "(", "'?'", ")", ")", "method", "=", "'DELETE'", "conn", ".", "request", "(", "method", ",", "path", ",", "''", ",", "headers", ")", "resp", "=", "conn", ".", "getresponse", "(", ")", "body", "=", "resp", ".", "read", "(", ")", "http_log", "(", "(", "(", "'%s%s'", "%", "(", "url", ".", "replace", "(", "parsed", ".", "path", ",", "''", ")", ",", "path", ")", ")", ",", "method", ")", ",", "{", "'headers'", ":", "headers", "}", ",", "resp", ",", "body", ")", "store_response", "(", "resp", ",", "response_dict", ")", "if", "(", "(", "resp", ".", "status", "<", "200", ")", "or", "(", "resp", ".", "status", ">=", "300", ")", ")", ":", "raise", "ClientException", ".", "from_response", "(", "resp", ",", "'Container DELETE failed'", ",", "body", ")" ]
delete a container .
train
false
14,549
def add_tools_to_container(container, tools=default_toolbar_tools): for (group, grouptools) in tools: for (position, tool) in enumerate(grouptools): container.add_tool(tool, group, position)
[ "def", "add_tools_to_container", "(", "container", ",", "tools", "=", "default_toolbar_tools", ")", ":", "for", "(", "group", ",", "grouptools", ")", "in", "tools", ":", "for", "(", "position", ",", "tool", ")", "in", "enumerate", "(", "grouptools", ")", ":", "container", ".", "add_tool", "(", "tool", ",", "group", ",", "position", ")" ]
add multiple tools to the container .
train
false
14,550
def do_SpnUnregister(po): if (not g_createdSPNLast): _option_error(po, 'SpnCreate must appear before SpnUnregister') SpnRegister(_get_option(po, 'account_name_dn', None), (g_createdSPNLast,), dscon.DS_SPN_DELETE_SPN_OP) return g_createdSPNLast
[ "def", "do_SpnUnregister", "(", "po", ")", ":", "if", "(", "not", "g_createdSPNLast", ")", ":", "_option_error", "(", "po", ",", "'SpnCreate must appear before SpnUnregister'", ")", "SpnRegister", "(", "_get_option", "(", "po", ",", "'account_name_dn'", ",", "None", ")", ",", "(", "g_createdSPNLast", ",", ")", ",", "dscon", ".", "DS_SPN_DELETE_SPN_OP", ")", "return", "g_createdSPNLast" ]
unregister a previously created service principal name .
train
false
14,551
def custom_decode(encoding): encoding = encoding.lower() alternates = {'big5': 'big5hkscs', 'gb2312': 'gb18030', 'ascii': 'utf-8', 'MacCyrillic': 'cp1251'} if (encoding in alternates): return alternates[encoding] else: return encoding
[ "def", "custom_decode", "(", "encoding", ")", ":", "encoding", "=", "encoding", ".", "lower", "(", ")", "alternates", "=", "{", "'big5'", ":", "'big5hkscs'", ",", "'gb2312'", ":", "'gb18030'", ",", "'ascii'", ":", "'utf-8'", ",", "'MacCyrillic'", ":", "'cp1251'", "}", "if", "(", "encoding", "in", "alternates", ")", ":", "return", "alternates", "[", "encoding", "]", "else", ":", "return", "encoding" ]
overrides encoding when charset declaration or charset determination is a subset of a larger charset .
train
true
14,552
@handle_response_format @treeio_login_required def category_add(request, response_format='html'): if request.POST: if ('cancel' not in request.POST): category = KnowledgeCategory() form = KnowledgeCategoryForm(request.POST, instance=category) if form.is_valid(): category = form.save() category.set_user_from_request(request) return HttpResponseRedirect(reverse('knowledge_category_view', args=[category.treepath])) else: return HttpResponseRedirect(reverse('knowledge_categories')) else: form = KnowledgeCategoryForm() context = _get_default_context(request) context.update({'form': form}) return render_to_response('knowledge/category_add', context, context_instance=RequestContext(request), response_format=response_format)
[ "@", "handle_response_format", "@", "treeio_login_required", "def", "category_add", "(", "request", ",", "response_format", "=", "'html'", ")", ":", "if", "request", ".", "POST", ":", "if", "(", "'cancel'", "not", "in", "request", ".", "POST", ")", ":", "category", "=", "KnowledgeCategory", "(", ")", "form", "=", "KnowledgeCategoryForm", "(", "request", ".", "POST", ",", "instance", "=", "category", ")", "if", "form", ".", "is_valid", "(", ")", ":", "category", "=", "form", ".", "save", "(", ")", "category", ".", "set_user_from_request", "(", "request", ")", "return", "HttpResponseRedirect", "(", "reverse", "(", "'knowledge_category_view'", ",", "args", "=", "[", "category", ".", "treepath", "]", ")", ")", "else", ":", "return", "HttpResponseRedirect", "(", "reverse", "(", "'knowledge_categories'", ")", ")", "else", ":", "form", "=", "KnowledgeCategoryForm", "(", ")", "context", "=", "_get_default_context", "(", "request", ")", "context", ".", "update", "(", "{", "'form'", ":", "form", "}", ")", "return", "render_to_response", "(", "'knowledge/category_add'", ",", "context", ",", "context_instance", "=", "RequestContext", "(", "request", ")", ",", "response_format", "=", "response_format", ")" ]
new category form .
train
false
14,553
def upload_dev(user='pandas'): if os.system('cd build/html; rsync -avz . {0}@pandas.pydata.org:/usr/share/nginx/pandas/pandas-docs/dev/ -essh'.format(user)): raise SystemExit('Upload to Pydata Dev failed')
[ "def", "upload_dev", "(", "user", "=", "'pandas'", ")", ":", "if", "os", ".", "system", "(", "'cd build/html; rsync -avz . {0}@pandas.pydata.org:/usr/share/nginx/pandas/pandas-docs/dev/ -essh'", ".", "format", "(", "user", ")", ")", ":", "raise", "SystemExit", "(", "'Upload to Pydata Dev failed'", ")" ]
push a copy to the pydata dev directory .
train
false
14,555
def sign_source_distribution(data): dist_dir = os.path.join(data['tagdir'], 'dist') for f in os.listdir(dist_dir): f = os.path.join(dist_dir, f) print ('\nSigning file %s' % f) exec_command('gpg', '--detach-sign', '-a', f)
[ "def", "sign_source_distribution", "(", "data", ")", ":", "dist_dir", "=", "os", ".", "path", ".", "join", "(", "data", "[", "'tagdir'", "]", ",", "'dist'", ")", "for", "f", "in", "os", ".", "listdir", "(", "dist_dir", ")", ":", "f", "=", "os", ".", "path", ".", "join", "(", "dist_dir", ",", "f", ")", "print", "(", "'\\nSigning file %s'", "%", "f", ")", "exec_command", "(", "'gpg'", ",", "'--detach-sign'", ",", "'-a'", ",", "f", ")" ]
sign the tgz or zip archive that will be uploaded to pypi .
train
false
14,557
def add_global_args(parser): parser.add_argument('--log_basename', default='spinnaker_metric_tool') parser.add_argument('--log_dir', default='.')
[ "def", "add_global_args", "(", "parser", ")", ":", "parser", ".", "add_argument", "(", "'--log_basename'", ",", "default", "=", "'spinnaker_metric_tool'", ")", "parser", ".", "add_argument", "(", "'--log_dir'", ",", "default", "=", "'.'", ")" ]
add global parser options that are independent of the command .
train
false
14,558
def CreateEmptyResourceSample(): client = CreateClient() document = gdata.docs.data.Resource(type='document', title='My Sample Doc') document = client.CreateResource(document) print 'Created:', document.title.text, document.resource_id.text
[ "def", "CreateEmptyResourceSample", "(", ")", ":", "client", "=", "CreateClient", "(", ")", "document", "=", "gdata", ".", "docs", ".", "data", ".", "Resource", "(", "type", "=", "'document'", ",", "title", "=", "'My Sample Doc'", ")", "document", "=", "client", ".", "CreateResource", "(", "document", ")", "print", "'Created:'", ",", "document", ".", "title", ".", "text", ",", "document", ".", "resource_id", ".", "text" ]
create an empty resource of type document .
train
false
14,561
@core_helper def remove_linebreaks(string): return unicode(string).replace('\n', '')
[ "@", "core_helper", "def", "remove_linebreaks", "(", "string", ")", ":", "return", "unicode", "(", "string", ")", ".", "replace", "(", "'\\n'", ",", "''", ")" ]
remove linebreaks from string to make it usable in javascript .
train
false
14,562
@ensure_csrf_cookie @cache_control(no_cache=True, no_store=True, must_revalidate=True) @require_level('staff') def get_sale_records(request, course_id, csv=False): course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id) query_features = ['company_name', 'company_contact_name', 'company_contact_email', 'total_codes', 'total_used_codes', 'total_amount', 'created', 'customer_reference_number', 'recipient_name', 'recipient_email', 'created_by', 'internal_reference', 'invoice_number', 'codes', 'course_id'] sale_data = instructor_analytics.basic.sale_record_features(course_id, query_features) if (not csv): for item in sale_data: item['created_by'] = item['created_by'].username response_payload = {'course_id': course_id.to_deprecated_string(), 'sale': sale_data, 'queried_features': query_features} return JsonResponse(response_payload) else: (header, datarows) = instructor_analytics.csvs.format_dictlist(sale_data, query_features) return instructor_analytics.csvs.create_csv_response('e-commerce_sale_invoice_records.csv', header, datarows)
[ "@", "ensure_csrf_cookie", "@", "cache_control", "(", "no_cache", "=", "True", ",", "no_store", "=", "True", ",", "must_revalidate", "=", "True", ")", "@", "require_level", "(", "'staff'", ")", "def", "get_sale_records", "(", "request", ",", "course_id", ",", "csv", "=", "False", ")", ":", "course_id", "=", "SlashSeparatedCourseKey", ".", "from_deprecated_string", "(", "course_id", ")", "query_features", "=", "[", "'company_name'", ",", "'company_contact_name'", ",", "'company_contact_email'", ",", "'total_codes'", ",", "'total_used_codes'", ",", "'total_amount'", ",", "'created'", ",", "'customer_reference_number'", ",", "'recipient_name'", ",", "'recipient_email'", ",", "'created_by'", ",", "'internal_reference'", ",", "'invoice_number'", ",", "'codes'", ",", "'course_id'", "]", "sale_data", "=", "instructor_analytics", ".", "basic", ".", "sale_record_features", "(", "course_id", ",", "query_features", ")", "if", "(", "not", "csv", ")", ":", "for", "item", "in", "sale_data", ":", "item", "[", "'created_by'", "]", "=", "item", "[", "'created_by'", "]", ".", "username", "response_payload", "=", "{", "'course_id'", ":", "course_id", ".", "to_deprecated_string", "(", ")", ",", "'sale'", ":", "sale_data", ",", "'queried_features'", ":", "query_features", "}", "return", "JsonResponse", "(", "response_payload", ")", "else", ":", "(", "header", ",", "datarows", ")", "=", "instructor_analytics", ".", "csvs", ".", "format_dictlist", "(", "sale_data", ",", "query_features", ")", "return", "instructor_analytics", ".", "csvs", ".", "create_csv_response", "(", "'e-commerce_sale_invoice_records.csv'", ",", "header", ",", "datarows", ")" ]
return the summary of all sales records for a particular course .
train
false
14,563
def decode_base64_dict(data): b64 = base64.b64decode(data['__ndarray__']) array = np.fromstring(b64, dtype=data['dtype']) if (len(data['shape']) > 1): array = array.reshape(data['shape']) return array
[ "def", "decode_base64_dict", "(", "data", ")", ":", "b64", "=", "base64", ".", "b64decode", "(", "data", "[", "'__ndarray__'", "]", ")", "array", "=", "np", ".", "fromstring", "(", "b64", ",", "dtype", "=", "data", "[", "'dtype'", "]", ")", "if", "(", "len", "(", "data", "[", "'shape'", "]", ")", ">", "1", ")", ":", "array", "=", "array", ".", "reshape", "(", "data", "[", "'shape'", "]", ")", "return", "array" ]
decode a base64 encoded array into a numpy array .
train
false
14,564
def get_records(fname): f = open(fname) dtb = f.read() f.close() recs = dtb.split(u'begin')[1:] records = [Record(r) for r in recs] return records
[ "def", "get_records", "(", "fname", ")", ":", "f", "=", "open", "(", "fname", ")", "dtb", "=", "f", ".", "read", "(", ")", "f", ".", "close", "(", ")", "recs", "=", "dtb", ".", "split", "(", "u'begin'", ")", "[", "1", ":", "]", "records", "=", "[", "Record", "(", "r", ")", "for", "r", "in", "recs", "]", "return", "records" ]
read the records of an iraf database file into a python list parameters fname : str name of an iraf database file returns a list of records .
train
false
14,565
def permutate(x, indices, axis=0, inv=False): return Permutate(axis=axis, inv=inv)(x, indices)
[ "def", "permutate", "(", "x", ",", "indices", ",", "axis", "=", "0", ",", "inv", "=", "False", ")", ":", "return", "Permutate", "(", "axis", "=", "axis", ",", "inv", "=", "inv", ")", "(", "x", ",", "indices", ")" ]
permutates a given variable along an axis .
train
false
14,568
def _coerce_loc_index(divisions, o): if (divisions and isinstance(divisions[0], datetime)): return pd.Timestamp(o) if (divisions and isinstance(divisions[0], np.datetime64)): return np.datetime64(o).astype(divisions[0].dtype) return o
[ "def", "_coerce_loc_index", "(", "divisions", ",", "o", ")", ":", "if", "(", "divisions", "and", "isinstance", "(", "divisions", "[", "0", "]", ",", "datetime", ")", ")", ":", "return", "pd", ".", "Timestamp", "(", "o", ")", "if", "(", "divisions", "and", "isinstance", "(", "divisions", "[", "0", "]", ",", "np", ".", "datetime64", ")", ")", ":", "return", "np", ".", "datetime64", "(", "o", ")", ".", "astype", "(", "divisions", "[", "0", "]", ".", "dtype", ")", "return", "o" ]
transform values to be comparable against divisions this is particularly valuable to use with pandas datetimes .
train
false
14,569
@LocalContext def run_shellcode_exitcode(bytes): p = run_shellcode(bytes) p.wait_for_close() return p.poll()
[ "@", "LocalContext", "def", "run_shellcode_exitcode", "(", "bytes", ")", ":", "p", "=", "run_shellcode", "(", "bytes", ")", "p", ".", "wait_for_close", "(", ")", "return", "p", ".", "poll", "(", ")" ]
given assembled machine code bytes .
train
false
14,570
@functools.lru_cache(maxsize=16) def get_stylesheet(template_str): colordict = ColorDict(config.section('colors')) template = jinja2.Template(template_str) return template.render(color=colordict, font=config.section('fonts'), config=objreg.get('config'))
[ "@", "functools", ".", "lru_cache", "(", "maxsize", "=", "16", ")", "def", "get_stylesheet", "(", "template_str", ")", ":", "colordict", "=", "ColorDict", "(", "config", ".", "section", "(", "'colors'", ")", ")", "template", "=", "jinja2", ".", "Template", "(", "template_str", ")", "return", "template", ".", "render", "(", "color", "=", "colordict", ",", "font", "=", "config", ".", "section", "(", "'fonts'", ")", ",", "config", "=", "objreg", ".", "get", "(", "'config'", ")", ")" ]
format a stylesheet based on a template .
train
false
14,573
@memoized def gnu_get_libc_version(): if (not sys.platform.startswith(u'linux')): return None from ctypes import CDLL, cdll, c_char_p cdll.LoadLibrary(u'libc.so.6') libc = CDLL(u'libc.so.6') f = libc.gnu_get_libc_version f.restype = c_char_p result = f() if hasattr(result, u'decode'): result = result.decode(u'utf-8') return result
[ "@", "memoized", "def", "gnu_get_libc_version", "(", ")", ":", "if", "(", "not", "sys", ".", "platform", ".", "startswith", "(", "u'linux'", ")", ")", ":", "return", "None", "from", "ctypes", "import", "CDLL", ",", "cdll", ",", "c_char_p", "cdll", ".", "LoadLibrary", "(", "u'libc.so.6'", ")", "libc", "=", "CDLL", "(", "u'libc.so.6'", ")", "f", "=", "libc", ".", "gnu_get_libc_version", "f", ".", "restype", "=", "c_char_p", "result", "=", "f", "(", ")", "if", "hasattr", "(", "result", ",", "u'decode'", ")", ":", "result", "=", "result", ".", "decode", "(", "u'utf-8'", ")", "return", "result" ]
if on linux .
train
false
14,574
def _ignore_CTRL_C_posix(): signal.signal(signal.SIGINT, signal.SIG_IGN)
[ "def", "_ignore_CTRL_C_posix", "(", ")", ":", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "signal", ".", "SIG_IGN", ")" ]
ignore ctrl+c .
train
false
14,575
def dup_outer_refine_real_root(f, s, t, K, eps=None, steps=None, disjoint=None, fast=False): (a, b, c, d) = _mobius_from_interval((s, t), K.get_field()) f = dup_transform(f, dup_strip([a, b]), dup_strip([c, d]), K) if (dup_sign_variations(f, K) != 1): raise RefinementFailed(('there should be exactly one root in (%s, %s) interval' % (s, t))) return dup_inner_refine_real_root(f, (a, b, c, d), K, eps=eps, steps=steps, disjoint=disjoint, fast=fast)
[ "def", "dup_outer_refine_real_root", "(", "f", ",", "s", ",", "t", ",", "K", ",", "eps", "=", "None", ",", "steps", "=", "None", ",", "disjoint", "=", "None", ",", "fast", "=", "False", ")", ":", "(", "a", ",", "b", ",", "c", ",", "d", ")", "=", "_mobius_from_interval", "(", "(", "s", ",", "t", ")", ",", "K", ".", "get_field", "(", ")", ")", "f", "=", "dup_transform", "(", "f", ",", "dup_strip", "(", "[", "a", ",", "b", "]", ")", ",", "dup_strip", "(", "[", "c", ",", "d", "]", ")", ",", "K", ")", "if", "(", "dup_sign_variations", "(", "f", ",", "K", ")", "!=", "1", ")", ":", "raise", "RefinementFailed", "(", "(", "'there should be exactly one root in (%s, %s) interval'", "%", "(", "s", ",", "t", ")", ")", ")", "return", "dup_inner_refine_real_root", "(", "f", ",", "(", "a", ",", "b", ",", "c", ",", "d", ")", ",", "K", ",", "eps", "=", "eps", ",", "steps", "=", "steps", ",", "disjoint", "=", "disjoint", ",", "fast", "=", "fast", ")" ]
refine a positive root of f given an interval .
train
false
14,576
def _find_frame_imports(name, frame): imports = frame.nodes_of_class((astroid.Import, astroid.From)) for import_node in imports: for (import_name, import_alias) in import_node.names: if import_alias: if (import_alias == name): return True elif (import_name and (import_name == name)): return True
[ "def", "_find_frame_imports", "(", "name", ",", "frame", ")", ":", "imports", "=", "frame", ".", "nodes_of_class", "(", "(", "astroid", ".", "Import", ",", "astroid", ".", "From", ")", ")", "for", "import_node", "in", "imports", ":", "for", "(", "import_name", ",", "import_alias", ")", "in", "import_node", ".", "names", ":", "if", "import_alias", ":", "if", "(", "import_alias", "==", "name", ")", ":", "return", "True", "elif", "(", "import_name", "and", "(", "import_name", "==", "name", ")", ")", ":", "return", "True" ]
detect imports in the frame .
train
true
14,577
def filter_releases(package, settings, releases): platform_selectors = [((sublime.platform() + '-') + sublime.arch()), sublime.platform(), '*'] install_prereleases = settings.get('install_prereleases') allow_prereleases = (install_prereleases is True) if ((not allow_prereleases) and isinstance(install_prereleases, list) and (package in install_prereleases)): allow_prereleases = True if (not allow_prereleases): releases = version_exclude_prerelease(releases) output = [] for release in releases: platforms = release.get('platforms', '*') if (not isinstance(platforms, list)): platforms = [platforms] matched = False for selector in platform_selectors: if (selector in platforms): matched = True break if (not matched): continue if (not is_compatible_version(release.get('sublime_text', '*'))): continue output.append(release) return output
[ "def", "filter_releases", "(", "package", ",", "settings", ",", "releases", ")", ":", "platform_selectors", "=", "[", "(", "(", "sublime", ".", "platform", "(", ")", "+", "'-'", ")", "+", "sublime", ".", "arch", "(", ")", ")", ",", "sublime", ".", "platform", "(", ")", ",", "'*'", "]", "install_prereleases", "=", "settings", ".", "get", "(", "'install_prereleases'", ")", "allow_prereleases", "=", "(", "install_prereleases", "is", "True", ")", "if", "(", "(", "not", "allow_prereleases", ")", "and", "isinstance", "(", "install_prereleases", ",", "list", ")", "and", "(", "package", "in", "install_prereleases", ")", ")", ":", "allow_prereleases", "=", "True", "if", "(", "not", "allow_prereleases", ")", ":", "releases", "=", "version_exclude_prerelease", "(", "releases", ")", "output", "=", "[", "]", "for", "release", "in", "releases", ":", "platforms", "=", "release", ".", "get", "(", "'platforms'", ",", "'*'", ")", "if", "(", "not", "isinstance", "(", "platforms", ",", "list", ")", ")", ":", "platforms", "=", "[", "platforms", "]", "matched", "=", "False", "for", "selector", "in", "platform_selectors", ":", "if", "(", "selector", "in", "platforms", ")", ":", "matched", "=", "True", "break", "if", "(", "not", "matched", ")", ":", "continue", "if", "(", "not", "is_compatible_version", "(", "release", ".", "get", "(", "'sublime_text'", ",", "'*'", ")", ")", ")", ":", "continue", "output", ".", "append", "(", "release", ")", "return", "output" ]
returns all releases in the list of releases that are compatible with the current platform and version of sublime text .
train
false
14,580
def quadrect(f, n, a, b, kind='lege', *args, **kwargs): if (kind.lower() == 'lege'): (nodes, weights) = qnwlege(n, a, b) elif (kind.lower() == 'cheb'): (nodes, weights) = qnwcheb(n, a, b) elif (kind.lower() == 'trap'): (nodes, weights) = qnwtrap(n, a, b) elif (kind.lower() == 'simp'): (nodes, weights) = qnwsimp(n, a, b) else: (nodes, weights) = qnwequi(n, a, b, kind) out = weights.dot(f(nodes, *args, **kwargs)) return out
[ "def", "quadrect", "(", "f", ",", "n", ",", "a", ",", "b", ",", "kind", "=", "'lege'", ",", "*", "args", ",", "**", "kwargs", ")", ":", "if", "(", "kind", ".", "lower", "(", ")", "==", "'lege'", ")", ":", "(", "nodes", ",", "weights", ")", "=", "qnwlege", "(", "n", ",", "a", ",", "b", ")", "elif", "(", "kind", ".", "lower", "(", ")", "==", "'cheb'", ")", ":", "(", "nodes", ",", "weights", ")", "=", "qnwcheb", "(", "n", ",", "a", ",", "b", ")", "elif", "(", "kind", ".", "lower", "(", ")", "==", "'trap'", ")", ":", "(", "nodes", ",", "weights", ")", "=", "qnwtrap", "(", "n", ",", "a", ",", "b", ")", "elif", "(", "kind", ".", "lower", "(", ")", "==", "'simp'", ")", ":", "(", "nodes", ",", "weights", ")", "=", "qnwsimp", "(", "n", ",", "a", ",", "b", ")", "else", ":", "(", "nodes", ",", "weights", ")", "=", "qnwequi", "(", "n", ",", "a", ",", "b", ",", "kind", ")", "out", "=", "weights", ".", "dot", "(", "f", "(", "nodes", ",", "*", "args", ",", "**", "kwargs", ")", ")", "return", "out" ]
integrate the d-dimensional function f on a rectangle with lower and upper bound for dimension i defined by a[i] and b[i] .
train
true
14,582
def _create_default_topic_similarities(): topic_similarities_dict = {topic: {} for topic in RECOMMENDATION_CATEGORIES} data = DEFAULT_TOPIC_SIMILARITIES_STRING.splitlines() data = list(csv.reader(data)) topics_list = data[0] topic_similarities_values = data[1:] for (row_ind, topic_1) in enumerate(topics_list): for (col_ind, topic_2) in enumerate(topics_list): topic_similarities_dict[topic_1][topic_2] = float(topic_similarities_values[row_ind][col_ind]) return save_topic_similarities(topic_similarities_dict)
[ "def", "_create_default_topic_similarities", "(", ")", ":", "topic_similarities_dict", "=", "{", "topic", ":", "{", "}", "for", "topic", "in", "RECOMMENDATION_CATEGORIES", "}", "data", "=", "DEFAULT_TOPIC_SIMILARITIES_STRING", ".", "splitlines", "(", ")", "data", "=", "list", "(", "csv", ".", "reader", "(", "data", ")", ")", "topics_list", "=", "data", "[", "0", "]", "topic_similarities_values", "=", "data", "[", "1", ":", "]", "for", "(", "row_ind", ",", "topic_1", ")", "in", "enumerate", "(", "topics_list", ")", ":", "for", "(", "col_ind", ",", "topic_2", ")", "in", "enumerate", "(", "topics_list", ")", ":", "topic_similarities_dict", "[", "topic_1", "]", "[", "topic_2", "]", "=", "float", "(", "topic_similarities_values", "[", "row_ind", "]", "[", "col_ind", "]", ")", "return", "save_topic_similarities", "(", "topic_similarities_dict", ")" ]
creates the default topic similarities .
train
false
14,584
def CheckPropertyValue(name, value, max_length): num_values = ((((((value.has_int64value() + value.has_stringvalue()) + value.has_booleanvalue()) + value.has_doublevalue()) + value.has_pointvalue()) + value.has_uservalue()) + value.has_referencevalue()) Check((num_values <= 1), (('PropertyValue for ' + name) + ' has multiple value fields set')) if value.has_stringvalue(): s16 = value.stringvalue().decode('utf-8', 'replace').encode('utf-16') Check((((len(s16) - 2) / 2) <= max_length), ('Property %s is too long. Maximum length is %d.' % (name, max_length)))
[ "def", "CheckPropertyValue", "(", "name", ",", "value", ",", "max_length", ")", ":", "num_values", "=", "(", "(", "(", "(", "(", "(", "value", ".", "has_int64value", "(", ")", "+", "value", ".", "has_stringvalue", "(", ")", ")", "+", "value", ".", "has_booleanvalue", "(", ")", ")", "+", "value", ".", "has_doublevalue", "(", ")", ")", "+", "value", ".", "has_pointvalue", "(", ")", ")", "+", "value", ".", "has_uservalue", "(", ")", ")", "+", "value", ".", "has_referencevalue", "(", ")", ")", "Check", "(", "(", "num_values", "<=", "1", ")", ",", "(", "(", "'PropertyValue for '", "+", "name", ")", "+", "' has multiple value fields set'", ")", ")", "if", "value", ".", "has_stringvalue", "(", ")", ":", "s16", "=", "value", ".", "stringvalue", "(", ")", ".", "decode", "(", "'utf-8'", ",", "'replace'", ")", ".", "encode", "(", "'utf-16'", ")", "Check", "(", "(", "(", "(", "len", "(", "s16", ")", "-", "2", ")", "/", "2", ")", "<=", "max_length", ")", ",", "(", "'Property %s is too long. Maximum length is %d.'", "%", "(", "name", ",", "max_length", ")", ")", ")" ]
check if this property value can be stored .
train
false
14,585
def deep_force_unicode(value): if isinstance(value, (list, tuple, set)): value = type(value)(map(deep_force_unicode, value)) elif isinstance(value, dict): value = type(value)(map(deep_force_unicode, value.items())) elif isinstance(value, Promise): value = force_text(value) return value
[ "def", "deep_force_unicode", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ",", "set", ")", ")", ":", "value", "=", "type", "(", "value", ")", "(", "map", "(", "deep_force_unicode", ",", "value", ")", ")", "elif", "isinstance", "(", "value", ",", "dict", ")", ":", "value", "=", "type", "(", "value", ")", "(", "map", "(", "deep_force_unicode", ",", "value", ".", "items", "(", ")", ")", ")", "elif", "isinstance", "(", "value", ",", "Promise", ")", ":", "value", "=", "force_text", "(", "value", ")", "return", "value" ]
recursively call force_text on value .
train
true
14,586
def check_vendor(): if os.path.isfile('/vendor'): with open('/vendor', 'r') as f: line = f.readline() if ('noobs' in line): return 'noobs' if ('ts' in line): return 'ts' return None
[ "def", "check_vendor", "(", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "'/vendor'", ")", ":", "with", "open", "(", "'/vendor'", ",", "'r'", ")", "as", "f", ":", "line", "=", "f", ".", "readline", "(", ")", "if", "(", "'noobs'", "in", "line", ")", ":", "return", "'noobs'", "if", "(", "'ts'", "in", "line", ")", ":", "return", "'ts'", "return", "None" ]
checks whether osmc is being installed via n00bs or ts .
train
false
14,587
def get_pyextension_imports(modname): statement = ("\n import sys\n # Importing distutils filters common modules, especially in virtualenv.\n import distutils\n original_modlist = set(sys.modules.keys())\n # When importing this module - sys.modules gets updated.\n import %(modname)s\n all_modlist = set(sys.modules.keys())\n diff = all_modlist - original_modlist\n # Module list contain original modname. We do not need it there.\n diff.discard('%(modname)s')\n # Print module list to stdout.\n print(list(diff))\n " % {'modname': modname}) module_imports = eval_statement(statement) if (not module_imports): logger.error(('Cannot find imports for module %s' % modname)) return [] return module_imports
[ "def", "get_pyextension_imports", "(", "modname", ")", ":", "statement", "=", "(", "\"\\n import sys\\n # Importing distutils filters common modules, especially in virtualenv.\\n import distutils\\n original_modlist = set(sys.modules.keys())\\n # When importing this module - sys.modules gets updated.\\n import %(modname)s\\n all_modlist = set(sys.modules.keys())\\n diff = all_modlist - original_modlist\\n # Module list contain original modname. We do not need it there.\\n diff.discard('%(modname)s')\\n # Print module list to stdout.\\n print(list(diff))\\n \"", "%", "{", "'modname'", ":", "modname", "}", ")", "module_imports", "=", "eval_statement", "(", "statement", ")", "if", "(", "not", "module_imports", ")", ":", "logger", ".", "error", "(", "(", "'Cannot find imports for module %s'", "%", "modname", ")", ")", "return", "[", "]", "return", "module_imports" ]
return list of modules required by binary python extension .
train
false
14,588
def dmp_exquo_ground(f, c, u, K): if (not u): return dup_exquo_ground(f, c, K) v = (u - 1) return [dmp_exquo_ground(cf, c, v, K) for cf in f]
[ "def", "dmp_exquo_ground", "(", "f", ",", "c", ",", "u", ",", "K", ")", ":", "if", "(", "not", "u", ")", ":", "return", "dup_exquo_ground", "(", "f", ",", "c", ",", "K", ")", "v", "=", "(", "u", "-", "1", ")", "return", "[", "dmp_exquo_ground", "(", "cf", ",", "c", ",", "v", ",", "K", ")", "for", "cf", "in", "f", "]" ]
exact quotient by a constant in k[x] .
train
false
14,589
def get_blkdev_major_minor(path, lookup_for_file=True): st = os.stat(path) if stat.S_ISBLK(st.st_mode): (path, st) = _get_disk_of_partition(path, st) return ('%d:%d' % (os.major(st.st_rdev), os.minor(st.st_rdev))) elif stat.S_ISCHR(st.st_mode): return None elif lookup_for_file: (out, _err) = execute('df', path) devpath = out.split('\n')[1].split()[0] if (devpath[0] is not '/'): return None return get_blkdev_major_minor(devpath, False) else: msg = (_("Unable to get a block device for file '%s'") % path) raise exception.Error(msg)
[ "def", "get_blkdev_major_minor", "(", "path", ",", "lookup_for_file", "=", "True", ")", ":", "st", "=", "os", ".", "stat", "(", "path", ")", "if", "stat", ".", "S_ISBLK", "(", "st", ".", "st_mode", ")", ":", "(", "path", ",", "st", ")", "=", "_get_disk_of_partition", "(", "path", ",", "st", ")", "return", "(", "'%d:%d'", "%", "(", "os", ".", "major", "(", "st", ".", "st_rdev", ")", ",", "os", ".", "minor", "(", "st", ".", "st_rdev", ")", ")", ")", "elif", "stat", ".", "S_ISCHR", "(", "st", ".", "st_mode", ")", ":", "return", "None", "elif", "lookup_for_file", ":", "(", "out", ",", "_err", ")", "=", "execute", "(", "'df'", ",", "path", ")", "devpath", "=", "out", ".", "split", "(", "'\\n'", ")", "[", "1", "]", ".", "split", "(", ")", "[", "0", "]", "if", "(", "devpath", "[", "0", "]", "is", "not", "'/'", ")", ":", "return", "None", "return", "get_blkdev_major_minor", "(", "devpath", ",", "False", ")", "else", ":", "msg", "=", "(", "_", "(", "\"Unable to get a block device for file '%s'\"", ")", "%", "path", ")", "raise", "exception", ".", "Error", "(", "msg", ")" ]
get major:minor number of block device .
train
false
14,590
def l1norm(a): return np.sum(np.abs(a))
[ "def", "l1norm", "(", "a", ")", ":", "return", "np", ".", "sum", "(", "np", ".", "abs", "(", "a", ")", ")" ]
return the *l1* norm of *a* .
train
false
14,592
def js_dict(d): dict_items = [indent((u"'%s': (%s)" % (k, v))) for (k, v) in d.items()] return (u'{\n%s\n}' % u',\n'.join(dict_items))
[ "def", "js_dict", "(", "d", ")", ":", "dict_items", "=", "[", "indent", "(", "(", "u\"'%s': (%s)\"", "%", "(", "k", ",", "v", ")", ")", ")", "for", "(", "k", ",", "v", ")", "in", "d", ".", "items", "(", ")", "]", "return", "(", "u'{\\n%s\\n}'", "%", "u',\\n'", ".", "join", "(", "dict_items", ")", ")" ]
return a javascript expression string for the dict d .
train
false
14,593
def get_action_parameters_schema(additional_properties=False): return get_draft_schema(version='action_params', additional_properties=additional_properties)
[ "def", "get_action_parameters_schema", "(", "additional_properties", "=", "False", ")", ":", "return", "get_draft_schema", "(", "version", "=", "'action_params'", ",", "additional_properties", "=", "additional_properties", ")" ]
return a generic schema which is used for validating action parameters definition .
train
false
14,594
def save_course_update_items(location, course_updates, course_update_items, user=None): course_updates.items = course_update_items course_updates.data = '' modulestore().update_item(course_updates, user.id) return course_updates
[ "def", "save_course_update_items", "(", "location", ",", "course_updates", ",", "course_update_items", ",", "user", "=", "None", ")", ":", "course_updates", ".", "items", "=", "course_update_items", "course_updates", ".", "data", "=", "''", "modulestore", "(", ")", ".", "update_item", "(", "course_updates", ",", "user", ".", "id", ")", "return", "course_updates" ]
save list of course_updates data dictionaries in new field and html related to course update in data field .
train
false
14,595
def additions_version(): try: d = _additions_dir() except EnvironmentError: return False if (d and (len(os.listdir(d)) > 0)): return re.sub('^{0}-'.format(_additions_dir_prefix), '', os.path.basename(d)) return False
[ "def", "additions_version", "(", ")", ":", "try", ":", "d", "=", "_additions_dir", "(", ")", "except", "EnvironmentError", ":", "return", "False", "if", "(", "d", "and", "(", "len", "(", "os", ".", "listdir", "(", "d", ")", ")", ">", "0", ")", ")", ":", "return", "re", ".", "sub", "(", "'^{0}-'", ".", "format", "(", "_additions_dir_prefix", ")", ",", "''", ",", "os", ".", "path", ".", "basename", "(", "d", ")", ")", "return", "False" ]
check virtualbox guest additions version .
train
true
14,596
def _load_opts_from_mrjob_conf(runner_alias, conf_path, already_loaded): conf = _conf_object_at_path(conf_path) if (conf is None): return [(None, {})] real_conf_path = os.path.realpath(conf_path) if (real_conf_path in already_loaded): return [] else: already_loaded.append(real_conf_path) try: values = (conf['runners'][runner_alias] or {}) except (KeyError, TypeError, ValueError): values = {} inherited = [] if conf.get('include', None): includes = conf['include'] if isinstance(includes, string_types): includes = [includes] for include in reversed(includes): include = os.path.join(os.path.dirname(real_conf_path), expand_path(include)) inherited = (_load_opts_from_mrjob_conf(runner_alias, include, already_loaded) + inherited) return (inherited + [(conf_path, values)])
[ "def", "_load_opts_from_mrjob_conf", "(", "runner_alias", ",", "conf_path", ",", "already_loaded", ")", ":", "conf", "=", "_conf_object_at_path", "(", "conf_path", ")", "if", "(", "conf", "is", "None", ")", ":", "return", "[", "(", "None", ",", "{", "}", ")", "]", "real_conf_path", "=", "os", ".", "path", ".", "realpath", "(", "conf_path", ")", "if", "(", "real_conf_path", "in", "already_loaded", ")", ":", "return", "[", "]", "else", ":", "already_loaded", ".", "append", "(", "real_conf_path", ")", "try", ":", "values", "=", "(", "conf", "[", "'runners'", "]", "[", "runner_alias", "]", "or", "{", "}", ")", "except", "(", "KeyError", ",", "TypeError", ",", "ValueError", ")", ":", "values", "=", "{", "}", "inherited", "=", "[", "]", "if", "conf", ".", "get", "(", "'include'", ",", "None", ")", ":", "includes", "=", "conf", "[", "'include'", "]", "if", "isinstance", "(", "includes", ",", "string_types", ")", ":", "includes", "=", "[", "includes", "]", "for", "include", "in", "reversed", "(", "includes", ")", ":", "include", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "real_conf_path", ")", ",", "expand_path", "(", "include", ")", ")", "inherited", "=", "(", "_load_opts_from_mrjob_conf", "(", "runner_alias", ",", "include", ",", "already_loaded", ")", "+", "inherited", ")", "return", "(", "inherited", "+", "[", "(", "conf_path", ",", "values", ")", "]", ")" ]
helper for :py:func:load_opts_from_mrjob_conf for recursive use .
train
false
14,597
def copy_metadata(package_name): dist = pkg_resources.get_distribution(package_name) metadata_dir = dist.egg_info dest_dir = '{}.egg-info'.format(dist.egg_name()) if (metadata_dir is None): metadata_dir = os.path.join(dist.location, dest_dir) assert os.path.exists(metadata_dir) logger.debug('Package {} metadata found in {} belongs in {}'.format(package_name, metadata_dir, dest_dir)) return [(metadata_dir, dest_dir)]
[ "def", "copy_metadata", "(", "package_name", ")", ":", "dist", "=", "pkg_resources", ".", "get_distribution", "(", "package_name", ")", "metadata_dir", "=", "dist", ".", "egg_info", "dest_dir", "=", "'{}.egg-info'", ".", "format", "(", "dist", ".", "egg_name", "(", ")", ")", "if", "(", "metadata_dir", "is", "None", ")", ":", "metadata_dir", "=", "os", ".", "path", ".", "join", "(", "dist", ".", "location", ",", "dest_dir", ")", "assert", "os", ".", "path", ".", "exists", "(", "metadata_dir", ")", "logger", ".", "debug", "(", "'Package {} metadata found in {} belongs in {}'", ".", "format", "(", "package_name", ",", "metadata_dir", ",", "dest_dir", ")", ")", "return", "[", "(", "metadata_dir", ",", "dest_dir", ")", "]" ]
this function returns a list to be assigned to the datas global variable .
train
false
14,603
def fileobj_closed(f): if isinstance(f, string_types): return True if hasattr(f, 'closed'): return f.closed elif (hasattr(f, 'fileobj') and hasattr(f.fileobj, 'closed')): return f.fileobj.closed elif (hasattr(f, 'fp') and hasattr(f.fp, 'closed')): return f.fp.closed else: return False
[ "def", "fileobj_closed", "(", "f", ")", ":", "if", "isinstance", "(", "f", ",", "string_types", ")", ":", "return", "True", "if", "hasattr", "(", "f", ",", "'closed'", ")", ":", "return", "f", ".", "closed", "elif", "(", "hasattr", "(", "f", ",", "'fileobj'", ")", "and", "hasattr", "(", "f", ".", "fileobj", ",", "'closed'", ")", ")", ":", "return", "f", ".", "fileobj", ".", "closed", "elif", "(", "hasattr", "(", "f", ",", "'fp'", ")", "and", "hasattr", "(", "f", ".", "fp", ",", "'closed'", ")", ")", ":", "return", "f", ".", "fp", ".", "closed", "else", ":", "return", "False" ]
returns true if the given file-like object is closed or if f is a string .
train
false
14,604
def _safeReprVars(varsDictItems): return [(name, reflect.safe_repr(obj)) for (name, obj) in varsDictItems]
[ "def", "_safeReprVars", "(", "varsDictItems", ")", ":", "return", "[", "(", "name", ",", "reflect", ".", "safe_repr", "(", "obj", ")", ")", "for", "(", "name", ",", "obj", ")", "in", "varsDictItems", "]" ]
convert a list of pairs into pairs .
train
false
14,605
def dmp_content(f, u, K): (cont, v) = (dmp_LC(f, K), (u - 1)) if dmp_zero_p(f, u): return cont for c in f[1:]: cont = dmp_gcd(cont, c, v, K) if dmp_one_p(cont, v, K): break if K.is_negative(dmp_ground_LC(cont, v, K)): return dmp_neg(cont, v, K) else: return cont
[ "def", "dmp_content", "(", "f", ",", "u", ",", "K", ")", ":", "(", "cont", ",", "v", ")", "=", "(", "dmp_LC", "(", "f", ",", "K", ")", ",", "(", "u", "-", "1", ")", ")", "if", "dmp_zero_p", "(", "f", ",", "u", ")", ":", "return", "cont", "for", "c", "in", "f", "[", "1", ":", "]", ":", "cont", "=", "dmp_gcd", "(", "cont", ",", "c", ",", "v", ",", "K", ")", "if", "dmp_one_p", "(", "cont", ",", "v", ",", "K", ")", ":", "break", "if", "K", ".", "is_negative", "(", "dmp_ground_LC", "(", "cont", ",", "v", ",", "K", ")", ")", ":", "return", "dmp_neg", "(", "cont", ",", "v", ",", "K", ")", "else", ":", "return", "cont" ]
returns gcd of multivariate coefficients .
train
false
14,606
def _process_mass_lead_form(f): def wrap(request, *args, **kwargs): 'Wrap' if ('massform' in request.POST): for key in request.POST: if ('mass-lead' in key): try: lead = Lead.objects.get(pk=request.POST[key]) form = LeadMassActionForm(request.user.profile, request.POST, instance=lead) if (form.is_valid() and request.user.profile.has_permission(lead, mode='w')): form.save() except: pass return f(request, *args, **kwargs) wrap.__doc__ = f.__doc__ wrap.__name__ = f.__name__ return wrap
[ "def", "_process_mass_lead_form", "(", "f", ")", ":", "def", "wrap", "(", "request", ",", "*", "args", ",", "**", "kwargs", ")", ":", "if", "(", "'massform'", "in", "request", ".", "POST", ")", ":", "for", "key", "in", "request", ".", "POST", ":", "if", "(", "'mass-lead'", "in", "key", ")", ":", "try", ":", "lead", "=", "Lead", ".", "objects", ".", "get", "(", "pk", "=", "request", ".", "POST", "[", "key", "]", ")", "form", "=", "LeadMassActionForm", "(", "request", ".", "user", ".", "profile", ",", "request", ".", "POST", ",", "instance", "=", "lead", ")", "if", "(", "form", ".", "is_valid", "(", ")", "and", "request", ".", "user", ".", "profile", ".", "has_permission", "(", "lead", ",", "mode", "=", "'w'", ")", ")", ":", "form", ".", "save", "(", ")", "except", ":", "pass", "return", "f", "(", "request", ",", "*", "args", ",", "**", "kwargs", ")", "wrap", ".", "__doc__", "=", "f", ".", "__doc__", "wrap", ".", "__name__", "=", "f", ".", "__name__", "return", "wrap" ]
pre-process request to handle mass action form for orders .
train
false
14,607
def _DecodeUTF8(pb_value): if (pb_value is not None): return pb_value.decode('utf-8') return None
[ "def", "_DecodeUTF8", "(", "pb_value", ")", ":", "if", "(", "pb_value", "is", "not", "None", ")", ":", "return", "pb_value", ".", "decode", "(", "'utf-8'", ")", "return", "None" ]
decodes a utf-8 encoded string into unicode .
train
false
14,608
def update_version(event_id, is_created, column_to_increment): VersionUpdater(event_id=event_id, is_created=is_created, column_to_increment=column_to_increment).update()
[ "def", "update_version", "(", "event_id", ",", "is_created", ",", "column_to_increment", ")", ":", "VersionUpdater", "(", "event_id", "=", "event_id", ",", "is_created", "=", "is_created", ",", "column_to_increment", "=", "column_to_increment", ")", ".", "update", "(", ")" ]
function responsible for increasing version when some data will be created or changed .
train
false
14,609
def register_class_loader(loader): if (not hasattr(loader, '__call__')): raise TypeError('loader must be callable') CLASS_LOADERS.update([loader])
[ "def", "register_class_loader", "(", "loader", ")", ":", "if", "(", "not", "hasattr", "(", "loader", ",", "'__call__'", ")", ")", ":", "raise", "TypeError", "(", "'loader must be callable'", ")", "CLASS_LOADERS", ".", "update", "(", "[", "loader", "]", ")" ]
registers a loader that is called to provide the c{class} for a specific alias .
train
false
14,610
def compute_node_get_model(context, compute_id): return IMPL.compute_node_get_model(context, compute_id)
[ "def", "compute_node_get_model", "(", "context", ",", "compute_id", ")", ":", "return", "IMPL", ".", "compute_node_get_model", "(", "context", ",", "compute_id", ")" ]
get a compute node sqlalchemy model object by its id .
train
false
14,611
def split_options(opts, validate=True, warn=False): and_idx = opts.find('&') semi_idx = opts.find(';') try: if ((and_idx >= 0) and (semi_idx >= 0)): raise InvalidURI("Can not mix '&' and ';' for option separators.") elif (and_idx >= 0): options = _parse_options(opts, '&') elif (semi_idx >= 0): options = _parse_options(opts, ';') elif (opts.find('=') != (-1)): options = _parse_options(opts, None) else: raise ValueError except ValueError: raise InvalidURI('MongoDB URI options are key=value pairs.') if validate: return validate_options(options, warn) return options
[ "def", "split_options", "(", "opts", ",", "validate", "=", "True", ",", "warn", "=", "False", ")", ":", "and_idx", "=", "opts", ".", "find", "(", "'&'", ")", "semi_idx", "=", "opts", ".", "find", "(", "';'", ")", "try", ":", "if", "(", "(", "and_idx", ">=", "0", ")", "and", "(", "semi_idx", ">=", "0", ")", ")", ":", "raise", "InvalidURI", "(", "\"Can not mix '&' and ';' for option separators.\"", ")", "elif", "(", "and_idx", ">=", "0", ")", ":", "options", "=", "_parse_options", "(", "opts", ",", "'&'", ")", "elif", "(", "semi_idx", ">=", "0", ")", ":", "options", "=", "_parse_options", "(", "opts", ",", "';'", ")", "elif", "(", "opts", ".", "find", "(", "'='", ")", "!=", "(", "-", "1", ")", ")", ":", "options", "=", "_parse_options", "(", "opts", ",", "None", ")", "else", ":", "raise", "ValueError", "except", "ValueError", ":", "raise", "InvalidURI", "(", "'MongoDB URI options are key=value pairs.'", ")", "if", "validate", ":", "return", "validate_options", "(", "options", ",", "warn", ")", "return", "options" ]
takes the options portion of a mongodb uri .
train
true
14,613
def request_data(request, template='base.html', data='sausage'): return render_to_response(template, {'get-foo': request.GET.get('foo', None), 'get-bar': request.GET.get('bar', None), 'post-foo': request.POST.get('foo', None), 'post-bar': request.POST.get('bar', None), 'request-foo': request.REQUEST.get('foo', None), 'request-bar': request.REQUEST.get('bar', None), 'data': data})
[ "def", "request_data", "(", "request", ",", "template", "=", "'base.html'", ",", "data", "=", "'sausage'", ")", ":", "return", "render_to_response", "(", "template", ",", "{", "'get-foo'", ":", "request", ".", "GET", ".", "get", "(", "'foo'", ",", "None", ")", ",", "'get-bar'", ":", "request", ".", "GET", ".", "get", "(", "'bar'", ",", "None", ")", ",", "'post-foo'", ":", "request", ".", "POST", ".", "get", "(", "'foo'", ",", "None", ")", ",", "'post-bar'", ":", "request", ".", "POST", ".", "get", "(", "'bar'", ",", "None", ")", ",", "'request-foo'", ":", "request", ".", "REQUEST", ".", "get", "(", "'foo'", ",", "None", ")", ",", "'request-bar'", ":", "request", ".", "REQUEST", ".", "get", "(", "'bar'", ",", "None", ")", ",", "'data'", ":", "data", "}", ")" ]
a simple view that returns the request data in the context .
train
false
14,614
def _section_extensions(course): section_data = {'section_key': 'extensions', 'section_display_name': _('Extensions'), 'units_with_due_dates': [(title_or_url(unit), unicode(unit.location)) for unit in get_units_with_due_date(course)], 'change_due_date_url': reverse('change_due_date', kwargs={'course_id': unicode(course.id)}), 'reset_due_date_url': reverse('reset_due_date', kwargs={'course_id': unicode(course.id)}), 'show_unit_extensions_url': reverse('show_unit_extensions', kwargs={'course_id': unicode(course.id)}), 'show_student_extensions_url': reverse('show_student_extensions', kwargs={'course_id': unicode(course.id)})} return section_data
[ "def", "_section_extensions", "(", "course", ")", ":", "section_data", "=", "{", "'section_key'", ":", "'extensions'", ",", "'section_display_name'", ":", "_", "(", "'Extensions'", ")", ",", "'units_with_due_dates'", ":", "[", "(", "title_or_url", "(", "unit", ")", ",", "unicode", "(", "unit", ".", "location", ")", ")", "for", "unit", "in", "get_units_with_due_date", "(", "course", ")", "]", ",", "'change_due_date_url'", ":", "reverse", "(", "'change_due_date'", ",", "kwargs", "=", "{", "'course_id'", ":", "unicode", "(", "course", ".", "id", ")", "}", ")", ",", "'reset_due_date_url'", ":", "reverse", "(", "'reset_due_date'", ",", "kwargs", "=", "{", "'course_id'", ":", "unicode", "(", "course", ".", "id", ")", "}", ")", ",", "'show_unit_extensions_url'", ":", "reverse", "(", "'show_unit_extensions'", ",", "kwargs", "=", "{", "'course_id'", ":", "unicode", "(", "course", ".", "id", ")", "}", ")", ",", "'show_student_extensions_url'", ":", "reverse", "(", "'show_student_extensions'", ",", "kwargs", "=", "{", "'course_id'", ":", "unicode", "(", "course", ".", "id", ")", "}", ")", "}", "return", "section_data" ]
provide data for the corresponding dashboard section .
train
false
14,615
def s3_include_debug_js(): request = current.request folder = request.folder appname = request.application theme = current.deployment_settings.get_theme() scripts_dir = os.path.join(folder, 'static', 'scripts') sys.path.append(os.path.join(scripts_dir, 'tools')) import mergejsmf configDictCore = {'.': scripts_dir, 'ui': scripts_dir, 'web2py': scripts_dir, 'S3': scripts_dir} configFilename = ('%s/tools/sahana.js.cfg' % scripts_dir) (fs, files) = mergejsmf.getFiles(configDictCore, configFilename) include = '' for file in files: include = ('%s\n<script src="/%s/static/scripts/%s" type="text/javascript"></script>' % (include, appname, file)) return XML(include)
[ "def", "s3_include_debug_js", "(", ")", ":", "request", "=", "current", ".", "request", "folder", "=", "request", ".", "folder", "appname", "=", "request", ".", "application", "theme", "=", "current", ".", "deployment_settings", ".", "get_theme", "(", ")", "scripts_dir", "=", "os", ".", "path", ".", "join", "(", "folder", ",", "'static'", ",", "'scripts'", ")", "sys", ".", "path", ".", "append", "(", "os", ".", "path", ".", "join", "(", "scripts_dir", ",", "'tools'", ")", ")", "import", "mergejsmf", "configDictCore", "=", "{", "'.'", ":", "scripts_dir", ",", "'ui'", ":", "scripts_dir", ",", "'web2py'", ":", "scripts_dir", ",", "'S3'", ":", "scripts_dir", "}", "configFilename", "=", "(", "'%s/tools/sahana.js.cfg'", "%", "scripts_dir", ")", "(", "fs", ",", "files", ")", "=", "mergejsmf", ".", "getFiles", "(", "configDictCore", ",", "configFilename", ")", "include", "=", "''", "for", "file", "in", "files", ":", "include", "=", "(", "'%s\\n<script src=\"/%s/static/scripts/%s\" type=\"text/javascript\"></script>'", "%", "(", "include", ",", "appname", ",", "file", ")", ")", "return", "XML", "(", "include", ")" ]
generates html to include the js scripts listed in /static/scripts/tools/sahana .
train
false
14,616
def index_alt(): s3_redirect_default(URL(f='person'))
[ "def", "index_alt", "(", ")", ":", "s3_redirect_default", "(", "URL", "(", "f", "=", "'person'", ")", ")" ]
module homepage for non-admin users when no cms content found .
train
false
14,617
def _encode_auth(auth): auth_s = unquote(auth) auth_bytes = auth_s.encode() encoded_bytes = base64.encodestring(auth_bytes) encoded = encoded_bytes.decode() return encoded.replace('\n', '')
[ "def", "_encode_auth", "(", "auth", ")", ":", "auth_s", "=", "unquote", "(", "auth", ")", "auth_bytes", "=", "auth_s", ".", "encode", "(", ")", "encoded_bytes", "=", "base64", ".", "encodestring", "(", "auth_bytes", ")", "encoded", "=", "encoded_bytes", ".", "decode", "(", ")", "return", "encoded", ".", "replace", "(", "'\\n'", ",", "''", ")" ]
a function compatible with python 2 .
train
true
14,618
def weighted_projected_graph(B, nodes, ratio=False): if B.is_multigraph(): raise nx.NetworkXError('not defined for multigraphs') if B.is_directed(): pred = B.pred G = nx.DiGraph() else: pred = B.adj G = nx.Graph() G.graph.update(B.graph) G.add_nodes_from(((n, B.node[n]) for n in nodes)) n_top = float((len(B) - len(nodes))) for u in nodes: unbrs = set(B[u]) nbrs2 = (set((n for nbr in unbrs for n in B[nbr])) - set([u])) for v in nbrs2: vnbrs = set(pred[v]) common = (unbrs & vnbrs) if (not ratio): weight = len(common) else: weight = (len(common) / n_top) G.add_edge(u, v, weight=weight) return G
[ "def", "weighted_projected_graph", "(", "B", ",", "nodes", ",", "ratio", "=", "False", ")", ":", "if", "B", ".", "is_multigraph", "(", ")", ":", "raise", "nx", ".", "NetworkXError", "(", "'not defined for multigraphs'", ")", "if", "B", ".", "is_directed", "(", ")", ":", "pred", "=", "B", ".", "pred", "G", "=", "nx", ".", "DiGraph", "(", ")", "else", ":", "pred", "=", "B", ".", "adj", "G", "=", "nx", ".", "Graph", "(", ")", "G", ".", "graph", ".", "update", "(", "B", ".", "graph", ")", "G", ".", "add_nodes_from", "(", "(", "(", "n", ",", "B", ".", "node", "[", "n", "]", ")", "for", "n", "in", "nodes", ")", ")", "n_top", "=", "float", "(", "(", "len", "(", "B", ")", "-", "len", "(", "nodes", ")", ")", ")", "for", "u", "in", "nodes", ":", "unbrs", "=", "set", "(", "B", "[", "u", "]", ")", "nbrs2", "=", "(", "set", "(", "(", "n", "for", "nbr", "in", "unbrs", "for", "n", "in", "B", "[", "nbr", "]", ")", ")", "-", "set", "(", "[", "u", "]", ")", ")", "for", "v", "in", "nbrs2", ":", "vnbrs", "=", "set", "(", "pred", "[", "v", "]", ")", "common", "=", "(", "unbrs", "&", "vnbrs", ")", "if", "(", "not", "ratio", ")", ":", "weight", "=", "len", "(", "common", ")", "else", ":", "weight", "=", "(", "len", "(", "common", ")", "/", "n_top", ")", "G", ".", "add_edge", "(", "u", ",", "v", ",", "weight", "=", "weight", ")", "return", "G" ]
returns a weighted projection of b onto one of its node sets .
train
false
14,619
def _validate_cert_format(name): cert_formats = ['cer', 'pfx'] if (name not in cert_formats): message = "Invalid certificate format '{0}' specified. Valid formats: {1}".format(name, cert_formats) raise SaltInvocationError(message)
[ "def", "_validate_cert_format", "(", "name", ")", ":", "cert_formats", "=", "[", "'cer'", ",", "'pfx'", "]", "if", "(", "name", "not", "in", "cert_formats", ")", ":", "message", "=", "\"Invalid certificate format '{0}' specified. Valid formats: {1}\"", ".", "format", "(", "name", ",", "cert_formats", ")", "raise", "SaltInvocationError", "(", "message", ")" ]
ensure that the certificate format .
train
true
14,620
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None): if (stride == 1): return slim.conv2d(inputs, num_outputs, kernel_size, stride=1, rate=rate, padding='SAME', scope=scope) else: kernel_size_effective = (kernel_size + ((kernel_size - 1) * (rate - 1))) pad_total = (kernel_size_effective - 1) pad_beg = (pad_total // 2) pad_end = (pad_total - pad_beg) inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]) return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride, rate=rate, padding='VALID', scope=scope)
[ "def", "conv2d_same", "(", "inputs", ",", "num_outputs", ",", "kernel_size", ",", "stride", ",", "rate", "=", "1", ",", "scope", "=", "None", ")", ":", "if", "(", "stride", "==", "1", ")", ":", "return", "slim", ".", "conv2d", "(", "inputs", ",", "num_outputs", ",", "kernel_size", ",", "stride", "=", "1", ",", "rate", "=", "rate", ",", "padding", "=", "'SAME'", ",", "scope", "=", "scope", ")", "else", ":", "kernel_size_effective", "=", "(", "kernel_size", "+", "(", "(", "kernel_size", "-", "1", ")", "*", "(", "rate", "-", "1", ")", ")", ")", "pad_total", "=", "(", "kernel_size_effective", "-", "1", ")", "pad_beg", "=", "(", "pad_total", "//", "2", ")", "pad_end", "=", "(", "pad_total", "-", "pad_beg", ")", "inputs", "=", "tf", ".", "pad", "(", "inputs", ",", "[", "[", "0", ",", "0", "]", ",", "[", "pad_beg", ",", "pad_end", "]", ",", "[", "pad_beg", ",", "pad_end", "]", ",", "[", "0", ",", "0", "]", "]", ")", "return", "slim", ".", "conv2d", "(", "inputs", ",", "num_outputs", ",", "kernel_size", ",", "stride", "=", "stride", ",", "rate", "=", "rate", ",", "padding", "=", "'VALID'", ",", "scope", "=", "scope", ")" ]
strided 2-d convolution with same padding .
train
false
14,623
def parse_schema_element(elt, files={}, repr_=Thier_repr(with_ns=False), skip_errors=False): return XmlSchemaParser(files, repr_=repr_, skip_errors=skip_errors).parse_schema(elt)
[ "def", "parse_schema_element", "(", "elt", ",", "files", "=", "{", "}", ",", "repr_", "=", "Thier_repr", "(", "with_ns", "=", "False", ")", ",", "skip_errors", "=", "False", ")", ":", "return", "XmlSchemaParser", "(", "files", ",", "repr_", "=", "repr_", ",", "skip_errors", "=", "skip_errors", ")", ".", "parse_schema", "(", "elt", ")" ]
parses a <xs:schema> element and returns a _schema object .
train
false
14,624
def force_content_type_jsonapi(test_client): def set_content_type(func): 'Returns a decorated version of ``func``, as described in the\n wrapper defined below.\n\n ' @wraps(func) def new_func(*args, **kw): 'Sets the correct :http:header:`Content-Type` headers\n before executing ``func(*args, **kw)``.\n\n ' if ('headers' not in kw): kw['headers'] = dict() headers = kw['headers'] if (('content_type' not in kw) and ('Content-Type' not in headers)): kw['content_type'] = JSONAPI_MIMETYPE return func(*args, **kw) return new_func test_client.patch = set_content_type(test_client.patch) test_client.post = set_content_type(test_client.post)
[ "def", "force_content_type_jsonapi", "(", "test_client", ")", ":", "def", "set_content_type", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "new_func", "(", "*", "args", ",", "**", "kw", ")", ":", "if", "(", "'headers'", "not", "in", "kw", ")", ":", "kw", "[", "'headers'", "]", "=", "dict", "(", ")", "headers", "=", "kw", "[", "'headers'", "]", "if", "(", "(", "'content_type'", "not", "in", "kw", ")", "and", "(", "'Content-Type'", "not", "in", "headers", ")", ")", ":", "kw", "[", "'content_type'", "]", "=", "JSONAPI_MIMETYPE", "return", "func", "(", "*", "args", ",", "**", "kw", ")", "return", "new_func", "test_client", ".", "patch", "=", "set_content_type", "(", "test_client", ".", "patch", ")", "test_client", ".", "post", "=", "set_content_type", "(", "test_client", ".", "post", ")" ]
ensures that all requests made by the specified flask test client that include data have the correct :http:header:content-type header .
train
false
14,627
def p_command_rem(p): p[0] = ('REM', p[1])
[ "def", "p_command_rem", "(", "p", ")", ":", "p", "[", "0", "]", "=", "(", "'REM'", ",", "p", "[", "1", "]", ")" ]
command : rem .
train
false