id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
40,250
@endpoint(u'/interface-data/get-books', postprocess=json)
def get_books(ctx, rd):
    '''
    Answer /interface-data/get-books: run a search against the library
    and return matching book metadata.

    Optional query parameters: ``num`` (page size, defaults to
    DEFAULT_NUMBER_OF_BOOKS) and ``search`` (query string).
    '''
    library_id, db, sorts, orders = get_basic_query_data(ctx, rd)
    try:
        num = int(rd.query.get(u'num', DEFAULT_NUMBER_OF_BOOKS))
    except Exception:
        raise HTTPNotFound(u'Invalid number of books: %r' % rd.query.get(u'num'))
    searchq = rd.query.get(u'search', u'')
    db = get_library_data(ctx, rd)[0]
    ans = {}
    mdata = ans[u'metadata'] = {}
    with db.safe_read_lock:
        try:
            ans[u'search_result'] = search_result(
                ctx, rd, db, searchq, num, 0,
                u','.join(sorts), u','.join(orders))
        except ParseException as err:
            raise HTTPBadRequest(u'Invalid search expression: %s' % as_unicode(err))
        for book_id in ans[u'search_result'][u'book_ids']:
            data = book_as_json(db, book_id)
            if data is not None:
                mdata[book_id] = data
    return ans
[ "@", "endpoint", "(", "u'/interface-data/get-books'", ",", "postprocess", "=", "json", ")", "def", "get_books", "(", "ctx", ",", "rd", ")", ":", "(", "library_id", ",", "db", ",", "sorts", ",", "orders", ")", "=", "get_basic_query_data", "(", "ctx", ",", "rd", ")", "try", ":", "num", "=", "int", "(", "rd", ".", "query", ".", "get", "(", "u'num'", ",", "DEFAULT_NUMBER_OF_BOOKS", ")", ")", "except", "Exception", ":", "raise", "HTTPNotFound", "(", "(", "u'Invalid number of books: %r'", "%", "rd", ".", "query", ".", "get", "(", "u'num'", ")", ")", ")", "searchq", "=", "rd", ".", "query", ".", "get", "(", "u'search'", ",", "u''", ")", "db", "=", "get_library_data", "(", "ctx", ",", "rd", ")", "[", "0", "]", "ans", "=", "{", "}", "mdata", "=", "ans", "[", "u'metadata'", "]", "=", "{", "}", "with", "db", ".", "safe_read_lock", ":", "try", ":", "ans", "[", "u'search_result'", "]", "=", "search_result", "(", "ctx", ",", "rd", ",", "db", ",", "searchq", ",", "num", ",", "0", ",", "u','", ".", "join", "(", "sorts", ")", ",", "u','", ".", "join", "(", "orders", ")", ")", "except", "ParseException", "as", "err", ":", "raise", "HTTPBadRequest", "(", "(", "u'Invalid search expression: %s'", "%", "as_unicode", "(", "err", ")", ")", ")", "for", "book_id", "in", "ans", "[", "u'search_result'", "]", "[", "u'book_ids'", "]", ":", "data", "=", "book_as_json", "(", "db", ",", "book_id", ")", "if", "(", "data", "is", "not", "None", ")", ":", "mdata", "[", "book_id", "]", "=", "data", "return", "ans" ]
get books for the specified query optional: ?library_id=<default library>&num=50&sort=timestamp .
train
false
40,251
def pre_delete_title(instance, **kwargs):
    '''Drop the deleted title's language from its page and, for draft
    titles, mark descendant pages pending for that language.'''
    page = instance.page
    languages = page.get_languages()
    if instance.language in languages:
        languages.remove(instance.language)
        page.update_languages(languages)
    if instance.publisher_is_draft:
        instance.page.mark_descendants_pending(instance.language)
[ "def", "pre_delete_title", "(", "instance", ",", "**", "kwargs", ")", ":", "page", "=", "instance", ".", "page", "page_languages", "=", "page", ".", "get_languages", "(", ")", "if", "(", "instance", ".", "language", "in", "page_languages", ")", ":", "page_languages", ".", "remove", "(", "instance", ".", "language", ")", "page", ".", "update_languages", "(", "page_languages", ")", "if", "instance", ".", "publisher_is_draft", ":", "instance", ".", "page", ".", "mark_descendants_pending", "(", "instance", ".", "language", ")" ]
remove the deleted title's language from its page and mark descendant pages pending for draft titles .
train
false
40,252
def remove_initial_data(apps, schema_editor):
    '''Intentional no-op migration hook; this function does nothing.'''
    return None
[ "def", "remove_initial_data", "(", "apps", ",", "schema_editor", ")", ":", "pass" ]
this function does nothing .
train
false
40,253
def hide_untranslated(language, site_id=None):
    '''Should untranslated pages in this language be hidden?

    Defaults to True when the language config carries no explicit
    ``hide_untranslated`` setting.
    '''
    config = get_language_object(language, site_id)
    return config.get('hide_untranslated', True)
[ "def", "hide_untranslated", "(", "language", ",", "site_id", "=", "None", ")", ":", "obj", "=", "get_language_object", "(", "language", ",", "site_id", ")", "return", "obj", ".", "get", "(", "'hide_untranslated'", ",", "True", ")" ]
should untranslated pages in this language be hidden? .
train
false
40,254
def get_integration_controller(service_name):
    '''Return the "<service> Settings" integration controller doc,
    throwing a translated error when the module cannot be loaded.'''
    doctype = u'{0} Settings'.format(service_name)
    try:
        return frappe.get_doc(doctype)
    except Exception:
        frappe.throw(_(u'Module {service} not found'.format(service=service_name)))
[ "def", "get_integration_controller", "(", "service_name", ")", ":", "try", ":", "return", "frappe", ".", "get_doc", "(", "u'{0} Settings'", ".", "format", "(", "service_name", ")", ")", "except", "Exception", ":", "frappe", ".", "throw", "(", "_", "(", "u'Module {service} not found'", ".", "format", "(", "service", "=", "service_name", ")", ")", ")" ]
returns integration controller module from app_name .
train
false
40,256
def HT_PHASOR(ds, count):
    '''Hilbert Transform - Phasor Components.

    Returns ``(None, None)`` when the underlying talib call yields
    nothing, otherwise the talib result unchanged.
    '''
    result = call_talib_with_ds(ds, count, talib.HT_PHASOR)
    return (None, None) if result is None else result
[ "def", "HT_PHASOR", "(", "ds", ",", "count", ")", ":", "ret", "=", "call_talib_with_ds", "(", "ds", ",", "count", ",", "talib", ".", "HT_PHASOR", ")", "if", "(", "ret", "is", "None", ")", ":", "ret", "=", "(", "None", ",", "None", ")", "return", "ret" ]
hilbert transform - phasor components .
train
false
40,257
def nn_movie(ureviews, reviews, uid, mid, k=1):
    '''Movie nearest-neighbour rating predictor.

    Correlates every movie row of ``ureviews`` with the (normalised)
    ratings of movie ``mid`` and walks the neighbours from most to least
    similar, remembering the rating user ``uid`` gave to the last rated
    neighbour seen; the prediction held after ``k`` neighbours is
    returned.  Falls back to 3.0 when no rated neighbour is found.
    '''
    target = ureviews[mid].copy()
    target -= target.mean()
    # Small epsilon avoids division by zero for constant rows.
    target /= target.std() + 1e-05
    similarity = np.dot(ureviews, target)
    # Most similar first.
    neighbours = similarity.argsort()[::-1]
    prediction = 3.0
    seen = 0
    for movie in neighbours:
        if movie == mid:
            continue
        if reviews[uid, movie] > 0:
            prediction = reviews[uid, movie]
        if seen == k:
            return prediction
        seen += 1
    return prediction
[ "def", "nn_movie", "(", "ureviews", ",", "reviews", ",", "uid", ",", "mid", ",", "k", "=", "1", ")", ":", "X", "=", "ureviews", "y", "=", "ureviews", "[", "mid", "]", ".", "copy", "(", ")", "y", "-=", "y", ".", "mean", "(", ")", "y", "/=", "(", "y", ".", "std", "(", ")", "+", "1e-05", ")", "corrs", "=", "np", ".", "dot", "(", "X", ",", "y", ")", "likes", "=", "corrs", ".", "argsort", "(", ")", "likes", "=", "likes", "[", ":", ":", "(", "-", "1", ")", "]", "c", "=", "0", "pred", "=", "3.0", "for", "ell", "in", "likes", ":", "if", "(", "ell", "==", "mid", ")", ":", "continue", "if", "(", "reviews", "[", "(", "uid", ",", "ell", ")", "]", ">", "0", ")", ":", "pred", "=", "reviews", "[", "(", "uid", ",", "ell", ")", "]", "if", "(", "c", "==", "k", ")", ":", "return", "pred", "c", "+=", "1", "return", "pred" ]
movie neighbor based classifier parameters ureviews : ndarray reviews : ndarray uid : int index of user mid : int index of movie k : int index of neighbor to return returns pred : float .
train
false
40,260
def show_lb(kwargs=None, call=None):
    '''Show the details of an existing load-balancer.

    Must be invoked as a function (``-f``/``--function``); requires a
    ``name`` entry in *kwargs*.
    '''
    if call != 'function':
        raise SaltCloudSystemExit('The show_lb function must be called with -f or --function.')
    if not kwargs or 'name' not in kwargs:
        log.error('Must specify name of load-balancer.')
        return False
    balancer = get_lb_conn(get_conn()).get_balancer(kwargs['name'])
    return _expand_balancer(balancer)
[ "def", "show_lb", "(", "kwargs", "=", "None", ",", "call", "=", "None", ")", ":", "if", "(", "call", "!=", "'function'", ")", ":", "raise", "SaltCloudSystemExit", "(", "'The show_lb function must be called with -f or --function.'", ")", "if", "(", "(", "not", "kwargs", ")", "or", "(", "'name'", "not", "in", "kwargs", ")", ")", ":", "log", ".", "error", "(", "'Must specify name of load-balancer.'", ")", "return", "False", "lb_conn", "=", "get_lb_conn", "(", "get_conn", "(", ")", ")", "return", "_expand_balancer", "(", "lb_conn", ".", "get_balancer", "(", "kwargs", "[", "'name'", "]", ")", ")" ]
show the details of an existing load-balancer .
train
true
40,261
def unmatched(match):
    '''Return the subject string with the matched span cut out.'''
    start, end = match.span(0)
    subject = match.string
    return subject[:start] + subject[end:]
[ "def", "unmatched", "(", "match", ")", ":", "(", "start", ",", "end", ")", "=", "match", ".", "span", "(", "0", ")", "return", "(", "match", ".", "string", "[", ":", "start", "]", "+", "match", ".", "string", "[", "end", ":", "]", ")" ]
return unmatched part of re .
train
true
40,262
def _writable_dir(path): return (os.path.isdir(path) and os.access(path, os.W_OK))
[ "def", "_writable_dir", "(", "path", ")", ":", "return", "(", "os", ".", "path", ".", "isdir", "(", "path", ")", "and", "os", ".", "access", "(", "path", ",", "os", ".", "W_OK", ")", ")" ]
whether path is a directory .
train
true
40,263
def check_already_managed_volume(vol_id):
    '''Check the Cinder DB for an already-managed volume with this id.

    Falsy ids are passed straight back (preserving the original
    short-circuit semantics); non-string or non-UUID ids yield False,
    otherwise the DB existence check result is returned.
    '''
    if not vol_id:
        return vol_id
    if not isinstance(vol_id, six.string_types):
        return False
    try:
        uuid.UUID(vol_id, version=4)
        return objects.Volume.exists(context.get_admin_context(), vol_id)
    except ValueError:
        return False
[ "def", "check_already_managed_volume", "(", "vol_id", ")", ":", "try", ":", "return", "(", "vol_id", "and", "isinstance", "(", "vol_id", ",", "six", ".", "string_types", ")", "and", "uuid", ".", "UUID", "(", "vol_id", ",", "version", "=", "4", ")", "and", "objects", ".", "Volume", ".", "exists", "(", "context", ".", "get_admin_context", "(", ")", ",", "vol_id", ")", ")", "except", "ValueError", ":", "return", "False" ]
check cinder db for already managed volume .
train
false
40,264
def list_secret_keys(user=None, gnupghome=None):
    '''List secret keys in the GPG keychain.

    user -- which user's keychain to access
    gnupghome -- optional GNUPGHOME directory override
    '''
    def _fmt_date(stamp):
        # GPG reports dates as epoch-second strings.
        return time.strftime('%Y-%m-%d', time.localtime(float(stamp)))

    keys = []
    for key in _list_keys(user, gnupghome, secret=True):
        entry = {
            'keyid': key['keyid'],
            'fingerprint': key['fingerprint'],
            'uids': key['uids'],
        }
        if key.get('expires'):
            entry['expires'] = _fmt_date(key['expires'])
        if key.get('date'):
            entry['created'] = _fmt_date(key['date'])
        if key.get('length'):
            entry['keyLength'] = key['length']
        if key.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[key['ownertrust']]
        if key.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[key['trust']]
        keys.append(entry)
    return keys
[ "def", "list_secret_keys", "(", "user", "=", "None", ",", "gnupghome", "=", "None", ")", ":", "_keys", "=", "[", "]", "for", "_key", "in", "_list_keys", "(", "user", ",", "gnupghome", ",", "secret", "=", "True", ")", ":", "tmp", "=", "{", "'keyid'", ":", "_key", "[", "'keyid'", "]", ",", "'fingerprint'", ":", "_key", "[", "'fingerprint'", "]", ",", "'uids'", ":", "_key", "[", "'uids'", "]", "}", "expires", "=", "_key", ".", "get", "(", "'expires'", ",", "None", ")", "date", "=", "_key", ".", "get", "(", "'date'", ",", "None", ")", "length", "=", "_key", ".", "get", "(", "'length'", ",", "None", ")", "owner_trust", "=", "_key", ".", "get", "(", "'ownertrust'", ",", "None", ")", "trust", "=", "_key", ".", "get", "(", "'trust'", ",", "None", ")", "if", "expires", ":", "tmp", "[", "'expires'", "]", "=", "time", ".", "strftime", "(", "'%Y-%m-%d'", ",", "time", ".", "localtime", "(", "float", "(", "_key", "[", "'expires'", "]", ")", ")", ")", "if", "date", ":", "tmp", "[", "'created'", "]", "=", "time", ".", "strftime", "(", "'%Y-%m-%d'", ",", "time", ".", "localtime", "(", "float", "(", "_key", "[", "'date'", "]", ")", ")", ")", "if", "length", ":", "tmp", "[", "'keyLength'", "]", "=", "_key", "[", "'length'", "]", "if", "owner_trust", ":", "tmp", "[", "'ownerTrust'", "]", "=", "LETTER_TRUST_DICT", "[", "_key", "[", "'ownertrust'", "]", "]", "if", "trust", ":", "tmp", "[", "'trust'", "]", "=", "LETTER_TRUST_DICT", "[", "_key", "[", "'trust'", "]", "]", "_keys", ".", "append", "(", "tmp", ")", "return", "_keys" ]
list secret keys in gpg keychain user which users keychain to access .
train
true
40,265
def webob_to_django_response(webob_response):
    '''Translate a WebOb response into a Django ``HttpResponse``,
    copying status, content type, body iterator and all headers.'''
    from django.http import HttpResponse
    response = HttpResponse(
        webob_response.app_iter,
        content_type=webob_response.content_type,
        status=webob_response.status_code,
    )
    for name, value in webob_response.headerlist:
        response[name] = value
    return response
[ "def", "webob_to_django_response", "(", "webob_response", ")", ":", "from", "django", ".", "http", "import", "HttpResponse", "django_response", "=", "HttpResponse", "(", "webob_response", ".", "app_iter", ",", "content_type", "=", "webob_response", ".", "content_type", ",", "status", "=", "webob_response", ".", "status_code", ")", "for", "(", "name", ",", "value", ")", "in", "webob_response", ".", "headerlist", ":", "django_response", "[", "name", "]", "=", "value", "return", "django_response" ]
returns a django response to the webob_response .
train
true
40,267
def _get_bootstrap_url(directory):
    '''Pick the bootstrap-script download URL matching the buildout
    version detected in *directory*, falling back to the default.'''
    version = _get_buildout_ver(directory)
    return _URL_VERSIONS.get(version, _URL_VERSIONS[DEFAULT_VER])
[ "def", "_get_bootstrap_url", "(", "directory", ")", ":", "v", "=", "_get_buildout_ver", "(", "directory", ")", "return", "_URL_VERSIONS", ".", "get", "(", "v", ",", "_URL_VERSIONS", "[", "DEFAULT_VER", "]", ")" ]
get the most appropriate download url for the bootstrap script .
train
true
40,270
def test_fit_sample_object():
    '''Fit-and-sample ClusterCentroids with an explicit KMeans estimator.'''
    cluster = KMeans(random_state=RND_SEED)
    cc = ClusterCentroids(ratio='auto', random_state=RND_SEED,
                          estimator=cluster)
    X_resampled, y_resampled = cc.fit_sample(X, Y)
    X_expected = np.array([
        [0.92923648, 0.76103773],
        [0.47104475, 0.44386323],
        [0.13347175, 0.12167502],
        [0.06738818, -0.529627],
        [0.17901516, 0.69860992],
        [0.094035, -2.55298982],
    ])
    y_expected = np.array([0, 0, 0, 1, 1, 1])
    assert_allclose(X_resampled, X_expected, rtol=R_TOL)
    assert_array_equal(y_resampled, y_expected)
[ "def", "test_fit_sample_object", "(", ")", ":", "ratio", "=", "'auto'", "cluster", "=", "KMeans", "(", "random_state", "=", "RND_SEED", ")", "cc", "=", "ClusterCentroids", "(", "ratio", "=", "ratio", ",", "random_state", "=", "RND_SEED", ",", "estimator", "=", "cluster", ")", "(", "X_resampled", ",", "y_resampled", ")", "=", "cc", ".", "fit_sample", "(", "X", ",", "Y", ")", "X_gt", "=", "np", ".", "array", "(", "[", "[", "0.92923648", ",", "0.76103773", "]", ",", "[", "0.47104475", ",", "0.44386323", "]", ",", "[", "0.13347175", ",", "0.12167502", "]", ",", "[", "0.06738818", ",", "(", "-", "0.529627", ")", "]", ",", "[", "0.17901516", ",", "0.69860992", "]", ",", "[", "0.094035", ",", "(", "-", "2.55298982", ")", "]", "]", ")", "y_gt", "=", "np", ".", "array", "(", "[", "0", ",", "0", ",", "0", ",", "1", ",", "1", ",", "1", "]", ")", "assert_allclose", "(", "X_resampled", ",", "X_gt", ",", "rtol", "=", "R_TOL", ")", "assert_array_equal", "(", "y_resampled", ",", "y_gt", ")" ]
test fit and sample using a kmeans object .
train
false
40,271
def _enable_profiling():
    '''Start a module-global cProfile session and register an atexit
    hook (``_profile_atexit``) to report stats when the program exits.'''
    import atexit
    import cProfile
    global _profiler
    _profiler = cProfile.Profile()
    _profiler.enable()
    atexit.register(_profile_atexit)
[ "def", "_enable_profiling", "(", ")", ":", "import", "cProfile", "import", "atexit", "global", "_profiler", "_profiler", "=", "cProfile", ".", "Profile", "(", ")", "_profiler", ".", "enable", "(", ")", "atexit", ".", "register", "(", "_profile_atexit", ")" ]
start profiling and register callback to print stats when the program exits .
train
true
40,274
def bundled_settings(debug):
    '''Settings for linking extensions against bundled libzmq.

    *debug* selects the Debug/Release library flavour on Windows.
    '''
    settings = {
        'libraries': [],
        'library_dirs': [],
        'include_dirs': [pjoin('bundled', 'zeromq', 'include')],
        'runtime_library_dirs': [],
    }
    if sys.platform.startswith('freebsd'):
        settings['libraries'].append('pthread')
    elif sys.platform.startswith('win'):
        plat = distutils.util.get_platform()
        temp = 'temp.%s-%i.%i' % (plat, sys.version_info[0], sys.version_info[1])
        suffix = ''
        if sys.version_info >= (3, 5):
            # Debug/ABI suffix is baked into the extension suffix on 3.5+.
            ext_suffix = distutils.sysconfig.get_config_var('EXT_SUFFIX')
            suffix = os.path.splitext(ext_suffix)[0]
        if debug:
            suffix = '_d' + suffix
            release = 'Debug'
        else:
            release = 'Release'
        settings['libraries'].append(libzmq_name + suffix)
        settings['library_dirs'].append(pjoin('build', temp, release, 'buildutils'))
    return settings
[ "def", "bundled_settings", "(", "debug", ")", ":", "settings", "=", "{", "}", "settings", "[", "'libraries'", "]", "=", "[", "]", "settings", "[", "'library_dirs'", "]", "=", "[", "]", "settings", "[", "'include_dirs'", "]", "=", "[", "pjoin", "(", "'bundled'", ",", "'zeromq'", ",", "'include'", ")", "]", "settings", "[", "'runtime_library_dirs'", "]", "=", "[", "]", "if", "sys", ".", "platform", ".", "startswith", "(", "'freebsd'", ")", ":", "settings", "[", "'libraries'", "]", ".", "append", "(", "'pthread'", ")", "elif", "sys", ".", "platform", ".", "startswith", "(", "'win'", ")", ":", "plat", "=", "distutils", ".", "util", ".", "get_platform", "(", ")", "temp", "=", "(", "'temp.%s-%i.%i'", "%", "(", "plat", ",", "sys", ".", "version_info", "[", "0", "]", ",", "sys", ".", "version_info", "[", "1", "]", ")", ")", "suffix", "=", "''", "if", "(", "sys", ".", "version_info", ">=", "(", "3", ",", "5", ")", ")", ":", "ext_suffix", "=", "distutils", ".", "sysconfig", ".", "get_config_var", "(", "'EXT_SUFFIX'", ")", "suffix", "=", "os", ".", "path", ".", "splitext", "(", "ext_suffix", ")", "[", "0", "]", "if", "debug", ":", "suffix", "=", "(", "'_d'", "+", "suffix", ")", "release", "=", "'Debug'", "else", ":", "release", "=", "'Release'", "settings", "[", "'libraries'", "]", ".", "append", "(", "(", "libzmq_name", "+", "suffix", ")", ")", "settings", "[", "'library_dirs'", "]", ".", "append", "(", "pjoin", "(", "'build'", ",", "temp", ",", "release", ",", "'buildutils'", ")", ")", "return", "settings" ]
settings for linking extensions against bundled libzmq .
train
false
40,275
def has_configure_files(build):
    '''Whether the distribution carries configuration files to work on.'''
    configure_files = build.distribution.configure_files
    return bool(configure_files)
[ "def", "has_configure_files", "(", "build", ")", ":", "return", "bool", "(", "build", ".", "distribution", ".", "configure_files", ")" ]
check if the distribution has configuration files to work on .
train
false
40,276
def get_label_line(sample_id, fasta_label, bc, corrected_bc, num_errors):
    '''Build the fasta/qual output label.

    sample_id -- enumerated SampleID
    fasta_label -- original fasta label (only its first token is kept)
    bc -- original barcode sequence
    corrected_bc -- corrected barcode sequence
    num_errors -- number of errors/mismatches in the barcode
    '''
    orig_label = fasta_label.split()[0]
    return '%s %s orig_bc=%s new_bc=%s bc_diffs=%d' % (
        sample_id, orig_label, bc, corrected_bc, num_errors)
[ "def", "get_label_line", "(", "sample_id", ",", "fasta_label", ",", "bc", ",", "corrected_bc", ",", "num_errors", ")", ":", "orig_label", "=", "fasta_label", ".", "split", "(", ")", "[", "0", "]", "final_label", "=", "(", "'%s %s orig_bc=%s new_bc=%s bc_diffs=%d'", "%", "(", "sample_id", ",", "orig_label", ",", "bc", ",", "corrected_bc", ",", "num_errors", ")", ")", "return", "final_label" ]
returns line to use for fasta/qual output label sample_id: enumerated sampleid fasta_label: original fasta label bc: original barcode sequence corrected_barcode: corrected barcode sequence num_errors: number of errors/mismatches in barcode sequence .
train
false
40,277
def omni_normtest(resids, axis=0):
    '''Omnibus test for normality.

    Parameters
    resids : array-like
    axis : int

    Returns ``(nan, nan)`` with a warning when fewer than 8 observations
    are available along *axis*.
    '''
    resids = np.asarray(resids)
    n = resids.shape[axis]
    if n < 8:
        from warnings import warn
        warn('omni_normtest is not valid with less than 8 observations; '
             '%i samples were given.' % int(n), ValueWarning)
        return (np.nan, np.nan)
    return stats.normaltest(resids, axis=axis)
[ "def", "omni_normtest", "(", "resids", ",", "axis", "=", "0", ")", ":", "resids", "=", "np", ".", "asarray", "(", "resids", ")", "n", "=", "resids", ".", "shape", "[", "axis", "]", "if", "(", "n", "<", "8", ")", ":", "from", "warnings", "import", "warn", "warn", "(", "(", "'omni_normtest is not valid with less than 8 observations; %i samples were given.'", "%", "int", "(", "n", ")", ")", ",", "ValueWarning", ")", "return", "(", "np", ".", "nan", ",", "np", ".", "nan", ")", "return", "stats", ".", "normaltest", "(", "resids", ",", "axis", "=", "axis", ")" ]
omnibus test for normality parameters resid : array-like axis : int .
train
false
40,284
def _split_rules(rules):
    '''Split rules with combined grants into individual rules.

    A rule whose ``cidr_ip``, ``source_group_name`` or
    ``source_group_group_id`` holds a list is expanded into one rule per
    value; the first multi-valued field (in that priority order) wins.
    '''
    expanded = []
    multi_fields = ('cidr_ip', 'source_group_name', 'source_group_group_id')
    for rule in rules:
        for field in multi_fields:
            values = rule.get(field)
            if values and not isinstance(values, string_types):
                for value in values:
                    copied = rule.copy()
                    copied[field] = value
                    expanded.append(copied)
                break
        else:
            # No multi-valued grant: keep the rule as-is.
            expanded.append(rule)
    return expanded
[ "def", "_split_rules", "(", "rules", ")", ":", "split", "=", "[", "]", "for", "rule", "in", "rules", ":", "cidr_ip", "=", "rule", ".", "get", "(", "'cidr_ip'", ")", "group_name", "=", "rule", ".", "get", "(", "'source_group_name'", ")", "group_id", "=", "rule", ".", "get", "(", "'source_group_group_id'", ")", "if", "(", "cidr_ip", "and", "(", "not", "isinstance", "(", "cidr_ip", ",", "string_types", ")", ")", ")", ":", "for", "ip", "in", "cidr_ip", ":", "_rule", "=", "rule", ".", "copy", "(", ")", "_rule", "[", "'cidr_ip'", "]", "=", "ip", "split", ".", "append", "(", "_rule", ")", "elif", "(", "group_name", "and", "(", "not", "isinstance", "(", "group_name", ",", "string_types", ")", ")", ")", ":", "for", "name", "in", "group_name", ":", "_rule", "=", "rule", ".", "copy", "(", ")", "_rule", "[", "'source_group_name'", "]", "=", "name", "split", ".", "append", "(", "_rule", ")", "elif", "(", "group_id", "and", "(", "not", "isinstance", "(", "group_id", ",", "string_types", ")", ")", ")", ":", "for", "_id", "in", "group_id", ":", "_rule", "=", "rule", ".", "copy", "(", ")", "_rule", "[", "'source_group_group_id'", "]", "=", "_id", "split", ".", "append", "(", "_rule", ")", "else", ":", "split", ".", "append", "(", "rule", ")", "return", "split" ]
split rules with combined grants into individual rules .
train
true
40,285
def load_world():
    '''Load the world map data set, caching a copy under ~/.ggplot.'''
    cache_dir = os.path.join(os.path.expanduser(u'~'), u'.ggplot')
    if not os.path.exists(cache_dir):
        os.mkdir(cache_dir)
    cache_file = os.path.join(cache_dir, u'world.csv')
    if os.path.exists(cache_file):
        return pd.read_csv(cache_file)
    sys.stderr.write(u'downloading world data set...')
    url = u'https://raw.githubusercontent.com/yhat/ggplot/master/data/world.csv'
    world = pd.read_csv(url)
    world.to_csv(cache_file, index=False)
    sys.stderr.write(u'done!')
    return world
[ "def", "load_world", "(", ")", ":", "_DATA_DIR", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "expanduser", "(", "u'~'", ")", ",", "u'.ggplot'", ")", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "_DATA_DIR", ")", ")", ":", "os", ".", "mkdir", "(", "_DATA_DIR", ")", "f", "=", "os", ".", "path", ".", "join", "(", "_DATA_DIR", ",", "u'world.csv'", ")", "if", "os", ".", "path", ".", "exists", "(", "f", ")", ":", "world", "=", "pd", ".", "read_csv", "(", "f", ")", "else", ":", "sys", ".", "stderr", ".", "write", "(", "u'downloading world data set...'", ")", "url", "=", "u'https://raw.githubusercontent.com/yhat/ggplot/master/data/world.csv'", "world", "=", "pd", ".", "read_csv", "(", "url", ")", "world", ".", "to_csv", "(", "f", ",", "index", "=", "False", ")", "sys", ".", "stderr", ".", "write", "(", "u'done!'", ")", "return", "world" ]
load world map data .
train
false
40,286
@click.command('remote-reset-url')
@click.argument('app')
def remote_reset_url(app):
    '''Reset an app's git remote URL to the official frappe repository.'''
    official_url = 'https://github.com/frappe/{}.git'.format(app)
    set_git_remote_url(official_url)
[ "@", "click", ".", "command", "(", "'remote-reset-url'", ")", "@", "click", ".", "argument", "(", "'app'", ")", "def", "remote_reset_url", "(", "app", ")", ":", "git_url", "=", "'https://github.com/frappe/{}.git'", ".", "format", "(", "app", ")", "set_git_remote_url", "(", "git_url", ")" ]
reset app remote url to frappe official .
train
false
40,287
def run_wait(name, location='\\'):
    '''Run a scheduled Windows task and return when it finishes.

    Returns True once the task has finished, False when it could not be
    started, or a message string when it is missing / already running.
    '''
    if name not in list_tasks(location):
        return '{0} not found in {1}'.format(name, location)
    pythoncom.CoInitialize()
    task_service = win32com.client.Dispatch('Schedule.Service')
    task_service.Connect()
    task = task_service.GetFolder(location).GetTask(name)
    if task.State == TASK_STATE_RUNNING:
        return 'Task already running'
    try:
        task.Run('')
        # Give the scheduler a moment to register the task as running.
        time.sleep(1)
        running = True
    except pythoncom.com_error:
        return False
    # Poll the running-task enumeration until our task drops out of it.
    while running:
        running = False
        try:
            active = task_service.GetRunningTasks(0)
            if active.Count:
                for item in active:
                    if item.Name == name:
                        running = True
        except pythoncom.com_error:
            running = False
    return True
[ "def", "run_wait", "(", "name", ",", "location", "=", "'\\\\'", ")", ":", "if", "(", "name", "not", "in", "list_tasks", "(", "location", ")", ")", ":", "return", "'{0} not found in {1}'", ".", "format", "(", "name", ",", "location", ")", "pythoncom", ".", "CoInitialize", "(", ")", "task_service", "=", "win32com", ".", "client", ".", "Dispatch", "(", "'Schedule.Service'", ")", "task_service", ".", "Connect", "(", ")", "task_folder", "=", "task_service", ".", "GetFolder", "(", "location", ")", "task", "=", "task_folder", ".", "GetTask", "(", "name", ")", "if", "(", "task", ".", "State", "==", "TASK_STATE_RUNNING", ")", ":", "return", "'Task already running'", "try", ":", "task", ".", "Run", "(", "''", ")", "time", ".", "sleep", "(", "1", ")", "running", "=", "True", "except", "pythoncom", ".", "com_error", ":", "return", "False", "while", "running", ":", "running", "=", "False", "try", ":", "running_tasks", "=", "task_service", ".", "GetRunningTasks", "(", "0", ")", "if", "running_tasks", ".", "Count", ":", "for", "item", "in", "running_tasks", ":", "if", "(", "item", ".", "Name", "==", "name", ")", ":", "running", "=", "True", "except", "pythoncom", ".", "com_error", ":", "running", "=", "False", "return", "True" ]
run a scheduled task and return when the task finishes .
train
true
40,288
def shared_floatx_zeros_matching(shared_variable, name=None, **kwargs):
    '''Create a zero-filled shared variable with the same shape and
    broadcast pattern as *shared_variable*.

    Raises ValueError when the argument is not a shared variable.
    '''
    if not is_shared_variable(shared_variable):
        raise ValueError('argument must be a shared variable')
    shape = shared_variable.get_value().shape
    return shared_floatx_zeros(shape, name=name,
                               broadcastable=shared_variable.broadcastable,
                               **kwargs)
[ "def", "shared_floatx_zeros_matching", "(", "shared_variable", ",", "name", "=", "None", ",", "**", "kwargs", ")", ":", "if", "(", "not", "is_shared_variable", "(", "shared_variable", ")", ")", ":", "raise", "ValueError", "(", "'argument must be a shared variable'", ")", "return", "shared_floatx_zeros", "(", "shared_variable", ".", "get_value", "(", ")", ".", "shape", ",", "name", "=", "name", ",", "broadcastable", "=", "shared_variable", ".", "broadcastable", ",", "**", "kwargs", ")" ]
create another shared variable with matching shape and broadcast .
train
false
40,292
def exec_action(module, action, module_parameter=None, action_parameter=None,
                state_only=False):
    '''Execute an arbitrary eselect module action.

    Returns the output lines, True when *state_only* is set and no error
    occurred, or False on error / empty output.
    '''
    command = 'eselect --brief --colour=no {0} {1} {2} {3}'.format(
        module, module_parameter or '', action, action_parameter or '')
    output = __salt__['cmd.run'](command, python_shell=False)
    lines = output.strip().split('\n')
    if lines[0].startswith('!!! Error'):
        return False
    if state_only:
        return True
    if len(lines) < 1:
        return False
    if len(lines) == 1 and not lines[0].strip():
        return False
    return lines
[ "def", "exec_action", "(", "module", ",", "action", ",", "module_parameter", "=", "None", ",", "action_parameter", "=", "None", ",", "state_only", "=", "False", ")", ":", "out", "=", "__salt__", "[", "'cmd.run'", "]", "(", "'eselect --brief --colour=no {0} {1} {2} {3}'", ".", "format", "(", "module", ",", "(", "module_parameter", "or", "''", ")", ",", "action", ",", "(", "action_parameter", "or", "''", ")", ")", ",", "python_shell", "=", "False", ")", "out", "=", "out", ".", "strip", "(", ")", ".", "split", "(", "'\\n'", ")", "if", "out", "[", "0", "]", ".", "startswith", "(", "'!!! Error'", ")", ":", "return", "False", "if", "state_only", ":", "return", "True", "if", "(", "len", "(", "out", ")", "<", "1", ")", ":", "return", "False", "if", "(", "(", "len", "(", "out", ")", "==", "1", ")", "and", "(", "not", "out", "[", "0", "]", ".", "strip", "(", ")", ")", ")", ":", "return", "False", "return", "out" ]
execute an arbitrary action on a module .
train
true
40,293
def delete_host(mac=None, name=None):
    '''Delete the DHCP host entry with the given MAC or name via OMAPI.

    Returns True on success, False when the host was not found or the
    delete failed, None on an unexpected lookup response.
    '''
    if not (mac or name):
        raise TypeError('At least one argument is required')
    conn = _conn()
    msg = omapi.OmapiMessage.open('host')
    if mac:
        msg.obj.append(('hardware-address', omapi.pack_mac(mac)))
        # hardware-type 1 == ethernet
        msg.obj.append(('hardware-type', struct.pack('!I', 1)))
    if name:
        msg.obj.append(('name', name))
    response = conn.query_server(msg)
    if response.opcode != omapi.OMAPI_OP_UPDATE:
        return None
    if response.handle == 0:
        return False
    response = conn.query_server(omapi.OmapiMessage.delete(response.handle))
    return response.opcode == omapi.OMAPI_OP_STATUS
[ "def", "delete_host", "(", "mac", "=", "None", ",", "name", "=", "None", ")", ":", "if", "(", "not", "(", "mac", "or", "name", ")", ")", ":", "raise", "TypeError", "(", "'At least one argument is required'", ")", "o", "=", "_conn", "(", ")", "msg", "=", "omapi", ".", "OmapiMessage", ".", "open", "(", "'host'", ")", "if", "mac", ":", "msg", ".", "obj", ".", "append", "(", "(", "'hardware-address'", ",", "omapi", ".", "pack_mac", "(", "mac", ")", ")", ")", "msg", ".", "obj", ".", "append", "(", "(", "'hardware-type'", ",", "struct", ".", "pack", "(", "'!I'", ",", "1", ")", ")", ")", "if", "name", ":", "msg", ".", "obj", ".", "append", "(", "(", "'name'", ",", "name", ")", ")", "response", "=", "o", ".", "query_server", "(", "msg", ")", "if", "(", "response", ".", "opcode", "!=", "omapi", ".", "OMAPI_OP_UPDATE", ")", ":", "return", "None", "if", "(", "response", ".", "handle", "==", "0", ")", ":", "return", "False", "response", "=", "o", ".", "query_server", "(", "omapi", ".", "OmapiMessage", ".", "delete", "(", "response", ".", "handle", ")", ")", "if", "(", "response", ".", "opcode", "!=", "omapi", ".", "OMAPI_OP_STATUS", ")", ":", "return", "False", "return", "True" ]
delete the host with the given mac or name .
train
false
40,294
@must_be_logged_in
def delete_external_identity(auth, **kwargs):
    '''Remove a single external identity from the logged-in user.'''
    identity = request.get_json().get('identity')
    if not identity:
        raise HTTPError(http.BAD_REQUEST)
    for service in auth.user.external_identity:
        if identity in auth.user.external_identity[service]:
            auth.user.external_identity[service].pop(identity)
            # Drop the service bucket entirely once it is empty.
            if len(auth.user.external_identity[service]) == 0:
                auth.user.external_identity.pop(service)
            auth.user.save()
            return
    raise HTTPError(http.NOT_FOUND, 'Unable to find requested identity')
[ "@", "must_be_logged_in", "def", "delete_external_identity", "(", "auth", ",", "**", "kwargs", ")", ":", "data", "=", "request", ".", "get_json", "(", ")", "identity", "=", "data", ".", "get", "(", "'identity'", ")", "if", "(", "not", "identity", ")", ":", "raise", "HTTPError", "(", "http", ".", "BAD_REQUEST", ")", "for", "service", "in", "auth", ".", "user", ".", "external_identity", ":", "if", "(", "identity", "in", "auth", ".", "user", ".", "external_identity", "[", "service", "]", ")", ":", "auth", ".", "user", ".", "external_identity", "[", "service", "]", ".", "pop", "(", "identity", ")", "if", "(", "len", "(", "auth", ".", "user", ".", "external_identity", "[", "service", "]", ")", "==", "0", ")", ":", "auth", ".", "user", ".", "external_identity", ".", "pop", "(", "service", ")", "auth", ".", "user", ".", "save", "(", ")", "return", "raise", "HTTPError", "(", "http", ".", "NOT_FOUND", ",", "'Unable to find requested identity'", ")" ]
removes single external identity from user .
train
false
40,295
@login_required def edit_alias(request, project_slug, alias_id=None): proj = get_object_or_404(Project.objects.for_admin_user(request.user), slug=project_slug) if alias_id: alias = proj.aliases.get(pk=alias_id) form = AliasForm(instance=alias, data=(request.POST or None)) else: form = AliasForm((request.POST or None)) if ((request.method == 'POST') and form.is_valid()): alias = form.save() return HttpResponseRedirect(alias.project.get_absolute_url()) return render_to_response('projects/alias_edit.html', {'form': form}, context_instance=RequestContext(request))
[ "@", "login_required", "def", "edit_alias", "(", "request", ",", "project_slug", ",", "alias_id", "=", "None", ")", ":", "proj", "=", "get_object_or_404", "(", "Project", ".", "objects", ".", "for_admin_user", "(", "request", ".", "user", ")", ",", "slug", "=", "project_slug", ")", "if", "alias_id", ":", "alias", "=", "proj", ".", "aliases", ".", "get", "(", "pk", "=", "alias_id", ")", "form", "=", "AliasForm", "(", "instance", "=", "alias", ",", "data", "=", "(", "request", ".", "POST", "or", "None", ")", ")", "else", ":", "form", "=", "AliasForm", "(", "(", "request", ".", "POST", "or", "None", ")", ")", "if", "(", "(", "request", ".", "method", "==", "'POST'", ")", "and", "form", ".", "is_valid", "(", ")", ")", ":", "alias", "=", "form", ".", "save", "(", ")", "return", "HttpResponseRedirect", "(", "alias", ".", "project", ".", "get_absolute_url", "(", ")", ")", "return", "render_to_response", "(", "'projects/alias_edit.html'", ",", "{", "'form'", ":", "form", "}", ",", "context_instance", "=", "RequestContext", "(", "request", ")", ")" ]
edit project alias form view .
train
false
40,296
def createRecordSensor(network, name, dataSource): regionType = 'py.RecordSensor' regionParams = json.dumps({'verbosity': _VERBOSITY}) network.addRegion(name, regionType, regionParams) sensorRegion = network.regions[name].getSelf() sensorRegion.encoder = createEncoder() sensorRegion.dataSource = dataSource return sensorRegion
[ "def", "createRecordSensor", "(", "network", ",", "name", ",", "dataSource", ")", ":", "regionType", "=", "'py.RecordSensor'", "regionParams", "=", "json", ".", "dumps", "(", "{", "'verbosity'", ":", "_VERBOSITY", "}", ")", "network", ".", "addRegion", "(", "name", ",", "regionType", ",", "regionParams", ")", "sensorRegion", "=", "network", ".", "regions", "[", "name", "]", ".", "getSelf", "(", ")", "sensorRegion", ".", "encoder", "=", "createEncoder", "(", ")", "sensorRegion", ".", "dataSource", "=", "dataSource", "return", "sensorRegion" ]
creates a recordsensor region that allows us to specify a file record stream as the input source .
train
true
40,297
def _get_client_id_from_environ(): assert ('OAUTH_CLIENT_ID' in os.environ) return os.environ['OAUTH_CLIENT_ID']
[ "def", "_get_client_id_from_environ", "(", ")", ":", "assert", "(", "'OAUTH_CLIENT_ID'", "in", "os", ".", "environ", ")", "return", "os", ".", "environ", "[", "'OAUTH_CLIENT_ID'", "]" ]
returns client id based on values stored in os .
train
false
40,298
@register_uncanonicalize @gof.local_optimizer([T.neg]) def local_max_to_min(node): if ((node.op == T.neg) and node.inputs[0].owner): max = node.inputs[0] if (max.owner and isinstance(max.owner.op, CAReduce) and (max.owner.op.scalar_op == scal.maximum)): neg = max.owner.inputs[0] if (neg.owner and (neg.owner.op == T.neg)): return [CAReduce(scal.minimum, max.owner.op.axis)(neg.owner.inputs[0])] return False
[ "@", "register_uncanonicalize", "@", "gof", ".", "local_optimizer", "(", "[", "T", ".", "neg", "]", ")", "def", "local_max_to_min", "(", "node", ")", ":", "if", "(", "(", "node", ".", "op", "==", "T", ".", "neg", ")", "and", "node", ".", "inputs", "[", "0", "]", ".", "owner", ")", ":", "max", "=", "node", ".", "inputs", "[", "0", "]", "if", "(", "max", ".", "owner", "and", "isinstance", "(", "max", ".", "owner", ".", "op", ",", "CAReduce", ")", "and", "(", "max", ".", "owner", ".", "op", ".", "scalar_op", "==", "scal", ".", "maximum", ")", ")", ":", "neg", "=", "max", ".", "owner", ".", "inputs", "[", "0", "]", "if", "(", "neg", ".", "owner", "and", "(", "neg", ".", "owner", ".", "op", "==", "T", ".", "neg", ")", ")", ":", "return", "[", "CAReduce", "(", "scal", ".", "minimum", ",", "max", ".", "owner", ".", "op", ".", "axis", ")", "(", "neg", ".", "owner", ".", "inputs", "[", "0", "]", ")", "]", "return", "False" ]
change -(max) to min .
train
false
40,299
def _is_dense(x): if (not isinstance(x, (scipy.sparse.spmatrix, np.ndarray))): raise NotImplementedError('this function should only be called on sparse.scipy.sparse.spmatrix or numpy.ndarray, not,', x) return isinstance(x, np.ndarray)
[ "def", "_is_dense", "(", "x", ")", ":", "if", "(", "not", "isinstance", "(", "x", ",", "(", "scipy", ".", "sparse", ".", "spmatrix", ",", "np", ".", "ndarray", ")", ")", ")", ":", "raise", "NotImplementedError", "(", "'this function should only be called on sparse.scipy.sparse.spmatrix or numpy.ndarray, not,'", ",", "x", ")", "return", "isinstance", "(", "x", ",", "np", ".", "ndarray", ")" ]
returns boolean true unless x is a l{scipy .
train
false
40,300
def execute_manager(settings_mod, argv=None): warnings.warn("The 'execute_manager' function is deprecated, you likely need to update your 'manage.py'; please see the Django 1.4 release notes (https://docs.djangoproject.com/en/dev/releases/1.4/).", PendingDeprecationWarning) setup_environ(settings_mod) utility = ManagementUtility(argv) utility.execute()
[ "def", "execute_manager", "(", "settings_mod", ",", "argv", "=", "None", ")", ":", "warnings", ".", "warn", "(", "\"The 'execute_manager' function is deprecated, you likely need to update your 'manage.py'; please see the Django 1.4 release notes (https://docs.djangoproject.com/en/dev/releases/1.4/).\"", ",", "PendingDeprecationWarning", ")", "setup_environ", "(", "settings_mod", ")", "utility", "=", "ManagementUtility", "(", "argv", ")", "utility", ".", "execute", "(", ")" ]
like execute_from_command_line() .
train
false
40,301
def test_values_only_input(df_with_cat_index): bar_builder = BarBuilder(df_with_cat_index, values='col1') bar_builder.create() assert (bar_builder.attributes['label'].columns[0] == 'index')
[ "def", "test_values_only_input", "(", "df_with_cat_index", ")", ":", "bar_builder", "=", "BarBuilder", "(", "df_with_cat_index", ",", "values", "=", "'col1'", ")", "bar_builder", ".", "create", "(", ")", "assert", "(", "bar_builder", ".", "attributes", "[", "'label'", "]", ".", "columns", "[", "0", "]", "==", "'index'", ")" ]
given values only input .
train
false
40,302
def erase_menu(stdscr, menu_y): stdscr.move(menu_y, 0) stdscr.clrtoeol() stdscr.move((menu_y + 1), 0) stdscr.clrtoeol()
[ "def", "erase_menu", "(", "stdscr", ",", "menu_y", ")", ":", "stdscr", ".", "move", "(", "menu_y", ",", "0", ")", "stdscr", ".", "clrtoeol", "(", ")", "stdscr", ".", "move", "(", "(", "menu_y", "+", "1", ")", ",", "0", ")", "stdscr", ".", "clrtoeol", "(", ")" ]
clear the space where the menu resides .
train
false
40,304
def items_to_dict(items): res = collections.defaultdict(list) for (k, v) in items: res[k].append(v) return normalize_dict(dict(res))
[ "def", "items_to_dict", "(", "items", ")", ":", "res", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "(", "k", ",", "v", ")", "in", "items", ":", "res", "[", "k", "]", ".", "append", "(", "v", ")", "return", "normalize_dict", "(", "dict", "(", "res", ")", ")" ]
converts list of tuples to dictionary with duplicate keys converted to lists .
train
true
40,305
def is_auto_address_subnet(subnet): modes = [const.IPV6_SLAAC, const.DHCPV6_STATELESS] return ((subnet['ipv6_address_mode'] in modes) or (subnet['ipv6_ra_mode'] in modes))
[ "def", "is_auto_address_subnet", "(", "subnet", ")", ":", "modes", "=", "[", "const", ".", "IPV6_SLAAC", ",", "const", ".", "DHCPV6_STATELESS", "]", "return", "(", "(", "subnet", "[", "'ipv6_address_mode'", "]", "in", "modes", ")", "or", "(", "subnet", "[", "'ipv6_ra_mode'", "]", "in", "modes", ")", ")" ]
check if subnet is an auto address subnet .
train
false
40,306
def _check_rows_and_columns(a, b): check_consistent_length(*a) check_consistent_length(*b) checks = (lambda x: check_array(x, ensure_2d=False)) (a_rows, a_cols) = map(checks, a) (b_rows, b_cols) = map(checks, b) return (a_rows, a_cols, b_rows, b_cols)
[ "def", "_check_rows_and_columns", "(", "a", ",", "b", ")", ":", "check_consistent_length", "(", "*", "a", ")", "check_consistent_length", "(", "*", "b", ")", "checks", "=", "(", "lambda", "x", ":", "check_array", "(", "x", ",", "ensure_2d", "=", "False", ")", ")", "(", "a_rows", ",", "a_cols", ")", "=", "map", "(", "checks", ",", "a", ")", "(", "b_rows", ",", "b_cols", ")", "=", "map", "(", "checks", ",", "b", ")", "return", "(", "a_rows", ",", "a_cols", ",", "b_rows", ",", "b_cols", ")" ]
unpacks the row and column arrays and checks their shape .
train
false
40,308
def remove_dead_links(directory, verbose=0): for (dirpath, dirname, filenames) in walk(directory): for filename in (dirnames + filenames): src = join(dirpath, filename) if (islink(src) and (not exists(src))): if verbose: print('remove dead link', src) remove(src)
[ "def", "remove_dead_links", "(", "directory", ",", "verbose", "=", "0", ")", ":", "for", "(", "dirpath", ",", "dirname", ",", "filenames", ")", "in", "walk", "(", "directory", ")", ":", "for", "filename", "in", "(", "dirnames", "+", "filenames", ")", ":", "src", "=", "join", "(", "dirpath", ",", "filename", ")", "if", "(", "islink", "(", "src", ")", "and", "(", "not", "exists", "(", "src", ")", ")", ")", ":", "if", "verbose", ":", "print", "(", "'remove dead link'", ",", "src", ")", "remove", "(", "src", ")" ]
recursively traverse directory and remove all dead links .
train
false
40,309
def chi2(X, y): X = check_array(X, accept_sparse='csr') if np.any(((X.data if issparse(X) else X) < 0)): raise ValueError('Input X must be non-negative.') Y = LabelBinarizer().fit_transform(y) if (Y.shape[1] == 1): Y = np.append((1 - Y), Y, axis=1) observed = safe_sparse_dot(Y.T, X) feature_count = X.sum(axis=0).reshape(1, (-1)) class_prob = Y.mean(axis=0).reshape(1, (-1)) expected = np.dot(class_prob.T, feature_count) return _chisquare(observed, expected)
[ "def", "chi2", "(", "X", ",", "y", ")", ":", "X", "=", "check_array", "(", "X", ",", "accept_sparse", "=", "'csr'", ")", "if", "np", ".", "any", "(", "(", "(", "X", ".", "data", "if", "issparse", "(", "X", ")", "else", "X", ")", "<", "0", ")", ")", ":", "raise", "ValueError", "(", "'Input X must be non-negative.'", ")", "Y", "=", "LabelBinarizer", "(", ")", ".", "fit_transform", "(", "y", ")", "if", "(", "Y", ".", "shape", "[", "1", "]", "==", "1", ")", ":", "Y", "=", "np", ".", "append", "(", "(", "1", "-", "Y", ")", ",", "Y", ",", "axis", "=", "1", ")", "observed", "=", "safe_sparse_dot", "(", "Y", ".", "T", ",", "X", ")", "feature_count", "=", "X", ".", "sum", "(", "axis", "=", "0", ")", ".", "reshape", "(", "1", ",", "(", "-", "1", ")", ")", "class_prob", "=", "Y", ".", "mean", "(", "axis", "=", "0", ")", ".", "reshape", "(", "1", ",", "(", "-", "1", ")", ")", "expected", "=", "np", ".", "dot", "(", "class_prob", ".", "T", ",", "feature_count", ")", "return", "_chisquare", "(", "observed", ",", "expected", ")" ]
compute chi-squared stats between each non-negative feature and class .
train
false
40,310
def markdownFromFile(*args, **kwargs): pos = [u'input', u'output', u'extensions', u'encoding'] c = 0 for arg in args: if (pos[c] not in kwargs): kwargs[pos[c]] = arg c += 1 if (c == len(pos)): break md = Markdown(**kwargs) md.convertFile(kwargs.get(u'input', None), kwargs.get(u'output', None), kwargs.get(u'encoding', None))
[ "def", "markdownFromFile", "(", "*", "args", ",", "**", "kwargs", ")", ":", "pos", "=", "[", "u'input'", ",", "u'output'", ",", "u'extensions'", ",", "u'encoding'", "]", "c", "=", "0", "for", "arg", "in", "args", ":", "if", "(", "pos", "[", "c", "]", "not", "in", "kwargs", ")", ":", "kwargs", "[", "pos", "[", "c", "]", "]", "=", "arg", "c", "+=", "1", "if", "(", "c", "==", "len", "(", "pos", ")", ")", ":", "break", "md", "=", "Markdown", "(", "**", "kwargs", ")", "md", ".", "convertFile", "(", "kwargs", ".", "get", "(", "u'input'", ",", "None", ")", ",", "kwargs", ".", "get", "(", "u'output'", ",", "None", ")", ",", "kwargs", ".", "get", "(", "u'encoding'", ",", "None", ")", ")" ]
read markdown code from a file and write it to a file or a stream .
train
false
40,311
def reset_globals(): global _SESSION _SESSION = None
[ "def", "reset_globals", "(", ")", ":", "global", "_SESSION", "_SESSION", "=", "None" ]
testing method to reset globals .
train
false
40,312
def sanitize_timeout(timeout): if (timeout > (((30 * 24) * 60) * 60)): timeout += time.time() return timeout
[ "def", "sanitize_timeout", "(", "timeout", ")", ":", "if", "(", "timeout", ">", "(", "(", "(", "30", "*", "24", ")", "*", "60", ")", "*", "60", ")", ")", ":", "timeout", "+=", "time", ".", "time", "(", ")", "return", "timeout" ]
sanitize a timeout value to use an absolute expiration time if the delta is greater than 30 days .
train
false
40,313
def _pb_timestamp_to_rfc3339(timestamp_pb): timestamp = _pb_timestamp_to_datetime(timestamp_pb) return _datetime_to_rfc3339(timestamp)
[ "def", "_pb_timestamp_to_rfc3339", "(", "timestamp_pb", ")", ":", "timestamp", "=", "_pb_timestamp_to_datetime", "(", "timestamp_pb", ")", "return", "_datetime_to_rfc3339", "(", "timestamp", ")" ]
convert a timestamp protobuf to an rfc 3339 string .
train
false
40,314
def quo_z(p, q, x): delta = ((degree(p, x) - degree(q, x)) + 1) return quo(((Abs(LC(q, x)) ** delta) * p), q, x)
[ "def", "quo_z", "(", "p", ",", "q", ",", "x", ")", ":", "delta", "=", "(", "(", "degree", "(", "p", ",", "x", ")", "-", "degree", "(", "q", ",", "x", ")", ")", "+", "1", ")", "return", "quo", "(", "(", "(", "Abs", "(", "LC", "(", "q", ",", "x", ")", ")", "**", "delta", ")", "*", "p", ")", ",", "q", ",", "x", ")" ]
intended mainly for p .
train
false
40,315
def thrift2json(tft): if isinstance(tft, type(None)): return None if isinstance(tft, (float, int, long, complex, basestring)): return tft if isinstance(tft, dict): d = {} for (key, val) in tft.iteritems(): d[key] = thrift2json(val) return d if isinstance(tft, list): return [thrift2json(x) for x in tft] if isinstance(tft, set): return dict(((x, True) for x in tft)) json = {} d = {} if hasattr(tft, '__dict__'): d = tft.__dict__ elif hasattr(tft, '__slots__'): d = tft.__slots__ else: return {} for k in d: v = getattr(tft, k) json[k] = thrift2json(v) return json
[ "def", "thrift2json", "(", "tft", ")", ":", "if", "isinstance", "(", "tft", ",", "type", "(", "None", ")", ")", ":", "return", "None", "if", "isinstance", "(", "tft", ",", "(", "float", ",", "int", ",", "long", ",", "complex", ",", "basestring", ")", ")", ":", "return", "tft", "if", "isinstance", "(", "tft", ",", "dict", ")", ":", "d", "=", "{", "}", "for", "(", "key", ",", "val", ")", "in", "tft", ".", "iteritems", "(", ")", ":", "d", "[", "key", "]", "=", "thrift2json", "(", "val", ")", "return", "d", "if", "isinstance", "(", "tft", ",", "list", ")", ":", "return", "[", "thrift2json", "(", "x", ")", "for", "x", "in", "tft", "]", "if", "isinstance", "(", "tft", ",", "set", ")", ":", "return", "dict", "(", "(", "(", "x", ",", "True", ")", "for", "x", "in", "tft", ")", ")", "json", "=", "{", "}", "d", "=", "{", "}", "if", "hasattr", "(", "tft", ",", "'__dict__'", ")", ":", "d", "=", "tft", ".", "__dict__", "elif", "hasattr", "(", "tft", ",", "'__slots__'", ")", ":", "d", "=", "tft", ".", "__slots__", "else", ":", "return", "{", "}", "for", "k", "in", "d", ":", "v", "=", "getattr", "(", "tft", ",", "k", ")", "json", "[", "k", "]", "=", "thrift2json", "(", "v", ")", "return", "json" ]
convert a thrift structure to a json compatible dictionary by recursing over the dictionary .
train
false
40,320
def quota_update(context, project_id, resource, limit): return IMPL.quota_update(context, project_id, resource, limit)
[ "def", "quota_update", "(", "context", ",", "project_id", ",", "resource", ",", "limit", ")", ":", "return", "IMPL", ".", "quota_update", "(", "context", ",", "project_id", ",", "resource", ",", "limit", ")" ]
update a quota or raise if it does not exist .
train
false
40,321
def perform_flag(request, comment): (flag, created) = comments.models.CommentFlag.objects.get_or_create(comment=comment, user=request.user, flag=comments.models.CommentFlag.SUGGEST_REMOVAL) signals.comment_was_flagged.send(sender=comment.__class__, comment=comment, flag=flag, created=created, request=request)
[ "def", "perform_flag", "(", "request", ",", "comment", ")", ":", "(", "flag", ",", "created", ")", "=", "comments", ".", "models", ".", "CommentFlag", ".", "objects", ".", "get_or_create", "(", "comment", "=", "comment", ",", "user", "=", "request", ".", "user", ",", "flag", "=", "comments", ".", "models", ".", "CommentFlag", ".", "SUGGEST_REMOVAL", ")", "signals", ".", "comment_was_flagged", ".", "send", "(", "sender", "=", "comment", ".", "__class__", ",", "comment", "=", "comment", ",", "flag", "=", "flag", ",", "created", "=", "created", ",", "request", "=", "request", ")" ]
actually perform the flagging of a comment from a request .
train
true
40,322
def predecessor_path(tree, u, v): def _traverse(u, v): w = tree[u][v] if (w == u): return [] return ((_traverse(u, w) + [w]) + _traverse(w, v)) return (([u] + _traverse(u, v)) + [v])
[ "def", "predecessor_path", "(", "tree", ",", "u", ",", "v", ")", ":", "def", "_traverse", "(", "u", ",", "v", ")", ":", "w", "=", "tree", "[", "u", "]", "[", "v", "]", "if", "(", "w", "==", "u", ")", ":", "return", "[", "]", "return", "(", "(", "_traverse", "(", "u", ",", "w", ")", "+", "[", "w", "]", ")", "+", "_traverse", "(", "w", ",", "v", ")", ")", "return", "(", "(", "[", "u", "]", "+", "_traverse", "(", "u", ",", "v", ")", ")", "+", "[", "v", "]", ")" ]
returns the path between node u and node v as a list of node ids .
train
false
40,323
def test_morlet(): Wz = morlet(1000, [10], 2.0, zero_mean=True) W = morlet(1000, [10], 2.0, zero_mean=False) assert_true((np.abs(np.mean(np.real(Wz[0]))) < 1e-05)) assert_true((np.abs(np.mean(np.real(W[0]))) > 0.001))
[ "def", "test_morlet", "(", ")", ":", "Wz", "=", "morlet", "(", "1000", ",", "[", "10", "]", ",", "2.0", ",", "zero_mean", "=", "True", ")", "W", "=", "morlet", "(", "1000", ",", "[", "10", "]", ",", "2.0", ",", "zero_mean", "=", "False", ")", "assert_true", "(", "(", "np", ".", "abs", "(", "np", ".", "mean", "(", "np", ".", "real", "(", "Wz", "[", "0", "]", ")", ")", ")", "<", "1e-05", ")", ")", "assert_true", "(", "(", "np", ".", "abs", "(", "np", ".", "mean", "(", "np", ".", "real", "(", "W", "[", "0", "]", ")", ")", ")", ">", "0.001", ")", ")" ]
test morlet with and without zero mean .
train
false
40,324
def create_populated_cluster(ctxt, num_services, num_down_svcs=0, **values): up_time = timeutils.utcnow() down_time = (up_time - datetime.timedelta(seconds=(CONF.service_down_time + 1))) cluster = create_cluster(ctxt, **values) svcs = [db.service_create(ctxt, {'cluster_name': cluster.name, 'host': ('host' + str(i)), 'updated_at': (down_time if (i < num_down_svcs) else up_time)}) for i in range(num_services)] return (cluster, svcs)
[ "def", "create_populated_cluster", "(", "ctxt", ",", "num_services", ",", "num_down_svcs", "=", "0", ",", "**", "values", ")", ":", "up_time", "=", "timeutils", ".", "utcnow", "(", ")", "down_time", "=", "(", "up_time", "-", "datetime", ".", "timedelta", "(", "seconds", "=", "(", "CONF", ".", "service_down_time", "+", "1", ")", ")", ")", "cluster", "=", "create_cluster", "(", "ctxt", ",", "**", "values", ")", "svcs", "=", "[", "db", ".", "service_create", "(", "ctxt", ",", "{", "'cluster_name'", ":", "cluster", ".", "name", ",", "'host'", ":", "(", "'host'", "+", "str", "(", "i", ")", ")", ",", "'updated_at'", ":", "(", "down_time", "if", "(", "i", "<", "num_down_svcs", ")", "else", "up_time", ")", "}", ")", "for", "i", "in", "range", "(", "num_services", ")", "]", "return", "(", "cluster", ",", "svcs", ")" ]
helper method that creates a cluster with up and down services .
train
false
40,325
def compound(tgt, minion_id=None): opts = {'grains': __grains__} if (minion_id is not None): if (not isinstance(minion_id, string_types)): minion_id = str(minion_id) else: minion_id = __grains__['id'] opts['id'] = minion_id matcher = salt.minion.Matcher(opts, __salt__) try: return matcher.compound_match(tgt) except Exception as exc: log.exception(exc) return False
[ "def", "compound", "(", "tgt", ",", "minion_id", "=", "None", ")", ":", "opts", "=", "{", "'grains'", ":", "__grains__", "}", "if", "(", "minion_id", "is", "not", "None", ")", ":", "if", "(", "not", "isinstance", "(", "minion_id", ",", "string_types", ")", ")", ":", "minion_id", "=", "str", "(", "minion_id", ")", "else", ":", "minion_id", "=", "__grains__", "[", "'id'", "]", "opts", "[", "'id'", "]", "=", "minion_id", "matcher", "=", "salt", ".", "minion", ".", "Matcher", "(", "opts", ",", "__salt__", ")", "try", ":", "return", "matcher", ".", "compound_match", "(", "tgt", ")", "except", "Exception", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "return", "False" ]
return true if the minion id matches the given compound target minion_id specify the minion id to match against the target expression .
train
false
40,326
def PropertyTypeName(value): if (value.__class__ in _PROPERTY_MEANINGS): meaning = _PROPERTY_MEANINGS[value.__class__] name = entity_pb.Property._Meaning_NAMES[meaning] return name.lower().replace('_', ':') elif isinstance(value, basestring): return 'string' elif isinstance(value, users.User): return 'user' elif isinstance(value, long): return 'int' elif (value is None): return 'null' else: return typename(value).lower()
[ "def", "PropertyTypeName", "(", "value", ")", ":", "if", "(", "value", ".", "__class__", "in", "_PROPERTY_MEANINGS", ")", ":", "meaning", "=", "_PROPERTY_MEANINGS", "[", "value", ".", "__class__", "]", "name", "=", "entity_pb", ".", "Property", ".", "_Meaning_NAMES", "[", "meaning", "]", "return", "name", ".", "lower", "(", ")", ".", "replace", "(", "'_'", ",", "':'", ")", "elif", "isinstance", "(", "value", ",", "basestring", ")", ":", "return", "'string'", "elif", "isinstance", "(", "value", ",", "users", ".", "User", ")", ":", "return", "'user'", "elif", "isinstance", "(", "value", ",", "long", ")", ":", "return", "'int'", "elif", "(", "value", "is", "None", ")", ":", "return", "'null'", "else", ":", "return", "typename", "(", "value", ")", ".", "lower", "(", ")" ]
returns the name of the type of the given property value .
train
false
40,328
def cyclic_pattern_charset(charset_type=None): charset = [] charset += ['ABCDEFGHIJKLMNOPQRSTUVWXYZ'] charset += ['abcdefghijklmnopqrstuvwxyz'] charset += ['0123456789'] if (not charset_type): charset_type = config.Option.get('pattern') if (charset_type == 1): charset[1] = ('%$-;' + re.sub('[sn]', '', charset[1])) charset[2] = ('sn()' + charset[2]) if (charset_type == 2): charset += ['!"#$%&\\()*+,-./:;<=>?@[]^_{|}~'] mixed_charset = mixed = '' k = 0 while True: for i in range(0, len(charset)): mixed += charset[i][k:(k + 1)] if (not mixed): break mixed_charset += mixed mixed = '' k += 1 return mixed_charset
[ "def", "cyclic_pattern_charset", "(", "charset_type", "=", "None", ")", ":", "charset", "=", "[", "]", "charset", "+=", "[", "'ABCDEFGHIJKLMNOPQRSTUVWXYZ'", "]", "charset", "+=", "[", "'abcdefghijklmnopqrstuvwxyz'", "]", "charset", "+=", "[", "'0123456789'", "]", "if", "(", "not", "charset_type", ")", ":", "charset_type", "=", "config", ".", "Option", ".", "get", "(", "'pattern'", ")", "if", "(", "charset_type", "==", "1", ")", ":", "charset", "[", "1", "]", "=", "(", "'%$-;'", "+", "re", ".", "sub", "(", "'[sn]'", ",", "''", ",", "charset", "[", "1", "]", ")", ")", "charset", "[", "2", "]", "=", "(", "'sn()'", "+", "charset", "[", "2", "]", ")", "if", "(", "charset_type", "==", "2", ")", ":", "charset", "+=", "[", "'!\"#$%&\\\\()*+,-./:;<=>?@[]^_{|}~'", "]", "mixed_charset", "=", "mixed", "=", "''", "k", "=", "0", "while", "True", ":", "for", "i", "in", "range", "(", "0", ",", "len", "(", "charset", ")", ")", ":", "mixed", "+=", "charset", "[", "i", "]", "[", "k", ":", "(", "k", "+", "1", ")", "]", "if", "(", "not", "mixed", ")", ":", "break", "mixed_charset", "+=", "mixed", "mixed", "=", "''", "k", "+=", "1", "return", "mixed_charset" ]
generate charset for cyclic pattern args: - charset_type: charset type 0: basic 1: extended 2: maximum returns: - list of charset .
train
false
40,329
def _get_external_workers(worker): worker_that_blocked_task = collections.defaultdict(set) get_work_response_history = worker._get_work_response_history for get_work_response in get_work_response_history: if (get_work_response['task_id'] is None): for running_task in get_work_response['running_tasks']: other_worker_id = running_task['worker'] other_task_id = running_task['task_id'] other_task = worker._scheduled_tasks.get(other_task_id) if ((other_worker_id == worker._id) or (not other_task)): continue worker_that_blocked_task[other_worker_id].add(other_task) return worker_that_blocked_task
[ "def", "_get_external_workers", "(", "worker", ")", ":", "worker_that_blocked_task", "=", "collections", ".", "defaultdict", "(", "set", ")", "get_work_response_history", "=", "worker", ".", "_get_work_response_history", "for", "get_work_response", "in", "get_work_response_history", ":", "if", "(", "get_work_response", "[", "'task_id'", "]", "is", "None", ")", ":", "for", "running_task", "in", "get_work_response", "[", "'running_tasks'", "]", ":", "other_worker_id", "=", "running_task", "[", "'worker'", "]", "other_task_id", "=", "running_task", "[", "'task_id'", "]", "other_task", "=", "worker", ".", "_scheduled_tasks", ".", "get", "(", "other_task_id", ")", "if", "(", "(", "other_worker_id", "==", "worker", ".", "_id", ")", "or", "(", "not", "other_task", ")", ")", ":", "continue", "worker_that_blocked_task", "[", "other_worker_id", "]", ".", "add", "(", "other_task", ")", "return", "worker_that_blocked_task" ]
this returns a dict with a set of tasks for all of the other workers .
train
true
40,331
def graph_clique_number(G, cliques=None): if (cliques is None): cliques = find_cliques(G) return max([len(c) for c in cliques])
[ "def", "graph_clique_number", "(", "G", ",", "cliques", "=", "None", ")", ":", "if", "(", "cliques", "is", "None", ")", ":", "cliques", "=", "find_cliques", "(", "G", ")", "return", "max", "(", "[", "len", "(", "c", ")", "for", "c", "in", "cliques", "]", ")" ]
returns the clique number of the graph .
train
false
40,332
def standardizeJSType(vartype): if vartype: typename = known_javascript_types.get(vartype.lower(), None) if (typename is None): return vartype return typename
[ "def", "standardizeJSType", "(", "vartype", ")", ":", "if", "vartype", ":", "typename", "=", "known_javascript_types", ".", "get", "(", "vartype", ".", "lower", "(", ")", ",", "None", ")", "if", "(", "typename", "is", "None", ")", ":", "return", "vartype", "return", "typename" ]
return a standardized name for the given type if it is a known type .
train
false
40,333
def _fastq_illumina_convert_qual(in_handle, out_handle, alphabet=None): mapping = dict(((chr((q + 64)), str(q)) for q in range(0, (62 + 1)))) return _fastq_convert_qual(in_handle, out_handle, mapping)
[ "def", "_fastq_illumina_convert_qual", "(", "in_handle", ",", "out_handle", ",", "alphabet", "=", "None", ")", ":", "mapping", "=", "dict", "(", "(", "(", "chr", "(", "(", "q", "+", "64", ")", ")", ",", "str", "(", "q", ")", ")", "for", "q", "in", "range", "(", "0", ",", "(", "62", "+", "1", ")", ")", ")", ")", "return", "_fastq_convert_qual", "(", "in_handle", ",", "out_handle", ",", "mapping", ")" ]
fast illumina 1 .
train
false
40,334
@pytest.fixture def it_tutorial_po(po_directory, settings, italian_tutorial): return _require_store(italian_tutorial, settings.POOTLE_TRANSLATION_DIRECTORY, 'tutorial.po')
[ "@", "pytest", ".", "fixture", "def", "it_tutorial_po", "(", "po_directory", ",", "settings", ",", "italian_tutorial", ")", ":", "return", "_require_store", "(", "italian_tutorial", ",", "settings", ".", "POOTLE_TRANSLATION_DIRECTORY", ",", "'tutorial.po'", ")" ]
require the /it/tutorial/tutorial .
train
false
40,335
def _siftup_max(heap, pos): endpos = len(heap) startpos = pos newitem = heap[pos] childpos = ((2 * pos) + 1) while (childpos < endpos): rightpos = (childpos + 1) if ((rightpos < endpos) and (not cmp_lt(heap[rightpos], heap[childpos]))): childpos = rightpos heap[pos] = heap[childpos] pos = childpos childpos = ((2 * pos) + 1) heap[pos] = newitem _siftdown_max(heap, startpos, pos)
[ "def", "_siftup_max", "(", "heap", ",", "pos", ")", ":", "endpos", "=", "len", "(", "heap", ")", "startpos", "=", "pos", "newitem", "=", "heap", "[", "pos", "]", "childpos", "=", "(", "(", "2", "*", "pos", ")", "+", "1", ")", "while", "(", "childpos", "<", "endpos", ")", ":", "rightpos", "=", "(", "childpos", "+", "1", ")", "if", "(", "(", "rightpos", "<", "endpos", ")", "and", "(", "not", "cmp_lt", "(", "heap", "[", "rightpos", "]", ",", "heap", "[", "childpos", "]", ")", ")", ")", ":", "childpos", "=", "rightpos", "heap", "[", "pos", "]", "=", "heap", "[", "childpos", "]", "pos", "=", "childpos", "childpos", "=", "(", "(", "2", "*", "pos", ")", "+", "1", ")", "heap", "[", "pos", "]", "=", "newitem", "_siftdown_max", "(", "heap", ",", "startpos", ",", "pos", ")" ]
maxheap variant of _siftup .
train
true
40,336
def ricker_matrix(width, resolution, n_components): centers = np.linspace(0, (resolution - 1), n_components) D = np.empty((n_components, resolution)) for (i, center) in enumerate(centers): D[i] = ricker_function(resolution, center, width) D /= np.sqrt(np.sum((D ** 2), axis=1))[:, np.newaxis] return D
[ "def", "ricker_matrix", "(", "width", ",", "resolution", ",", "n_components", ")", ":", "centers", "=", "np", ".", "linspace", "(", "0", ",", "(", "resolution", "-", "1", ")", ",", "n_components", ")", "D", "=", "np", ".", "empty", "(", "(", "n_components", ",", "resolution", ")", ")", "for", "(", "i", ",", "center", ")", "in", "enumerate", "(", "centers", ")", ":", "D", "[", "i", "]", "=", "ricker_function", "(", "resolution", ",", "center", ",", "width", ")", "D", "/=", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "(", "D", "**", "2", ")", ",", "axis", "=", "1", ")", ")", "[", ":", ",", "np", ".", "newaxis", "]", "return", "D" ]
dictionary of ricker wavelets .
train
false
40,337
def _read_float32(f): return np.float32(struct.unpack('>f', f.read(4))[0])
[ "def", "_read_float32", "(", "f", ")", ":", "return", "np", ".", "float32", "(", "struct", ".", "unpack", "(", "'>f'", ",", "f", ".", "read", "(", "4", ")", ")", "[", "0", "]", ")" ]
read a 32-bit float .
train
false
40,338
def import_site_module(path, module, dummy=None, modulefile=None): short_module = module[(module.rfind('.') + 1):] if (not modulefile): modulefile = (short_module + '.py') if os.path.exists(os.path.join(os.path.dirname(path), modulefile)): return __import__(module, {}, {}, [short_module]) return dummy
[ "def", "import_site_module", "(", "path", ",", "module", ",", "dummy", "=", "None", ",", "modulefile", "=", "None", ")", ":", "short_module", "=", "module", "[", "(", "module", ".", "rfind", "(", "'.'", ")", "+", "1", ")", ":", "]", "if", "(", "not", "modulefile", ")", ":", "modulefile", "=", "(", "short_module", "+", "'.py'", ")", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "path", ")", ",", "modulefile", ")", ")", ":", "return", "__import__", "(", "module", ",", "{", "}", ",", "{", "}", ",", "[", "short_module", "]", ")", "return", "dummy" ]
try to import the site specific module if it exists .
train
false
40,339
def parse_preference(path): storage = {} read = open(path) for line in read: line = line.strip() if line.startswith('<string name="'): index = line.find('"', 14) key = line[14:index] value = line[(index + 2):(-9)] storage[key] = value read.close() return storage
[ "def", "parse_preference", "(", "path", ")", ":", "storage", "=", "{", "}", "read", "=", "open", "(", "path", ")", "for", "line", "in", "read", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", ".", "startswith", "(", "'<string name=\"'", ")", ":", "index", "=", "line", ".", "find", "(", "'\"'", ",", "14", ")", "key", "=", "line", "[", "14", ":", "index", "]", "value", "=", "line", "[", "(", "index", "+", "2", ")", ":", "(", "-", "9", ")", "]", "storage", "[", "key", "]", "=", "value", "read", ".", "close", "(", ")", "return", "storage" ]
parse androids shared preference xml .
train
false
40,340
def inlineformset_factory(parent_model, model, form=ModelForm, formset=BaseInlineFormSet, fk_name=None, fields=None, exclude=None, extra=3, can_order=False, can_delete=True, max_num=None, formfield_callback=None): fk = _get_foreign_key(parent_model, model, fk_name=fk_name) if fk.unique: max_num = 1 kwargs = {u'form': form, u'formfield_callback': formfield_callback, u'formset': formset, u'extra': extra, u'can_delete': can_delete, u'can_order': can_order, u'fields': fields, u'exclude': exclude, u'max_num': max_num} FormSet = modelformset_factory(model, **kwargs) FormSet.fk = fk return FormSet
[ "def", "inlineformset_factory", "(", "parent_model", ",", "model", ",", "form", "=", "ModelForm", ",", "formset", "=", "BaseInlineFormSet", ",", "fk_name", "=", "None", ",", "fields", "=", "None", ",", "exclude", "=", "None", ",", "extra", "=", "3", ",", "can_order", "=", "False", ",", "can_delete", "=", "True", ",", "max_num", "=", "None", ",", "formfield_callback", "=", "None", ")", ":", "fk", "=", "_get_foreign_key", "(", "parent_model", ",", "model", ",", "fk_name", "=", "fk_name", ")", "if", "fk", ".", "unique", ":", "max_num", "=", "1", "kwargs", "=", "{", "u'form'", ":", "form", ",", "u'formfield_callback'", ":", "formfield_callback", ",", "u'formset'", ":", "formset", ",", "u'extra'", ":", "extra", ",", "u'can_delete'", ":", "can_delete", ",", "u'can_order'", ":", "can_order", ",", "u'fields'", ":", "fields", ",", "u'exclude'", ":", "exclude", ",", "u'max_num'", ":", "max_num", "}", "FormSet", "=", "modelformset_factory", "(", "model", ",", "**", "kwargs", ")", "FormSet", ".", "fk", "=", "fk", "return", "FormSet" ]
returns an inlineformset for the given kwargs .
train
false
40,342
def load_default(): return _module_to_dict(defaultconfig)
[ "def", "load_default", "(", ")", ":", "return", "_module_to_dict", "(", "defaultconfig", ")" ]
return default config as a dict .
train
false
40,343
def find_best_blas_type(arrays=(), dtype=None): dtype = _np.dtype(dtype) prefer_fortran = False if arrays: dtypes = [ar.dtype for ar in arrays] dtype = _np.find_common_type(dtypes, ()) try: index = dtypes.index(dtype) except ValueError: index = 0 if arrays[index].flags['FORTRAN']: prefer_fortran = True prefix = _type_conv.get(dtype.char, 'd') if (dtype.char == 'G'): dtype = _np.dtype('D') elif (dtype.char not in 'fdFD'): dtype = _np.dtype('d') return (prefix, dtype, prefer_fortran)
[ "def", "find_best_blas_type", "(", "arrays", "=", "(", ")", ",", "dtype", "=", "None", ")", ":", "dtype", "=", "_np", ".", "dtype", "(", "dtype", ")", "prefer_fortran", "=", "False", "if", "arrays", ":", "dtypes", "=", "[", "ar", ".", "dtype", "for", "ar", "in", "arrays", "]", "dtype", "=", "_np", ".", "find_common_type", "(", "dtypes", ",", "(", ")", ")", "try", ":", "index", "=", "dtypes", ".", "index", "(", "dtype", ")", "except", "ValueError", ":", "index", "=", "0", "if", "arrays", "[", "index", "]", ".", "flags", "[", "'FORTRAN'", "]", ":", "prefer_fortran", "=", "True", "prefix", "=", "_type_conv", ".", "get", "(", "dtype", ".", "char", ",", "'d'", ")", "if", "(", "dtype", ".", "char", "==", "'G'", ")", ":", "dtype", "=", "_np", ".", "dtype", "(", "'D'", ")", "elif", "(", "dtype", ".", "char", "not", "in", "'fdFD'", ")", ":", "dtype", "=", "_np", ".", "dtype", "(", "'d'", ")", "return", "(", "prefix", ",", "dtype", ",", "prefer_fortran", ")" ]
find best-matching blas/lapack type .
train
false
40,349
@pytest.mark.parametrize('input,output', DTYPE_TESTS) def test_dtype_info_name(input, output): assert (dtype_info_name(input) == output)
[ "@", "pytest", ".", "mark", ".", "parametrize", "(", "'input,output'", ",", "DTYPE_TESTS", ")", "def", "test_dtype_info_name", "(", "input", ",", "output", ")", ":", "assert", "(", "dtype_info_name", "(", "input", ")", "==", "output", ")" ]
test that dtype_info_name is giving the expected output here the available types:: b boolean i integer u unsigned integer f floating-point c complex-floating point o objects s .
train
false
40,350
def _get_files(only_py=False): for (dirpath, _dirnames, filenames) in os.walk('.'): parts = dirpath.split(os.sep) if (len(parts) >= 2): rootdir = parts[1] if (rootdir.startswith('.') or (rootdir == 'htmlcov')): continue if only_py: endings = {'.py'} else: endings = {'.py', '.asciidoc', '.js', '.feature'} files = (e for e in filenames if (os.path.splitext(e)[1] in endings)) for name in files: (yield os.path.join(dirpath, name))
[ "def", "_get_files", "(", "only_py", "=", "False", ")", ":", "for", "(", "dirpath", ",", "_dirnames", ",", "filenames", ")", "in", "os", ".", "walk", "(", "'.'", ")", ":", "parts", "=", "dirpath", ".", "split", "(", "os", ".", "sep", ")", "if", "(", "len", "(", "parts", ")", ">=", "2", ")", ":", "rootdir", "=", "parts", "[", "1", "]", "if", "(", "rootdir", ".", "startswith", "(", "'.'", ")", "or", "(", "rootdir", "==", "'htmlcov'", ")", ")", ":", "continue", "if", "only_py", ":", "endings", "=", "{", "'.py'", "}", "else", ":", "endings", "=", "{", "'.py'", ",", "'.asciidoc'", ",", "'.js'", ",", "'.feature'", "}", "files", "=", "(", "e", "for", "e", "in", "filenames", "if", "(", "os", ".", "path", ".", "splitext", "(", "e", ")", "[", "1", "]", "in", "endings", ")", ")", "for", "name", "in", "files", ":", "(", "yield", "os", ".", "path", ".", "join", "(", "dirpath", ",", "name", ")", ")" ]
iterate over all python files and yield filenames .
train
false
40,352
def rslices(n, allow_empty=False): for _ in range(5): (yield rslice(n, allow_empty))
[ "def", "rslices", "(", "n", ",", "allow_empty", "=", "False", ")", ":", "for", "_", "in", "range", "(", "5", ")", ":", "(", "yield", "rslice", "(", "n", ",", "allow_empty", ")", ")" ]
generate random slices for a single dimension .
train
false
40,353
def scalar(name=None, dtype=None): if (dtype is None): dtype = config.floatX type = TensorType(dtype, ()) return type(name)
[ "def", "scalar", "(", "name", "=", "None", ",", "dtype", "=", "None", ")", ":", "if", "(", "dtype", "is", "None", ")", ":", "dtype", "=", "config", ".", "floatX", "type", "=", "TensorType", "(", "dtype", ",", "(", ")", ")", "return", "type", "(", "name", ")" ]
return a symbolic scalar variable .
train
false
40,354
def edmonds_karp_impl(G, s, t, capacity, residual, cutoff): if (s not in G): raise nx.NetworkXError(('node %s not in graph' % str(s))) if (t not in G): raise nx.NetworkXError(('node %s not in graph' % str(t))) if (s == t): raise nx.NetworkXError('source and sink are the same node') if (residual is None): R = build_residual_network(G, capacity) else: R = residual for u in R: for e in R[u].values(): e['flow'] = 0 if (cutoff is None): cutoff = float('inf') R.graph['flow_value'] = edmonds_karp_core(R, s, t, cutoff) return R
[ "def", "edmonds_karp_impl", "(", "G", ",", "s", ",", "t", ",", "capacity", ",", "residual", ",", "cutoff", ")", ":", "if", "(", "s", "not", "in", "G", ")", ":", "raise", "nx", ".", "NetworkXError", "(", "(", "'node %s not in graph'", "%", "str", "(", "s", ")", ")", ")", "if", "(", "t", "not", "in", "G", ")", ":", "raise", "nx", ".", "NetworkXError", "(", "(", "'node %s not in graph'", "%", "str", "(", "t", ")", ")", ")", "if", "(", "s", "==", "t", ")", ":", "raise", "nx", ".", "NetworkXError", "(", "'source and sink are the same node'", ")", "if", "(", "residual", "is", "None", ")", ":", "R", "=", "build_residual_network", "(", "G", ",", "capacity", ")", "else", ":", "R", "=", "residual", "for", "u", "in", "R", ":", "for", "e", "in", "R", "[", "u", "]", ".", "values", "(", ")", ":", "e", "[", "'flow'", "]", "=", "0", "if", "(", "cutoff", "is", "None", ")", ":", "cutoff", "=", "float", "(", "'inf'", ")", "R", ".", "graph", "[", "'flow_value'", "]", "=", "edmonds_karp_core", "(", "R", ",", "s", ",", "t", ",", "cutoff", ")", "return", "R" ]
implementation of the edmonds-karp algorithm .
train
false
40,356
def test_abort(question, qtbot): with qtbot.waitSignals([question.aborted, question.completed], order='strict'): question.abort() assert question.is_aborted
[ "def", "test_abort", "(", "question", ",", "qtbot", ")", ":", "with", "qtbot", ".", "waitSignals", "(", "[", "question", ".", "aborted", ",", "question", ".", "completed", "]", ",", "order", "=", "'strict'", ")", ":", "question", ".", "abort", "(", ")", "assert", "question", ".", "is_aborted" ]
test question .
train
false
40,358
@hug.default_output_format(apply_globally=True) def output_formatter_global(data): return hug.output_format.json(data)
[ "@", "hug", ".", "default_output_format", "(", "apply_globally", "=", "True", ")", "def", "output_formatter_global", "(", "data", ")", ":", "return", "hug", ".", "output_format", ".", "json", "(", "data", ")" ]
for testing .
train
false
40,359
def test_check_clean(script): result = script.pip('check') expected_lines = ('No broken requirements found.',) assert matches_expected_lines(result.stdout, expected_lines)
[ "def", "test_check_clean", "(", "script", ")", ":", "result", "=", "script", ".", "pip", "(", "'check'", ")", "expected_lines", "=", "(", "'No broken requirements found.'", ",", ")", "assert", "matches_expected_lines", "(", "result", ".", "stdout", ",", "expected_lines", ")" ]
on a clean environment .
train
false
40,360
def _s2cmi(m, nidx): nv = (-1) for (i, v) in m.items(): if (i >= nidx): m[i] += 1 elif (v > nv): nv = v m[nidx] = (nv + 1) return (nv + 1)
[ "def", "_s2cmi", "(", "m", ",", "nidx", ")", ":", "nv", "=", "(", "-", "1", ")", "for", "(", "i", ",", "v", ")", "in", "m", ".", "items", "(", ")", ":", "if", "(", "i", ">=", "nidx", ")", ":", "m", "[", "i", "]", "+=", "1", "elif", "(", "v", ">", "nv", ")", ":", "nv", "=", "v", "m", "[", "nidx", "]", "=", "(", "nv", "+", "1", ")", "return", "(", "nv", "+", "1", ")" ]
sparse to contigous mapping inserter .
train
false
40,361
def __catalina_home(): locations = ['/usr/share/tomcat*', '/opt/tomcat'] for location in locations: folders = glob.glob(location) if folders: for catalina_home in folders: if os.path.isdir((catalina_home + '/bin')): return catalina_home return False
[ "def", "__catalina_home", "(", ")", ":", "locations", "=", "[", "'/usr/share/tomcat*'", ",", "'/opt/tomcat'", "]", "for", "location", "in", "locations", ":", "folders", "=", "glob", ".", "glob", "(", "location", ")", "if", "folders", ":", "for", "catalina_home", "in", "folders", ":", "if", "os", ".", "path", ".", "isdir", "(", "(", "catalina_home", "+", "'/bin'", ")", ")", ":", "return", "catalina_home", "return", "False" ]
tomcat paths differ depending on packaging .
train
true
40,362
def metric_cleanup(): logging.debug('metric_cleanup') pass
[ "def", "metric_cleanup", "(", ")", ":", "logging", ".", "debug", "(", "'metric_cleanup'", ")", "pass" ]
clean up the module called on shutdown .
train
false
40,364
def internal_prep_message(realm, sender_email, recipient_type_name, recipients, subject, content): if (len(content) > MAX_MESSAGE_LENGTH): content = (content[0:3900] + '\n\n[message was too long and has been truncated]') sender = get_user_profile_by_email(sender_email) if (realm is None): raise RuntimeError('None is not a valid realm for internal_prep_message!') parsed_recipients = extract_recipients(recipients) if (recipient_type_name == 'stream'): (stream, _) = create_stream_if_needed(realm, parsed_recipients[0]) try: return check_message(sender, get_client('Internal'), recipient_type_name, parsed_recipients, subject, content, realm=realm) except JsonableError as e: logging.error(('Error queueing internal message by %s: %s' % (sender_email, str(e)))) return None
[ "def", "internal_prep_message", "(", "realm", ",", "sender_email", ",", "recipient_type_name", ",", "recipients", ",", "subject", ",", "content", ")", ":", "if", "(", "len", "(", "content", ")", ">", "MAX_MESSAGE_LENGTH", ")", ":", "content", "=", "(", "content", "[", "0", ":", "3900", "]", "+", "'\\n\\n[message was too long and has been truncated]'", ")", "sender", "=", "get_user_profile_by_email", "(", "sender_email", ")", "if", "(", "realm", "is", "None", ")", ":", "raise", "RuntimeError", "(", "'None is not a valid realm for internal_prep_message!'", ")", "parsed_recipients", "=", "extract_recipients", "(", "recipients", ")", "if", "(", "recipient_type_name", "==", "'stream'", ")", ":", "(", "stream", ",", "_", ")", "=", "create_stream_if_needed", "(", "realm", ",", "parsed_recipients", "[", "0", "]", ")", "try", ":", "return", "check_message", "(", "sender", ",", "get_client", "(", "'Internal'", ")", ",", "recipient_type_name", ",", "parsed_recipients", ",", "subject", ",", "content", ",", "realm", "=", "realm", ")", "except", "JsonableError", "as", "e", ":", "logging", ".", "error", "(", "(", "'Error queueing internal message by %s: %s'", "%", "(", "sender_email", ",", "str", "(", "e", ")", ")", ")", ")", "return", "None" ]
create a message object and checks it .
train
false
40,366
def _date_from_iso8601_date(value): return datetime.datetime.strptime(value, '%Y-%m-%d').date()
[ "def", "_date_from_iso8601_date", "(", "value", ")", ":", "return", "datetime", ".", "datetime", ".", "strptime", "(", "value", ",", "'%Y-%m-%d'", ")", ".", "date", "(", ")" ]
convert a iso8601 date string to native datetime date :type value: str .
train
false
40,367
def BatchMailEntryFromString(xml_string): return atom.CreateClassFromXMLString(BatchMailEntry, xml_string)
[ "def", "BatchMailEntryFromString", "(", "xml_string", ")", ":", "return", "atom", ".", "CreateClassFromXMLString", "(", "BatchMailEntry", ",", "xml_string", ")" ]
parse in the batchmailentry from the xml definition .
train
false
40,368
def verify_hmac_sha1(request, client_secret=None, resource_owner_secret=None): norm_params = normalize_parameters(request.params) uri = normalize_base_string_uri(request.uri) base_string = construct_base_string(request.http_method, uri, norm_params) signature = sign_hmac_sha1(base_string, client_secret, resource_owner_secret) return safe_string_equals(signature, request.signature)
[ "def", "verify_hmac_sha1", "(", "request", ",", "client_secret", "=", "None", ",", "resource_owner_secret", "=", "None", ")", ":", "norm_params", "=", "normalize_parameters", "(", "request", ".", "params", ")", "uri", "=", "normalize_base_string_uri", "(", "request", ".", "uri", ")", "base_string", "=", "construct_base_string", "(", "request", ".", "http_method", ",", "uri", ",", "norm_params", ")", "signature", "=", "sign_hmac_sha1", "(", "base_string", ",", "client_secret", ",", "resource_owner_secret", ")", "return", "safe_string_equals", "(", "signature", ",", "request", ".", "signature", ")" ]
verify a hmac-sha1 signature .
train
false
40,370
def test_api_key(): @hug.authentication.api_key def api_key_authentication(api_key): if (api_key == 'Bacon'): return 'Timothy' @hug.get(requires=api_key_authentication) def hello_world(): return 'Hello world!' assert (hug.test.get(api, 'hello_world', headers={'X-Api-Key': 'Bacon'}).data == 'Hello world!') assert ('401' in hug.test.get(api, 'hello_world').status) assert ('401' in hug.test.get(api, 'hello_world', headers={'X-Api-Key': 'Invalid'}).status)
[ "def", "test_api_key", "(", ")", ":", "@", "hug", ".", "authentication", ".", "api_key", "def", "api_key_authentication", "(", "api_key", ")", ":", "if", "(", "api_key", "==", "'Bacon'", ")", ":", "return", "'Timothy'", "@", "hug", ".", "get", "(", "requires", "=", "api_key_authentication", ")", "def", "hello_world", "(", ")", ":", "return", "'Hello world!'", "assert", "(", "hug", ".", "test", ".", "get", "(", "api", ",", "'hello_world'", ",", "headers", "=", "{", "'X-Api-Key'", ":", "'Bacon'", "}", ")", ".", "data", "==", "'Hello world!'", ")", "assert", "(", "'401'", "in", "hug", ".", "test", ".", "get", "(", "api", ",", "'hello_world'", ")", ".", "status", ")", "assert", "(", "'401'", "in", "hug", ".", "test", ".", "get", "(", "api", ",", "'hello_world'", ",", "headers", "=", "{", "'X-Api-Key'", ":", "'Invalid'", "}", ")", ".", "status", ")" ]
test the included api_key based header to ensure it works as expected to allow x-api-key based authentication .
train
false
40,371
def determine_repo_dir(template, abbreviations, clone_to_dir, checkout, no_input): template = expand_abbreviations(template, abbreviations) if is_repo_url(template): cloned_repo = clone(repo_url=template, checkout=checkout, clone_to_dir=clone_to_dir, no_input=no_input) repository_candidates = [cloned_repo] else: repository_candidates = [template, os.path.join(clone_to_dir, template)] for repo_candidate in repository_candidates: if repository_has_cookiecutter_json(repo_candidate): return repo_candidate raise RepositoryNotFound(u'A valid repository for "{}" could not be found in the following locations:\n{}'.format(template, u'\n'.join(repository_candidates)))
[ "def", "determine_repo_dir", "(", "template", ",", "abbreviations", ",", "clone_to_dir", ",", "checkout", ",", "no_input", ")", ":", "template", "=", "expand_abbreviations", "(", "template", ",", "abbreviations", ")", "if", "is_repo_url", "(", "template", ")", ":", "cloned_repo", "=", "clone", "(", "repo_url", "=", "template", ",", "checkout", "=", "checkout", ",", "clone_to_dir", "=", "clone_to_dir", ",", "no_input", "=", "no_input", ")", "repository_candidates", "=", "[", "cloned_repo", "]", "else", ":", "repository_candidates", "=", "[", "template", ",", "os", ".", "path", ".", "join", "(", "clone_to_dir", ",", "template", ")", "]", "for", "repo_candidate", "in", "repository_candidates", ":", "if", "repository_has_cookiecutter_json", "(", "repo_candidate", ")", ":", "return", "repo_candidate", "raise", "RepositoryNotFound", "(", "u'A valid repository for \"{}\" could not be found in the following locations:\\n{}'", ".", "format", "(", "template", ",", "u'\\n'", ".", "join", "(", "repository_candidates", ")", ")", ")" ]
locate the repository directory from a template reference .
train
false
40,372
def configure(cache_name='cache', backend='sqlite', expire_after=None, allowable_codes=(200,), allowable_methods=('GET',), monkey_patch=True, **backend_options): try: global _cache _cache = backends.registry[backend](cache_name, **backend_options) except KeyError: raise ValueError(('Unsupported backend "%s" try one of: %s' % (backend, ', '.join(backends.registry.keys())))) if monkey_patch: redo_patch() _config['expire_after'] = expire_after _config['allowable_codes'] = allowable_codes _config['allowable_methods'] = allowable_methods
[ "def", "configure", "(", "cache_name", "=", "'cache'", ",", "backend", "=", "'sqlite'", ",", "expire_after", "=", "None", ",", "allowable_codes", "=", "(", "200", ",", ")", ",", "allowable_methods", "=", "(", "'GET'", ",", ")", ",", "monkey_patch", "=", "True", ",", "**", "backend_options", ")", ":", "try", ":", "global", "_cache", "_cache", "=", "backends", ".", "registry", "[", "backend", "]", "(", "cache_name", ",", "**", "backend_options", ")", "except", "KeyError", ":", "raise", "ValueError", "(", "(", "'Unsupported backend \"%s\" try one of: %s'", "%", "(", "backend", ",", "', '", ".", "join", "(", "backends", ".", "registry", ".", "keys", "(", ")", ")", ")", ")", ")", "if", "monkey_patch", ":", "redo_patch", "(", ")", "_config", "[", "'expire_after'", "]", "=", "expire_after", "_config", "[", "'allowable_codes'", "]", "=", "allowable_codes", "_config", "[", "'allowable_methods'", "]", "=", "allowable_methods" ]
set up the python-musicbrainz-ngs module according to settings from the beets configuration .
train
false
40,373
def parse_accept_lang_header(lang_string): result = [] pieces = accept_language_re.split(lang_string) if pieces[(-1)]: return [] for i in range(0, (len(pieces) - 1), 3): (first, lang, priority) = pieces[i:(i + 3)] if first: return [] priority = ((priority and float(priority)) or 1.0) result.append((lang, priority)) result.sort((lambda x, y: (- cmp(x[1], y[1])))) return result
[ "def", "parse_accept_lang_header", "(", "lang_string", ")", ":", "result", "=", "[", "]", "pieces", "=", "accept_language_re", ".", "split", "(", "lang_string", ")", "if", "pieces", "[", "(", "-", "1", ")", "]", ":", "return", "[", "]", "for", "i", "in", "range", "(", "0", ",", "(", "len", "(", "pieces", ")", "-", "1", ")", ",", "3", ")", ":", "(", "first", ",", "lang", ",", "priority", ")", "=", "pieces", "[", "i", ":", "(", "i", "+", "3", ")", "]", "if", "first", ":", "return", "[", "]", "priority", "=", "(", "(", "priority", "and", "float", "(", "priority", ")", ")", "or", "1.0", ")", "result", ".", "append", "(", "(", "lang", ",", "priority", ")", ")", "result", ".", "sort", "(", "(", "lambda", "x", ",", "y", ":", "(", "-", "cmp", "(", "x", "[", "1", "]", ",", "y", "[", "1", "]", ")", ")", ")", ")", "return", "result" ]
parses the lang_string .
train
true
40,374
def sign(wire, keyname, secret, time, fudge, original_id, error, other_data, request_mac, ctx=None, multi=False, first=True, algorithm=default_algorithm): (algorithm_name, digestmod) = get_algorithm(algorithm) if first: ctx = hmac.new(secret, digestmod=digestmod) ml = len(request_mac) if (ml > 0): ctx.update(struct.pack('!H', ml)) ctx.update(request_mac) id = struct.pack('!H', original_id) ctx.update(id) ctx.update(wire[2:]) if first: ctx.update(keyname.to_digestable()) ctx.update(struct.pack('!H', dns.rdataclass.ANY)) ctx.update(struct.pack('!I', 0)) long_time = (time + 0L) upper_time = ((long_time >> 32) & 65535L) lower_time = (long_time & 4294967295L) time_mac = struct.pack('!HIH', upper_time, lower_time, fudge) pre_mac = (algorithm_name + time_mac) ol = len(other_data) if (ol > 65535): raise ValueError('TSIG Other Data is > 65535 bytes') post_mac = (struct.pack('!HH', error, ol) + other_data) if first: ctx.update(pre_mac) ctx.update(post_mac) else: ctx.update(time_mac) mac = ctx.digest() mpack = struct.pack('!H', len(mac)) tsig_rdata = ((((pre_mac + mpack) + mac) + id) + post_mac) if multi: ctx = hmac.new(secret, digestmod=digestmod) ml = len(mac) ctx.update(struct.pack('!H', ml)) ctx.update(mac) else: ctx = None return (tsig_rdata, mac, ctx)
[ "def", "sign", "(", "wire", ",", "keyname", ",", "secret", ",", "time", ",", "fudge", ",", "original_id", ",", "error", ",", "other_data", ",", "request_mac", ",", "ctx", "=", "None", ",", "multi", "=", "False", ",", "first", "=", "True", ",", "algorithm", "=", "default_algorithm", ")", ":", "(", "algorithm_name", ",", "digestmod", ")", "=", "get_algorithm", "(", "algorithm", ")", "if", "first", ":", "ctx", "=", "hmac", ".", "new", "(", "secret", ",", "digestmod", "=", "digestmod", ")", "ml", "=", "len", "(", "request_mac", ")", "if", "(", "ml", ">", "0", ")", ":", "ctx", ".", "update", "(", "struct", ".", "pack", "(", "'!H'", ",", "ml", ")", ")", "ctx", ".", "update", "(", "request_mac", ")", "id", "=", "struct", ".", "pack", "(", "'!H'", ",", "original_id", ")", "ctx", ".", "update", "(", "id", ")", "ctx", ".", "update", "(", "wire", "[", "2", ":", "]", ")", "if", "first", ":", "ctx", ".", "update", "(", "keyname", ".", "to_digestable", "(", ")", ")", "ctx", ".", "update", "(", "struct", ".", "pack", "(", "'!H'", ",", "dns", ".", "rdataclass", ".", "ANY", ")", ")", "ctx", ".", "update", "(", "struct", ".", "pack", "(", "'!I'", ",", "0", ")", ")", "long_time", "=", "(", "time", "+", "0", "L", ")", "upper_time", "=", "(", "(", "long_time", ">>", "32", ")", "&", "65535", "L", ")", "lower_time", "=", "(", "long_time", "&", "4294967295", "L", ")", "time_mac", "=", "struct", ".", "pack", "(", "'!HIH'", ",", "upper_time", ",", "lower_time", ",", "fudge", ")", "pre_mac", "=", "(", "algorithm_name", "+", "time_mac", ")", "ol", "=", "len", "(", "other_data", ")", "if", "(", "ol", ">", "65535", ")", ":", "raise", "ValueError", "(", "'TSIG Other Data is > 65535 bytes'", ")", "post_mac", "=", "(", "struct", ".", "pack", "(", "'!HH'", ",", "error", ",", "ol", ")", "+", "other_data", ")", "if", "first", ":", "ctx", ".", "update", "(", "pre_mac", ")", "ctx", ".", "update", "(", "post_mac", ")", "else", ":", "ctx", ".", "update", "(", "time_mac", ")", "mac", "=", "ctx", ".", 
"digest", "(", ")", "mpack", "=", "struct", ".", "pack", "(", "'!H'", ",", "len", "(", "mac", ")", ")", "tsig_rdata", "=", "(", "(", "(", "(", "pre_mac", "+", "mpack", ")", "+", "mac", ")", "+", "id", ")", "+", "post_mac", ")", "if", "multi", ":", "ctx", "=", "hmac", ".", "new", "(", "secret", ",", "digestmod", "=", "digestmod", ")", "ml", "=", "len", "(", "mac", ")", "ctx", ".", "update", "(", "struct", ".", "pack", "(", "'!H'", ",", "ml", ")", ")", "ctx", ".", "update", "(", "mac", ")", "else", ":", "ctx", "=", "None", "return", "(", "tsig_rdata", ",", "mac", ",", "ctx", ")" ]
returns a signed receipt .
train
true
40,376
def getQuadraticPoints(begin, controlPoint, end, numberOfBezierPoints=globalNumberOfBezierPoints): bezierPortion = (1.0 / float(numberOfBezierPoints)) quadraticPoints = [] for bezierIndex in xrange(1, (numberOfBezierPoints + 1)): quadraticPoints.append(getQuadraticPoint((bezierPortion * bezierIndex), begin, controlPoint, end)) return quadraticPoints
[ "def", "getQuadraticPoints", "(", "begin", ",", "controlPoint", ",", "end", ",", "numberOfBezierPoints", "=", "globalNumberOfBezierPoints", ")", ":", "bezierPortion", "=", "(", "1.0", "/", "float", "(", "numberOfBezierPoints", ")", ")", "quadraticPoints", "=", "[", "]", "for", "bezierIndex", "in", "xrange", "(", "1", ",", "(", "numberOfBezierPoints", "+", "1", ")", ")", ":", "quadraticPoints", ".", "append", "(", "getQuadraticPoint", "(", "(", "bezierPortion", "*", "bezierIndex", ")", ",", "begin", ",", "controlPoint", ",", "end", ")", ")", "return", "quadraticPoints" ]
get the quadratic points .
train
false
40,377
def _build_label_filter(category, *args, **kwargs): terms = list(args) for (key, value) in six.iteritems(kwargs): if (value is None): continue suffix = None if key.endswith(('_prefix', '_suffix', '_greater', '_greaterequal', '_less', '_lessequal')): (key, suffix) = key.rsplit('_', 1) if ((category == 'resource') and (key == 'resource_type')): key = 'resource.type' else: key = '.'.join((category, 'label', key)) if (suffix == 'prefix'): term = '{key} = starts_with("{value}")' elif (suffix == 'suffix'): term = '{key} = ends_with("{value}")' elif (suffix == 'greater'): term = '{key} > {value}' elif (suffix == 'greaterequal'): term = '{key} >= {value}' elif (suffix == 'less'): term = '{key} < {value}' elif (suffix == 'lessequal'): term = '{key} <= {value}' else: term = '{key} = "{value}"' terms.append(term.format(key=key, value=value)) return ' AND '.join(sorted(terms))
[ "def", "_build_label_filter", "(", "category", ",", "*", "args", ",", "**", "kwargs", ")", ":", "terms", "=", "list", "(", "args", ")", "for", "(", "key", ",", "value", ")", "in", "six", ".", "iteritems", "(", "kwargs", ")", ":", "if", "(", "value", "is", "None", ")", ":", "continue", "suffix", "=", "None", "if", "key", ".", "endswith", "(", "(", "'_prefix'", ",", "'_suffix'", ",", "'_greater'", ",", "'_greaterequal'", ",", "'_less'", ",", "'_lessequal'", ")", ")", ":", "(", "key", ",", "suffix", ")", "=", "key", ".", "rsplit", "(", "'_'", ",", "1", ")", "if", "(", "(", "category", "==", "'resource'", ")", "and", "(", "key", "==", "'resource_type'", ")", ")", ":", "key", "=", "'resource.type'", "else", ":", "key", "=", "'.'", ".", "join", "(", "(", "category", ",", "'label'", ",", "key", ")", ")", "if", "(", "suffix", "==", "'prefix'", ")", ":", "term", "=", "'{key} = starts_with(\"{value}\")'", "elif", "(", "suffix", "==", "'suffix'", ")", ":", "term", "=", "'{key} = ends_with(\"{value}\")'", "elif", "(", "suffix", "==", "'greater'", ")", ":", "term", "=", "'{key} > {value}'", "elif", "(", "suffix", "==", "'greaterequal'", ")", ":", "term", "=", "'{key} >= {value}'", "elif", "(", "suffix", "==", "'less'", ")", ":", "term", "=", "'{key} < {value}'", "elif", "(", "suffix", "==", "'lessequal'", ")", ":", "term", "=", "'{key} <= {value}'", "else", ":", "term", "=", "'{key} = \"{value}\"'", "terms", ".", "append", "(", "term", ".", "format", "(", "key", "=", "key", ",", "value", "=", "value", ")", ")", "return", "' AND '", ".", "join", "(", "sorted", "(", "terms", ")", ")" ]
construct a filter string to filter on metric or resource labels .
train
true
40,378
def generate_min(extra_mods='', overwrite=False, so_mods='', python2_bin='python2', python3_bin='python3'): conf_mods = __opts__.get('min_extra_mods') if conf_mods: extra_mods = ','.join([conf_mods, extra_mods]) return salt.utils.thin.gen_min(__opts__['cachedir'], extra_mods, overwrite, so_mods, python2_bin, python3_bin)
[ "def", "generate_min", "(", "extra_mods", "=", "''", ",", "overwrite", "=", "False", ",", "so_mods", "=", "''", ",", "python2_bin", "=", "'python2'", ",", "python3_bin", "=", "'python3'", ")", ":", "conf_mods", "=", "__opts__", ".", "get", "(", "'min_extra_mods'", ")", "if", "conf_mods", ":", "extra_mods", "=", "','", ".", "join", "(", "[", "conf_mods", ",", "extra_mods", "]", ")", "return", "salt", ".", "utils", ".", "thin", ".", "gen_min", "(", "__opts__", "[", "'cachedir'", "]", ",", "extra_mods", ",", "overwrite", ",", "so_mods", ",", "python2_bin", ",", "python3_bin", ")" ]
generate the salt-thin tarball and print the location of the tarball optional additional mods to include can be supplied as a comma delimited string .
train
true
40,379
@login_required def course_survey(request, course_id): course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id) course = get_course_with_access(request.user, 'load', course_key) redirect_url = reverse('info', args=[course_id]) if (not course.course_survey_name): return redirect(redirect_url) return survey.views.view_student_survey(request.user, course.course_survey_name, course=course, redirect_url=redirect_url, is_required=course.course_survey_required)
[ "@", "login_required", "def", "course_survey", "(", "request", ",", "course_id", ")", ":", "course_key", "=", "SlashSeparatedCourseKey", ".", "from_deprecated_string", "(", "course_id", ")", "course", "=", "get_course_with_access", "(", "request", ".", "user", ",", "'load'", ",", "course_key", ")", "redirect_url", "=", "reverse", "(", "'info'", ",", "args", "=", "[", "course_id", "]", ")", "if", "(", "not", "course", ".", "course_survey_name", ")", ":", "return", "redirect", "(", "redirect_url", ")", "return", "survey", ".", "views", ".", "view_student_survey", "(", "request", ".", "user", ",", "course", ".", "course_survey_name", ",", "course", "=", "course", ",", "redirect_url", "=", "redirect_url", ",", "is_required", "=", "course", ".", "course_survey_required", ")" ]
url endpoint to present a survey that is associated with a course_id note that the actual implementation of course survey is handled in the views .
train
false
40,380
def sorted_dict_repr(d): keys = list(d.keys()) keys.sort() return (('{' + ', '.join([('%r: %r' % (k, d[k])) for k in keys])) + '}')
[ "def", "sorted_dict_repr", "(", "d", ")", ":", "keys", "=", "list", "(", "d", ".", "keys", "(", ")", ")", "keys", ".", "sort", "(", ")", "return", "(", "(", "'{'", "+", "', '", ".", "join", "(", "[", "(", "'%r: %r'", "%", "(", "k", ",", "d", "[", "k", "]", ")", ")", "for", "k", "in", "keys", "]", ")", ")", "+", "'}'", ")" ]
repr() a dictionary with the keys in order .
train
true
40,382
@patch('xmodule.modulestore.django.create_modulestore_instance', autospec=True) def drop_mongo_collections(mock_create): mock_create.return_value = None module_store = modulestore() if hasattr(module_store, '_drop_database'): module_store._drop_database(database=False) _CONTENTSTORE.clear() if hasattr(module_store, 'close_connections'): module_store.close_connections()
[ "@", "patch", "(", "'xmodule.modulestore.django.create_modulestore_instance'", ",", "autospec", "=", "True", ")", "def", "drop_mongo_collections", "(", "mock_create", ")", ":", "mock_create", ".", "return_value", "=", "None", "module_store", "=", "modulestore", "(", ")", "if", "hasattr", "(", "module_store", ",", "'_drop_database'", ")", ":", "module_store", ".", "_drop_database", "(", "database", "=", "False", ")", "_CONTENTSTORE", ".", "clear", "(", ")", "if", "hasattr", "(", "module_store", ",", "'close_connections'", ")", ":", "module_store", ".", "close_connections", "(", ")" ]
if using a mongo-backed modulestore & contentstore, drop its database, clear the contentstore, and close any open connections .
train
false
40,383
def renames(old, new): (head, tail) = os.path.split(new) if (head and tail and (not os.path.exists(head))): os.makedirs(head) shutil.move(old, new) (head, tail) = os.path.split(old) if (head and tail): try: os.removedirs(head) except OSError: pass
[ "def", "renames", "(", "old", ",", "new", ")", ":", "(", "head", ",", "tail", ")", "=", "os", ".", "path", ".", "split", "(", "new", ")", "if", "(", "head", "and", "tail", "and", "(", "not", "os", ".", "path", ".", "exists", "(", "head", ")", ")", ")", ":", "os", ".", "makedirs", "(", "head", ")", "shutil", ".", "move", "(", "old", ",", "new", ")", "(", "head", ",", "tail", ")", "=", "os", ".", "path", ".", "split", "(", "old", ")", "if", "(", "head", "and", "tail", ")", ":", "try", ":", "os", ".", "removedirs", "(", "head", ")", "except", "OSError", ":", "pass" ]
super-rename: move old to new, creating intermediate directories as necessary and deleting any left empty .
train
true
40,384
def template_shebang(template, renderers, default, blacklist, whitelist, input_data): render_pipe = [] line = '' if (template == ':string:'): line = input_data.split()[0] else: with salt.utils.fopen(template, 'r') as ifile: line = ifile.readline() if (line.startswith('#!') and (not line.startswith('#!/'))): render_pipe = check_render_pipe_str(line.strip()[2:], renderers, blacklist, whitelist) if (not render_pipe): render_pipe = check_render_pipe_str(default, renderers, blacklist, whitelist) return render_pipe
[ "def", "template_shebang", "(", "template", ",", "renderers", ",", "default", ",", "blacklist", ",", "whitelist", ",", "input_data", ")", ":", "render_pipe", "=", "[", "]", "line", "=", "''", "if", "(", "template", "==", "':string:'", ")", ":", "line", "=", "input_data", ".", "split", "(", ")", "[", "0", "]", "else", ":", "with", "salt", ".", "utils", ".", "fopen", "(", "template", ",", "'r'", ")", "as", "ifile", ":", "line", "=", "ifile", ".", "readline", "(", ")", "if", "(", "line", ".", "startswith", "(", "'#!'", ")", "and", "(", "not", "line", ".", "startswith", "(", "'#!/'", ")", ")", ")", ":", "render_pipe", "=", "check_render_pipe_str", "(", "line", ".", "strip", "(", ")", "[", "2", ":", "]", ",", "renderers", ",", "blacklist", ",", "whitelist", ")", "if", "(", "not", "render_pipe", ")", ":", "render_pipe", "=", "check_render_pipe_str", "(", "default", ",", "renderers", ",", "blacklist", ",", "whitelist", ")", "return", "render_pipe" ]
check the template shebang line and return the list of renderers specified in the pipe .
train
true
40,386
def check_expression(text): try: module = parse(text) except SyntaxError: return False if (not isinstance(module, Module)): return False statements = module.body if (not (len(statements) == 1)): return False expression = statements[0] if (expression.__class__.__name__ != 'Expr'): return False for ast_node in walk(expression): ast_node_class = ast_node.__class__.__name__ if (ast_node_class not in AST_NODE_TYPE_WHITELIST): return False if ((ast_node_class == 'Name') and (not __check_name(ast_node))): return False return True
[ "def", "check_expression", "(", "text", ")", ":", "try", ":", "module", "=", "parse", "(", "text", ")", "except", "SyntaxError", ":", "return", "False", "if", "(", "not", "isinstance", "(", "module", ",", "Module", ")", ")", ":", "return", "False", "statements", "=", "module", ".", "body", "if", "(", "not", "(", "len", "(", "statements", ")", "==", "1", ")", ")", ":", "return", "False", "expression", "=", "statements", "[", "0", "]", "if", "(", "expression", ".", "__class__", ".", "__name__", "!=", "'Expr'", ")", ":", "return", "False", "for", "ast_node", "in", "walk", "(", "expression", ")", ":", "ast_node_class", "=", "ast_node", ".", "__class__", ".", "__name__", "if", "(", "ast_node_class", "not", "in", "AST_NODE_TYPE_WHITELIST", ")", ":", "return", "False", "if", "(", "(", "ast_node_class", "==", "'Name'", ")", "and", "(", "not", "__check_name", "(", "ast_node", ")", ")", ")", ":", "return", "False", "return", "True" ]
does eval both in sage and sympy and does other checks .
train
false
40,387
def walkdir(dir): GLOB_PATTERN = os.path.join(dir, '*.[p][y]*') pathlist = glob.glob(GLOB_PATTERN) filterlist = [x for x in pathlist if (x.endswith('.py') or x.endswith('.pyw'))] if (filterlist != []): return filterlist else: return None
[ "def", "walkdir", "(", "dir", ")", ":", "GLOB_PATTERN", "=", "os", ".", "path", ".", "join", "(", "dir", ",", "'*.[p][y]*'", ")", "pathlist", "=", "glob", ".", "glob", "(", "GLOB_PATTERN", ")", "filterlist", "=", "[", "x", "for", "x", "in", "pathlist", "if", "(", "x", ".", "endswith", "(", "'.py'", ")", "or", "x", ".", "endswith", "(", "'.pyw'", ")", ")", "]", "if", "(", "filterlist", "!=", "[", "]", ")", ":", "return", "filterlist", "else", ":", "return", "None" ]
return a list of the .py and .pyw files in the given directory, or None if there are none .
train
false
40,389
def _decimal_lshift_exact(n, e): if (n == 0): return 0 elif (e >= 0): return (n * (10 ** e)) else: str_n = str(abs(n)) val_n = (len(str_n) - len(str_n.rstrip('0'))) return (None if (val_n < (- e)) else (n // (10 ** (- e))))
[ "def", "_decimal_lshift_exact", "(", "n", ",", "e", ")", ":", "if", "(", "n", "==", "0", ")", ":", "return", "0", "elif", "(", "e", ">=", "0", ")", ":", "return", "(", "n", "*", "(", "10", "**", "e", ")", ")", "else", ":", "str_n", "=", "str", "(", "abs", "(", "n", ")", ")", "val_n", "=", "(", "len", "(", "str_n", ")", "-", "len", "(", "str_n", ".", "rstrip", "(", "'0'", ")", ")", ")", "return", "(", "None", "if", "(", "val_n", "<", "(", "-", "e", ")", ")", "else", "(", "n", "//", "(", "10", "**", "(", "-", "e", ")", ")", ")", ")" ]
given integers n and e, return n * 10**e if that value is an exact integer, otherwise None .
train
false
40,392
def maybe_from_tuple(tup_or_range): if isinstance(tup_or_range, tuple): return from_tuple(tup_or_range) elif isinstance(tup_or_range, range): return tup_or_range raise ValueError(('maybe_from_tuple expects a tuple or range, got %r: %r' % (type(tup_or_range).__name__, tup_or_range)))
[ "def", "maybe_from_tuple", "(", "tup_or_range", ")", ":", "if", "isinstance", "(", "tup_or_range", ",", "tuple", ")", ":", "return", "from_tuple", "(", "tup_or_range", ")", "elif", "isinstance", "(", "tup_or_range", ",", "range", ")", ":", "return", "tup_or_range", "raise", "ValueError", "(", "(", "'maybe_from_tuple expects a tuple or range, got %r: %r'", "%", "(", "type", "(", "tup_or_range", ")", ".", "__name__", ",", "tup_or_range", ")", ")", ")" ]
convert a tuple into a range but pass ranges through silently .
train
true