Columns:
id_within_dataset: int64 (1 to 55.5k)
snippet: string (lengths 19 to 14.2k)
tokens: list (lengths 6 to 1.63k)
nl: string (lengths 6 to 352)
split_within_dataset: string (1 class)
is_duplicated: bool (2 classes)
54,011
@step(u'{word:w} step fails with "{message}"') def step_fails_with_message(context, word, message): assert False, ('FAILED: %s' % message)
[ "@", "step", "(", "u'{word:w} step fails with \"{message}\"'", ")", "def", "step_fails_with_message", "(", "context", ",", "word", ",", "message", ")", ":", "assert", "False", ",", "(", "'FAILED: %s'", "%", "message", ")" ]
step that always fails .
train
false
54,012
def except_keyword(source, start, keyword): start = pass_white(source, start) kl = len(keyword) if ((kl + start) > len(source)): return None if (source[start:(start + kl)] != keyword): return None if (((kl + start) < len(source)) and (source[(start + kl)] in IDENTIFIER_PART)): return None return (start + kl)
[ "def", "except_keyword", "(", "source", ",", "start", ",", "keyword", ")", ":", "start", "=", "pass_white", "(", "source", ",", "start", ")", "kl", "=", "len", "(", "keyword", ")", "if", "(", "(", "kl", "+", "start", ")", ">", "len", "(", "source", ")", ")", ":", "return", "None", "if", "(", "source", "[", "start", ":", "(", "start", "+", "kl", ")", "]", "!=", "keyword", ")", ":", "return", "None", "if", "(", "(", "(", "kl", "+", "start", ")", "<", "len", "(", "source", ")", ")", "and", "(", "source", "[", "(", "start", "+", "kl", ")", "]", "in", "IDENTIFIER_PART", ")", ")", ":", "return", "None", "return", "(", "start", "+", "kl", ")" ]
returns position after keyword if found else none . note: skips white space .
train
true
54,013
def _euclidean_algorithm(f, g, minpoly, p): ring = f.ring f = _trunc(f, minpoly, p) g = _trunc(g, minpoly, p) while g: rem = f deg = g.degree(0) (lcinv, _, gcd) = _gf_gcdex(ring.dmp_LC(g), minpoly, p) if (not (gcd == 1)): return None while True: degrem = rem.degree(0) if (degrem < deg): break quo = (lcinv * ring.dmp_LC(rem)).set_ring(ring) rem = _trunc((rem - (g.mul_monom(((degrem - deg), 0)) * quo)), minpoly, p) f = g g = rem lcfinv = _gf_gcdex(ring.dmp_LC(f), minpoly, p)[0].set_ring(ring) return _trunc((f * lcfinv), minpoly, p)
[ "def", "_euclidean_algorithm", "(", "f", ",", "g", ",", "minpoly", ",", "p", ")", ":", "ring", "=", "f", ".", "ring", "f", "=", "_trunc", "(", "f", ",", "minpoly", ",", "p", ")", "g", "=", "_trunc", "(", "g", ",", "minpoly", ",", "p", ")", "while", "g", ":", "rem", "=", "f", "deg", "=", "g", ".", "degree", "(", "0", ")", "(", "lcinv", ",", "_", ",", "gcd", ")", "=", "_gf_gcdex", "(", "ring", ".", "dmp_LC", "(", "g", ")", ",", "minpoly", ",", "p", ")", "if", "(", "not", "(", "gcd", "==", "1", ")", ")", ":", "return", "None", "while", "True", ":", "degrem", "=", "rem", ".", "degree", "(", "0", ")", "if", "(", "degrem", "<", "deg", ")", ":", "break", "quo", "=", "(", "lcinv", "*", "ring", ".", "dmp_LC", "(", "rem", ")", ")", ".", "set_ring", "(", "ring", ")", "rem", "=", "_trunc", "(", "(", "rem", "-", "(", "g", ".", "mul_monom", "(", "(", "(", "degrem", "-", "deg", ")", ",", "0", ")", ")", "*", "quo", ")", ")", ",", "minpoly", ",", "p", ")", "f", "=", "g", "g", "=", "rem", "lcfinv", "=", "_gf_gcdex", "(", "ring", ".", "dmp_LC", "(", "f", ")", ",", "minpoly", ",", "p", ")", "[", "0", "]", ".", "set_ring", "(", "ring", ")", "return", "_trunc", "(", "(", "f", "*", "lcfinv", ")", ",", "minpoly", ",", "p", ")" ]
compute the monic gcd of two univariate polynomials in $\mathbb{Z}_p[z]/(\check{m}_{\alpha}(z))[x]$ with the euclidean algorithm .
train
false
54,014
def check_cuda(feature_name='You are using code that relies on cuda-convnet. Cuda-convnet', check_enabled=True): if (not cuda.cuda_available): raise RuntimeError(("%s only runs on GPUs, but there doesn't seem to be a GPU available. If you would like assistance making a CPU version of convolutional maxout, contact pylearn-dev@googlegroups.com." % feature_name)) if (not hasattr(cuda.cuda_ndarray.cuda_ndarray, 'cublas_v2')): warnings.warn('You are using probably a too old Theano version. That will cause compilation crash. If so, update Theano.') elif (not cuda.cuda_ndarray.cuda_ndarray.cublas_v2()): raise RuntimeError('You are using probably a too old Theano version. That will cause compilation crash. Update Theano') if (check_enabled and (not cuda.cuda_enabled)): raise RuntimeError(('%s must run be with theano configured to use the GPU' % feature_name))
[ "def", "check_cuda", "(", "feature_name", "=", "'You are using code that relies on cuda-convnet. Cuda-convnet'", ",", "check_enabled", "=", "True", ")", ":", "if", "(", "not", "cuda", ".", "cuda_available", ")", ":", "raise", "RuntimeError", "(", "(", "\"%s only runs on GPUs, but there doesn't seem to be a GPU available. If you would like assistance making a CPU version of convolutional maxout, contact pylearn-dev@googlegroups.com.\"", "%", "feature_name", ")", ")", "if", "(", "not", "hasattr", "(", "cuda", ".", "cuda_ndarray", ".", "cuda_ndarray", ",", "'cublas_v2'", ")", ")", ":", "warnings", ".", "warn", "(", "'You are using probably a too old Theano version. That will cause compilation crash. If so, update Theano.'", ")", "elif", "(", "not", "cuda", ".", "cuda_ndarray", ".", "cuda_ndarray", ".", "cublas_v2", "(", ")", ")", ":", "raise", "RuntimeError", "(", "'You are using probably a too old Theano version. That will cause compilation crash. Update Theano'", ")", "if", "(", "check_enabled", "and", "(", "not", "cuda", ".", "cuda_enabled", ")", ")", ":", "raise", "RuntimeError", "(", "(", "'%s must run be with theano configured to use the GPU'", "%", "feature_name", ")", ")" ]
call this function before sections of code that depend on the cuda_convnet module .
train
false
54,016
def token_get(profile=None, **connection_args): kstone = auth(profile, **connection_args) token = kstone.service_catalog.get_token() return {'id': token['id'], 'expires': token['expires'], 'user_id': token['user_id'], 'tenant_id': token['tenant_id']}
[ "def", "token_get", "(", "profile", "=", "None", ",", "**", "connection_args", ")", ":", "kstone", "=", "auth", "(", "profile", ",", "**", "connection_args", ")", "token", "=", "kstone", ".", "service_catalog", ".", "get_token", "(", ")", "return", "{", "'id'", ":", "token", "[", "'id'", "]", ",", "'expires'", ":", "token", "[", "'expires'", "]", ",", "'user_id'", ":", "token", "[", "'user_id'", "]", ",", "'tenant_id'", ":", "token", "[", "'tenant_id'", "]", "}" ]
return the configured tokens cli example: .
train
true
54,017
@receiver(post_save, sender=settings.AUTH_USER_MODEL) def user_post_save(sender, **kwargs): if kwargs.get(u'raw', False): return False (user, created) = (kwargs[u'instance'], kwargs[u'created']) disabled = getattr(user, u'_disable_account_creation', (not settings.ACCOUNT_CREATE_ON_SAVE)) if (created and (not disabled)): Account.create(user=user)
[ "@", "receiver", "(", "post_save", ",", "sender", "=", "settings", ".", "AUTH_USER_MODEL", ")", "def", "user_post_save", "(", "sender", ",", "**", "kwargs", ")", ":", "if", "kwargs", ".", "get", "(", "u'raw'", ",", "False", ")", ":", "return", "False", "(", "user", ",", "created", ")", "=", "(", "kwargs", "[", "u'instance'", "]", ",", "kwargs", "[", "u'created'", "]", ")", "disabled", "=", "getattr", "(", "user", ",", "u'_disable_account_creation'", ",", "(", "not", "settings", ".", "ACCOUNT_CREATE_ON_SAVE", ")", ")", "if", "(", "created", "and", "(", "not", "disabled", ")", ")", ":", "Account", ".", "create", "(", "user", "=", "user", ")" ]
after a user is saved . creates an account for it if the user was newly created .
train
false
54,018
def compare_code_with_srcfile(pyc_filename, src_filename): (version, code_obj1) = uncompyle2._load_module(pyc_filename) code_obj2 = uncompyle2._load_file(src_filename) cmp_code_objects(version, code_obj1, code_obj2)
[ "def", "compare_code_with_srcfile", "(", "pyc_filename", ",", "src_filename", ")", ":", "(", "version", ",", "code_obj1", ")", "=", "uncompyle2", ".", "_load_module", "(", "pyc_filename", ")", "code_obj2", "=", "uncompyle2", ".", "_load_file", "(", "src_filename", ")", "cmp_code_objects", "(", "version", ",", "code_obj1", ",", "code_obj2", ")" ]
compare a .pyc file with a source code file .
train
false
54,019
@atomic def create_version_for_upload(addon, upload, channel): fileupload_exists = addon.fileupload_set.filter(created__gt=upload.created, version=upload.version).exists() version_exists = Version.unfiltered.filter(addon=addon, version=upload.version).exists() if (fileupload_exists or version_exists): log.info('Skipping Version creation for {upload_uuid} that would cause duplicate version'.format(upload_uuid=upload.uuid)) else: from olympia.devhub.views import auto_sign_version log.info('Creating version for {upload_uuid} that passed validation'.format(upload_uuid=upload.uuid)) beta = (bool(upload.version) and is_beta(upload.version)) version = Version.from_upload(upload, addon, [amo.PLATFORM_ALL.id], channel, is_beta=beta) if ((addon.status == amo.STATUS_NULL) and (channel == amo.RELEASE_CHANNEL_LISTED)): addon.update(status=amo.STATUS_NOMINATED) auto_sign_version(version, is_beta=version.is_beta)
[ "@", "atomic", "def", "create_version_for_upload", "(", "addon", ",", "upload", ",", "channel", ")", ":", "fileupload_exists", "=", "addon", ".", "fileupload_set", ".", "filter", "(", "created__gt", "=", "upload", ".", "created", ",", "version", "=", "upload", ".", "version", ")", ".", "exists", "(", ")", "version_exists", "=", "Version", ".", "unfiltered", ".", "filter", "(", "addon", "=", "addon", ",", "version", "=", "upload", ".", "version", ")", ".", "exists", "(", ")", "if", "(", "fileupload_exists", "or", "version_exists", ")", ":", "log", ".", "info", "(", "'Skipping Version creation for {upload_uuid} that would cause duplicate version'", ".", "format", "(", "upload_uuid", "=", "upload", ".", "uuid", ")", ")", "else", ":", "from", "olympia", ".", "devhub", ".", "views", "import", "auto_sign_version", "log", ".", "info", "(", "'Creating version for {upload_uuid} that passed validation'", ".", "format", "(", "upload_uuid", "=", "upload", ".", "uuid", ")", ")", "beta", "=", "(", "bool", "(", "upload", ".", "version", ")", "and", "is_beta", "(", "upload", ".", "version", ")", ")", "version", "=", "Version", ".", "from_upload", "(", "upload", ",", "addon", ",", "[", "amo", ".", "PLATFORM_ALL", ".", "id", "]", ",", "channel", ",", "is_beta", "=", "beta", ")", "if", "(", "(", "addon", ".", "status", "==", "amo", ".", "STATUS_NULL", ")", "and", "(", "channel", "==", "amo", ".", "RELEASE_CHANNEL_LISTED", ")", ")", ":", "addon", ".", "update", "(", "status", "=", "amo", ".", "STATUS_NOMINATED", ")", "auto_sign_version", "(", "version", ",", "is_beta", "=", "version", ".", "is_beta", ")" ]
note this function is only used for api uploads .
train
false
54,020
def _list_removed(old, new): return [x for x in old if (x not in new)]
[ "def", "_list_removed", "(", "old", ",", "new", ")", ":", "return", "[", "x", "for", "x", "in", "old", "if", "(", "x", "not", "in", "new", ")", "]" ]
list the packages which have been removed between the two package objects .
train
false
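A quick usage sketch for the _list_removed row above; the function body is copied verbatim, and the package lists are made-up illustration data:

def _list_removed(old, new):
    # keep each entry of `old` that no longer appears in `new`
    return [x for x in old if (x not in new)]

# hypothetical package lists before and after an upgrade
old_pkgs = ['bash', 'vim', 'curl']
new_pkgs = ['bash', 'curl']
print(_list_removed(old_pkgs, new_pkgs))  # ['vim']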
54,022
def write_feed(file_obj): writer = csv.DictWriter(file_obj, ATTRIBUTES, dialect=csv.excel_tab) writer.writeheader() categories = Category.objects.all() discounts = Sale.objects.all().prefetch_related(u'products', u'categories') attributes_dict = {a.name: a.pk for a in ProductAttribute.objects.all()} attribute_values_dict = {smart_text(a.pk): smart_text(a) for a in AttributeChoiceValue.objects.all()} category_paths = {} current_site = Site.objects.get_current() for item in get_feed_items(): item_data = item_attributes(item, categories, category_paths, current_site, discounts, attributes_dict, attribute_values_dict) writer.writerow(item_data)
[ "def", "write_feed", "(", "file_obj", ")", ":", "writer", "=", "csv", ".", "DictWriter", "(", "file_obj", ",", "ATTRIBUTES", ",", "dialect", "=", "csv", ".", "excel_tab", ")", "writer", ".", "writeheader", "(", ")", "categories", "=", "Category", ".", "objects", ".", "all", "(", ")", "discounts", "=", "Sale", ".", "objects", ".", "all", "(", ")", ".", "prefetch_related", "(", "u'products'", ",", "u'categories'", ")", "attributes_dict", "=", "{", "a", ".", "name", ":", "a", ".", "pk", "for", "a", "in", "ProductAttribute", ".", "objects", ".", "all", "(", ")", "}", "attribute_values_dict", "=", "{", "smart_text", "(", "a", ".", "pk", ")", ":", "smart_text", "(", "a", ")", "for", "a", "in", "AttributeChoiceValue", ".", "objects", ".", "all", "(", ")", "}", "category_paths", "=", "{", "}", "current_site", "=", "Site", ".", "objects", ".", "get_current", "(", ")", "for", "item", "in", "get_feed_items", "(", ")", ":", "item_data", "=", "item_attributes", "(", "item", ",", "categories", ",", "category_paths", ",", "current_site", ",", "discounts", ",", "attributes_dict", ",", "attribute_values_dict", ")", "writer", ".", "writerow", "(", "item_data", ")" ]
writes feed contents into the provided file object .
train
false
54,023
def prompt_for_password(args): if (not args.password): args.password = getpass.getpass(prompt=('Enter password for host %s and user %s: ' % (args.host, args.user))) return args
[ "def", "prompt_for_password", "(", "args", ")", ":", "if", "(", "not", "args", ".", "password", ")", ":", "args", ".", "password", "=", "getpass", ".", "getpass", "(", "prompt", "=", "(", "'Enter password for host %s and user %s: '", "%", "(", "args", ".", "host", ",", "args", ".", "user", ")", ")", ")", "return", "args" ]
if no password is specified on the command line . prompt the user for one .
train
false
54,028
def test_issue595(): words = [u'Do', u"n't", u'feed', u'the', u'dog'] tag_map = {u'VB': {POS: VERB, u'morph': VerbForm_inf}} rules = {u'verb': [[u'ed', u'e']]} lemmatizer = Lemmatizer({u'verb': {}}, {u'verb': {}}, rules) vocab = Vocab(lemmatizer=lemmatizer, tag_map=tag_map) doc = get_doc(vocab, words) doc[2].tag_ = u'VB' assert (doc[2].text == u'feed') assert (doc[2].lemma_ == u'feed')
[ "def", "test_issue595", "(", ")", ":", "words", "=", "[", "u'Do'", ",", "u\"n't\"", ",", "u'feed'", ",", "u'the'", ",", "u'dog'", "]", "tag_map", "=", "{", "u'VB'", ":", "{", "POS", ":", "VERB", ",", "u'morph'", ":", "VerbForm_inf", "}", "}", "rules", "=", "{", "u'verb'", ":", "[", "[", "u'ed'", ",", "u'e'", "]", "]", "}", "lemmatizer", "=", "Lemmatizer", "(", "{", "u'verb'", ":", "{", "}", "}", ",", "{", "u'verb'", ":", "{", "}", "}", ",", "rules", ")", "vocab", "=", "Vocab", "(", "lemmatizer", "=", "lemmatizer", ",", "tag_map", "=", "tag_map", ")", "doc", "=", "get_doc", "(", "vocab", ",", "words", ")", "doc", "[", "2", "]", ".", "tag_", "=", "u'VB'", "assert", "(", "doc", "[", "2", "]", ".", "text", "==", "u'feed'", ")", "assert", "(", "doc", "[", "2", "]", ".", "lemma_", "==", "u'feed'", ")" ]
test lemmatization of base forms .
train
false
54,029
def fetch_from_url_to_file(url, config, output_file, data=None, handlers=None): (return_code, return_message, response) = open_url(url, config, data=data, handlers=handlers) if (return_code == http_client_.OK): return_data = response.read() response.close() outfile = open(output_file, 'w') outfile.write(return_data) outfile.close() return (return_code, return_message, (return_code == http_client_.OK))
[ "def", "fetch_from_url_to_file", "(", "url", ",", "config", ",", "output_file", ",", "data", "=", "None", ",", "handlers", "=", "None", ")", ":", "(", "return_code", ",", "return_message", ",", "response", ")", "=", "open_url", "(", "url", ",", "config", ",", "data", "=", "data", ",", "handlers", "=", "handlers", ")", "if", "(", "return_code", "==", "http_client_", ".", "OK", ")", ":", "return_data", "=", "response", ".", "read", "(", ")", "response", ".", "close", "(", ")", "outfile", "=", "open", "(", "output_file", ",", "'w'", ")", "outfile", ".", "write", "(", "return_data", ")", "outfile", ".", "close", "(", ")", "return", "(", "return_code", ",", "return_message", ",", "(", "return_code", "==", "http_client_", ".", "OK", ")", ")" ]
writes data retrieved from a url to a file .
train
false
54,030
def flag_calls(func): if hasattr(func, 'called'): return func def wrapper(*args, **kw): wrapper.called = False out = func(*args, **kw) wrapper.called = True return out wrapper.called = False wrapper.__doc__ = func.__doc__ return wrapper
[ "def", "flag_calls", "(", "func", ")", ":", "if", "hasattr", "(", "func", ",", "'called'", ")", ":", "return", "func", "def", "wrapper", "(", "*", "args", ",", "**", "kw", ")", ":", "wrapper", ".", "called", "=", "False", "out", "=", "func", "(", "*", "args", ",", "**", "kw", ")", "wrapper", ".", "called", "=", "True", "return", "out", "wrapper", ".", "called", "=", "False", "wrapper", ".", "__doc__", "=", "func", ".", "__doc__", "return", "wrapper" ]
wrap a function to detect and flag when it gets called .
train
true
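A self-contained sketch of the flag_calls decorator in use, assuming nothing beyond the snippet itself: the wrapper's called attribute flips to True once the wrapped function returns.

def flag_calls(func):
    # already wrapped: return unchanged
    if hasattr(func, 'called'):
        return func
    def wrapper(*args, **kw):
        wrapper.called = False
        out = func(*args, **kw)
        wrapper.called = True
        return out
    wrapper.called = False
    wrapper.__doc__ = func.__doc__
    return wrapper

double = flag_calls(lambda x: x * 2)
print(double.called)  # False
print(double(21))     # 42
print(double.called)  # True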
54,031
def new_scratch_buffer(text): vim.command('botright new') vim.command('set ft=') vim.command('set buftype=nofile') vim.current.buffer[:] = text.splitlines() feedkeys('\\<Esc>')
[ "def", "new_scratch_buffer", "(", "text", ")", ":", "vim", ".", "command", "(", "'botright new'", ")", "vim", ".", "command", "(", "'set ft='", ")", "vim", ".", "command", "(", "'set buftype=nofile'", ")", "vim", ".", "current", ".", "buffer", "[", ":", "]", "=", "text", ".", "splitlines", "(", ")", "feedkeys", "(", "'\\\\<Esc>'", ")" ]
create a new scratch buffer with the text given .
train
false
54,032
def AD(barDs, count): return call_talib_with_hlcv(barDs, count, talib.AD)
[ "def", "AD", "(", "barDs", ",", "count", ")", ":", "return", "call_talib_with_hlcv", "(", "barDs", ",", "count", ",", "talib", ".", "AD", ")" ]
chaikin a/d line .
train
false
54,036
def __escape_command(command): result = command.replace('\\', '\\\\') result = result.replace('"', '\\"') return result
[ "def", "__escape_command", "(", "command", ")", ":", "result", "=", "command", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")", "result", "=", "result", ".", "replace", "(", "'\"'", ",", "'\\\\\"'", ")", "return", "result" ]
this function escapes the command so that it can be passed in the command line to the jboss cli .
train
false
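A runnable sketch of the escaping logic above; the sample command string is invented. Backslashes are doubled first, so the backslashes added for the quotes are not escaped again.

def __escape_command(command):
    result = command.replace('\\', '\\\\')  # \  -> \\
    result = result.replace('"', '\\"')     # "  -> \"
    return result

print(__escape_command('deploy --path "C:\\apps"'))
# deploy --path \"C:\\apps\"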
54,037
def _int_arg(s): return int(s.strip())
[ "def", "_int_arg", "(", "s", ")", ":", "return", "int", "(", "s", ".", "strip", "(", ")", ")" ]
convert a string argument to an integer for use in a template function .
train
false
54,038
@testing.requires_testing_data def test_source_psd(): raw = read_raw_fif(fname_data) inverse_operator = read_inverse_operator(fname_inv) label = read_label(fname_label) (tmin, tmax) = (0, 20) (fmin, fmax) = (55, 65) n_fft = 2048 stc = compute_source_psd(raw, inverse_operator, lambda2=(1.0 / 9.0), method='dSPM', tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, pick_ori='normal', n_fft=n_fft, label=label, overlap=0.1) assert_true((stc.times[0] >= (fmin * 0.001))) assert_true((stc.times[(-1)] <= (fmax * 0.001))) assert_true((0.059 <= stc.times[np.argmax(np.sum(stc.data, axis=0))] <= 0.061))
[ "@", "testing", ".", "requires_testing_data", "def", "test_source_psd", "(", ")", ":", "raw", "=", "read_raw_fif", "(", "fname_data", ")", "inverse_operator", "=", "read_inverse_operator", "(", "fname_inv", ")", "label", "=", "read_label", "(", "fname_label", ")", "(", "tmin", ",", "tmax", ")", "=", "(", "0", ",", "20", ")", "(", "fmin", ",", "fmax", ")", "=", "(", "55", ",", "65", ")", "n_fft", "=", "2048", "stc", "=", "compute_source_psd", "(", "raw", ",", "inverse_operator", ",", "lambda2", "=", "(", "1.0", "/", "9.0", ")", ",", "method", "=", "'dSPM'", ",", "tmin", "=", "tmin", ",", "tmax", "=", "tmax", ",", "fmin", "=", "fmin", ",", "fmax", "=", "fmax", ",", "pick_ori", "=", "'normal'", ",", "n_fft", "=", "n_fft", ",", "label", "=", "label", ",", "overlap", "=", "0.1", ")", "assert_true", "(", "(", "stc", ".", "times", "[", "0", "]", ">=", "(", "fmin", "*", "0.001", ")", ")", ")", "assert_true", "(", "(", "stc", ".", "times", "[", "(", "-", "1", ")", "]", "<=", "(", "fmax", "*", "0.001", ")", ")", ")", "assert_true", "(", "(", "0.059", "<=", "stc", ".", "times", "[", "np", ".", "argmax", "(", "np", ".", "sum", "(", "stc", ".", "data", ",", "axis", "=", "0", ")", ")", "]", "<=", "0.061", ")", ")" ]
test source psd computation in label .
train
false
54,039
def generate_track_info(track_id='track info', values={}): track = TrackInfo(title=u'track info', track_id=track_id) for field in TRACK_INFO_FIELDS: setattr(track, field, u'track info') for (field, value) in values.items(): setattr(track, field, value) return track
[ "def", "generate_track_info", "(", "track_id", "=", "'track info'", ",", "values", "=", "{", "}", ")", ":", "track", "=", "TrackInfo", "(", "title", "=", "u'track info'", ",", "track_id", "=", "track_id", ")", "for", "field", "in", "TRACK_INFO_FIELDS", ":", "setattr", "(", "track", ",", "field", ",", "u'track info'", ")", "for", "(", "field", ",", "value", ")", "in", "values", ".", "items", "(", ")", ":", "setattr", "(", "track", ",", "field", ",", "value", ")", "return", "track" ]
return trackinfo populated with mock data .
train
false
54,040
def varying_between(table, idvarlist): def inst_key(inst, vars): return tuple((str(inst[var]) for var in vars)) excluded = set(idvarlist) all_possible = [var for var in (table.domain.variables + table.domain.metas) if (var not in excluded)] candidate_set = set(all_possible) idmap = group_table_indices(table, idvarlist) values = {} varying = set() for indices in idmap.values(): subset = table[indices] for var in list(candidate_set): values = subset[:, var] (values, _) = subset.get_column_view(var) if var.is_string: uniq = set(values) else: uniq = unique_non_nan(values) if (len(uniq) > 1): varying.add(var) candidate_set.remove(var) return sorted(varying, key=all_possible.index)
[ "def", "varying_between", "(", "table", ",", "idvarlist", ")", ":", "def", "inst_key", "(", "inst", ",", "vars", ")", ":", "return", "tuple", "(", "(", "str", "(", "inst", "[", "var", "]", ")", "for", "var", "in", "vars", ")", ")", "excluded", "=", "set", "(", "idvarlist", ")", "all_possible", "=", "[", "var", "for", "var", "in", "(", "table", ".", "domain", ".", "variables", "+", "table", ".", "domain", ".", "metas", ")", "if", "(", "var", "not", "in", "excluded", ")", "]", "candidate_set", "=", "set", "(", "all_possible", ")", "idmap", "=", "group_table_indices", "(", "table", ",", "idvarlist", ")", "values", "=", "{", "}", "varying", "=", "set", "(", ")", "for", "indices", "in", "idmap", ".", "values", "(", ")", ":", "subset", "=", "table", "[", "indices", "]", "for", "var", "in", "list", "(", "candidate_set", ")", ":", "values", "=", "subset", "[", ":", ",", "var", "]", "(", "values", ",", "_", ")", "=", "subset", ".", "get_column_view", "(", "var", ")", "if", "var", ".", "is_string", ":", "uniq", "=", "set", "(", "values", ")", "else", ":", "uniq", "=", "unique_non_nan", "(", "values", ")", "if", "(", "len", "(", "uniq", ")", ">", "1", ")", ":", "varying", ".", "add", "(", "var", ")", "candidate_set", ".", "remove", "(", "var", ")", "return", "sorted", "(", "varying", ",", "key", "=", "all_possible", ".", "index", ")" ]
return a list of all variables with non constant values between groups defined by idvarlist .
train
false
54,042
def images(): for (ci, cl) in enumerate(classes): images = glob('{}/{}/*.jpg'.format(basedir, cl)) for im in sorted(images): (yield (im, ci))
[ "def", "images", "(", ")", ":", "for", "(", "ci", ",", "cl", ")", "in", "enumerate", "(", "classes", ")", ":", "images", "=", "glob", "(", "'{}/{}/*.jpg'", ".", "format", "(", "basedir", ",", "cl", ")", ")", "for", "im", "in", "sorted", "(", "images", ")", ":", "(", "yield", "(", "im", ",", "ci", ")", ")" ]
iterate over all pairs that this function will return .
train
false
54,043
def _enqueue_feedback_thread_status_change_email_task(user_id, reference, old_status, new_status): payload = {'user_id': user_id, 'reference_dict': reference.to_dict(), 'old_status': old_status, 'new_status': new_status} taskqueue_services.enqueue_task(feconf.TASK_URL_FEEDBACK_STATUS_EMAILS, payload, 0)
[ "def", "_enqueue_feedback_thread_status_change_email_task", "(", "user_id", ",", "reference", ",", "old_status", ",", "new_status", ")", ":", "payload", "=", "{", "'user_id'", ":", "user_id", ",", "'reference_dict'", ":", "reference", ".", "to_dict", "(", ")", ",", "'old_status'", ":", "old_status", ",", "'new_status'", ":", "new_status", "}", "taskqueue_services", ".", "enqueue_task", "(", "feconf", ".", "TASK_URL_FEEDBACK_STATUS_EMAILS", ",", "payload", ",", "0", ")" ]
adds a task for sending email when a feedback thread status is changed .
train
false
54,044
def _CheckExpression(expression): expression = _ValidateString(expression, max_len=MAXIMUM_EXPRESSION_LENGTH) try: expression_parser.Parse(expression) except expression_parser.ExpressionException as e: raise ExpressionError(('Failed to parse expression "%s"' % expression)) return expression
[ "def", "_CheckExpression", "(", "expression", ")", ":", "expression", "=", "_ValidateString", "(", "expression", ",", "max_len", "=", "MAXIMUM_EXPRESSION_LENGTH", ")", "try", ":", "expression_parser", ".", "Parse", "(", "expression", ")", "except", "expression_parser", ".", "ExpressionException", "as", "e", ":", "raise", "ExpressionError", "(", "(", "'Failed to parse expression \"%s\"'", "%", "expression", ")", ")", "return", "expression" ]
checks whether the expression is a string .
train
false
54,045
def gethostbyname(hostname): return get_hub().resolver.gethostbyname(hostname)
[ "def", "gethostbyname", "(", "hostname", ")", ":", "return", "get_hub", "(", ")", ".", "resolver", ".", "gethostbyname", "(", "hostname", ")" ]
gethostbyname -> address . return the ip address for a host .
train
false
54,046
def ensure_relative(path): (drive, path) = os.path.splitdrive(path) if (sys.platform == 'mac'): return (os.sep + path) else: if (path[0:1] == os.sep): path = (drive + path[1:]) return path
[ "def", "ensure_relative", "(", "path", ")", ":", "(", "drive", ",", "path", ")", "=", "os", ".", "path", ".", "splitdrive", "(", "path", ")", "if", "(", "sys", ".", "platform", "==", "'mac'", ")", ":", "return", "(", "os", ".", "sep", "+", "path", ")", "else", ":", "if", "(", "path", "[", "0", ":", "1", "]", "==", "os", ".", "sep", ")", ":", "path", "=", "(", "drive", "+", "path", "[", "1", ":", "]", ")", "return", "path" ]
take the full path path . and make it a relative path .
train
false
54,047
def addXIntersectionsFromLoopsForTable(loops, xIntersectionsTable, width): for loop in loops: addXIntersectionsFromLoopForTable(loop, xIntersectionsTable, width)
[ "def", "addXIntersectionsFromLoopsForTable", "(", "loops", ",", "xIntersectionsTable", ",", "width", ")", ":", "for", "loop", "in", "loops", ":", "addXIntersectionsFromLoopForTable", "(", "loop", ",", "xIntersectionsTable", ",", "width", ")" ]
add the x intersections for a loop into a table .
train
false
54,048
def get_cache_duration(cache_key): namespace = _get_cache_key_namespace(cache_key) duration = settings.SHUUP_CACHE_DURATIONS.get(namespace) if (duration is None): duration = DEFAULT_CACHE_DURATIONS.get(namespace, settings.SHUUP_DEFAULT_CACHE_DURATION) return duration
[ "def", "get_cache_duration", "(", "cache_key", ")", ":", "namespace", "=", "_get_cache_key_namespace", "(", "cache_key", ")", "duration", "=", "settings", ".", "SHUUP_CACHE_DURATIONS", ".", "get", "(", "namespace", ")", "if", "(", "duration", "is", "None", ")", ":", "duration", "=", "DEFAULT_CACHE_DURATIONS", ".", "get", "(", "namespace", ",", "settings", ".", "SHUUP_DEFAULT_CACHE_DURATION", ")", "return", "duration" ]
determine a cache duration for the given cache key .
train
false
54,049
def eagerload(*args, **kwargs): return joinedload(*args, **kwargs)
[ "def", "eagerload", "(", "*", "args", ",", "**", "kwargs", ")", ":", "return", "joinedload", "(", "*", "args", ",", "**", "kwargs", ")" ]
a synonym for :func:joinedload() .
train
false
54,050
def add_stderr_logger(level=logging.DEBUG): logger = logging.getLogger(__name__) handler = logging.StreamHandler() handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s')) logger.addHandler(handler) logger.setLevel(level) logger.debug(('Added a stderr logging handler to logger: %s' % __name__)) return handler
[ "def", "add_stderr_logger", "(", "level", "=", "logging", ".", "DEBUG", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "handler", "=", "logging", ".", "StreamHandler", "(", ")", "handler", ".", "setFormatter", "(", "logging", ".", "Formatter", "(", "'%(asctime)s %(levelname)s %(message)s'", ")", ")", "logger", ".", "addHandler", "(", "handler", ")", "logger", ".", "setLevel", "(", "level", ")", "logger", ".", "debug", "(", "(", "'Added a stderr logging handler to logger: %s'", "%", "__name__", ")", ")", "return", "handler" ]
helper for quickly adding a streamhandler to the logger .
train
true
54,052
def test_json_view_normal_response(): expected = http.HttpResponseForbidden() response = json_view((lambda r: expected))(mock.Mock()) assert (expected is response) eq_(response['Content-Type'], 'text/html; charset=utf-8')
[ "def", "test_json_view_normal_response", "(", ")", ":", "expected", "=", "http", ".", "HttpResponseForbidden", "(", ")", "response", "=", "json_view", "(", "(", "lambda", "r", ":", "expected", ")", ")", "(", "mock", ".", "Mock", "(", ")", ")", "assert", "(", "expected", "is", "response", ")", "eq_", "(", "response", "[", "'Content-Type'", "]", ",", "'text/html; charset=utf-8'", ")" ]
normal responses get passed through .
train
false
54,053
def kegg_get(dbentries, option=None): if (isinstance(dbentries, list) and (len(dbentries) <= 10)): dbentries = '+'.join(dbentries) elif (isinstance(dbentries, list) and (len(dbentries) > 10)): raise Exception('Maximum number of dbentries is 10 for kegg get query') if (option in ['aaseq', 'ntseq', 'mol', 'kcf', 'image', 'kgml']): resp = _q('get', dbentries, option) elif option: raise Exception('Invalid option arg for kegg get request.') else: resp = _q('get', dbentries) return resp
[ "def", "kegg_get", "(", "dbentries", ",", "option", "=", "None", ")", ":", "if", "(", "isinstance", "(", "dbentries", ",", "list", ")", "and", "(", "len", "(", "dbentries", ")", "<=", "10", ")", ")", ":", "dbentries", "=", "'+'", ".", "join", "(", "dbentries", ")", "elif", "(", "isinstance", "(", "dbentries", ",", "list", ")", "and", "(", "len", "(", "dbentries", ")", ">", "10", ")", ")", ":", "raise", "Exception", "(", "'Maximum number of dbentries is 10 for kegg get query'", ")", "if", "(", "option", "in", "[", "'aaseq'", ",", "'ntseq'", ",", "'mol'", ",", "'kcf'", ",", "'image'", ",", "'kgml'", "]", ")", ":", "resp", "=", "_q", "(", "'get'", ",", "dbentries", ",", "option", ")", "elif", "option", ":", "raise", "Exception", "(", "'Invalid option arg for kegg get request.'", ")", "else", ":", "resp", "=", "_q", "(", "'get'", ",", "dbentries", ")", "return", "resp" ]
kegg get - data retrieval .
train
false
54,056
@contextmanager def mocked_context(*args, **kwargs): (yield type('Tunnelled', (object,), {}))
[ "@", "contextmanager", "def", "mocked_context", "(", "*", "args", ",", "**", "kwargs", ")", ":", "(", "yield", "type", "(", "'Tunnelled'", ",", "(", "object", ",", ")", ",", "{", "}", ")", ")" ]
to be directly patched into an ssh .
train
false
54,057
def walk_modules(path): mods = [] mod = import_module(path) mods.append(mod) if hasattr(mod, '__path__'): for (_, subpath, ispkg) in iter_modules(mod.__path__): fullpath = ((path + '.') + subpath) if ispkg: mods += walk_modules(fullpath) else: submod = import_module(fullpath) mods.append(submod) return mods
[ "def", "walk_modules", "(", "path", ")", ":", "mods", "=", "[", "]", "mod", "=", "import_module", "(", "path", ")", "mods", ".", "append", "(", "mod", ")", "if", "hasattr", "(", "mod", ",", "'__path__'", ")", ":", "for", "(", "_", ",", "subpath", ",", "ispkg", ")", "in", "iter_modules", "(", "mod", ".", "__path__", ")", ":", "fullpath", "=", "(", "(", "path", "+", "'.'", ")", "+", "subpath", ")", "if", "ispkg", ":", "mods", "+=", "walk_modules", "(", "fullpath", ")", "else", ":", "submod", "=", "import_module", "(", "fullpath", ")", "mods", ".", "append", "(", "submod", ")", "return", "mods" ]
loads a module and all its submodules from the given module path and returns them .
train
true
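The walk_modules row becomes runnable with two standard-library imports; a sketch, using the stdlib json package as an arbitrary target:

from importlib import import_module
from pkgutil import iter_modules

def walk_modules(path):
    # import `path`, then recurse into every submodule beneath it
    mods = []
    mod = import_module(path)
    mods.append(mod)
    if hasattr(mod, '__path__'):
        for _, subpath, ispkg in iter_modules(mod.__path__):
            fullpath = path + '.' + subpath
            if ispkg:
                mods += walk_modules(fullpath)
            else:
                mods.append(import_module(fullpath))
    return mods

print([m.__name__ for m in walk_modules('json')])
# e.g. ['json', 'json.decoder', 'json.encoder', 'json.scanner', 'json.tool']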
54,058
def daily_return(prices): g = np.zeros_like(prices) g[1:] = ((prices[1:] - prices[:(-1)]) / prices[:(-1)]) return g
[ "def", "daily_return", "(", "prices", ")", ":", "g", "=", "np", ".", "zeros_like", "(", "prices", ")", "g", "[", "1", ":", "]", "=", "(", "(", "prices", "[", "1", ":", "]", "-", "prices", "[", ":", "(", "-", "1", ")", "]", ")", "/", "prices", "[", ":", "(", "-", "1", ")", "]", ")", "return", "g" ]
an array of daily returns from price array .
train
false
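A worked example of the daily_return formula g[i] = (p[i] - p[i-1]) / p[i-1], with the first entry left at zero; the price series is invented:

import numpy as np

def daily_return(prices):
    g = np.zeros_like(prices)  # g[0] stays 0.0
    g[1:] = (prices[1:] - prices[:-1]) / prices[:-1]
    return g

prices = np.array([100.0, 102.0, 99.96])
print(daily_return(prices))  # [ 0.    0.02 -0.02]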
54,059
def green(text, attrib=None): return colorize(text, 'green', attrib)
[ "def", "green", "(", "text", ",", "attrib", "=", "None", ")", ":", "return", "colorize", "(", "text", ",", "'green'", ",", "attrib", ")" ]
wrapper for colorize .
train
false
54,060
def topic_detail(request, slug, topic_id, template_name='groups/topics/topic_detail.html'): group = get_object_or_404(Group, slug=slug, is_active=True) topic = get_object_or_404(GroupTopic, pk=topic_id, is_active=True) message_form = GroupMessageForm() return render(request, template_name, {'group': group, 'topic': topic, 'message_form': message_form})
[ "def", "topic_detail", "(", "request", ",", "slug", ",", "topic_id", ",", "template_name", "=", "'groups/topics/topic_detail.html'", ")", ":", "group", "=", "get_object_or_404", "(", "Group", ",", "slug", "=", "slug", ",", "is_active", "=", "True", ")", "topic", "=", "get_object_or_404", "(", "GroupTopic", ",", "pk", "=", "topic_id", ",", "is_active", "=", "True", ")", "message_form", "=", "GroupMessageForm", "(", ")", "return", "render", "(", "request", ",", "template_name", ",", "{", "'group'", ":", "group", ",", "'topic'", ":", "topic", ",", "'message_form'", ":", "message_form", "}", ")" ]
returns a group topic detail page .
train
false
54,061
@task(name='clean-all', aliases=('distclean',)) def clean_all(ctx, dry_run=False): cleanup_dirs((ctx.clean_all.directories or []), dry_run=dry_run) cleanup_dirs((ctx.clean_all.extra_directories or []), dry_run=dry_run) cleanup_files((ctx.clean_all.files or []), dry_run=dry_run) cleanup_files((ctx.clean_all.extra_files or []), dry_run=dry_run) execute_cleanup_tasks(ctx, cleanup_all_tasks, dry_run=dry_run) clean(ctx, dry_run=dry_run)
[ "@", "task", "(", "name", "=", "'clean-all'", ",", "aliases", "=", "(", "'distclean'", ",", ")", ")", "def", "clean_all", "(", "ctx", ",", "dry_run", "=", "False", ")", ":", "cleanup_dirs", "(", "(", "ctx", ".", "clean_all", ".", "directories", "or", "[", "]", ")", ",", "dry_run", "=", "dry_run", ")", "cleanup_dirs", "(", "(", "ctx", ".", "clean_all", ".", "extra_directories", "or", "[", "]", ")", ",", "dry_run", "=", "dry_run", ")", "cleanup_files", "(", "(", "ctx", ".", "clean_all", ".", "files", "or", "[", "]", ")", ",", "dry_run", "=", "dry_run", ")", "cleanup_files", "(", "(", "ctx", ".", "clean_all", ".", "extra_files", "or", "[", "]", ")", ",", "dry_run", "=", "dry_run", ")", "execute_cleanup_tasks", "(", "ctx", ",", "cleanup_all_tasks", ",", "dry_run", "=", "dry_run", ")", "clean", "(", "ctx", ",", "dry_run", "=", "dry_run", ")" ]
clean up everything .
train
true
54,062
def InstallerNotifyServer(): config_lib.CONFIG.SetWriteBack('temp.yaml') try: log_data = open(config_lib.CONFIG['Installer.logfile'], 'rb').read() except (IOError, OSError): log_data = '' comms.CommsInit().RunOnce() client = comms.GRRHTTPClient(ca_cert=config_lib.CONFIG['CA.certificate'], private_key=config_lib.CONFIG.Get('Client.private_key')) client.client_worker.SendReply(session_id=rdfvalue.FlowSessionID(flow_name='InstallationFailed'), message_type=rdf_flows.GrrMessage.Type.STATUS, request_id=0, response_id=0, rdf_value=rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.GENERIC_ERROR, error_message='Installation failed.', backtrace=log_data[(-10000):])) client.RunOnce()
[ "def", "InstallerNotifyServer", "(", ")", ":", "config_lib", ".", "CONFIG", ".", "SetWriteBack", "(", "'temp.yaml'", ")", "try", ":", "log_data", "=", "open", "(", "config_lib", ".", "CONFIG", "[", "'Installer.logfile'", "]", ",", "'rb'", ")", ".", "read", "(", ")", "except", "(", "IOError", ",", "OSError", ")", ":", "log_data", "=", "''", "comms", ".", "CommsInit", "(", ")", ".", "RunOnce", "(", ")", "client", "=", "comms", ".", "GRRHTTPClient", "(", "ca_cert", "=", "config_lib", ".", "CONFIG", "[", "'CA.certificate'", "]", ",", "private_key", "=", "config_lib", ".", "CONFIG", ".", "Get", "(", "'Client.private_key'", ")", ")", "client", ".", "client_worker", ".", "SendReply", "(", "session_id", "=", "rdfvalue", ".", "FlowSessionID", "(", "flow_name", "=", "'InstallationFailed'", ")", ",", "message_type", "=", "rdf_flows", ".", "GrrMessage", ".", "Type", ".", "STATUS", ",", "request_id", "=", "0", ",", "response_id", "=", "0", ",", "rdf_value", "=", "rdf_flows", ".", "GrrStatus", "(", "status", "=", "rdf_flows", ".", "GrrStatus", ".", "ReturnedStatus", ".", "GENERIC_ERROR", ",", "error_message", "=", "'Installation failed.'", ",", "backtrace", "=", "log_data", "[", "(", "-", "10000", ")", ":", "]", ")", ")", "client", ".", "RunOnce", "(", ")" ]
an emergency function invoked when the client installation failed .
train
false
54,065
def NDP_Attack_NS_Spoofing(src_lladdr=None, src=None, target='2001:db8::1', dst=None, src_mac=None, dst_mac=None, loop=True, inter=1, iface=None): if (not iface): iface = conf.iface if (not src_lladdr): src_lladdr = get_if_hwaddr(iface) ether_params = {} if src_mac: ether_params['src'] = src_mac if dst_mac: ether_params['dst'] = dst_mac ipv6_params = {} if src: ipv6_params['src'] = src if dst: ipv6_params['dst'] = dst else: tmp = inet_ntop(socket.AF_INET6, in6_getnsma(inet_pton(socket.AF_INET6, target))) ipv6_params['dst'] = tmp pkt = Ether(**ether_params) pkt /= IPv6(**ipv6_params) pkt /= ICMPv6ND_NS(tgt=target) pkt /= ICMPv6NDOptSrcLLAddr(lladdr=src_lladdr) sendp(pkt, inter=inter, loop=loop, iface=iface, verbose=0)
[ "def", "NDP_Attack_NS_Spoofing", "(", "src_lladdr", "=", "None", ",", "src", "=", "None", ",", "target", "=", "'2001:db8::1'", ",", "dst", "=", "None", ",", "src_mac", "=", "None", ",", "dst_mac", "=", "None", ",", "loop", "=", "True", ",", "inter", "=", "1", ",", "iface", "=", "None", ")", ":", "if", "(", "not", "iface", ")", ":", "iface", "=", "conf", ".", "iface", "if", "(", "not", "src_lladdr", ")", ":", "src_lladdr", "=", "get_if_hwaddr", "(", "iface", ")", "ether_params", "=", "{", "}", "if", "src_mac", ":", "ether_params", "[", "'src'", "]", "=", "src_mac", "if", "dst_mac", ":", "ether_params", "[", "'dst'", "]", "=", "dst_mac", "ipv6_params", "=", "{", "}", "if", "src", ":", "ipv6_params", "[", "'src'", "]", "=", "src", "if", "dst", ":", "ipv6_params", "[", "'dst'", "]", "=", "dst", "else", ":", "tmp", "=", "inet_ntop", "(", "socket", ".", "AF_INET6", ",", "in6_getnsma", "(", "inet_pton", "(", "socket", ".", "AF_INET6", ",", "target", ")", ")", ")", "ipv6_params", "[", "'dst'", "]", "=", "tmp", "pkt", "=", "Ether", "(", "**", "ether_params", ")", "pkt", "/=", "IPv6", "(", "**", "ipv6_params", ")", "pkt", "/=", "ICMPv6ND_NS", "(", "tgt", "=", "target", ")", "pkt", "/=", "ICMPv6NDOptSrcLLAddr", "(", "lladdr", "=", "src_lladdr", ")", "sendp", "(", "pkt", ",", "inter", "=", "inter", ",", "loop", "=", "loop", ",", "iface", "=", "iface", ",", "verbose", "=", "0", ")" ]
the main purpose of this function is to send fake neighbor solicitations messages to a victim .
train
true
54,067
def derive_aggregation(dim_cols, agg_col, agg): if ((dim_cols == 'index') or (agg_col == 'index') or (dim_cols is None)): agg = None agg_col = None elif (agg_col is None): if isinstance(dim_cols, list): agg_col = dim_cols[0] else: agg_col = dim_cols agg = 'count' return (agg_col, agg)
[ "def", "derive_aggregation", "(", "dim_cols", ",", "agg_col", ",", "agg", ")", ":", "if", "(", "(", "dim_cols", "==", "'index'", ")", "or", "(", "agg_col", "==", "'index'", ")", "or", "(", "dim_cols", "is", "None", ")", ")", ":", "agg", "=", "None", "agg_col", "=", "None", "elif", "(", "agg_col", "is", "None", ")", ":", "if", "isinstance", "(", "dim_cols", ",", "list", ")", ":", "agg_col", "=", "dim_cols", "[", "0", "]", "else", ":", "agg_col", "=", "dim_cols", "agg", "=", "'count'", "return", "(", "agg_col", ",", "agg", ")" ]
produces consistent aggregation spec from optional column specification .
train
false
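derive_aggregation is a pure function, so its branches can be checked directly; a sketch with invented column names:

def derive_aggregation(dim_cols, agg_col, agg):
    # index-based dimensions disable aggregation entirely
    if dim_cols == 'index' or agg_col == 'index' or dim_cols is None:
        agg = None
        agg_col = None
    # no aggregation column given: count over the first dimension
    elif agg_col is None:
        agg_col = dim_cols[0] if isinstance(dim_cols, list) else dim_cols
        agg = 'count'
    return (agg_col, agg)

print(derive_aggregation(['region', 'year'], None, 'sum'))  # ('region', 'count')
print(derive_aggregation('index', 'sales', 'sum'))          # (None, None)
print(derive_aggregation('region', 'sales', 'sum'))         # ('sales', 'sum')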
54,068
def evaluate(hps, logdir, traindir, subset='valid', return_val=False): hps.batch_size = 100 with tf.Graph().as_default(): with tf.device('/cpu:0'): with tf.variable_scope('model') as var_scope: eval_model = RealNVP(hps) summary_writer = tf.summary.FileWriter(logdir) var_scope.reuse_variables() saver = tf.train.Saver() sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) tf.train.start_queue_runners(sess) previous_global_step = 0 with sess.as_default(): while True: ckpt_state = tf.train.get_checkpoint_state(traindir) if (not (ckpt_state and ckpt_state.model_checkpoint_path)): print ('No model to eval yet at %s' % traindir) time.sleep(30) continue print ('Loading file %s' % ckpt_state.model_checkpoint_path) saver.restore(sess, ckpt_state.model_checkpoint_path) current_step = tf.train.global_step(sess, eval_model.step) if (current_step == previous_global_step): print 'Waiting for the checkpoint to be updated.' time.sleep(30) continue previous_global_step = current_step print 'Evaluating...' bit_per_dim = eval_model.eval_epoch(hps) print ('Epoch: %d, %s -> %.3f bits/dim' % (current_step, subset, bit_per_dim)) print 'Writing summary...' summary = tf.Summary() summary.value.extend([tf.Summary.Value(tag='bit_per_dim', simple_value=bit_per_dim)]) summary_writer.add_summary(summary, current_step) if return_val: return (current_step, bit_per_dim)
[ "def", "evaluate", "(", "hps", ",", "logdir", ",", "traindir", ",", "subset", "=", "'valid'", ",", "return_val", "=", "False", ")", ":", "hps", ".", "batch_size", "=", "100", "with", "tf", ".", "Graph", "(", ")", ".", "as_default", "(", ")", ":", "with", "tf", ".", "device", "(", "'/cpu:0'", ")", ":", "with", "tf", ".", "variable_scope", "(", "'model'", ")", "as", "var_scope", ":", "eval_model", "=", "RealNVP", "(", "hps", ")", "summary_writer", "=", "tf", ".", "summary", ".", "FileWriter", "(", "logdir", ")", "var_scope", ".", "reuse_variables", "(", ")", "saver", "=", "tf", ".", "train", ".", "Saver", "(", ")", "sess", "=", "tf", ".", "Session", "(", "config", "=", "tf", ".", "ConfigProto", "(", "allow_soft_placement", "=", "True", ",", "log_device_placement", "=", "True", ")", ")", "tf", ".", "train", ".", "start_queue_runners", "(", "sess", ")", "previous_global_step", "=", "0", "with", "sess", ".", "as_default", "(", ")", ":", "while", "True", ":", "ckpt_state", "=", "tf", ".", "train", ".", "get_checkpoint_state", "(", "traindir", ")", "if", "(", "not", "(", "ckpt_state", "and", "ckpt_state", ".", "model_checkpoint_path", ")", ")", ":", "print", "(", "'No model to eval yet at %s'", "%", "traindir", ")", "time", ".", "sleep", "(", "30", ")", "continue", "print", "(", "'Loading file %s'", "%", "ckpt_state", ".", "model_checkpoint_path", ")", "saver", ".", "restore", "(", "sess", ",", "ckpt_state", ".", "model_checkpoint_path", ")", "current_step", "=", "tf", ".", "train", ".", "global_step", "(", "sess", ",", "eval_model", ".", "step", ")", "if", "(", "current_step", "==", "previous_global_step", ")", ":", "print", "'Waiting for the checkpoint to be updated.'", "time", ".", "sleep", "(", "30", ")", "continue", "previous_global_step", "=", "current_step", "print", "'Evaluating...'", "bit_per_dim", "=", "eval_model", ".", "eval_epoch", "(", "hps", ")", "print", "(", "'Epoch: %d, %s -> %.3f bits/dim'", "%", "(", "current_step", ",", "subset", ",", "bit_per_dim", ")", ")", "print", "'Writing summary...'", "summary", "=", "tf", ".", "Summary", "(", ")", "summary", ".", "value", ".", "extend", "(", "[", "tf", ".", "Summary", ".", "Value", "(", "tag", "=", "'bit_per_dim'", ",", "simple_value", "=", "bit_per_dim", ")", "]", ")", "summary_writer", ".", "add_summary", "(", "summary", ",", "current_step", ")", "if", "return_val", ":", "return", "(", "current_step", ",", "bit_per_dim", ")" ]
repeatedly loads the newest checkpoint from traindir . evaluates it on the given subset and writes the bits/dim result to logdir as a summary .
train
false
54,069
def _item_to_sub_for_client(iterator, sub_pb, topics): resource = MessageToDict(sub_pb) return Subscription.from_api_repr(resource, iterator.client, topics=topics)
[ "def", "_item_to_sub_for_client", "(", "iterator", ",", "sub_pb", ",", "topics", ")", ":", "resource", "=", "MessageToDict", "(", "sub_pb", ")", "return", "Subscription", ".", "from_api_repr", "(", "resource", ",", "iterator", ".", "client", ",", "topics", "=", "topics", ")" ]
convert a subscription protobuf to the native object .
train
false
54,070
def test_score_2(): tpot_obj = TPOTClassifier() tpot_obj._pbar = tqdm(total=1, disable=True) known_score = 0.986318199045 tpot_obj._optimized_pipeline = creator.Individual.from_string('RandomForestClassifier(input_matrix)', tpot_obj._pset) tpot_obj._fitted_pipeline = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline) tpot_obj._fitted_pipeline.fit(training_features, training_classes) score = tpot_obj.score(testing_features, testing_classes) def isclose(a, b, rel_tol=1e-09, abs_tol=0.0): return (abs((a - b)) <= max((rel_tol * max(abs(a), abs(b))), abs_tol)) assert isclose(known_score, score)
[ "def", "test_score_2", "(", ")", ":", "tpot_obj", "=", "TPOTClassifier", "(", ")", "tpot_obj", ".", "_pbar", "=", "tqdm", "(", "total", "=", "1", ",", "disable", "=", "True", ")", "known_score", "=", "0.986318199045", "tpot_obj", ".", "_optimized_pipeline", "=", "creator", ".", "Individual", ".", "from_string", "(", "'RandomForestClassifier(input_matrix)'", ",", "tpot_obj", ".", "_pset", ")", "tpot_obj", ".", "_fitted_pipeline", "=", "tpot_obj", ".", "_toolbox", ".", "compile", "(", "expr", "=", "tpot_obj", ".", "_optimized_pipeline", ")", "tpot_obj", ".", "_fitted_pipeline", ".", "fit", "(", "training_features", ",", "training_classes", ")", "score", "=", "tpot_obj", ".", "score", "(", "testing_features", ",", "testing_classes", ")", "def", "isclose", "(", "a", ",", "b", ",", "rel_tol", "=", "1e-09", ",", "abs_tol", "=", "0.0", ")", ":", "return", "(", "abs", "(", "(", "a", "-", "b", ")", ")", "<=", "max", "(", "(", "rel_tol", "*", "max", "(", "abs", "(", "a", ")", ",", "abs", "(", "b", ")", ")", ")", ",", "abs_tol", ")", ")", "assert", "isclose", "(", "known_score", ",", "score", ")" ]
assert that the tpotclassifier score function outputs a known score for a fixed pipeline .
train
false
54,072
def test_forum_is_unread(guest, user, forum, topic, forumsread): assert (not forum_is_unread(None, None, guest)) assert forum_is_unread(forum, None, user) assert forum_is_unread(forum, forumsread, user) topic.update_read(user, topic.forum, forumsread) time_read = (datetime.datetime.utcnow() - datetime.timedelta(hours=1)) forumsread.cleared = time_read forumsread.last_read = datetime.datetime.utcnow() forumsread.save() assert (not forum_is_unread(forum, forumsread, user)) flaskbb_config['TRACKER_LENGTH'] = 0 assert (not forum_is_unread(forum, forumsread, user)) flaskbb_config['TRACKER_LENGTH'] = 1 forum.last_post_created = (forum.last_post_created - datetime.timedelta(hours=48)) forum.save() assert (not forum_is_unread(forum, forumsread, user)) topic.delete() forum = Forum.query.filter_by(id=forum.id).first() flaskbb_config['TRACKER_LENGTH'] = 1 assert (forum.topic_count == 0) assert (not forum_is_unread(forum, None, user))
[ "def", "test_forum_is_unread", "(", "guest", ",", "user", ",", "forum", ",", "topic", ",", "forumsread", ")", ":", "assert", "(", "not", "forum_is_unread", "(", "None", ",", "None", ",", "guest", ")", ")", "assert", "forum_is_unread", "(", "forum", ",", "None", ",", "user", ")", "assert", "forum_is_unread", "(", "forum", ",", "forumsread", ",", "user", ")", "topic", ".", "update_read", "(", "user", ",", "topic", ".", "forum", ",", "forumsread", ")", "time_read", "=", "(", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "-", "datetime", ".", "timedelta", "(", "hours", "=", "1", ")", ")", "forumsread", ".", "cleared", "=", "time_read", "forumsread", ".", "last_read", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "forumsread", ".", "save", "(", ")", "assert", "(", "not", "forum_is_unread", "(", "forum", ",", "forumsread", ",", "user", ")", ")", "flaskbb_config", "[", "'TRACKER_LENGTH'", "]", "=", "0", "assert", "(", "not", "forum_is_unread", "(", "forum", ",", "forumsread", ",", "user", ")", ")", "flaskbb_config", "[", "'TRACKER_LENGTH'", "]", "=", "1", "forum", ".", "last_post_created", "=", "(", "forum", ".", "last_post_created", "-", "datetime", ".", "timedelta", "(", "hours", "=", "48", ")", ")", "forum", ".", "save", "(", ")", "assert", "(", "not", "forum_is_unread", "(", "forum", ",", "forumsread", ",", "user", ")", ")", "topic", ".", "delete", "(", ")", "forum", "=", "Forum", ".", "query", ".", "filter_by", "(", "id", "=", "forum", ".", "id", ")", ".", "first", "(", ")", "flaskbb_config", "[", "'TRACKER_LENGTH'", "]", "=", "1", "assert", "(", "forum", ".", "topic_count", "==", "0", ")", "assert", "(", "not", "forum_is_unread", "(", "forum", ",", "None", ",", "user", ")", ")" ]
test the forum is unread function .
train
false
54,073
@utils.arg('--flavor', metavar='<flavor>', help=_('Filter results by flavor name or ID.')) @utils.arg('--tenant', metavar='<tenant_id>', help=_('Filter results by tenant ID.'), action=shell.DeprecatedAction, real_action='nothing', use=_('this option is not supported, and will be removed in version 5.0.0.')) def do_flavor_access_list(cs, args): if args.flavor: flavor = _find_flavor(cs, args.flavor) if flavor.is_public: raise exceptions.CommandError(_('Access list not available for public flavors.')) kwargs = {'flavor': flavor} else: raise exceptions.CommandError(_('Unable to get all access lists. Specify --flavor')) try: access_list = cs.flavor_access.list(**kwargs) except NotImplementedError as e: raise exceptions.CommandError(('%s' % str(e))) columns = ['Flavor_ID', 'Tenant_ID'] utils.print_list(access_list, columns)
[ "@", "utils", ".", "arg", "(", "'--flavor'", ",", "metavar", "=", "'<flavor>'", ",", "help", "=", "_", "(", "'Filter results by flavor name or ID.'", ")", ")", "@", "utils", ".", "arg", "(", "'--tenant'", ",", "metavar", "=", "'<tenant_id>'", ",", "help", "=", "_", "(", "'Filter results by tenant ID.'", ")", ",", "action", "=", "shell", ".", "DeprecatedAction", ",", "real_action", "=", "'nothing'", ",", "use", "=", "_", "(", "'this option is not supported, and will be removed in version 5.0.0.'", ")", ")", "def", "do_flavor_access_list", "(", "cs", ",", "args", ")", ":", "if", "args", ".", "flavor", ":", "flavor", "=", "_find_flavor", "(", "cs", ",", "args", ".", "flavor", ")", "if", "flavor", ".", "is_public", ":", "raise", "exceptions", ".", "CommandError", "(", "_", "(", "'Access list not available for public flavors.'", ")", ")", "kwargs", "=", "{", "'flavor'", ":", "flavor", "}", "else", ":", "raise", "exceptions", ".", "CommandError", "(", "_", "(", "'Unable to get all access lists. Specify --flavor'", ")", ")", "try", ":", "access_list", "=", "cs", ".", "flavor_access", ".", "list", "(", "**", "kwargs", ")", "except", "NotImplementedError", "as", "e", ":", "raise", "exceptions", ".", "CommandError", "(", "(", "'%s'", "%", "str", "(", "e", ")", ")", ")", "columns", "=", "[", "'Flavor_ID'", ",", "'Tenant_ID'", "]", "utils", ".", "print_list", "(", "access_list", ",", "columns", ")" ]
print access information about the given flavor .
train
false
54,075
def headers_cb(): headers = {'Access-Control-Allow-Credentials': 'true', 'Access-Control-Allow-Headers': ((('accept, accept-charset, accept-encoding, ' + 'accept-language, authorization, content-length, ') + 'content-type, host, origin, proxy-connection, ') + 'referer, user-agent, x-requested-with'), 'Access-Control-Allow-Methods': 'HEAD, GET, PUT, POST, PATCH, DELETE', 'Access-Control-Allow-Origin': '*', 'Access-Control-Max-Age': '86400'} if add_headers_cb: headers.update(add_headers_cb()) return headers
[ "def", "headers_cb", "(", ")", ":", "headers", "=", "{", "'Access-Control-Allow-Credentials'", ":", "'true'", ",", "'Access-Control-Allow-Headers'", ":", "(", "(", "(", "'accept, accept-charset, accept-encoding, '", "+", "'accept-language, authorization, content-length, '", ")", "+", "'content-type, host, origin, proxy-connection, '", ")", "+", "'referer, user-agent, x-requested-with'", ")", ",", "'Access-Control-Allow-Methods'", ":", "'HEAD, GET, PUT, POST, PATCH, DELETE'", ",", "'Access-Control-Allow-Origin'", ":", "'*'", ",", "'Access-Control-Max-Age'", ":", "'86400'", "}", "if", "add_headers_cb", ":", "headers", ".", "update", "(", "add_headers_cb", "(", ")", ")", "return", "headers" ]
callback method for providing headers per request . add_headers_cb is another callback providing headers to update the defaults in this method .
train
false
54,077
def is_special_key(keystr): return (keystr.startswith('<') and keystr.endswith('>'))
[ "def", "is_special_key", "(", "keystr", ")", ":", "return", "(", "keystr", ".", "startswith", "(", "'<'", ")", "and", "keystr", ".", "endswith", "(", "'>'", ")", ")" ]
true if keystr is a special keystring .
train
false
54,078
def nonterminals(symbols): if (u',' in symbols): symbol_list = symbols.split(u',') else: symbol_list = symbols.split() return [Nonterminal(s.strip()) for s in symbol_list]
[ "def", "nonterminals", "(", "symbols", ")", ":", "if", "(", "u','", "in", "symbols", ")", ":", "symbol_list", "=", "symbols", ".", "split", "(", "u','", ")", "else", ":", "symbol_list", "=", "symbols", ".", "split", "(", ")", "return", "[", "Nonterminal", "(", "s", ".", "strip", "(", ")", ")", "for", "s", "in", "symbol_list", "]" ]
given a string containing a list of symbol names . return a list of nonterminal objects .
train
false
54,079
def reload_(name): term(name)
[ "def", "reload_", "(", "name", ")", ":", "term", "(", "name", ")" ]
wrapper for term() cli example: .
train
false
54,080
def CountErrors(ocr_text, truth_text): counts = collections.Counter(truth_text) counts.subtract(ocr_text) drops = sum((c for c in counts.values() if (c > 0))) adds = sum(((- c) for c in counts.values() if (c < 0))) return ErrorCounts(drops, adds, len(truth_text), len(ocr_text))
[ "def", "CountErrors", "(", "ocr_text", ",", "truth_text", ")", ":", "counts", "=", "collections", ".", "Counter", "(", "truth_text", ")", "counts", ".", "subtract", "(", "ocr_text", ")", "drops", "=", "sum", "(", "(", "c", "for", "c", "in", "counts", ".", "values", "(", ")", "if", "(", "c", ">", "0", ")", ")", ")", "adds", "=", "sum", "(", "(", "(", "-", "c", ")", "for", "c", "in", "counts", ".", "values", "(", ")", "if", "(", "c", "<", "0", ")", ")", ")", "return", "ErrorCounts", "(", "drops", ",", "adds", ",", "len", "(", "truth_text", ")", ",", "len", "(", "ocr_text", ")", ")" ]
counts the drops and adds between 2 bags of iterables .
train
false
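A runnable sketch of CountErrors; ErrorCounts is defined elsewhere in the original module, so a namedtuple with plausible field names is assumed here:

import collections

# assumed result type; field names are guesses for illustration
ErrorCounts = collections.namedtuple(
    'ErrorCounts', ['drops', 'adds', 'truth_count', 'test_count'])

def CountErrors(ocr_text, truth_text):
    # bag difference: positive leftovers are characters the OCR dropped,
    # negative leftovers are characters it added
    counts = collections.Counter(truth_text)
    counts.subtract(ocr_text)
    drops = sum(c for c in counts.values() if c > 0)
    adds = sum(-c for c in counts.values() if c < 0)
    return ErrorCounts(drops, adds, len(truth_text), len(ocr_text))

print(CountErrors('helo wrld', 'hello world'))
# ErrorCounts(drops=2, adds=0, truth_count=11, test_count=9)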
54,081
def _to_byte_string(value, num_bits): shifts = six.moves.xrange((num_bits - 8), (-8), (-8)) def byte_at(off): return (((value >> off) if (off >= 0) else (value << (- off))) & 255) return ''.join((six.unichr(byte_at(offset)) for offset in shifts))
[ "def", "_to_byte_string", "(", "value", ",", "num_bits", ")", ":", "shifts", "=", "six", ".", "moves", ".", "xrange", "(", "(", "num_bits", "-", "8", ")", ",", "(", "-", "8", ")", ",", "(", "-", "8", ")", ")", "def", "byte_at", "(", "off", ")", ":", "return", "(", "(", "(", "value", ">>", "off", ")", "if", "(", "off", ">=", "0", ")", "else", "(", "value", "<<", "(", "-", "off", ")", ")", ")", "&", "255", ")", "return", "''", ".", "join", "(", "(", "six", ".", "unichr", "(", "byte_at", "(", "offset", ")", ")", "for", "offset", "in", "shifts", ")", ")" ]
convert an integer to a big-endian string of bytes with padding .
train
false
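The _to_byte_string row targets Python 2 via six; a Python 3 sketch with chr in place of six.unichr shows the big-endian layout, including the sub-byte case where the negative offset left-aligns the bits:

def _to_byte_string(value, num_bits):
    shifts = range(num_bits - 8, -8, -8)
    def byte_at(off):
        # negative offsets left-shift the remaining low bits into place
        return ((value >> off) if off >= 0 else (value << -off)) & 255
    return ''.join(chr(byte_at(offset)) for offset in shifts)

print([hex(ord(c)) for c in _to_byte_string(0x0102, 16)])  # ['0x1', '0x2']
print([hex(ord(c)) for c in _to_byte_string(0b101, 3)])    # ['0xa0']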
54,084
def test_replace_string_type(): replaced = replace_hy_obj(str_type('foo'), HyString('bar')) assert (replaced == HyString('foo'))
[ "def", "test_replace_string_type", "(", ")", ":", "replaced", "=", "replace_hy_obj", "(", "str_type", "(", "'foo'", ")", ",", "HyString", "(", "'bar'", ")", ")", "assert", "(", "replaced", "==", "HyString", "(", "'foo'", ")", ")" ]
test replacing python string .
train
false
54,085
def param_init_fflayer(options, params, prefix='ff', nin=None, nout=None, ortho=True): if (nin == None): nin = options['dim_proj'] if (nout == None): nout = options['dim_proj'] params[_p(prefix, 'W')] = norm_weight(nin, nout, ortho=ortho) params[_p(prefix, 'b')] = numpy.zeros((nout,)).astype('float32') return params
[ "def", "param_init_fflayer", "(", "options", ",", "params", ",", "prefix", "=", "'ff'", ",", "nin", "=", "None", ",", "nout", "=", "None", ",", "ortho", "=", "True", ")", ":", "if", "(", "nin", "==", "None", ")", ":", "nin", "=", "options", "[", "'dim_proj'", "]", "if", "(", "nout", "==", "None", ")", ":", "nout", "=", "options", "[", "'dim_proj'", "]", "params", "[", "_p", "(", "prefix", ",", "'W'", ")", "]", "=", "norm_weight", "(", "nin", ",", "nout", ",", "ortho", "=", "ortho", ")", "params", "[", "_p", "(", "prefix", ",", "'b'", ")", "]", "=", "numpy", ".", "zeros", "(", "(", "nout", ",", ")", ")", ".", "astype", "(", "'float32'", ")", "return", "params" ]
parameter initialization for a feedforward layer : affine transformation + point-wise nonlinearity .
train
false
54,086
def new(rsa_key): return PKCS115_SigScheme(rsa_key)
[ "def", "new", "(", "rsa_key", ")", ":", "return", "PKCS115_SigScheme", "(", "rsa_key", ")" ]
return a signature scheme object pkcs115_sigscheme that can be used to perform pkcs#1 v1.5 signing or verification .
train
false
54,087
def urlparse_cached(request_or_response): if (request_or_response not in _urlparse_cache): _urlparse_cache[request_or_response] = urlparse(request_or_response.url) return _urlparse_cache[request_or_response]
[ "def", "urlparse_cached", "(", "request_or_response", ")", ":", "if", "(", "request_or_response", "not", "in", "_urlparse_cache", ")", ":", "_urlparse_cache", "[", "request_or_response", "]", "=", "urlparse", "(", "request_or_response", ".", "url", ")", "return", "_urlparse_cache", "[", "request_or_response", "]" ]
return urlparse.urlparse of the url , caching the result per request or response object .
train
false
54,090
def marker_comparator_predicate(match): return ((not match.private) and (match.name not in ['proper_count', 'title', 'episode_title', 'alternative_title']) and (not ((match.name == 'container') and ('extension' in match.tags))))
[ "def", "marker_comparator_predicate", "(", "match", ")", ":", "return", "(", "(", "not", "match", ".", "private", ")", "and", "(", "match", ".", "name", "not", "in", "[", "'proper_count'", ",", "'title'", ",", "'episode_title'", ",", "'alternative_title'", "]", ")", "and", "(", "not", "(", "(", "match", ".", "name", "==", "'container'", ")", "and", "(", "'extension'", "in", "match", ".", "tags", ")", ")", ")", ")" ]
match predicate used in comparator .
train
false
54,092
def normalizePath(filepath): retVal = filepath if retVal: retVal = retVal.strip('\r\n') retVal = (ntpath.normpath(retVal) if isWindowsDriveLetterPath(retVal) else posixpath.normpath(retVal)) return retVal
[ "def", "normalizePath", "(", "filepath", ")", ":", "retVal", "=", "filepath", "if", "retVal", ":", "retVal", "=", "retVal", ".", "strip", "(", "'\\r\\n'", ")", "retVal", "=", "(", "ntpath", ".", "normpath", "(", "retVal", ")", "if", "isWindowsDriveLetterPath", "(", "retVal", ")", "else", "posixpath", ".", "normpath", "(", "retVal", ")", ")", "return", "retVal" ]
returns normalized string representation of a given filepath .
train
false
54,094
def convert_IMProperty(model, prop, kwargs): return None
[ "def", "convert_IMProperty", "(", "model", ",", "prop", ",", "kwargs", ")", ":", "return", "None" ]
returns a form field for a db.improperty ( none , so the property is skipped ) .
train
false
54,096
def shard_df_on_index(df, divisions): if isinstance(divisions, Iterator): divisions = list(divisions) if (not len(divisions)): (yield df) else: divisions = np.array(divisions) df = df.sort_index() index = df.index if is_categorical_dtype(index): index = index.as_ordered() indices = index.searchsorted(divisions) (yield df.iloc[:indices[0]]) for i in range((len(indices) - 1)): (yield df.iloc[indices[i]:indices[(i + 1)]]) (yield df.iloc[indices[(-1)]:])
[ "def", "shard_df_on_index", "(", "df", ",", "divisions", ")", ":", "if", "isinstance", "(", "divisions", ",", "Iterator", ")", ":", "divisions", "=", "list", "(", "divisions", ")", "if", "(", "not", "len", "(", "divisions", ")", ")", ":", "(", "yield", "df", ")", "else", ":", "divisions", "=", "np", ".", "array", "(", "divisions", ")", "df", "=", "df", ".", "sort_index", "(", ")", "index", "=", "df", ".", "index", "if", "is_categorical_dtype", "(", "index", ")", ":", "index", "=", "index", ".", "as_ordered", "(", ")", "indices", "=", "index", ".", "searchsorted", "(", "divisions", ")", "(", "yield", "df", ".", "iloc", "[", ":", "indices", "[", "0", "]", "]", ")", "for", "i", "in", "range", "(", "(", "len", "(", "indices", ")", "-", "1", ")", ")", ":", "(", "yield", "df", ".", "iloc", "[", "indices", "[", "i", "]", ":", "indices", "[", "(", "i", "+", "1", ")", "]", "]", ")", "(", "yield", "df", ".", "iloc", "[", "indices", "[", "(", "-", "1", ")", "]", ":", "]", ")" ]
shard a dataframe by ranges on its index .
train
false
54,097
def keys_to_string(data): if isinstance(data, dict): for key in list(data.keys()): if isinstance(key, six.string_types): value = data[key] val = keys_to_string(value) del data[key] data[key.encode('utf8', 'ignore')] = val return data
[ "def", "keys_to_string", "(", "data", ")", ":", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "for", "key", "in", "list", "(", "data", ".", "keys", "(", ")", ")", ":", "if", "isinstance", "(", "key", ",", "six", ".", "string_types", ")", ":", "value", "=", "data", "[", "key", "]", "val", "=", "keys_to_string", "(", "value", ")", "del", "data", "[", "key", "]", "data", "[", "key", ".", "encode", "(", "'utf8'", ",", "'ignore'", ")", "]", "=", "val", "return", "data" ]
function to convert all the unicode keys into string keys .
train
true
54,098
def generate_file(fname, ns_func, dest_dir='.'): with open(pjoin(root, 'buildutils', 'templates', ('%s' % fname)), 'r') as f: tpl = f.read() out = tpl.format(**ns_func()) dest = pjoin(dest_dir, fname) info(('generating %s from template' % dest)) with open(dest, 'w') as f: f.write(out)
[ "def", "generate_file", "(", "fname", ",", "ns_func", ",", "dest_dir", "=", "'.'", ")", ":", "with", "open", "(", "pjoin", "(", "root", ",", "'buildutils'", ",", "'templates'", ",", "(", "'%s'", "%", "fname", ")", ")", ",", "'r'", ")", "as", "f", ":", "tpl", "=", "f", ".", "read", "(", ")", "out", "=", "tpl", ".", "format", "(", "**", "ns_func", "(", ")", ")", "dest", "=", "pjoin", "(", "dest_dir", ",", "fname", ")", "info", "(", "(", "'generating %s from template'", "%", "dest", ")", ")", "with", "open", "(", "dest", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "out", ")" ]
generate a constants file from its template .
train
true
54,100
def mainloop(n=0): _default_root.tk.mainloop(n)
[ "def", "mainloop", "(", "n", "=", "0", ")", ":", "_default_root", ".", "tk", ".", "mainloop", "(", "n", ")" ]
run the main loop of tcl .
train
false
54,101
def docstring_summary(docstring): return docstring.split(u'\n')[0]
[ "def", "docstring_summary", "(", "docstring", ")", ":", "return", "docstring", ".", "split", "(", "u'\\n'", ")", "[", "0", "]" ]
return summary of docstring .
train
false
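A trivial usage sketch of the record above; the sample docstring is made up.

def docstring_summary(docstring):
    # By convention the summary is the first line of the docstring.
    return docstring.split(u'\n')[0]

print(docstring_summary(u'Frobnicate the widget.\n\nLonger details here.'))
# Frobnicate the widget.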
54,102
def delete_policy(name, policy_name, region=None, key=None, keyid=None, profile=None): conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if (not exists(name, region, key, keyid, profile)): return True try: conn.delete_lb_policy(name, policy_name) log.info('Deleted policy {0} on ELB {1}'.format(policy_name, name)) return True except boto.exception.BotoServerError as e: log.debug(e) msg = 'Failed to delete policy {0} on ELB {1}: {2}'.format(policy_name, name, e.message) log.error(msg) return False
[ "def", "delete_policy", "(", "name", ",", "policy_name", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "(", "not", "exists", "(", "name", ",", "region", ",", "key", ",", "keyid", ",", "profile", ")", ")", ":", "return", "True", "try", ":", "conn", ".", "delete_lb_policy", "(", "name", ",", "policy_name", ")", "log", ".", "info", "(", "'Deleted policy {0} on ELB {1}'", ".", "format", "(", "policy_name", ",", "name", ")", ")", "return", "True", "except", "boto", ".", "exception", ".", "BotoServerError", "as", "e", ":", "log", ".", "debug", "(", "e", ")", "msg", "=", "'Failed to delete policy {0} on ELB {1}: {2}'", ".", "format", "(", "policy_name", ",", "name", ",", "e", ".", "message", ")", "log", ".", "error", "(", "msg", ")", "return", "False" ]
delete an elb policy .
train
false
54,103
def _convert_to_standard_attr(attr): ret_attr = ATTR_MAP.get(attr, None) if (ret_attr is None): return attr.lower() return ret_attr
[ "def", "_convert_to_standard_attr", "(", "attr", ")", ":", "ret_attr", "=", "ATTR_MAP", ".", "get", "(", "attr", ",", "None", ")", "if", "(", "ret_attr", "is", "None", ")", ":", "return", "attr", ".", "lower", "(", ")", "return", "ret_attr" ]
helper function for _process_info_installed_output() ; converts an opkg attribute name to the standard attribute name used across pkg modules .
train
true
54,104
def strtobool(term, table={u'false': False, u'no': False, u'0': False, u'true': True, u'yes': True, u'1': True, u'on': True, u'off': False}): if isinstance(term, string_t): try: return table[term.lower()] except KeyError: raise TypeError(u'Cannot coerce {0!r} to type bool'.format(term)) return term
[ "def", "strtobool", "(", "term", ",", "table", "=", "{", "u'false'", ":", "False", ",", "u'no'", ":", "False", ",", "u'0'", ":", "False", ",", "u'true'", ":", "True", ",", "u'yes'", ":", "True", ",", "u'1'", ":", "True", ",", "u'on'", ":", "True", ",", "u'off'", ":", "False", "}", ")", ":", "if", "isinstance", "(", "term", ",", "string_t", ")", ":", "try", ":", "return", "table", "[", "term", ".", "lower", "(", ")", "]", "except", "KeyError", ":", "raise", "TypeError", "(", "u'Cannot coerce {0!r} to type bool'", ".", "format", "(", "term", ")", ")", "return", "term" ]
convert common terms for true/false to bool .
train
false
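A standalone sketch of the record above; string_t comes from the surrounding module in the source, so binding it to str here is an assumption.

string_t = str  # assumption: the source aliases this to its text type

def strtobool(term, table={'false': False, 'no': False, '0': False,
                           'true': True, 'yes': True, '1': True,
                           'on': True, 'off': False}):
    if isinstance(term, string_t):
        try:
            return table[term.lower()]
        except KeyError:
            raise TypeError('Cannot coerce {0!r} to type bool'.format(term))
    return term  # non-strings pass through unchanged

print(strtobool('Yes'))  # True
print(strtobool('off'))  # False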
54,105
@contextlib.contextmanager def kill_on_error(proc): with proc: try: (yield proc) except: proc.kill() raise
[ "@", "contextlib", ".", "contextmanager", "def", "kill_on_error", "(", "proc", ")", ":", "with", "proc", ":", "try", ":", "(", "yield", "proc", ")", "except", ":", "proc", ".", "kill", "(", ")", "raise" ]
context manager killing the subprocess if a python exception is raised .
train
false
54,107
def _sanitize(migrate_engine, table): session = orm.sessionmaker(bind=migrate_engine)() qry = session.query(table.c.image_id, table.c.member).group_by(table.c.image_id, table.c.member).having((func.count() > 1)) for (image_id, member) in qry: d = table.delete().where(and_((table.c.deleted == True), (table.c.image_id == image_id), (table.c.member == member))) d.execute() session.close()
[ "def", "_sanitize", "(", "migrate_engine", ",", "table", ")", ":", "session", "=", "orm", ".", "sessionmaker", "(", "bind", "=", "migrate_engine", ")", "(", ")", "qry", "=", "session", ".", "query", "(", "table", ".", "c", ".", "image_id", ",", "table", ".", "c", ".", "member", ")", ".", "group_by", "(", "table", ".", "c", ".", "image_id", ",", "table", ".", "c", ".", "member", ")", ".", "having", "(", "(", "func", ".", "count", "(", ")", ">", "1", ")", ")", "for", "(", "image_id", ",", "member", ")", "in", "qry", ":", "d", "=", "table", ".", "delete", "(", ")", ".", "where", "(", "and_", "(", "(", "table", ".", "c", ".", "deleted", "==", "True", ")", ",", "(", "table", ".", "c", ".", "image_id", "==", "image_id", ")", ",", "(", "table", ".", "c", ".", "member", "==", "member", ")", ")", ")", "d", ".", "execute", "(", ")", "session", ".", "close", "(", ")" ]
avoid possible integrity error by removing deleted rows to accommodate the less restrictive uniqueness constraint .
train
false
54,110
def validatePort(switch, intf): ofport = int(switch.cmd('ovs-vsctl get Interface', intf, 'ofport')) if (ofport != switch.ports[intf]): warn('WARNING: ofport for', intf, 'is actually', ofport, '\n') return 0 else: return 1
[ "def", "validatePort", "(", "switch", ",", "intf", ")", ":", "ofport", "=", "int", "(", "switch", ".", "cmd", "(", "'ovs-vsctl get Interface'", ",", "intf", ",", "'ofport'", ")", ")", "if", "(", "ofport", "!=", "switch", ".", "ports", "[", "intf", "]", ")", ":", "warn", "(", "'WARNING: ofport for'", ",", "intf", ",", "'is actually'", ",", "ofport", ",", "'\\n'", ")", "return", "0", "else", ":", "return", "1" ]
validate an interface's ofport number against the switch's cached port mapping .
train
false
54,111
def endtags(html): NON_CLOSING_TAGS = ['AREA', 'BASE', 'BASEFONT', 'BR', 'COL', 'FRAME', 'HR', 'IMG', 'INPUT', 'ISINDEX', 'LINK', 'META', 'PARAM'] opened_tags = re.findall('<([a-z]+)[^<>]*>', html) closed_tags = re.findall('</([a-z]+)>', html) opened_tags = [i.lower() for i in opened_tags if (i.upper() not in NON_CLOSING_TAGS)] closed_tags = [i.lower() for i in closed_tags] len_opened = len(opened_tags) if (len_opened == len(closed_tags)): return html opened_tags.reverse() for tag in opened_tags: if (tag in closed_tags): closed_tags.remove(tag) else: html += ('</%s>' % tag) return html
[ "def", "endtags", "(", "html", ")", ":", "NON_CLOSING_TAGS", "=", "[", "'AREA'", ",", "'BASE'", ",", "'BASEFONT'", ",", "'BR'", ",", "'COL'", ",", "'FRAME'", ",", "'HR'", ",", "'IMG'", ",", "'INPUT'", ",", "'ISINDEX'", ",", "'LINK'", ",", "'META'", ",", "'PARAM'", "]", "opened_tags", "=", "re", ".", "findall", "(", "'<([a-z]+)[^<>]*>'", ",", "html", ")", "closed_tags", "=", "re", ".", "findall", "(", "'</([a-z]+)>'", ",", "html", ")", "opened_tags", "=", "[", "i", ".", "lower", "(", ")", "for", "i", "in", "opened_tags", "if", "(", "i", ".", "upper", "(", ")", "not", "in", "NON_CLOSING_TAGS", ")", "]", "closed_tags", "=", "[", "i", ".", "lower", "(", ")", "for", "i", "in", "closed_tags", "]", "len_opened", "=", "len", "(", "opened_tags", ")", "if", "(", "len_opened", "==", "len", "(", "closed_tags", ")", ")", ":", "return", "html", "opened_tags", ".", "reverse", "(", ")", "for", "tag", "in", "opened_tags", ":", "if", "(", "tag", "in", "closed_tags", ")", ":", "closed_tags", ".", "remove", "(", "tag", ")", "else", ":", "html", "+=", "(", "'</%s>'", "%", "tag", ")", "return", "html" ]
close all open html tags at the end of the string .
train
false
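A reindented sketch of the record above with a small worked input; the HTML fragment is illustrative.

import re

def endtags(html):
    NON_CLOSING_TAGS = ['AREA', 'BASE', 'BASEFONT', 'BR', 'COL', 'FRAME', 'HR',
                        'IMG', 'INPUT', 'ISINDEX', 'LINK', 'META', 'PARAM']
    opened_tags = re.findall('<([a-z]+)[^<>]*>', html)
    closed_tags = re.findall('</([a-z]+)>', html)
    # Void elements such as <br> never need a closing tag.
    opened_tags = [i.lower() for i in opened_tags if i.upper() not in NON_CLOSING_TAGS]
    closed_tags = [i.lower() for i in closed_tags]
    if len(opened_tags) == len(closed_tags):
        return html
    opened_tags.reverse()  # close inner tags before outer ones
    for tag in opened_tags:
        if tag in closed_tags:
            closed_tags.remove(tag)
        else:
            html += '</%s>' % tag
    return html

print(endtags('<div><p>unterminated'))  # <div><p>unterminated</p></div>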
54,112
def create_geq(lh_op, rh_op=None, constr_id=None): if (rh_op is not None): rh_op = neg_expr(rh_op) return create_leq(neg_expr(lh_op), rh_op, constr_id)
[ "def", "create_geq", "(", "lh_op", ",", "rh_op", "=", "None", ",", "constr_id", "=", "None", ")", ":", "if", "(", "rh_op", "is", "not", "None", ")", ":", "rh_op", "=", "neg_expr", "(", "rh_op", ")", "return", "create_leq", "(", "neg_expr", "(", "lh_op", ")", ",", "rh_op", ",", "constr_id", ")" ]
creates an internal greater than or equal constraint .
train
false
54,113
def get_current_module_name(): return os.environ['CURRENT_MODULE_ID']
[ "def", "get_current_module_name", "(", ")", ":", "return", "os", ".", "environ", "[", "'CURRENT_MODULE_ID'", "]" ]
returns the module name of the current instance .
train
false
54,115
def scale_timings(timelist, input_units, output_units, time_repetition): if (input_units == output_units): _scalefactor = 1.0 if ((input_units == u'scans') and (output_units == u'secs')): _scalefactor = time_repetition if ((input_units == u'secs') and (output_units == u'scans')): _scalefactor = (1.0 / time_repetition) timelist = [np.max([0.0, (_scalefactor * t)]) for t in timelist] return timelist
[ "def", "scale_timings", "(", "timelist", ",", "input_units", ",", "output_units", ",", "time_repetition", ")", ":", "if", "(", "input_units", "==", "output_units", ")", ":", "_scalefactor", "=", "1.0", "if", "(", "(", "input_units", "==", "u'scans'", ")", "and", "(", "output_units", "==", "u'secs'", ")", ")", ":", "_scalefactor", "=", "time_repetition", "if", "(", "(", "input_units", "==", "u'secs'", ")", "and", "(", "output_units", "==", "u'scans'", ")", ")", ":", "_scalefactor", "=", "(", "1.0", "/", "time_repetition", ")", "timelist", "=", "[", "np", ".", "max", "(", "[", "0.0", ",", "(", "_scalefactor", "*", "t", ")", "]", ")", "for", "t", "in", "timelist", "]", "return", "timelist" ]
scales timings given input and output units ; timelist is the list of times to scale , input_units and output_units are each secs or scans , and time_repetition is the repetition time in seconds .
train
false
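A reindented sketch of the record above with a worked call; numpy is required and the sample repetition time of 2.0 seconds is illustrative.

import numpy as np

def scale_timings(timelist, input_units, output_units, time_repetition):
    if input_units == output_units:
        _scalefactor = 1.0
    if input_units == 'scans' and output_units == 'secs':
        _scalefactor = time_repetition
    if input_units == 'secs' and output_units == 'scans':
        _scalefactor = 1.0 / time_repetition
    # Negative onsets are clipped to zero after scaling.
    return [np.max([0.0, _scalefactor * t]) for t in timelist]

print([float(t) for t in scale_timings([1, 2, 3], 'scans', 'secs', 2.0)])
# [2.0, 4.0, 6.0]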
54,116
def get_datetime_format(format='medium', locale=LC_TIME): patterns = Locale.parse(locale).datetime_formats if (format not in patterns): format = None return patterns[format]
[ "def", "get_datetime_format", "(", "format", "=", "'medium'", ",", "locale", "=", "LC_TIME", ")", ":", "patterns", "=", "Locale", ".", "parse", "(", "locale", ")", ".", "datetime_formats", "if", "(", "format", "not", "in", "patterns", ")", ":", "format", "=", "None", "return", "patterns", "[", "format", "]" ]
return the datetime formatting patterns used by the locale for the specified format .
train
false
54,117
def encrypt_and_encode(data, key): return base64.urlsafe_b64encode(aes_encrypt(data, key))
[ "def", "encrypt_and_encode", "(", "data", ",", "key", ")", ":", "return", "base64", ".", "urlsafe_b64encode", "(", "aes_encrypt", "(", "data", ",", "key", ")", ")" ]
encrypts and encodes data using the given key .
train
false
54,118
def offset_func(func, offset, *args): def _offset(*args): args2 = list(map(add, args, offset)) return func(*args2) with ignoring(Exception): _offset.__name__ = ('offset_' + func.__name__) return _offset
[ "def", "offset_func", "(", "func", ",", "offset", ",", "*", "args", ")", ":", "def", "_offset", "(", "*", "args", ")", ":", "args2", "=", "list", "(", "map", "(", "add", ",", "args", ",", "offset", ")", ")", "return", "func", "(", "*", "args2", ")", "with", "ignoring", "(", "Exception", ")", ":", "_offset", ".", "__name__", "=", "(", "'offset_'", "+", "func", ".", "__name__", ")", "return", "_offset" ]
returns a wrapper around func whose positional inputs are shifted by offset .
train
false
54,119
def findMajorityElement(lst): dd = defaultdict(int) n = len(lst) for i in lst: dd[i] += 1 for key in dd: if (dd[key] > (n // 2)): return key return None
[ "def", "findMajorityElement", "(", "lst", ")", ":", "dd", "=", "defaultdict", "(", "int", ")", "n", "=", "len", "(", "lst", ")", "for", "i", "in", "lst", ":", "dd", "[", "i", "]", "+=", "1", "for", "key", "in", "dd", ":", "if", "(", "dd", "[", "key", "]", ">", "(", "n", "//", "2", ")", ")", ":", "return", "key", "return", "None" ]
lst: list of entries to find a majority element from ; return: the majority element , i.e. an element that occurs more than half of the time , or none if there is no such element .
train
false
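A reindented sketch of the record above; the sample lists are illustrative.

from collections import defaultdict

def findMajorityElement(lst):
    dd = defaultdict(int)
    n = len(lst)
    for i in lst:
        dd[i] += 1
    for key in dd:
        # Majority means strictly more than half of all entries.
        if dd[key] > n // 2:
            return key
    return None

print(findMajorityElement([1, 2, 1, 1]))  # 1
print(findMajorityElement([1, 2]))        # None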
54,120
def _update_args(args, key, value): args = dict(args) if (key not in args): args[key] = value return args
[ "def", "_update_args", "(", "args", ",", "key", ",", "value", ")", ":", "args", "=", "dict", "(", "args", ")", "if", "(", "key", "not", "in", "args", ")", ":", "args", "[", "key", "]", "=", "value", "return", "args" ]
add a new pair to a copy of the arguments dict if the key is not already present .
train
false
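A usage sketch of the record above; note the copy, so the caller's dict is left untouched.

def _update_args(args, key, value):
    args = dict(args)  # shallow copy; the original mapping is not mutated
    if key not in args:
        args[key] = value
    return args

base = {'a': 1}
print(_update_args(base, 'b', 2))  # {'a': 1, 'b': 2}
print(base)                        # {'a': 1}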
54,121
def exampleCustomTags(): net = Mininet(topo=VLANStarTopo()) net.start() CLI(net) net.stop()
[ "def", "exampleCustomTags", "(", ")", ":", "net", "=", "Mininet", "(", "topo", "=", "VLANStarTopo", "(", ")", ")", "net", ".", "start", "(", ")", "CLI", "(", "net", ")", "net", ".", "stop", "(", ")" ]
simple example that exercises vlanstartopo .
train
false
54,122
def describe_data(data): items = OrderedDict() if (data is None): return items if isinstance(data, SqlTable): items['Data instances'] = data.approx_len() else: items['Data instances'] = len(data) items.update(describe_domain(data.domain)) return items
[ "def", "describe_data", "(", "data", ")", ":", "items", "=", "OrderedDict", "(", ")", "if", "(", "data", "is", "None", ")", ":", "return", "items", "if", "isinstance", "(", "data", ",", "SqlTable", ")", ":", "items", "[", "'Data instances'", "]", "=", "data", ".", "approx_len", "(", ")", "else", ":", "items", "[", "'Data instances'", "]", "=", "len", "(", "data", ")", "items", ".", "update", "(", "describe_domain", "(", "data", ".", "domain", ")", ")", "return", "items" ]
return an :obj:ordereddict describing the data ; the description contains the keys "data instances" and "features" .
train
false
54,123
def _write_js(output_root, classes): contents = {} js_fragments = set() for class_ in classes: module_js = class_.get_javascript() js_fragments.add((0, 'js', module_js.get('xmodule_js'))) for filetype in ('coffee', 'js'): for (idx, fragment) in enumerate(module_js.get(filetype, [])): js_fragments.add(((idx + 1), filetype, fragment)) for (idx, filetype, fragment) in sorted(js_fragments): filename = '{idx:0=3d}-{hash}.{type}'.format(idx=idx, hash=hashlib.md5(fragment).hexdigest(), type=filetype) contents[filename] = fragment _write_files(output_root, contents, {'.coffee': '.js'}) return [(output_root / filename) for filename in contents.keys()]
[ "def", "_write_js", "(", "output_root", ",", "classes", ")", ":", "contents", "=", "{", "}", "js_fragments", "=", "set", "(", ")", "for", "class_", "in", "classes", ":", "module_js", "=", "class_", ".", "get_javascript", "(", ")", "js_fragments", ".", "add", "(", "(", "0", ",", "'js'", ",", "module_js", ".", "get", "(", "'xmodule_js'", ")", ")", ")", "for", "filetype", "in", "(", "'coffee'", ",", "'js'", ")", ":", "for", "(", "idx", ",", "fragment", ")", "in", "enumerate", "(", "module_js", ".", "get", "(", "filetype", ",", "[", "]", ")", ")", ":", "js_fragments", ".", "add", "(", "(", "(", "idx", "+", "1", ")", ",", "filetype", ",", "fragment", ")", ")", "for", "(", "idx", ",", "filetype", ",", "fragment", ")", "in", "sorted", "(", "js_fragments", ")", ":", "filename", "=", "'{idx:0=3d}-{hash}.{type}'", ".", "format", "(", "idx", "=", "idx", ",", "hash", "=", "hashlib", ".", "md5", "(", "fragment", ")", ".", "hexdigest", "(", ")", ",", "type", "=", "filetype", ")", "contents", "[", "filename", "]", "=", "fragment", "_write_files", "(", "output_root", ",", "contents", ",", "{", "'.coffee'", ":", "'.js'", "}", ")", "return", "[", "(", "output_root", "/", "filename", ")", "for", "filename", "in", "contents", ".", "keys", "(", ")", "]" ]
write the javascript fragments from all xmodules in classes into output_root as individual files .
train
false
54,124
def set_register_stylesheet(obj): qss = get_stylesheet(obj.STYLESHEET) log.config.vdebug('stylesheet for {}: {}'.format(obj.__class__.__name__, qss)) obj.setStyleSheet(qss) objreg.get('config').changed.connect(functools.partial(_update_stylesheet, obj))
[ "def", "set_register_stylesheet", "(", "obj", ")", ":", "qss", "=", "get_stylesheet", "(", "obj", ".", "STYLESHEET", ")", "log", ".", "config", ".", "vdebug", "(", "'stylesheet for {}: {}'", ".", "format", "(", "obj", ".", "__class__", ".", "__name__", ",", "qss", ")", ")", "obj", ".", "setStyleSheet", "(", "qss", ")", "objreg", ".", "get", "(", "'config'", ")", ".", "changed", ".", "connect", "(", "functools", ".", "partial", "(", "_update_stylesheet", ",", "obj", ")", ")" ]
set the stylesheet for an object based on its STYLESHEET attribute and re-apply it whenever the config changes .
train
false
54,125
@dec.skip_win32 def test_arg_split(): tests = [['hi', ['hi']], [u'hi', [u'hi']], ['hello there', ['hello', 'there']], [u'h\u01cello', [u'h\u01cello']], ['something "with quotes"', ['something', '"with quotes"']]] for (argstr, argv) in tests: nt.assert_equal(arg_split(argstr), argv)
[ "@", "dec", ".", "skip_win32", "def", "test_arg_split", "(", ")", ":", "tests", "=", "[", "[", "'hi'", ",", "[", "'hi'", "]", "]", ",", "[", "u'hi'", ",", "[", "u'hi'", "]", "]", ",", "[", "'hello there'", ",", "[", "'hello'", ",", "'there'", "]", "]", ",", "[", "u'h\\u01cello'", ",", "[", "u'h\\u01cello'", "]", "]", ",", "[", "'something \"with quotes\"'", ",", "[", "'something'", ",", "'\"with quotes\"'", "]", "]", "]", "for", "(", "argstr", ",", "argv", ")", "in", "tests", ":", "nt", ".", "assert_equal", "(", "arg_split", "(", "argstr", ")", ",", "argv", ")" ]
ensure that argument lines are correctly split like in a shell .
train
false
54,126
@contextlib.contextmanager def register_dispatcher(disp): assert callable(disp) assert callable(disp.py_func) name = disp.py_func.__name__ _temporary_dispatcher_map[name] = disp try: (yield) finally: del _temporary_dispatcher_map[name]
[ "@", "contextlib", ".", "contextmanager", "def", "register_dispatcher", "(", "disp", ")", ":", "assert", "callable", "(", "disp", ")", "assert", "callable", "(", "disp", ".", "py_func", ")", "name", "=", "disp", ".", "py_func", ".", "__name__", "_temporary_dispatcher_map", "[", "name", "]", "=", "disp", "try", ":", "(", "yield", ")", "finally", ":", "del", "_temporary_dispatcher_map", "[", "name", "]" ]
register a dispatcher for inference while it is not yet stored as a global or closure variable (e.g. during execution of the @jit() call) .
train
false
54,127
def _check_set(ch, projs, ch_type): new_kind = _human2fiff[ch_type] if (ch['kind'] != new_kind): for proj in projs: if (ch['ch_name'] in proj['data']['col_names']): raise RuntimeError(('Cannot change channel type for channel %s in projector "%s"' % (ch['ch_name'], proj['desc']))) ch['kind'] = new_kind
[ "def", "_check_set", "(", "ch", ",", "projs", ",", "ch_type", ")", ":", "new_kind", "=", "_human2fiff", "[", "ch_type", "]", "if", "(", "ch", "[", "'kind'", "]", "!=", "new_kind", ")", ":", "for", "proj", "in", "projs", ":", "if", "(", "ch", "[", "'ch_name'", "]", "in", "proj", "[", "'data'", "]", "[", "'col_names'", "]", ")", ":", "raise", "RuntimeError", "(", "(", "'Cannot change channel type for channel %s in projector \"%s\"'", "%", "(", "ch", "[", "'ch_name'", "]", ",", "proj", "[", "'desc'", "]", ")", ")", ")", "ch", "[", "'kind'", "]", "=", "new_kind" ]
helper to make sure type change is compatible with projectors .
train
false
54,128
def test_step_description(): description = core.StepDescription(10, __file__) assert_equals(description.file, core.fs.relpath(__file__)) assert_not_equals(description.file, __file__) assert_equals(description.line, 10)
[ "def", "test_step_description", "(", ")", ":", "description", "=", "core", ".", "StepDescription", "(", "10", ",", "__file__", ")", "assert_equals", "(", "description", ".", "file", ",", "core", ".", "fs", ".", "relpath", "(", "__file__", ")", ")", "assert_not_equals", "(", "description", ".", "file", ",", "__file__", ")", "assert_equals", "(", "description", ".", "line", ",", "10", ")" ]
a step description takes a line number and a filename .
train
false
54,129
def find_diff(file1, file2): DEBUG = True proc = subprocess.Popen([DIFF, file1, file2], stdout=subprocess.PIPE, stderr=subprocess.PIPE) (diff_output, std_err) = proc.communicate() if DEBUG: print '>>>Config differences:' print diff_output return diff_output
[ "def", "find_diff", "(", "file1", ",", "file2", ")", ":", "DEBUG", "=", "True", "proc", "=", "subprocess", ".", "Popen", "(", "[", "DIFF", ",", "file1", ",", "file2", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "(", "diff_output", ",", "std_err", ")", "=", "proc", ".", "communicate", "(", ")", "if", "DEBUG", ":", "print", "'>>>Config differences:'", "print", "diff_output", "return", "diff_output" ]
find the differences between the two configuration files .
train
false
54,130
@logic.auth_audit_exempt def send_email_notifications(context, data_dict): if (not request.environ.get('paste.command_request')): _check_access('send_email_notifications', context, data_dict) if (not converters.asbool(config.get('ckan.activity_streams_email_notifications'))): raise ValidationError('ckan.activity_streams_email_notifications is not enabled in config') email_notifications.get_and_send_notifications_for_all_users()
[ "@", "logic", ".", "auth_audit_exempt", "def", "send_email_notifications", "(", "context", ",", "data_dict", ")", ":", "if", "(", "not", "request", ".", "environ", ".", "get", "(", "'paste.command_request'", ")", ")", ":", "_check_access", "(", "'send_email_notifications'", ",", "context", ",", "data_dict", ")", "if", "(", "not", "converters", ".", "asbool", "(", "config", ".", "get", "(", "'ckan.activity_streams_email_notifications'", ")", ")", ")", ":", "raise", "ValidationError", "(", "'ckan.activity_streams_email_notifications is not enabled in config'", ")", "email_notifications", ".", "get_and_send_notifications_for_all_users", "(", ")" ]
send any pending activity stream notification emails to users .
train
false
54,132
def wait_for_server(server, port): print 'Checking server {server} on port {port}'.format(server=server, port=port) if tasks.environment.dry_run: return True attempts = 0 server_ok = False while (attempts < 30): try: connection = httplib.HTTPConnection(server, port, timeout=10) connection.request('GET', '/') response = connection.getresponse() if (int(response.status) == 200): server_ok = True break except: pass attempts += 1 time.sleep(1) return server_ok
[ "def", "wait_for_server", "(", "server", ",", "port", ")", ":", "print", "'Checking server {server} on port {port}'", ".", "format", "(", "server", "=", "server", ",", "port", "=", "port", ")", "if", "tasks", ".", "environment", ".", "dry_run", ":", "return", "True", "attempts", "=", "0", "server_ok", "=", "False", "while", "(", "attempts", "<", "30", ")", ":", "try", ":", "connection", "=", "httplib", ".", "HTTPConnection", "(", "server", ",", "port", ",", "timeout", "=", "10", ")", "connection", ".", "request", "(", "'GET'", ",", "'/'", ")", "response", "=", "connection", ".", "getresponse", "(", ")", "if", "(", "int", "(", "response", ".", "status", ")", "==", "200", ")", ":", "server_ok", "=", "True", "break", "except", ":", "pass", "attempts", "+=", "1", "time", ".", "sleep", "(", "1", ")", "return", "server_ok" ]
wait for a server to respond with status 200 .
train
false
54,133
def periodic_task(*args, **options): return task(**dict({'base': PeriodicTask}, **options))
[ "def", "periodic_task", "(", "*", "args", ",", "**", "options", ")", ":", "return", "task", "(", "**", "dict", "(", "{", "'base'", ":", "PeriodicTask", "}", ",", "**", "options", ")", ")" ]
decorator to create a periodic task class out of any callable .
train
false
54,134
def opts_to_pp(repair, unpack, delete): if (repair is None): return None pp = 0 if repair: pp = 1 if unpack: pp = 2 if delete: pp = 3 return pp
[ "def", "opts_to_pp", "(", "repair", ",", "unpack", ",", "delete", ")", ":", "if", "(", "repair", "is", "None", ")", ":", "return", "None", "pp", "=", "0", "if", "repair", ":", "pp", "=", "1", "if", "unpack", ":", "pp", "=", "2", "if", "delete", ":", "pp", "=", "3", "return", "pp" ]
convert repair/unpack/delete flags to a numeric post-processing option .
train
false
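A usage sketch of the record above; later flags win, so delete=True always yields 3, and repair=None short-circuits to None.

def opts_to_pp(repair, unpack, delete):
    if repair is None:
        return None
    pp = 0
    if repair:
        pp = 1
    if unpack:
        pp = 2
    if delete:
        pp = 3
    return pp

print(opts_to_pp(True, False, False))  # 1
print(opts_to_pp(True, True, True))    # 3
print(opts_to_pp(None, True, True))    # None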
54,135
def python_3000_not_equal(logical_line): pos = logical_line.find('<>') if (pos > (-1)): (yield (pos, "W603 '<>' is deprecated, use '!='"))
[ "def", "python_3000_not_equal", "(", "logical_line", ")", ":", "pos", "=", "logical_line", ".", "find", "(", "'<>'", ")", "if", "(", "pos", ">", "(", "-", "1", ")", ")", ":", "(", "yield", "(", "pos", ",", "\"W603 '<>' is deprecated, use '!='\"", ")", ")" ]
new code should always use != instead of <> .
train
false
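A sketch of the pep8-style check above; the offending source line is made up.

def python_3000_not_equal(logical_line):
    # Yield (offset, message) pairs, as pep8-style checkers do.
    pos = logical_line.find('<>')
    if pos > -1:
        yield pos, "W603 '<>' is deprecated, use '!='"

print(list(python_3000_not_equal('if a <> b:')))
# [(5, "W603 '<>' is deprecated, use '!='")]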
54,136
def _get_item_key(item, key): if (key in item): return item[key] nested_item = item for subkey in key.split('.'): if (not subkey): raise ValueError(('empty subkey in %r' % key)) try: nested_item = nested_item[subkey] except KeyError as e: raise KeyError(('%r - looking up key %r in %r' % (e, key, nested_item))) else: return nested_item
[ "def", "_get_item_key", "(", "item", ",", "key", ")", ":", "if", "(", "key", "in", "item", ")", ":", "return", "item", "[", "key", "]", "nested_item", "=", "item", "for", "subkey", "in", "key", ".", "split", "(", "'.'", ")", ":", "if", "(", "not", "subkey", ")", ":", "raise", "ValueError", "(", "(", "'empty subkey in %r'", "%", "key", ")", ")", "try", ":", "nested_item", "=", "nested_item", "[", "subkey", "]", "except", "KeyError", "as", "e", ":", "raise", "KeyError", "(", "(", "'%r - looking up key %r in %r'", "%", "(", "e", ",", "key", ",", "nested_item", ")", ")", ")", "else", ":", "return", "nested_item" ]
allow for lookups in nested dictionaries using dot-separated keys .
train
false
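A reindented sketch of the record above with a nested lookup; the sample mapping is illustrative.

def _get_item_key(item, key):
    if key in item:
        return item[key]
    nested_item = item
    for subkey in key.split('.'):
        if not subkey:
            raise ValueError('empty subkey in %r' % key)
        try:
            # Descend one level per dot-separated subkey.
            nested_item = nested_item[subkey]
        except KeyError as e:
            raise KeyError('%r - looking up key %r in %r' % (e, key, nested_item))
    return nested_item

print(_get_item_key({'a': {'b': 1}}, 'a.b'))  # 1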
54,137
def _TO_DATESTRING(obj): try: return _GA(obj, 'db_date_created').strftime(_DATESTRING) except AttributeError: try: obj.save() except AttributeError: return None return _GA(obj, 'db_date_created').strftime(_DATESTRING)
[ "def", "_TO_DATESTRING", "(", "obj", ")", ":", "try", ":", "return", "_GA", "(", "obj", ",", "'db_date_created'", ")", ".", "strftime", "(", "_DATESTRING", ")", "except", "AttributeError", ":", "try", ":", "obj", ".", "save", "(", ")", "except", "AttributeError", ":", "return", "None", "return", "_GA", "(", "obj", ",", "'db_date_created'", ")", ".", "strftime", "(", "_DATESTRING", ")" ]
creates a datestring from the object's creation date , saving the object first if needed .
train
false
54,139
def _dhtm(mag): sig = np.zeros(len(mag)) midpt = (len(mag) // 2) sig[1:midpt] = 1 sig[(midpt + 1):] = (-1) recon = ifft((mag * np.exp(fft((sig * ifft(np.log(mag))))))).real return recon
[ "def", "_dhtm", "(", "mag", ")", ":", "sig", "=", "np", ".", "zeros", "(", "len", "(", "mag", ")", ")", "midpt", "=", "(", "len", "(", "mag", ")", "//", "2", ")", "sig", "[", "1", ":", "midpt", "]", "=", "1", "sig", "[", "(", "midpt", "+", "1", ")", ":", "]", "=", "(", "-", "1", ")", "recon", "=", "ifft", "(", "(", "mag", "*", "np", ".", "exp", "(", "fft", "(", "(", "sig", "*", "ifft", "(", "np", ".", "log", "(", "mag", ")", ")", ")", ")", ")", ")", ")", ".", "real", "return", "recon" ]
compute the modified 1d discrete hilbert transform ; mag is an ndarray giving the magnitude spectrum .
train
false
54,141
def channel_shift_multi(x, intensity, is_random=False, channel_index=2): if is_random: factor = np.random.uniform((- intensity), intensity) else: factor = intensity results = [] for data in x: data = np.rollaxis(data, channel_index, 0) (min_x, max_x) = (np.min(data), np.max(data)) channel_images = [np.clip((x_channel + factor), min_x, max_x) for x_channel in data] data = np.stack(channel_images, axis=0) data = np.rollaxis(data, 0, (channel_index + 1)) results.append(data) return np.asarray(results)
[ "def", "channel_shift_multi", "(", "x", ",", "intensity", ",", "is_random", "=", "False", ",", "channel_index", "=", "2", ")", ":", "if", "is_random", ":", "factor", "=", "np", ".", "random", ".", "uniform", "(", "(", "-", "intensity", ")", ",", "intensity", ")", "else", ":", "factor", "=", "intensity", "results", "=", "[", "]", "for", "data", "in", "x", ":", "data", "=", "np", ".", "rollaxis", "(", "data", ",", "channel_index", ",", "0", ")", "(", "min_x", ",", "max_x", ")", "=", "(", "np", ".", "min", "(", "data", ")", ",", "np", ".", "max", "(", "data", ")", ")", "channel_images", "=", "[", "np", ".", "clip", "(", "(", "x_channel", "+", "factor", ")", ",", "min_x", ",", "max_x", ")", "for", "x_channel", "in", "data", "]", "data", "=", "np", ".", "stack", "(", "channel_images", ",", "axis", "=", "0", ")", "data", "=", "np", ".", "rollaxis", "(", "data", ",", "0", ",", "(", "channel_index", "+", "1", ")", ")", "results", ".", "append", "(", "data", ")", "return", "np", ".", "asarray", "(", "results", ")" ]
shift the channels of a batch of images with the same arguments .
train
true
54,142
def load_path(path, target): with open(path, 'r') as f: target.update(flatten(yaml.load(f, Loader=yaml.Loader)))
[ "def", "load_path", "(", "path", ",", "target", ")", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "f", ":", "target", ".", "update", "(", "flatten", "(", "yaml", ".", "load", "(", "f", ",", "Loader", "=", "yaml", ".", "Loader", ")", ")", ")" ]
load the flattened dictionary implied by a yaml file into the target dictionary .
train
false
54,143
def test_smote_wrong_kind(): kind = 'rnd' smote = SMOTE(kind=kind, random_state=RND_SEED) assert_raises(ValueError, smote.fit_sample, X, Y)
[ "def", "test_smote_wrong_kind", "(", ")", ":", "kind", "=", "'rnd'", "smote", "=", "SMOTE", "(", "kind", "=", "kind", ",", "random_state", "=", "RND_SEED", ")", "assert_raises", "(", "ValueError", ",", "smote", ".", "fit_sample", ",", "X", ",", "Y", ")" ]
test that an error is raised when a wrong kind of smote is given .
train
false
54,144
@utils.arg('server', metavar='<server>', help=_('Name or ID of server.')) def do_unrescue(cs, args): _find_server(cs, args.server).unrescue()
[ "@", "utils", ".", "arg", "(", "'server'", ",", "metavar", "=", "'<server>'", ",", "help", "=", "_", "(", "'Name or ID of server.'", ")", ")", "def", "do_unrescue", "(", "cs", ",", "args", ")", ":", "_find_server", "(", "cs", ",", "args", ".", "server", ")", ".", "unrescue", "(", ")" ]
restart the server from normal boot disk again .
train
false