id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
26,228
def formset_initial_value_extractor(request, parameter_names): def _intial_value_extractor(request): if (not submitted): return [] data = [] for param in submitted: i = 0 for val in request.POST.getlist(param): if (len(data) == i): data.append({}) data[i][param] = val i += 1 for kwargs in data: for recurrent in recurring: kwargs[recurrent] = request.POST.get(recurrent) initial_data = data return {'initial': initial_data} return _intial_value_extractor
[ "def", "formset_initial_value_extractor", "(", "request", ",", "parameter_names", ")", ":", "def", "_intial_value_extractor", "(", "request", ")", ":", "if", "(", "not", "submitted", ")", ":", "return", "[", "]", "data", "=", "[", "]", "for", "param", "in", ...
builds a list of data that formsets should use by extending some fields to every object .
train
false
26,229
def hg_hook(ui, repo, node=None, **kwargs): seen = set() paths = [] if len(repo): for rev in range(repo[node], len(repo)): for file_ in repo[rev].files(): file_ = op.join(repo.root, file_) if ((file_ in seen) or (not op.exists(file_))): continue seen.add(file_) paths.append(file_) options = parse_options() setup_logger(options) if paths: process_paths(options, candidates=paths)
[ "def", "hg_hook", "(", "ui", ",", "repo", ",", "node", "=", "None", ",", "**", "kwargs", ")", ":", "seen", "=", "set", "(", ")", "paths", "=", "[", "]", "if", "len", "(", "repo", ")", ":", "for", "rev", "in", "range", "(", "repo", "[", "node"...
run pylama after mercurial commit .
train
true
26,230
def convert_to_http_date(dt): if timezone.is_naive(dt): default_timezone = timezone.get_default_timezone() dt = timezone.make_aware(dt, default_timezone) utc_dt = dt.astimezone(timezone.utc) epoch_dt = calendar.timegm(utc_dt.utctimetuple()) return http_date(epoch_dt)
[ "def", "convert_to_http_date", "(", "dt", ")", ":", "if", "timezone", ".", "is_naive", "(", "dt", ")", ":", "default_timezone", "=", "timezone", ".", "get_default_timezone", "(", ")", "dt", "=", "timezone", ".", "make_aware", "(", "dt", ",", "default_timezon...
given a timezone naive or aware datetime return the http date formatted string to be used in http response headers .
train
false
26,232
def saferepr(object): return _safe_repr(object, {}, None, 0)[0]
[ "def", "saferepr", "(", "object", ")", ":", "return", "_safe_repr", "(", "object", ",", "{", "}", ",", "None", ",", "0", ")", "[", "0", "]" ]
safe version of :func:repr .
train
false
26,233
def _get_kernel_name(): names = ['kernel'] if ('NVPROF_ID' in os.environ): for frame in tb.extract_stack(): if nrv_re.search(frame[0]): break caller = frame[0:2] (file_path, file_name) = os.path.split(caller[0]) (path1, path2) = os.path.split(file_path) (file_base, ext) = os.path.splitext(file_name) for name in (path2, file_base, ext): name = name_re.sub('', name) if name: names.append(name) names.append(native_str(caller[1])) return names
[ "def", "_get_kernel_name", "(", ")", ":", "names", "=", "[", "'kernel'", "]", "if", "(", "'NVPROF_ID'", "in", "os", ".", "environ", ")", ":", "for", "frame", "in", "tb", ".", "extract_stack", "(", ")", ":", "if", "nrv_re", ".", "search", "(", "frame"...
returns the path of the kernel .
train
false
26,235
def upload_to_mugshot(instance, filename): extension = filename.split('.')[(-1)].lower() (salt, hash) = generate_sha1(instance.pk) path = (userena_settings.USERENA_MUGSHOT_PATH % {'username': instance.user.username, 'id': instance.user.id, 'date': instance.user.date_joined, 'date_now': get_datetime_now().date()}) return ('%(path)s%(hash)s.%(extension)s' % {'path': path, 'hash': hash[:10], 'extension': extension})
[ "def", "upload_to_mugshot", "(", "instance", ",", "filename", ")", ":", "extension", "=", "filename", ".", "split", "(", "'.'", ")", "[", "(", "-", "1", ")", "]", ".", "lower", "(", ")", "(", "salt", ",", "hash", ")", "=", "generate_sha1", "(", "in...
uploads a mugshot for a user to the userena_mugshot_path and saving it under unique hash for the image .
train
true
26,236
def stop_reactor(): global _twisted_thread def _stop_reactor(): '"Helper for calling stop from withing the thread.' reactor.stop() reactor.callFromThread(_stop_reactor) reactor_thread.join() for p in reactor.getDelayedCalls(): if p.active(): p.cancel() _twisted_thread = None
[ "def", "stop_reactor", "(", ")", ":", "global", "_twisted_thread", "def", "_stop_reactor", "(", ")", ":", "reactor", ".", "stop", "(", ")", "reactor", ".", "callFromThread", "(", "_stop_reactor", ")", "reactor_thread", ".", "join", "(", ")", "for", "p", "i...
stop the reactor and join the reactor thread until it stops .
train
true
26,237
def memdump(samples=10, file=None): say = partial(print, file=file) if (ps() is None): say(u'- rss: (psutil not installed).') return (prev, after_collect) = _memdump(samples) if prev: say(u'- rss (sample):') for mem in prev: say(u'- > {0},'.format(mem)) say(u'- rss (end): {0}.'.format(after_collect))
[ "def", "memdump", "(", "samples", "=", "10", ",", "file", "=", "None", ")", ":", "say", "=", "partial", "(", "print", ",", "file", "=", "file", ")", "if", "(", "ps", "(", ")", "is", "None", ")", ":", "say", "(", "u'- rss: (psutil not installed).'", ...
dump statistics of previous memsample requests .
train
false
26,238
def get_base(): r = requests.get(URL_BASE) t = r.text return [x[:x.index('/"')] for x in t.split('<a href="') if ('/"' in x)]
[ "def", "get_base", "(", ")", ":", "r", "=", "requests", ".", "get", "(", "URL_BASE", ")", "t", "=", "r", ".", "text", "return", "[", "x", "[", ":", "x", ".", "index", "(", "'/\"'", ")", "]", "for", "x", "in", "t", ".", "split", "(", "'<a href...
returns a list of subpages for remotes .
train
false
26,239
def volume_mute(hass): hass.services.call(DOMAIN, SERVICE_VOLUME_MUTE)
[ "def", "volume_mute", "(", "hass", ")", ":", "hass", ".", "services", ".", "call", "(", "DOMAIN", ",", "SERVICE_VOLUME_MUTE", ")" ]
press the keyboard button for muting volume .
train
false
26,240
def _get_realtime_mask(flavor, image): (flavor_mask, image_mask) = _get_flavor_image_meta('cpu_realtime_mask', flavor, image) return (image_mask or flavor_mask)
[ "def", "_get_realtime_mask", "(", "flavor", ",", "image", ")", ":", "(", "flavor_mask", ",", "image_mask", ")", "=", "_get_flavor_image_meta", "(", "'cpu_realtime_mask'", ",", "flavor", ",", "image", ")", "return", "(", "image_mask", "or", "flavor_mask", ")" ]
returns realtime mask based on flavor/image meta .
train
false
26,242
@then(u'we see the named query executed') def step_see_named_query_executed(context): _expect_exact(context, u'12345', timeout=1) _expect_exact(context, u'SELECT 1', timeout=1)
[ "@", "then", "(", "u'we see the named query executed'", ")", "def", "step_see_named_query_executed", "(", "context", ")", ":", "_expect_exact", "(", "context", ",", "u'12345'", ",", "timeout", "=", "1", ")", "_expect_exact", "(", "context", ",", "u'SELECT 1'", ","...
wait to see select output .
train
false
26,245
def add_kernel_arrays_1D(array_1, array_2): if (array_1.size > array_2.size): new_array = array_1.copy() center = (array_1.size // 2) slice_ = slice((center - (array_2.size // 2)), ((center + (array_2.size // 2)) + 1)) new_array[slice_] += array_2 return new_array elif (array_2.size > array_1.size): new_array = array_2.copy() center = (array_2.size // 2) slice_ = slice((center - (array_1.size // 2)), ((center + (array_1.size // 2)) + 1)) new_array[slice_] += array_1 return new_array return (array_2 + array_1)
[ "def", "add_kernel_arrays_1D", "(", "array_1", ",", "array_2", ")", ":", "if", "(", "array_1", ".", "size", ">", "array_2", ".", "size", ")", ":", "new_array", "=", "array_1", ".", "copy", "(", ")", "center", "=", "(", "array_1", ".", "size", "//", "...
add two 1d kernel arrays of different size .
train
false
26,247
def parse_extra_model_fields(extra_model_fields): fields = defaultdict(list) for entry in extra_model_fields: (model_key, field_name) = parse_field_path(entry[0]) field_class = import_field(entry[1]) (field_args, field_kwargs) = entry[2:] try: field = field_class(*field_args, **field_kwargs) except TypeError as e: raise ImproperlyConfigured((u"The EXTRA_MODEL_FIELDS setting contains arguments for the field '%s' which could not be applied: %s" % (entry[1], e))) fields[model_key].append((field_name, field)) return fields
[ "def", "parse_extra_model_fields", "(", "extra_model_fields", ")", ":", "fields", "=", "defaultdict", "(", "list", ")", "for", "entry", "in", "extra_model_fields", ":", "(", "model_key", ",", "field_name", ")", "=", "parse_field_path", "(", "entry", "[", "0", ...
parses the value of extra_model_fields .
train
true
26,248
@composite def node_strategy(draw, min_number_of_applications=0, stateful_applications=False, uuid=st.uuids(), applications=application_strategy()): applications = {a.name: a for a in draw(st.lists(application_strategy(stateful=stateful_applications), min_size=min_number_of_applications, average_size=2, max_size=5))} return Node(uuid=draw(uuid), applications=applications, manifestations={a.volume.manifestation.dataset_id: a.volume.manifestation for a in applications.values() if (a.volume is not None)})
[ "@", "composite", "def", "node_strategy", "(", "draw", ",", "min_number_of_applications", "=", "0", ",", "stateful_applications", "=", "False", ",", "uuid", "=", "st", ".", "uuids", "(", ")", ",", "applications", "=", "application_strategy", "(", ")", ")", "...
a hypothesis strategy to generate a node .
train
false
26,250
def step_size_to_bound(x, s, lb, ub): non_zero = np.nonzero(s) s_non_zero = s[non_zero] steps = np.empty_like(x) steps.fill(np.inf) with np.errstate(over='ignore'): steps[non_zero] = np.maximum(((lb - x)[non_zero] / s_non_zero), ((ub - x)[non_zero] / s_non_zero)) min_step = np.min(steps) return (min_step, (np.equal(steps, min_step) * np.sign(s).astype(int)))
[ "def", "step_size_to_bound", "(", "x", ",", "s", ",", "lb", ",", "ub", ")", ":", "non_zero", "=", "np", ".", "nonzero", "(", "s", ")", "s_non_zero", "=", "s", "[", "non_zero", "]", "steps", "=", "np", ".", "empty_like", "(", "x", ")", "steps", "....
compute a min_step size required to reach a bound .
train
false
26,252
def newer(source, target): if (not os.path.exists(source)): raise DistutilsFileError(("file '%s' does not exist" % os.path.abspath(source))) if (not os.path.exists(target)): return 1 from stat import ST_MTIME mtime1 = os.stat(source)[ST_MTIME] mtime2 = os.stat(target)[ST_MTIME] return (mtime1 > mtime2)
[ "def", "newer", "(", "source", ",", "target", ")", ":", "if", "(", "not", "os", ".", "path", ".", "exists", "(", "source", ")", ")", ":", "raise", "DistutilsFileError", "(", "(", "\"file '%s' does not exist\"", "%", "os", ".", "path", ".", "abspath", "...
tells if the target is newer than the source .
train
false
26,253
def _basis(G, ring): order = ring.order leading_monomials = [g.LM for g in G] candidates = [ring.zero_monom] basis = [] while candidates: t = candidates.pop() basis.append(t) new_candidates = [_incr_k(t, k) for k in range(ring.ngens) if all(((monomial_div(_incr_k(t, k), lmg) is None) for lmg in leading_monomials))] candidates.extend(new_candidates) candidates.sort(key=(lambda m: order(m)), reverse=True) basis = list(set(basis)) return sorted(basis, key=(lambda m: order(m)))
[ "def", "_basis", "(", "G", ",", "ring", ")", ":", "order", "=", "ring", ".", "order", "leading_monomials", "=", "[", "g", ".", "LM", "for", "g", "in", "G", "]", "candidates", "=", "[", "ring", ".", "zero_monom", "]", "basis", "=", "[", "]", "whil...
computes a list of monomials which are not divisible by the leading monomials wrt to o of g .
train
false
26,254
def build_words_dataset(words=[], vocabulary_size=50000, printable=True, unk_key='UNK'): import collections count = [[unk_key, (-1)]] count.extend(collections.Counter(words).most_common((vocabulary_size - 1))) dictionary = dict() for (word, _) in count: dictionary[word] = len(dictionary) data = list() unk_count = 0 for word in words: if (word in dictionary): index = dictionary[word] else: index = 0 unk_count += 1 data.append(index) count[0][1] = unk_count reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys())) if printable: print ('Real vocabulary size %d' % len(collections.Counter(words).keys())) print 'Limited vocabulary size {}'.format(vocabulary_size) assert (len(collections.Counter(words).keys()) >= vocabulary_size), 'the limited vocabulary_size must be less than or equal to the read vocabulary_size' return (data, count, dictionary, reverse_dictionary)
[ "def", "build_words_dataset", "(", "words", "=", "[", "]", ",", "vocabulary_size", "=", "50000", ",", "printable", "=", "True", ",", "unk_key", "=", "'UNK'", ")", ":", "import", "collections", "count", "=", "[", "[", "unk_key", ",", "(", "-", "1", ")",...
build the words dictionary and replace rare words with unk token .
train
true
26,255
def contravariant_order(expr, _strict=False): if isinstance(expr, Add): orders = [contravariant_order(e) for e in expr.args] if (len(set(orders)) != 1): raise ValueError('Misformed expression containing contravariant fields of varying order.') return orders[0] elif isinstance(expr, Mul): orders = [contravariant_order(e) for e in expr.args] not_zero = [o for o in orders if (o != 0)] if (len(not_zero) > 1): raise ValueError('Misformed expression containing multiplication between vectors.') return (0 if (not not_zero) else not_zero[0]) elif isinstance(expr, Pow): if (covariant_order(expr.base) or covariant_order(expr.exp)): raise ValueError('Misformed expression containing a power of a vector.') return 0 elif isinstance(expr, BaseVectorField): return 1 elif ((not _strict) or expr.atoms(BaseScalarField)): return 0 else: return (-1)
[ "def", "contravariant_order", "(", "expr", ",", "_strict", "=", "False", ")", ":", "if", "isinstance", "(", "expr", ",", "Add", ")", ":", "orders", "=", "[", "contravariant_order", "(", "e", ")", "for", "e", "in", "expr", ".", "args", "]", "if", "(",...
return the contravariant order of an expression .
train
false
26,256
def is_sorted_ascending(a): return (np.fmax.accumulate(a) <= a).all()
[ "def", "is_sorted_ascending", "(", "a", ")", ":", "return", "(", "np", ".", "fmax", ".", "accumulate", "(", "a", ")", "<=", "a", ")", ".", "all", "(", ")" ]
check if a numpy array is sorted .
train
false
26,260
def utf8_decode(value): if isinstance(value, six.binary_type): return _utf8_decoder(value)[0] return six.text_type(value)
[ "def", "utf8_decode", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "six", ".", "binary_type", ")", ":", "return", "_utf8_decoder", "(", "value", ")", "[", "0", "]", "return", "six", ".", "text_type", "(", "value", ")" ]
decode a from utf-8 into unicode .
train
false
26,261
@testing.requires_testing_data def test_rap_music_simulated(): (evoked, noise_cov, forward, forward_surf_ori, forward_fixed) = _get_data() n_dipoles = 2 (sim_evoked, stc) = simu_data(evoked, forward_fixed, noise_cov, n_dipoles, evoked.times) dipoles = rap_music(sim_evoked, forward_fixed, noise_cov, n_dipoles=n_dipoles) _check_dipoles(dipoles, forward_fixed, stc, evoked) (dipoles, residual) = rap_music(sim_evoked, forward_fixed, noise_cov, n_dipoles=n_dipoles, return_residual=True) _check_dipoles(dipoles, forward_fixed, stc, evoked, residual) (dipoles, residual) = rap_music(sim_evoked, forward, noise_cov, n_dipoles=n_dipoles, return_residual=True) _check_dipoles(dipoles, forward_fixed, stc, evoked, residual) (dipoles, residual) = rap_music(sim_evoked, forward_surf_ori, noise_cov, n_dipoles=n_dipoles, return_residual=True) _check_dipoles(dipoles, forward_fixed, stc, evoked, residual)
[ "@", "testing", ".", "requires_testing_data", "def", "test_rap_music_simulated", "(", ")", ":", "(", "evoked", ",", "noise_cov", ",", "forward", ",", "forward_surf_ori", ",", "forward_fixed", ")", "=", "_get_data", "(", ")", "n_dipoles", "=", "2", "(", "sim_ev...
test rap-music with simulated evoked .
train
false
26,262
@register.filter(name='to_name') def to_name(user_id): try: user = User.objects.filter(id=int(user_id)) if user: user = user[0] return user.name except: return '\xe9\x9d\x9e\xe6\xb3\x95\xe7\x94\xa8\xe6\x88\xb7'
[ "@", "register", ".", "filter", "(", "name", "=", "'to_name'", ")", "def", "to_name", "(", "user_id", ")", ":", "try", ":", "user", "=", "User", ".", "objects", ".", "filter", "(", "id", "=", "int", "(", "user_id", ")", ")", "if", "user", ":", "u...
user id 转位用户名称 .
train
false
26,263
def test_smote_fit(): smote = SMOTETomek(random_state=RND_SEED) smote.fit(X, Y) assert_equal(smote.min_c_, 0) assert_equal(smote.maj_c_, 1) assert_equal(smote.stats_c_[0], 8) assert_equal(smote.stats_c_[1], 12)
[ "def", "test_smote_fit", "(", ")", ":", "smote", "=", "SMOTETomek", "(", "random_state", "=", "RND_SEED", ")", "smote", ".", "fit", "(", "X", ",", "Y", ")", "assert_equal", "(", "smote", ".", "min_c_", ",", "0", ")", "assert_equal", "(", "smote", ".", ...
test the fitting method .
train
false
26,264
def listen_print_loop(recognize_stream): num_chars_printed = 0 for resp in recognize_stream: if (resp.error.code != code_pb2.OK): raise RuntimeError(('Server error: ' + resp.error.message)) if (not resp.results): continue result = resp.results[0] transcript = result.alternatives[0].transcript overwrite_chars = (' ' * max(0, (num_chars_printed - len(transcript)))) if (not result.is_final): sys.stdout.write(((transcript + overwrite_chars) + '\r')) sys.stdout.flush() num_chars_printed = len(transcript) else: print (transcript + overwrite_chars) if re.search('\\b(exit|quit)\\b', transcript, re.I): print 'Exiting..' break num_chars_printed = 0
[ "def", "listen_print_loop", "(", "recognize_stream", ")", ":", "num_chars_printed", "=", "0", "for", "resp", "in", "recognize_stream", ":", "if", "(", "resp", ".", "error", ".", "code", "!=", "code_pb2", ".", "OK", ")", ":", "raise", "RuntimeError", "(", "...
iterates through server responses and prints them .
train
false
26,266
def as_sparse_variable(x, name=None): if isinstance(x, gof.Apply): if (len(x.outputs) != 1): raise ValueError('It is ambiguous which output of a multi-output Op has to be fetched.', x) else: x = x.outputs[0] if isinstance(x, gof.Variable): if (not isinstance(x.type, SparseType)): raise TypeError('Variable type field must be a SparseType.', x, x.type) return x try: return constant(x, name=name) except TypeError: raise TypeError(('Cannot convert %s to SparseType' % x), type(x))
[ "def", "as_sparse_variable", "(", "x", ",", "name", "=", "None", ")", ":", "if", "isinstance", "(", "x", ",", "gof", ".", "Apply", ")", ":", "if", "(", "len", "(", "x", ".", "outputs", ")", "!=", "1", ")", ":", "raise", "ValueError", "(", "'It is...
wrapper around sparsevariable constructor to construct a variable with a sparse matrix with the same dtype and format .
train
false
26,267
def ignore_nanwarnings(): return WarningContext((('ignore',), {'category': RuntimeWarning, 'module': 'numpy.lib.nanfunctions'}))
[ "def", "ignore_nanwarnings", "(", ")", ":", "return", "WarningContext", "(", "(", "(", "'ignore'", ",", ")", ",", "{", "'category'", ":", "RuntimeWarning", ",", "'module'", ":", "'numpy.lib.nanfunctions'", "}", ")", ")" ]
helper for building a warningcontext that ignores warnings from numpys nanfunctions .
train
false
26,268
def felzenszwalb(image, scale=1, sigma=0.8, min_size=20, multichannel=True): if ((not multichannel) and (image.ndim > 2)): raise ValueError('This algorithm works only on single or multi-channel 2d images. ') image = np.atleast_3d(image) return _felzenszwalb_cython(image, scale=scale, sigma=sigma, min_size=min_size)
[ "def", "felzenszwalb", "(", "image", ",", "scale", "=", "1", ",", "sigma", "=", "0.8", ",", "min_size", "=", "20", ",", "multichannel", "=", "True", ")", ":", "if", "(", "(", "not", "multichannel", ")", "and", "(", "image", ".", "ndim", ">", "2", ...
computes felsenszwalbs efficient graph based image segmentation .
train
false
26,269
def social_auth_by_type_backends(request): def context_value(): data = backends_data(request.user) data['backends'] = group_backend_by_type(data['backends']) data['not_associated'] = group_backend_by_type(data['not_associated']) data['associated'] = group_backend_by_type(data['associated'], key=(lambda assoc: assoc.provider)) return data return {'social_auth': LazyDict(context_value)}
[ "def", "social_auth_by_type_backends", "(", "request", ")", ":", "def", "context_value", "(", ")", ":", "data", "=", "backends_data", "(", "request", ".", "user", ")", "data", "[", "'backends'", "]", "=", "group_backend_by_type", "(", "data", "[", "'backends'"...
load social auth current user data to context .
train
false
26,270
def step_pointer(caller, step=1): ptr = caller.ndb.batch_stackptr stack = caller.ndb.batch_stack nstack = len(stack) if ((ptr + step) <= 0): caller.msg('{RBeginning of batch file.') if ((ptr + step) >= nstack): caller.msg('{REnd of batch file.') caller.ndb.batch_stackptr = max(0, min((nstack - 1), (ptr + step)))
[ "def", "step_pointer", "(", "caller", ",", "step", "=", "1", ")", ":", "ptr", "=", "caller", ".", "ndb", ".", "batch_stackptr", "stack", "=", "caller", ".", "ndb", ".", "batch_stack", "nstack", "=", "len", "(", "stack", ")", "if", "(", "(", "ptr", ...
step in stack .
train
false
26,271
def validate_repoweb(val): try: (val % {'file': 'file.po', 'line': '9', 'branch': 'master'}) except Exception as error: raise ValidationError((_('Bad format string (%s)') % str(error)))
[ "def", "validate_repoweb", "(", "val", ")", ":", "try", ":", "(", "val", "%", "{", "'file'", ":", "'file.po'", ",", "'line'", ":", "'9'", ",", "'branch'", ":", "'master'", "}", ")", "except", "Exception", "as", "error", ":", "raise", "ValidationError", ...
validates whether url for repository browser is valid and can be filled in using format string .
train
false
26,272
def output_adjacent_tmpdir(output_path): return os.path.dirname(output_path)
[ "def", "output_adjacent_tmpdir", "(", "output_path", ")", ":", "return", "os", ".", "path", ".", "dirname", "(", "output_path", ")" ]
for temp files that will ultimately be moved to output_path anyway just create the file directly in output_paths directory so shutil .
train
false
26,273
def test_cache_size_deactivated(config_stub, tmpdir): config_stub.data = {'storage': {'cache-size': 1024}, 'general': {'private-browsing': True}} disk_cache = cache.DiskCache(str(tmpdir)) assert (disk_cache.cacheSize() == 0)
[ "def", "test_cache_size_deactivated", "(", "config_stub", ",", "tmpdir", ")", ":", "config_stub", ".", "data", "=", "{", "'storage'", ":", "{", "'cache-size'", ":", "1024", "}", ",", "'general'", ":", "{", "'private-browsing'", ":", "True", "}", "}", "disk_c...
confirm that the cache size returns 0 when deactivated .
train
false
26,274
def print_stats(header, total_error, total_time, n): print(header) print('Log Mean Final Error: {:.2f}'.format(np.log10((total_error / n)))) print('Mean epoch time: {:.2f} s'.format((total_time / n)))
[ "def", "print_stats", "(", "header", ",", "total_error", ",", "total_time", ",", "n", ")", ":", "print", "(", "header", ")", "print", "(", "'Log Mean Final Error: {:.2f}'", ".", "format", "(", "np", ".", "log10", "(", "(", "total_error", "/", "n", ")", "...
prints experiment statistics .
train
false
26,275
def tileset_info(filename): if (not tileset_exists(filename)): return None db = _connect(filename) db.text_factory = bytes info = [] for key in ('name', 'type', 'version', 'description', 'format', 'bounds'): value = db.execute('SELECT value FROM metadata WHERE name = ?', (key,)).fetchone() info.append(((value and value[0]) or None)) return info
[ "def", "tileset_info", "(", "filename", ")", ":", "if", "(", "not", "tileset_exists", "(", "filename", ")", ")", ":", "return", "None", "db", "=", "_connect", "(", "filename", ")", "db", ".", "text_factory", "=", "bytes", "info", "=", "[", "]", "for", ...
return name .
train
false
26,276
def makename(package, module): if package: name = package if module: name += ('.' + module) else: name = module return name
[ "def", "makename", "(", "package", ",", "module", ")", ":", "if", "package", ":", "name", "=", "package", "if", "module", ":", "name", "+=", "(", "'.'", "+", "module", ")", "else", ":", "name", "=", "module", "return", "name" ]
join package and module with a dot .
train
true
26,278
def pad_for_eccentric_selems(func): @functools.wraps(func) def func_out(image, selem, out=None, *args, **kwargs): pad_widths = [] padding = False if (out is None): out = np.empty_like(image) for axis_len in selem.shape: if ((axis_len % 2) == 0): axis_pad_width = (axis_len - 1) padding = True else: axis_pad_width = 0 pad_widths.append(((axis_pad_width,) * 2)) if padding: image = np.pad(image, pad_widths, mode='edge') out_temp = np.empty_like(image) else: out_temp = out out_temp = func(image, selem, out=out_temp, *args, **kwargs) if padding: out[:] = crop(out_temp, pad_widths) else: out = out_temp return out return func_out
[ "def", "pad_for_eccentric_selems", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "func_out", "(", "image", ",", "selem", ",", "out", "=", "None", ",", "*", "args", ",", "**", "kwargs", ")", ":", "pad_widths", "=", "[...
pad input images for certain morphological operations .
train
false
26,279
@pytest.mark.django_db def test_format_registry_extensions(no_formats): registry = FormatRegistry() filetype = registry.register('foo', 'foo') filetype2 = registry.register('special_foo', 'foo') assert (str(filetype.extension) == 'foo') assert (str(filetype2.extension) == 'foo') _test_formats(registry, ['foo', 'special_foo'])
[ "@", "pytest", ".", "mark", ".", "django_db", "def", "test_format_registry_extensions", "(", "no_formats", ")", ":", "registry", "=", "FormatRegistry", "(", ")", "filetype", "=", "registry", ".", "register", "(", "'foo'", ",", "'foo'", ")", "filetype2", "=", ...
tests the creation of a file extension .
train
false
26,280
def _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir): trans = _get_trans(fname_trans)[0] bem = read_bem_solution(fname_bem) surf = _bem_find_surface(bem, 'inner_skull') points = surf['rr'] points = apply_trans(trans['trans'], points) depth = _compute_nearest(points, dip.pos, return_dists=True)[1][0] return np.ravel(depth)
[ "def", "_compute_depth", "(", "dip", ",", "fname_bem", ",", "fname_trans", ",", "subject", ",", "subjects_dir", ")", ":", "trans", "=", "_get_trans", "(", "fname_trans", ")", "[", "0", "]", "bem", "=", "read_bem_solution", "(", "fname_bem", ")", "surf", "=...
compute dipole depth .
train
false
26,282
def is_video(td): pricelabels = td('span', 'pricelabel') return ((len(pricelabels) == 1) and pricelabels[0].text.strip().startswith('Video'))
[ "def", "is_video", "(", "td", ")", ":", "pricelabels", "=", "td", "(", "'span'", ",", "'pricelabel'", ")", "return", "(", "(", "len", "(", "pricelabels", ")", "==", "1", ")", "and", "pricelabels", "[", "0", "]", ".", "text", ".", "strip", "(", ")",...
its a video if it has exactly one pricelabel .
train
false
26,283
def function_crazy((alpha, beta), c, d=range(4), **kw): return (alpha, beta, c, d, kw)
[ "def", "function_crazy", "(", "(", "alpha", ",", "beta", ")", ",", "c", ",", "d", "=", "range", "(", "4", ")", ",", "**", "kw", ")", ":", "return", "(", "alpha", ",", "beta", ",", "c", ",", "d", ",", "kw", ")" ]
a function with a mad crazy signature .
train
false
26,284
def canon_path(path, indices=None): if (not isinstance(path, str)): return tuple(map(ensure_str_or_int, path)) if (indices is not None): path = path.format(**indices) path = (path[1:] if path.startswith('/') else path) path = (path[:(-1)] if path.endswith('/') else path) if (len(path) == 0): return () return tuple(map(ensure_str_or_int, path.split('/')))
[ "def", "canon_path", "(", "path", ",", "indices", "=", "None", ")", ":", "if", "(", "not", "isinstance", "(", "path", ",", "str", ")", ")", ":", "return", "tuple", "(", "map", "(", "ensure_str_or_int", ",", "path", ")", ")", "if", "(", "indices", "...
return path in os-independent form .
train
false
26,285
def transpose_sequence(xs): ys = TransposeSequence()(*xs) if (not isinstance(ys, tuple)): ys = (ys,) return ys
[ "def", "transpose_sequence", "(", "xs", ")", ":", "ys", "=", "TransposeSequence", "(", ")", "(", "*", "xs", ")", "if", "(", "not", "isinstance", "(", "ys", ",", "tuple", ")", ")", ":", "ys", "=", "(", "ys", ",", ")", "return", "ys" ]
transpose a list of variables .
train
false
26,287
def parse_rev_list(raw_revs): revs = [] for line in raw_revs.splitlines(): match = REV_LIST_REGEX.match(line) if match: rev_id = match.group(1) summary = match.group(2) revs.append((rev_id, summary)) return revs
[ "def", "parse_rev_list", "(", "raw_revs", ")", ":", "revs", "=", "[", "]", "for", "line", "in", "raw_revs", ".", "splitlines", "(", ")", ":", "match", "=", "REV_LIST_REGEX", ".", "match", "(", "line", ")", "if", "match", ":", "rev_id", "=", "match", ...
parse git log --pretty=online output into pairs .
train
false
26,289
def reject_suggestion(editor_id, thread_id, exploration_id): if _is_suggestion_handled(thread_id, exploration_id): raise Exception('Suggestion has already been accepted/rejected.') else: thread = feedback_models.FeedbackThreadModel.get_by_exp_and_thread_id(exploration_id, thread_id) feedback_services.create_message(exploration_id, thread_id, editor_id, feedback_models.STATUS_CHOICES_IGNORED, None, 'Suggestion rejected.') thread.put()
[ "def", "reject_suggestion", "(", "editor_id", ",", "thread_id", ",", "exploration_id", ")", ":", "if", "_is_suggestion_handled", "(", "thread_id", ",", "exploration_id", ")", ":", "raise", "Exception", "(", "'Suggestion has already been accepted/rejected.'", ")", "else"...
set the state of a suggetion to rejected .
train
false
26,291
def tree_details(text): return leading_whitespace_re.sub(u'', text.rstrip())
[ "def", "tree_details", "(", "text", ")", ":", "return", "leading_whitespace_re", ".", "sub", "(", "u''", ",", "text", ".", "rstrip", "(", ")", ")" ]
trims leading whitespace from the given text specifying tree details so triple-quoted strings can be used to provide tree details in a readable format .
train
false
26,292
def volume_allocate_iscsi_target(context, volume_id, host): return IMPL.volume_allocate_iscsi_target(context, volume_id, host)
[ "def", "volume_allocate_iscsi_target", "(", "context", ",", "volume_id", ",", "host", ")", ":", "return", "IMPL", ".", "volume_allocate_iscsi_target", "(", "context", ",", "volume_id", ",", "host", ")" ]
atomically allocate a free iscsi_target from the pool .
train
false
26,293
def _wait_for_device(session, dev, dom0, max_seconds): dev_path = utils.make_dev_path(dev) found_path = None if dom0: found_path = session.call_plugin_serialized('partition_utils.py', 'wait_for_dev', dev_path, max_seconds) else: for i in range(0, max_seconds): if os.path.exists(dev_path): found_path = dev_path break time.sleep(1) if (found_path is None): raise exception.StorageError(reason=(_('Timeout waiting for device %s to be created') % dev))
[ "def", "_wait_for_device", "(", "session", ",", "dev", ",", "dom0", ",", "max_seconds", ")", ":", "dev_path", "=", "utils", ".", "make_dev_path", "(", "dev", ")", "found_path", "=", "None", "if", "dom0", ":", "found_path", "=", "session", ".", "call_plugin...
wait for device node to appear .
train
false
26,294
def IsOneSMSMessage(value): value = escape.to_unicode(value) utf16_count = (len(value.encode('utf-16-be')) / 2) if _gsm_re.search(value): return (utf16_count <= MAX_GSM_CHARS) return (utf16_count <= MAX_UTF16_CHARS)
[ "def", "IsOneSMSMessage", "(", "value", ")", ":", "value", "=", "escape", ".", "to_unicode", "(", "value", ")", "utf16_count", "=", "(", "len", "(", "value", ".", "encode", "(", "'utf-16-be'", ")", ")", "/", "2", ")", "if", "_gsm_re", ".", "search", ...
returns true if the value can be sent in a single sms message .
train
false
26,295
def test_leap_seconds_rounded_correctly(): t = Time(['2012-06-30 23:59:59.413', '2012-07-01 00:00:00.413'], scale='ut1', precision=3).utc assert np.all((t.iso == np.array(['2012-06-30 23:59:60.000', '2012-07-01 00:00:00.000'])))
[ "def", "test_leap_seconds_rounded_correctly", "(", ")", ":", "t", "=", "Time", "(", "[", "'2012-06-30 23:59:59.413'", ",", "'2012-07-01 00:00:00.413'", "]", ",", "scale", "=", "'ut1'", ",", "precision", "=", "3", ")", ".", "utc", "assert", "np", ".", "all", ...
regression tests against #2083 .
train
false
26,296
def test_smote_sk_estimator(): check_estimator(SMOTETomek)
[ "def", "test_smote_sk_estimator", "(", ")", ":", "check_estimator", "(", "SMOTETomek", ")" ]
test the sklearn estimator compatibility .
train
false
26,297
def _load_webgl_backend(ipython): from .. import app app_instance = app.use_app('ipynb_webgl') if (app_instance.backend_name == 'ipynb_webgl'): ipython.write('Vispy IPython module has loaded successfully') else: ipython.write_err('Unable to load webgl backend of Vispy')
[ "def", "_load_webgl_backend", "(", "ipython", ")", ":", "from", ".", ".", "import", "app", "app_instance", "=", "app", ".", "use_app", "(", "'ipynb_webgl'", ")", "if", "(", "app_instance", ".", "backend_name", "==", "'ipynb_webgl'", ")", ":", "ipython", ".",...
load the webgl backend for the ipython notebook .
train
true
26,298
def _uninstall_flocker_ubuntu(): return run_from_args(['apt-get', 'remove', '-y', '--purge', 'clusterhq-python-flocker'])
[ "def", "_uninstall_flocker_ubuntu", "(", ")", ":", "return", "run_from_args", "(", "[", "'apt-get'", ",", "'remove'", ",", "'-y'", ",", "'--purge'", ",", "'clusterhq-python-flocker'", "]", ")" ]
return an effect for uninstalling the flocker package from an ubuntu machine .
train
false
26,299
def circmoment(data, p=1.0, centered=False, axis=None, weights=None): if centered: phi = circmean(data, axis, weights) else: phi = 0.0 return (_angle(data, p, phi, axis, weights), _length(data, p, phi, axis, weights))
[ "def", "circmoment", "(", "data", ",", "p", "=", "1.0", ",", "centered", "=", "False", ",", "axis", "=", "None", ",", "weights", "=", "None", ")", ":", "if", "centered", ":", "phi", "=", "circmean", "(", "data", ",", "axis", ",", "weights", ")", ...
computes the p-th trigonometric circular moment for an array of circular data .
train
false
26,300
def _ReorderHandlers(handler_list): for (i, j) in itertools.combinations(xrange(len(handler_list)), 2): if handler_list[i].MatchesAll(handler_list[j]): (handler_list[i], handler_list[j]) = (handler_list[j], handler_list[i])
[ "def", "_ReorderHandlers", "(", "handler_list", ")", ":", "for", "(", "i", ",", "j", ")", "in", "itertools", ".", "combinations", "(", "xrange", "(", "len", "(", "handler_list", ")", ")", ",", "2", ")", ":", "if", "handler_list", "[", "i", "]", ".", ...
reorders handlers from specific to general for writing to yaml file .
train
false
26,301
def _AddSerializeToStringMethod(message_descriptor, cls): def SerializeToString(self): errors = [] if (not self.IsInitialized()): raise message_mod.EncodeError(('Message %s is missing required fields: %s' % (self.DESCRIPTOR.full_name, ','.join(self.FindInitializationErrors())))) return self.SerializePartialToString() cls.SerializeToString = SerializeToString
[ "def", "_AddSerializeToStringMethod", "(", "message_descriptor", ",", "cls", ")", ":", "def", "SerializeToString", "(", "self", ")", ":", "errors", "=", "[", "]", "if", "(", "not", "self", ".", "IsInitialized", "(", ")", ")", ":", "raise", "message_mod", "...
helper for _addmessagemethods() .
train
true
26,302
def build_fragments_list(boot_info): res = [] segment_run_table = boot_info[u'segments'][0] fragment_run_entry_table = boot_info[u'fragments'][0][u'fragments'] first_frag_number = fragment_run_entry_table[0][u'first'] fragments_counter = itertools.count(first_frag_number) for (segment, fragments_count) in segment_run_table[u'segment_run']: if ((fragments_count == 4294967295) and boot_info[u'live']): fragments_count = 2 for _ in range(fragments_count): res.append((segment, next(fragments_counter))) if boot_info[u'live']: res = res[(-2):] return res
[ "def", "build_fragments_list", "(", "boot_info", ")", ":", "res", "=", "[", "]", "segment_run_table", "=", "boot_info", "[", "u'segments'", "]", "[", "0", "]", "fragment_run_entry_table", "=", "boot_info", "[", "u'fragments'", "]", "[", "0", "]", "[", "u'fra...
return a list of for each fragment in the video .
train
false
26,305
def _get_activity_rights(activity_type, activity_id): if (activity_type == feconf.ACTIVITY_TYPE_EXPLORATION): return get_exploration_rights(activity_id, strict=False) elif (activity_type == feconf.ACTIVITY_TYPE_COLLECTION): return get_collection_rights(activity_id, strict=False) else: raise Exception(('Cannot get activity rights for unknown activity type: %s' % activity_type))
[ "def", "_get_activity_rights", "(", "activity_type", ",", "activity_id", ")", ":", "if", "(", "activity_type", "==", "feconf", ".", "ACTIVITY_TYPE_EXPLORATION", ")", ":", "return", "get_exploration_rights", "(", "activity_id", ",", "strict", "=", "False", ")", "el...
this function returns none if it fails to retrieve the rights object for a given activity based on its type .
train
false
26,306
def do_version_list(cs, args): result = cs.versions.list() if ('min_version' in dir(result[0])): columns = ['Id', 'Status', 'Updated', 'Min Version', 'Version'] else: columns = ['Id', 'Status', 'Updated'] print(_('Client supported API versions:')) print((_('Minimum version %(v)s') % {'v': novaclient.API_MIN_VERSION.get_string()})) print((_('Maximum version %(v)s') % {'v': novaclient.API_MAX_VERSION.get_string()})) print(_('\nServer supported API versions:')) utils.print_list(result, columns)
[ "def", "do_version_list", "(", "cs", ",", "args", ")", ":", "result", "=", "cs", ".", "versions", ".", "list", "(", ")", "if", "(", "'min_version'", "in", "dir", "(", "result", "[", "0", "]", ")", ")", ":", "columns", "=", "[", "'Id'", ",", "'Sta...
list all api versions .
train
false
26,308
def test_solveset_real_rational(): assert (solveset_real(((x - (y ** 3)) / ((y ** 2) * sqrt((1 - (y ** 2))))), x) == FiniteSet((y ** 3))) assert (solveset_real((((2 * x) / (x + 2)) - 1), x) == FiniteSet(2))
[ "def", "test_solveset_real_rational", "(", ")", ":", "assert", "(", "solveset_real", "(", "(", "(", "x", "-", "(", "y", "**", "3", ")", ")", "/", "(", "(", "y", "**", "2", ")", "*", "sqrt", "(", "(", "1", "-", "(", "y", "**", "2", ")", ")", ...
test solveset_real for rational functions .
train
false
26,309
def aes_ctr_decrypt(data, key, counter): expanded_key = key_expansion(key) block_count = int(ceil((float(len(data)) / BLOCK_SIZE_BYTES))) decrypted_data = [] for i in range(block_count): counter_block = counter.next_value() block = data[(i * BLOCK_SIZE_BYTES):((i + 1) * BLOCK_SIZE_BYTES)] block += ([0] * (BLOCK_SIZE_BYTES - len(block))) cipher_counter_block = aes_encrypt(counter_block, expanded_key) decrypted_data += xor(block, cipher_counter_block) decrypted_data = decrypted_data[:len(data)] return decrypted_data
[ "def", "aes_ctr_decrypt", "(", "data", ",", "key", ",", "counter", ")", ":", "expanded_key", "=", "key_expansion", "(", "key", ")", "block_count", "=", "int", "(", "ceil", "(", "(", "float", "(", "len", "(", "data", ")", ")", "/", "BLOCK_SIZE_BYTES", "...
decrypt with aes in counter mode .
train
false
26,310
def test_call_shapes(): assert_equal(lae_a(y).shape, (n_y,)) assert_equal(lae_b(y).shape, (n_y,))
[ "def", "test_call_shapes", "(", ")", ":", "assert_equal", "(", "lae_a", "(", "y", ")", ".", "shape", ",", "(", "n_y", ",", ")", ")", "assert_equal", "(", "lae_b", "(", "y", ")", ".", "shape", ",", "(", "n_y", ",", ")", ")" ]
lae: shape of call to lae .
train
false
26,311
def _set_ntp_servers(servers): return __salt__['ntp.set_servers'](commit=False, *servers)
[ "def", "_set_ntp_servers", "(", "servers", ")", ":", "return", "__salt__", "[", "'ntp.set_servers'", "]", "(", "commit", "=", "False", ",", "*", "servers", ")" ]
calls ntp .
train
false
26,312
def Moniker(Pathname, clsctx=pythoncom.CLSCTX_ALL): (moniker, i, bindCtx) = pythoncom.MkParseDisplayName(Pathname) dispatch = moniker.BindToObject(bindCtx, None, pythoncom.IID_IDispatch) return __WrapDispatch(dispatch, Pathname, clsctx=clsctx)
[ "def", "Moniker", "(", "Pathname", ",", "clsctx", "=", "pythoncom", ".", "CLSCTX_ALL", ")", ":", "(", "moniker", ",", "i", ",", "bindCtx", ")", "=", "pythoncom", ".", "MkParseDisplayName", "(", "Pathname", ")", "dispatch", "=", "moniker", ".", "BindToObjec...
python friendly version of getobjects moniker functionality .
train
false
26,313
def test_temporary_files_failed_cleanup(caplog, qtbot, py_proc, runner): (cmd, args) = py_proc("\n import os\n os.remove(os.environ['QUTE_HTML'])\n ") with caplog.at_level(logging.ERROR): with qtbot.waitSignal(runner.finished, timeout=10000): runner.prepare_run(cmd, *args) runner.store_text('') runner.store_html('') assert (len(caplog.records) == 1) expected = 'Failed to delete tempfile' assert caplog.records[0].message.startswith(expected)
[ "def", "test_temporary_files_failed_cleanup", "(", "caplog", ",", "qtbot", ",", "py_proc", ",", "runner", ")", ":", "(", "cmd", ",", "args", ")", "=", "py_proc", "(", "\"\\n import os\\n os.remove(os.environ['QUTE_HTML'])\\n \"", ")", "with", "caplog", ...
delete a temporary file from the script so cleanup fails .
train
false
26,314
def _parse_settings_vlan(opts, iface): vlan = {} if ('reorder_hdr' in opts): if (opts['reorder_hdr'] in (_CONFIG_TRUE + _CONFIG_FALSE)): vlan.update({'reorder_hdr': opts['reorder_hdr']}) else: valid = (_CONFIG_TRUE + _CONFIG_FALSE) _raise_error_iface(iface, 'reorder_hdr', valid) if ('vlan_id' in opts): if (opts['vlan_id'] > 0): vlan.update({'vlan_id': opts['vlan_id']}) else: _raise_error_iface(iface, 'vlan_id', 'Positive integer') if ('phys_dev' in opts): if (len(opts['phys_dev']) > 0): vlan.update({'phys_dev': opts['phys_dev']}) else: _raise_error_iface(iface, 'phys_dev', 'Non-empty string') return vlan
[ "def", "_parse_settings_vlan", "(", "opts", ",", "iface", ")", ":", "vlan", "=", "{", "}", "if", "(", "'reorder_hdr'", "in", "opts", ")", ":", "if", "(", "opts", "[", "'reorder_hdr'", "]", "in", "(", "_CONFIG_TRUE", "+", "_CONFIG_FALSE", ")", ")", ":",...
filters given options and outputs valid settings for a vlan .
train
true
26,316
def xontrib_context(name): spec = find_xontrib(name) if (spec is None): return None m = importlib.import_module(spec.name) pubnames = getattr(m, '__all__', None) if (pubnames is not None): ctx = {k: getattr(m, k) for k in pubnames} else: ctx = {k: getattr(m, k) for k in dir(m) if (not k.startswith('_'))} return ctx
[ "def", "xontrib_context", "(", "name", ")", ":", "spec", "=", "find_xontrib", "(", "name", ")", "if", "(", "spec", "is", "None", ")", ":", "return", "None", "m", "=", "importlib", ".", "import_module", "(", "spec", ".", "name", ")", "pubnames", "=", ...
return a context dictionary for a xontrib of a given name .
train
false
26,318
def _msg_type_to_str(mod, type_): return ('%s(%d)' % (_get_value_name(mod, type_, 'OFPT_'), type_))
[ "def", "_msg_type_to_str", "(", "mod", ",", "type_", ")", ":", "return", "(", "'%s(%d)'", "%", "(", "_get_value_name", "(", "mod", ",", "type_", ",", "'OFPT_'", ")", ",", "type_", ")", ")" ]
this method is registered as ofp_msg_type_to_str method into ryu .
train
false
26,320
def getCraftSequence(): return 'chop preface outset mill multiply drill lift flow feed home lash fillet limit unpause alteration export'.split()
[ "def", "getCraftSequence", "(", ")", ":", "return", "'chop preface outset mill multiply drill lift flow feed home lash fillet limit unpause alteration export'", ".", "split", "(", ")" ]
get the extrusion craft sequence .
train
false
26,323
def sort_objects(objects): return sort_unicode(objects, force_text)
[ "def", "sort_objects", "(", "objects", ")", ":", "return", "sort_unicode", "(", "objects", ",", "force_text", ")" ]
sorts objects alphabetically .
train
false
26,324
def _add_gradients_summaries(grads_and_vars): summaries = [] for (grad, var) in grads_and_vars: if (grad is not None): if isinstance(grad, tf.IndexedSlices): grad_values = grad.values else: grad_values = grad summaries.append(tf.histogram_summary((var.op.name + ':gradient'), grad_values)) summaries.append(tf.histogram_summary((var.op.name + ':gradient_norm'), tf.global_norm([grad_values]))) else: tf.logging.info('Var %s has no gradient', var.op.name) return summaries
[ "def", "_add_gradients_summaries", "(", "grads_and_vars", ")", ":", "summaries", "=", "[", "]", "for", "(", "grad", ",", "var", ")", "in", "grads_and_vars", ":", "if", "(", "grad", "is", "not", "None", ")", ":", "if", "isinstance", "(", "grad", ",", "t...
add histogram summaries to gradients .
train
false
26,325
def test_get_version_error(qtbot): http_stub = HTTPGetStub(success=False) client = autoupdate.PyPIVersionClient(client=http_stub) with qtbot.assertNotEmitted(client.success): with qtbot.waitSignal(client.error): client.get_version('test')
[ "def", "test_get_version_error", "(", "qtbot", ")", ":", "http_stub", "=", "HTTPGetStub", "(", "success", "=", "False", ")", "client", "=", "autoupdate", ".", "PyPIVersionClient", "(", "client", "=", "http_stub", ")", "with", "qtbot", ".", "assertNotEmitted", ...
test get_version() when error is emitted .
train
false
26,328
def get_required_parameters(dictionary, additional_params=None): params = {} additional_params = (additional_params or []) for key in (REQUIRED_PARAMETERS + additional_params): if (key not in dictionary): return None params[key] = dictionary[key] return params
[ "def", "get_required_parameters", "(", "dictionary", ",", "additional_params", "=", "None", ")", ":", "params", "=", "{", "}", "additional_params", "=", "(", "additional_params", "or", "[", "]", ")", "for", "key", "in", "(", "REQUIRED_PARAMETERS", "+", "additi...
extract all required lti parameters from a dictionary and verify that none are missing .
train
false
26,330
def del_temp_file_copies(file_paths): for file_path in file_paths: temp_file_path = get_temp_file_path(file_path) os.remove(temp_file_path)
[ "def", "del_temp_file_copies", "(", "file_paths", ")", ":", "for", "file_path", "in", "file_paths", ":", "temp_file_path", "=", "get_temp_file_path", "(", "file_path", ")", "os", ".", "remove", "(", "temp_file_path", ")" ]
deletes all the provided files .
train
false
26,332
def xlformat_factory(format): key = hash(format) fmt_ = xlformat_factory.created_formats.get(key) if (fmt_ is not None): return fmt_ format = copy.deepcopy(format) xlstyle = excel.XFStyle() if isinstance(format, mlab.FormatPercent): zeros = u''.join(([u'0'] * format.precision)) xlstyle.num_format_str = (u'0.%s%%;[RED]-0.%s%%' % (zeros, zeros)) format.scale = 1.0 elif isinstance(format, mlab.FormatFloat): if (format.precision > 0): zeros = u''.join(([u'0'] * format.precision)) xlstyle.num_format_str = (u'#,##0.%s;[RED]-#,##0.%s' % (zeros, zeros)) else: xlstyle.num_format_str = u'#,##;[RED]-#,##' elif isinstance(format, mlab.FormatInt): xlstyle.num_format_str = u'#,##;[RED]-#,##' else: xlstyle = None format.xlstyle = xlstyle xlformat_factory.created_formats[key] = format return format
[ "def", "xlformat_factory", "(", "format", ")", ":", "key", "=", "hash", "(", "format", ")", "fmt_", "=", "xlformat_factory", ".", "created_formats", ".", "get", "(", "key", ")", "if", "(", "fmt_", "is", "not", "None", ")", ":", "return", "fmt_", "forma...
copy the format .
train
false
26,333
def fnmatchcase(name, pat): match = _compile_pattern(pat) return (match(name) is not None)
[ "def", "fnmatchcase", "(", "name", ",", "pat", ")", ":", "match", "=", "_compile_pattern", "(", "pat", ")", "return", "(", "match", "(", "name", ")", "is", "not", "None", ")" ]
test whether filename matches pattern .
train
false
26,335
def _get_frame_result_type(result, objs): if any((b.is_sparse for b in result.blocks)): from pandas.sparse.api import SparseDataFrame return SparseDataFrame else: return objs[0]
[ "def", "_get_frame_result_type", "(", "result", ",", "objs", ")", ":", "if", "any", "(", "(", "b", ".", "is_sparse", "for", "b", "in", "result", ".", "blocks", ")", ")", ":", "from", "pandas", ".", "sparse", ".", "api", "import", "SparseDataFrame", "re...
return appropriate class of dataframe-like concat if any block is sparseblock .
train
false
26,339
def _config_hint_generate(optname, both_env_and_param): env = optname.replace('-', '_').upper() if both_env_and_param: option = ('--' + optname.lower()) return 'Pass "{0}" or set the environment variable "{1}".'.format(option, env) else: return 'Set the environment variable {0}.'.format(env)
[ "def", "_config_hint_generate", "(", "optname", ",", "both_env_and_param", ")", ":", "env", "=", "optname", ".", "replace", "(", "'-'", ",", "'_'", ")", ".", "upper", "(", ")", "if", "both_env_and_param", ":", "option", "=", "(", "'--'", "+", "optname", ...
generate hint language for missing configuration .
train
true
26,341
def test_basic_sparktext(): chart = Line() chart.add('_', [1, 5, 22, 13, 53]) assert (chart.render_sparktext() == u('\xe2\x96\x81\xe2\x96\x81\xe2\x96\x83\xe2\x96\x82\xe2\x96\x88'))
[ "def", "test_basic_sparktext", "(", ")", ":", "chart", "=", "Line", "(", ")", "chart", ".", "add", "(", "'_'", ",", "[", "1", ",", "5", ",", "22", ",", "13", ",", "53", "]", ")", "assert", "(", "chart", ".", "render_sparktext", "(", ")", "==", ...
test basic sparktext .
train
false
26,342
def test_sameas(value, other): return (value is other)
[ "def", "test_sameas", "(", "value", ",", "other", ")", ":", "return", "(", "value", "is", "other", ")" ]
check if an object points to the same memory address as another object .
train
false
26,343
def sentence_to_token_ids(sentence, vocabulary, tokenizer=None, normalize_digits=True, UNK_ID=3, _DIGIT_RE=re.compile('\\d')): if tokenizer: words = tokenizer(sentence) else: words = basic_tokenizer(sentence) if (not normalize_digits): return [vocabulary.get(w, UNK_ID) for w in words] return [vocabulary.get(re.sub(_DIGIT_RE, '0', w), UNK_ID) for w in words]
[ "def", "sentence_to_token_ids", "(", "sentence", ",", "vocabulary", ",", "tokenizer", "=", "None", ",", "normalize_digits", "=", "True", ",", "UNK_ID", "=", "3", ",", "_DIGIT_RE", "=", "re", ".", "compile", "(", "'\\\\d'", ")", ")", ":", "if", "tokenizer",...
convert a string to list of integers representing token-ids .
train
true
26,344
def epoch2datetime(epoch): if (epoch is not None): return datetime.datetime.fromtimestamp(epoch, tz=UTC)
[ "def", "epoch2datetime", "(", "epoch", ")", ":", "if", "(", "epoch", "is", "not", "None", ")", ":", "return", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "epoch", ",", "tz", "=", "UTC", ")" ]
convert a unix epoch time to a datetime object .
train
false
26,345
def connection_pool(account_id, pool_size=3, pool_map=dict()): return _get_connection_pool(account_id, pool_size, pool_map, True)
[ "def", "connection_pool", "(", "account_id", ",", "pool_size", "=", "3", ",", "pool_map", "=", "dict", "(", ")", ")", ":", "return", "_get_connection_pool", "(", "account_id", ",", "pool_size", ",", "pool_map", ",", "True", ")" ]
per-account crispin connection pool .
train
false
26,346
def get_dns_servers(interface='Local Area Connection'): interface = interface.split('\\') interface = ''.join(interface) with salt.utils.winapi.Com(): c = wmi.WMI() for iface in c.Win32_NetworkAdapter(NetEnabled=True): if (interface == iface.NetConnectionID): iface_config = c.Win32_NetworkAdapterConfiguration(Index=iface.Index).pop() try: return list(iface_config.DNSServerSearchOrder) except TypeError: return [] log.debug('Interface "{0}" not found'.format(interface)) return False
[ "def", "get_dns_servers", "(", "interface", "=", "'Local Area Connection'", ")", ":", "interface", "=", "interface", ".", "split", "(", "'\\\\'", ")", "interface", "=", "''", ".", "join", "(", "interface", ")", "with", "salt", ".", "utils", ".", "winapi", ...
return a list of the configured dns servers of the specified interface cli example: .
train
true
26,348
def dataToStdout(data, bold=False): if (('quiet' not in conf) or (not conf.quiet)): message = '' if isinstance(data, unicode): message = stdoutencode(data) else: message = data sys.stdout.write(setColor(message, bold)) try: sys.stdout.flush() except IOError: pass return
[ "def", "dataToStdout", "(", "data", ",", "bold", "=", "False", ")", ":", "if", "(", "(", "'quiet'", "not", "in", "conf", ")", "or", "(", "not", "conf", ".", "quiet", ")", ")", ":", "message", "=", "''", "if", "isinstance", "(", "data", ",", "unic...
writes text to the stdout stream .
train
false
26,349
def _bc_adjust_shape_strides(context, builder, shapes, strides, target_shape): bc_shapes = [] bc_strides = [] zero = context.get_constant(types.uintp, 0) one = context.get_constant(types.uintp, 1) mismatch = [builder.icmp_signed('!=', tar, old) for (tar, old) in zip(target_shape, shapes)] src_is_one = [builder.icmp_signed('==', old, one) for old in shapes] preds = [builder.and_(x, y) for (x, y) in zip(mismatch, src_is_one)] bc_shapes = [builder.select(p, tar, old) for (p, tar, old) in zip(preds, target_shape, shapes)] bc_strides = [builder.select(p, zero, old) for (p, old) in zip(preds, strides)] return (bc_shapes, bc_strides)
[ "def", "_bc_adjust_shape_strides", "(", "context", ",", "builder", ",", "shapes", ",", "strides", ",", "target_shape", ")", ":", "bc_shapes", "=", "[", "]", "bc_strides", "=", "[", "]", "zero", "=", "context", ".", "get_constant", "(", "types", ".", "uintp...
broadcast shapes and strides to target_shape given that their ndim already matches .
train
false
26,350
def p_struct_or_union(t): pass
[ "def", "p_struct_or_union", "(", "t", ")", ":", "pass" ]
struct_or_union : struct | union .
train
false
26,351
def test_simple_search(pg_xlog): name = (('1' * 8) * 3) pg_xlog.touch(name, '.ready') segs = worker.WalSegment.from_ready_archive_status('pg_xlog') assert (next(segs).path == ('pg_xlog/' + name)) with pytest.raises(StopIteration): next(segs)
[ "def", "test_simple_search", "(", "pg_xlog", ")", ":", "name", "=", "(", "(", "'1'", "*", "8", ")", "*", "3", ")", "pg_xlog", ".", "touch", "(", "name", ",", "'.ready'", ")", "segs", "=", "worker", ".", "WalSegment", ".", "from_ready_archive_status", "...
must find a .
train
false
26,352
def get_role_push_host(role): pushs = PermPush.objects.filter(role=role) asset_all = Asset.objects.all() asset_pushed = {} for push in pushs: asset_pushed[push.asset] = {'success': push.success, 'key': push.is_public_key, 'password': push.is_password, 'result': push.result} asset_no_push = (set(asset_all) - set(asset_pushed.keys())) return (asset_pushed, asset_no_push)
[ "def", "get_role_push_host", "(", "role", ")", ":", "pushs", "=", "PermPush", ".", "objects", ".", "filter", "(", "role", "=", "role", ")", "asset_all", "=", "Asset", ".", "objects", ".", "all", "(", ")", "asset_pushed", "=", "{", "}", "for", "push", ...
asset_pushed: {success: push .
train
false
26,353
def key_info(pkey, cert, key_info_template): exponent = base64.b64encode(pkey.e[4:]) modulus = m2.bn_to_hex(m2.mpi_to_bn(pkey.n)).decode('hex').encode('base64') x509 = (x509_parse_cert(cert) if cert else None) return (key_info_template % {'modulus': modulus, 'exponent': exponent, 'issuer_name': (x509.get_issuer().as_text() if x509 else ''), 'serial_number': (x509.get_serial_number() if x509 else '')})
[ "def", "key_info", "(", "pkey", ",", "cert", ",", "key_info_template", ")", ":", "exponent", "=", "base64", ".", "b64encode", "(", "pkey", ".", "e", "[", "4", ":", "]", ")", "modulus", "=", "m2", ".", "bn_to_hex", "(", "m2", ".", "mpi_to_bn", "(", ...
convert private key to xml signature format .
train
false
26,354
def detail_twitter_error(twitterException): data = twitterException.response_data try: for m in data.get('errors', dict()): printNicely(yellow(m.get('message'))) except: printNicely(yellow(data))
[ "def", "detail_twitter_error", "(", "twitterException", ")", ":", "data", "=", "twitterException", ".", "response_data", "try", ":", "for", "m", "in", "data", ".", "get", "(", "'errors'", ",", "dict", "(", ")", ")", ":", "printNicely", "(", "yellow", "(", ...
display twitter errors nicely .
train
false
26,355
def calculator(counter_type): def set_calculator(func): _counter_type_calculators[counter_type] = func return func return set_calculator
[ "def", "calculator", "(", "counter_type", ")", ":", "def", "set_calculator", "(", "func", ")", ":", "_counter_type_calculators", "[", "counter_type", "]", "=", "func", "return", "func", "return", "set_calculator" ]
a decorator that assigns a counter_type to its calculator .
train
false
26,356
def marshal(data, fields, envelope=None): def make(cls): if isinstance(cls, type): return cls() return cls if isinstance(data, (list, tuple)): return (OrderedDict([(envelope, [marshal(d, fields) for d in data])]) if envelope else [marshal(d, fields) for d in data]) items = ((k, (marshal(data, v) if isinstance(v, dict) else make(v).output(k, data))) for (k, v) in fields.items()) return (OrderedDict([(envelope, OrderedDict(items))]) if envelope else OrderedDict(items))
[ "def", "marshal", "(", "data", ",", "fields", ",", "envelope", "=", "None", ")", ":", "def", "make", "(", "cls", ")", ":", "if", "isinstance", "(", "cls", ",", "type", ")", ":", "return", "cls", "(", ")", "return", "cls", "if", "isinstance", "(", ...
takes raw data and a dict of fields to output and filters the data based on those fields .
train
true
26,357
def get_value_from_system_metadata(instance, key, type, default): value = instance.system_metadata.get(key, default) try: return type(value) except ValueError: LOG.warning(_LW('Metadata value %(value)s for %(key)s is not of type %(type)s. Using default value %(default)s.'), {'value': value, 'key': key, 'type': type, 'default': default}, instance=instance) return default
[ "def", "get_value_from_system_metadata", "(", "instance", ",", "key", ",", "type", ",", "default", ")", ":", "value", "=", "instance", ".", "system_metadata", ".", "get", "(", "key", ",", "default", ")", "try", ":", "return", "type", "(", "value", ")", "...
get a value of a specified type from instance system metadata .
train
false
26,358
def optimizeTransforms(element, options): num = 0 for transformAttr in ['transform', 'patternTransform', 'gradientTransform']: val = element.getAttribute(transformAttr) if (val != ''): transform = svg_transform_parser.parse(val) optimizeTransform(transform) newVal = serializeTransform(transform) if (len(newVal) < len(val)): if len(newVal): element.setAttribute(transformAttr, newVal) else: element.removeAttribute(transformAttr) num += (len(val) - len(newVal)) for child in element.childNodes: if (child.nodeType == 1): num += optimizeTransforms(child, options) return num
[ "def", "optimizeTransforms", "(", "element", ",", "options", ")", ":", "num", "=", "0", "for", "transformAttr", "in", "[", "'transform'", ",", "'patternTransform'", ",", "'gradientTransform'", "]", ":", "val", "=", "element", ".", "getAttribute", "(", "transfo...
attempts to optimise transform specifications on the given node and its children .
train
true
26,359
def print_as_hex(s): print ':'.join(('{0:x}'.format(ord(c)) for c in s))
[ "def", "print_as_hex", "(", "s", ")", ":", "print", "':'", ".", "join", "(", "(", "'{0:x}'", ".", "format", "(", "ord", "(", "c", ")", ")", "for", "c", "in", "s", ")", ")" ]
print a string as hex bytes .
train
false
26,361
def isIntersectingItself(loop, width): outlines = [] for pointIndex in xrange(len(loop)): pointBegin = loop[pointIndex] pointEnd = loop[((pointIndex + 1) % len(loop))] if euclidean.isLineIntersectingLoops(outlines, pointBegin, pointEnd): return True addSegmentOutline(False, outlines, pointBegin, pointEnd, width) return False
[ "def", "isIntersectingItself", "(", "loop", ",", "width", ")", ":", "outlines", "=", "[", "]", "for", "pointIndex", "in", "xrange", "(", "len", "(", "loop", ")", ")", ":", "pointBegin", "=", "loop", "[", "pointIndex", "]", "pointEnd", "=", "loop", "[",...
determine if the loop is intersecting itself .
train
false
26,363
def standalone(view_func): def inner(request, *args, **kwargs): response = view_func(request, *args, **kwargs) if isinstance(response, HttpResponse): response.standalone = True return response return wraps(view_func)(inner)
[ "def", "standalone", "(", "view_func", ")", ":", "def", "inner", "(", "request", ",", "*", "args", ",", "**", "kwargs", ")", ":", "response", "=", "view_func", "(", "request", ",", "*", "args", ",", "**", "kwargs", ")", "if", "isinstance", "(", "resp...
marks the view method as standalone view; this means that httpresponse objects returned from applicationcontent are returned directly .
train
false
26,364
def eval_node(node): if isinstance(node, ast.Num): return node.n elif isinstance(node, ast.operator): return OPERATORS[type(node)] elif isinstance(node, ast.BinOp): return eval_node(node.op)(eval_node(node.left), eval_node(node.right)) else: raise ValueError(node)
[ "def", "eval_node", "(", "node", ")", ":", "if", "isinstance", "(", "node", ",", "ast", ".", "Num", ")", ":", "return", "node", ".", "n", "elif", "isinstance", "(", "node", ",", "ast", ".", "operator", ")", ":", "return", "OPERATORS", "[", "type", ...
evaluates single ast node .
train
false