id_within_dataset
int64
1
55.5k
snippet
stringlengths
19
14.2k
tokens
listlengths
6
1.63k
nl
stringlengths
6
352
split_within_dataset
stringclasses
1 value
is_duplicated
bool
2 classes
31,768
def urlretrieve(url, filename, data=None, timeout=300): logging.debug('Fetching %s -> %s', url, filename) src_file = urlopen(url, data=data, timeout=timeout) try: dest_file = open(filename, 'wb') try: shutil.copyfileobj(src_file, dest_file) finally: dest_file.close() finally: src_file.close()
[ "def", "urlretrieve", "(", "url", ",", "filename", ",", "data", "=", "None", ",", "timeout", "=", "300", ")", ":", "logging", ".", "debug", "(", "'Fetching %s -> %s'", ",", "url", ",", "filename", ")", "src_file", "=", "urlopen", "(", "url", ",", "data...
retrieve a file from given url .
train
false
31,769
def formatSerialNumber(field): sn = field.value return ('%04X-%04X' % ((sn >> 16), (sn & 65535)))
[ "def", "formatSerialNumber", "(", "field", ")", ":", "sn", "=", "field", ".", "value", "return", "(", "'%04X-%04X'", "%", "(", "(", "sn", ">>", "16", ")", ",", "(", "sn", "&", "65535", ")", ")", ")" ]
format a disc serial number .
train
false
31,770
def list_semod(): helptext = __salt__['cmd.run']('semodule -h').splitlines() semodule_version = '' for line in helptext: if line.strip().startswith('full'): semodule_version = 'new' if (semodule_version == 'new'): mdata = __salt__['cmd.run']('semodule -lfull').splitlines() ret = {} for line in mdata: if (not line.strip()): continue comps = line.split() if (len(comps) == 4): ret[comps[1]] = {'Enabled': False, 'Version': None} else: ret[comps[1]] = {'Enabled': True, 'Version': None} else: mdata = __salt__['cmd.run']('semodule -l').splitlines() ret = {} for line in mdata: if (not line.strip()): continue comps = line.split() if (len(comps) == 3): ret[comps[0]] = {'Enabled': False, 'Version': comps[1]} else: ret[comps[0]] = {'Enabled': True, 'Version': comps[1]} return ret
[ "def", "list_semod", "(", ")", ":", "helptext", "=", "__salt__", "[", "'cmd.run'", "]", "(", "'semodule -h'", ")", ".", "splitlines", "(", ")", "semodule_version", "=", "''", "for", "line", "in", "helptext", ":", "if", "line", ".", "strip", "(", ")", "...
return a structure listing all of the selinux modules on the system and what state they are in cli example: .
train
true
31,771
def get_related_files(filename, include_this_file=True): related_files = [] (path, name, this_type) = split_filename(filename) for type_set in related_filetype_sets: if (this_type in type_set): for related_type in type_set: if (include_this_file or (related_type != this_type)): related_files.append(os.path.join(path, (name + related_type))) if (not len(related_files)): related_files = [filename] return related_files
[ "def", "get_related_files", "(", "filename", ",", "include_this_file", "=", "True", ")", ":", "related_files", "=", "[", "]", "(", "path", ",", "name", ",", "this_type", ")", "=", "split_filename", "(", "filename", ")", "for", "type_set", "in", "related_file...
returns a list of related files .
train
false
31,772
def shell_split(text): assert is_text_string(text) pattern = '(\\s+|(?<!\\\\)".*?(?<!\\\\)"|(?<!\\\\)\\\'.*?(?<!\\\\)\\\')' out = [] for token in re.split(pattern, text): if token.strip(): out.append(token.strip('"').strip("'")) return out
[ "def", "shell_split", "(", "text", ")", ":", "assert", "is_text_string", "(", "text", ")", "pattern", "=", "'(\\\\s+|(?<!\\\\\\\\)\".*?(?<!\\\\\\\\)\"|(?<!\\\\\\\\)\\\\\\'.*?(?<!\\\\\\\\)\\\\\\')'", "out", "=", "[", "]", "for", "token", "in", "re", ".", "split", "(", ...
split the string text using shell-like syntax this avoids breaking single/double-quoted strings .
train
true
31,774
def unregister_unpack_format(name): del _UNPACK_FORMATS[name]
[ "def", "unregister_unpack_format", "(", "name", ")", ":", "del", "_UNPACK_FORMATS", "[", "name", "]" ]
removes the unpack format from the registry .
train
false
31,776
def true_(): return True
[ "def", "true_", "(", ")", ":", "return", "True" ]
always return true cli example: .
train
false
31,777
def mkXRDSTag(t): return nsTag(XRDS_NS, t)
[ "def", "mkXRDSTag", "(", "t", ")", ":", "return", "nsTag", "(", "XRDS_NS", ",", "t", ")" ]
basestring -> basestring create a tag name in the xrds xml namespace suitable for using with elementtree .
train
false
31,780
def eigenvalues(creation_sequence): degseq = degree_sequence(creation_sequence) degseq.sort() eiglist = [] eig = 0 row = len(degseq) bigdeg = degseq.pop() while row: if (bigdeg < row): eiglist.append(eig) row -= 1 else: eig += 1 if degseq: bigdeg = degseq.pop() else: bigdeg = 0 return eiglist
[ "def", "eigenvalues", "(", "creation_sequence", ")", ":", "degseq", "=", "degree_sequence", "(", "creation_sequence", ")", "degseq", ".", "sort", "(", ")", "eiglist", "=", "[", "]", "eig", "=", "0", "row", "=", "len", "(", "degseq", ")", "bigdeg", "=", ...
return sequence of eigenvalues of the laplacian of the threshold graph for the given creation_sequence .
train
false
31,781
def process_modelformset(request, model_class, queryset, **kwargs): formset_class = modelformset_factory(model_class, **kwargs) if (queryset is None): queryset = model_class.objects.all() if ((request.method == 'POST') and request.POST): objects = paginate(request, queryset) formset = formset_class(request.POST, queryset=objects.object_list) if formset.is_valid(): formset.save() else: return (formset, _('There are errors in the form. Please review the problems below.'), objects) queryset = queryset.filter() objects = paginate(request, queryset) return (formset_class(queryset=objects.object_list), None, objects)
[ "def", "process_modelformset", "(", "request", ",", "model_class", ",", "queryset", ",", "**", "kwargs", ")", ":", "formset_class", "=", "modelformset_factory", "(", "model_class", ",", "**", "kwargs", ")", "if", "(", "queryset", "is", "None", ")", ":", "que...
with the django model class model_class and the given queryset .
train
false
31,782
def _unique_label(previous_labels, label): while (label in previous_labels): label_split = label.split('.') if label_split[(-1)].startswith('copy'): copy_num = 1 if (label_split[(-1)] != 'copy'): copy_num = (int(label_split[(-1)][4:]) + 1) new_label = ('%s.copy%s' % ('.'.join(label_split[:(-1)]), copy_num)) label = new_label else: label += '.copy' return label
[ "def", "_unique_label", "(", "previous_labels", ",", "label", ")", ":", "while", "(", "label", "in", "previous_labels", ")", ":", "label_split", "=", "label", ".", "split", "(", "'.'", ")", "if", "label_split", "[", "(", "-", "1", ")", "]", ".", "start...
returns a unique name if label is already in previous_labels .
train
false
31,783
def toDOT(tree, adaptor=None, treeST=DOTTreeGenerator._treeST, edgeST=DOTTreeGenerator._edgeST): gen = DOTTreeGenerator() return gen.toDOT(tree, adaptor, treeST, edgeST)
[ "def", "toDOT", "(", "tree", ",", "adaptor", "=", "None", ",", "treeST", "=", "DOTTreeGenerator", ".", "_treeST", ",", "edgeST", "=", "DOTTreeGenerator", ".", "_edgeST", ")", ":", "gen", "=", "DOTTreeGenerator", "(", ")", "return", "gen", ".", "toDOT", "...
generate dot for a whole tree not just a node .
train
false
31,785
def getmoduleinfo(path): filename = os.path.basename(path) suffixes = map((lambda info: ((- len(info[0])), info[0], info[1], info[2])), imp.get_suffixes()) suffixes.sort() for (neglen, suffix, mode, mtype) in suffixes: if (filename[neglen:] == suffix): return ModuleInfo(filename[:neglen], suffix, mode, mtype)
[ "def", "getmoduleinfo", "(", "path", ")", ":", "filename", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "suffixes", "=", "map", "(", "(", "lambda", "info", ":", "(", "(", "-", "len", "(", "info", "[", "0", "]", ")", ")", ",", "inf...
get the module name .
train
false
31,786
def pkg_walk(package, top): names = pkg_resources.resource_listdir(package, top) (dirs, nondirs) = ([], []) for name in names: if pkg_resources.resource_isdir(package, posixpath.join(top, name)): dirs.append(name) else: nondirs.append(name) (yield (top, dirs, nondirs)) for name in dirs: new_path = posixpath.join(top, name) for out in pkg_walk(package, new_path): (yield out)
[ "def", "pkg_walk", "(", "package", ",", "top", ")", ":", "names", "=", "pkg_resources", ".", "resource_listdir", "(", "package", ",", "top", ")", "(", "dirs", ",", "nondirs", ")", "=", "(", "[", "]", ",", "[", "]", ")", "for", "name", "in", "names"...
walk the package resources .
train
false
31,787
def haystack_load_apps(): return [i.label for i in apps.get_app_configs() if (i.models_module is not None)]
[ "def", "haystack_load_apps", "(", ")", ":", "return", "[", "i", ".", "label", "for", "i", "in", "apps", ".", "get_app_configs", "(", ")", "if", "(", "i", ".", "models_module", "is", "not", "None", ")", "]" ]
return a list of app labels for all installed applications which have models .
train
false
31,788
def range6(): plt.figure() plt.plot(range(6)) plt.show()
[ "def", "range6", "(", ")", ":", "plt", ".", "figure", "(", ")", "plt", ".", "plot", "(", "range", "(", "6", ")", ")", "plt", ".", "show", "(", ")" ]
this is the function that should be executed .
train
false
31,791
def mk_token(**load): netapi = salt.netapi.NetapiClient(__opts__) if (not netapi._is_master_running()): raise salt.exceptions.SaltDaemonNotRunning('Salt Master must be running.') auth = salt.auth.Resolver(__opts__) return auth.mk_token(load)
[ "def", "mk_token", "(", "**", "load", ")", ":", "netapi", "=", "salt", ".", "netapi", ".", "NetapiClient", "(", "__opts__", ")", "if", "(", "not", "netapi", ".", "_is_master_running", "(", ")", ")", ":", "raise", "salt", ".", "exceptions", ".", "SaltDa...
create an eauth token using provided credentials cli example: .
train
true
31,792
def new_context(environment, template_name, blocks, vars=None, shared=None, globals=None, locals=None): if (vars is None): vars = {} if shared: parent = vars else: parent = dict((globals or ()), **vars) if locals: if shared: parent = dict(parent) for (key, value) in locals.iteritems(): if ((key[:2] == 'l_') and (value is not missing)): parent[key[2:]] = value return Context(environment, parent, template_name, blocks)
[ "def", "new_context", "(", "environment", ",", "template_name", ",", "blocks", ",", "vars", "=", "None", ",", "shared", "=", "None", ",", "globals", "=", "None", ",", "locals", "=", "None", ")", ":", "if", "(", "vars", "is", "None", ")", ":", "vars",...
internal helper to for context creation .
train
true
31,794
def is_request_from_mobile_web_view(request): for mobile_path in PATHS_ACCESSED_BY_MOBILE_WITH_SESSION_COOKIES: if re.match(mobile_path, request.path): return True return False
[ "def", "is_request_from_mobile_web_view", "(", "request", ")", ":", "for", "mobile_path", "in", "PATHS_ACCESSED_BY_MOBILE_WITH_SESSION_COOKIES", ":", "if", "re", ".", "match", "(", "mobile_path", ",", "request", ".", "path", ")", ":", "return", "True", "return", "...
returns whether the given request was made by an open edx mobile web view using a session cookie .
train
false
31,796
def random_combination(iterable, r): pool = tuple(iterable) n = len(pool) indices = sorted(random.sample(xrange(n), r)) return tuple((pool[i] for i in indices))
[ "def", "random_combination", "(", "iterable", ",", "r", ")", ":", "pool", "=", "tuple", "(", "iterable", ")", "n", "=", "len", "(", "pool", ")", "indices", "=", "sorted", "(", "random", ".", "sample", "(", "xrange", "(", "n", ")", ",", "r", ")", ...
random_combination -> tuple arguments: iterable: an iterable .
train
true
31,797
def SetRegistryDefaultValue(subKey, value, rootkey=None): if (rootkey is None): rootkey = GetRootKey() if (type(value) == str): typeId = win32con.REG_SZ elif (type(value) == int): typeId = win32con.REG_DWORD else: raise TypeError(('Value must be string or integer - was passed ' + repr(value))) win32api.RegSetValue(rootkey, subKey, typeId, value)
[ "def", "SetRegistryDefaultValue", "(", "subKey", ",", "value", ",", "rootkey", "=", "None", ")", ":", "if", "(", "rootkey", "is", "None", ")", ":", "rootkey", "=", "GetRootKey", "(", ")", "if", "(", "type", "(", "value", ")", "==", "str", ")", ":", ...
a helper to set the default value for a key in the registry .
train
false
31,800
def image_send_notification(bytes_written, expected_size, image_meta, request, notifier): try: context = request.context payload = {'bytes_sent': bytes_written, 'image_id': image_meta['id'], 'owner_id': image_meta['owner'], 'receiver_tenant_id': context.tenant, 'receiver_user_id': context.user, 'destination_ip': request.remote_addr} if (bytes_written != expected_size): notify = notifier.error else: notify = notifier.info notify('image.send', payload) except Exception as err: msg = (_LE('An error occurred during image.send notification: %(err)s') % {'err': err}) LOG.error(msg)
[ "def", "image_send_notification", "(", "bytes_written", ",", "expected_size", ",", "image_meta", ",", "request", ",", "notifier", ")", ":", "try", ":", "context", "=", "request", ".", "context", "payload", "=", "{", "'bytes_sent'", ":", "bytes_written", ",", "...
send an image .
train
false
31,801
def idd_copycols(A, k, idx): A = np.asfortranarray(A) return _id.idd_copycols(A, k, idx)
[ "def", "idd_copycols", "(", "A", ",", "k", ",", "idx", ")", ":", "A", "=", "np", ".", "asfortranarray", "(", "A", ")", "return", "_id", ".", "idd_copycols", "(", "A", ",", "k", ",", "idx", ")" ]
reconstruct skeleton matrix from real id .
train
false
31,802
def _matchPluginToPrefix(plugins, endpointType): endpointType = endpointType.lower() for plugin in plugins: if (_matchingString(plugin.prefix.lower(), endpointType) == endpointType): return plugin raise ValueError(("Unknown endpoint type: '%s'" % (endpointType,)))
[ "def", "_matchPluginToPrefix", "(", "plugins", ",", "endpointType", ")", ":", "endpointType", "=", "endpointType", ".", "lower", "(", ")", "for", "plugin", "in", "plugins", ":", "if", "(", "_matchingString", "(", "plugin", ".", "prefix", ".", "lower", "(", ...
match plugin to prefix .
train
false
31,803
def remove_heroku_files(): filenames = ['Procfile', 'runtime.txt'] if ('{{ cookiecutter.use_elasticbeanstalk_experimental }}'.lower() != 'y'): filenames.append('requirements.txt') for filename in ['Procfile', 'runtime.txt']: file_name = os.path.join(PROJECT_DIRECTORY, filename) remove_file(file_name)
[ "def", "remove_heroku_files", "(", ")", ":", "filenames", "=", "[", "'Procfile'", ",", "'runtime.txt'", "]", "if", "(", "'{{ cookiecutter.use_elasticbeanstalk_experimental }}'", ".", "lower", "(", ")", "!=", "'y'", ")", ":", "filenames", ".", "append", "(", "'re...
removes files needed for heroku if it isn't going to be used .
train
false
31,804
def _validate(data, schema, context): converted_data = augment_data(data, schema) full_schema = make_full_schema(data, schema) errors = dict(((key, []) for key in full_schema)) for key in sorted(full_schema, key=flattened_order_key): if (key[(-1)] == '__before'): for converter in full_schema[key]: try: convert(converter, key, converted_data, errors, context) except StopOnError: break for key in sorted(full_schema, key=flattened_order_key): if (not key[(-1)].startswith('__')): for converter in full_schema[key]: try: convert(converter, key, converted_data, errors, context) except StopOnError: break for key in sorted(full_schema, key=flattened_order_key): if (key[(-1)] == '__extras'): for converter in full_schema[key]: try: convert(converter, key, converted_data, errors, context) except StopOnError: break for key in reversed(sorted(full_schema, key=flattened_order_key)): if (key[(-1)] == '__after'): for converter in full_schema[key]: try: convert(converter, key, converted_data, errors, context) except StopOnError: break if (('__junk',) in full_schema): for converter in full_schema[('__junk',)]: try: convert(converter, ('__junk',), converted_data, errors, context) except StopOnError: break return (converted_data, errors)
[ "def", "_validate", "(", "data", ",", "schema", ",", "context", ")", ":", "converted_data", "=", "augment_data", "(", "data", ",", "schema", ")", "full_schema", "=", "make_full_schema", "(", "data", ",", "schema", ")", "errors", "=", "dict", "(", "(", "(...
decorator for common function argument validation .
train
false
31,806
def is_ipv6(ip_str): try: addr = ipaddr.IPAddress(ip_str) return (addr.version == 6) except: return False
[ "def", "is_ipv6", "(", "ip_str", ")", ":", "try", ":", "addr", "=", "ipaddr", ".", "IPAddress", "(", "ip_str", ")", "return", "(", "addr", ".", "version", "==", "6", ")", "except", ":", "return", "False" ]
validate whether given string is ipv6 .
train
false
31,809
def aggregate_get_by_uuid(context, uuid): return IMPL.aggregate_get_by_uuid(context, uuid)
[ "def", "aggregate_get_by_uuid", "(", "context", ",", "uuid", ")", ":", "return", "IMPL", ".", "aggregate_get_by_uuid", "(", "context", ",", "uuid", ")" ]
get a specific aggregate by uuid .
train
false
31,811
def episode_title(): rebulk = Rebulk().rules(EpisodeTitleFromPosition, AlternativeTitleReplace, TitleToEpisodeTitle, Filepart3EpisodeTitle, Filepart2EpisodeTitle) return rebulk
[ "def", "episode_title", "(", ")", ":", "rebulk", "=", "Rebulk", "(", ")", ".", "rules", "(", "EpisodeTitleFromPosition", ",", "AlternativeTitleReplace", ",", "TitleToEpisodeTitle", ",", "Filepart3EpisodeTitle", ",", "Filepart2EpisodeTitle", ")", "return", "rebulk" ]
builder for rebulk object .
train
false
31,812
def _footer_legal_links(): links = [('terms_of_service_and_honor_code', marketing_link('TOS_AND_HONOR'), _('Terms of Service & Honor Code')), ('privacy_policy', marketing_link('PRIVACY'), _('Privacy Policy')), ('accessibility_policy', marketing_link('ACCESSIBILITY'), _('Accessibility Policy')), ('sitemap', marketing_link('SITE_MAP'), _('Sitemap')), ('media_kit', marketing_link('MEDIA_KIT'), _('Media Kit'))] tos_and_honor_link = marketing_link('TOS_AND_HONOR') if (not (tos_and_honor_link and (tos_and_honor_link != '#'))): links.extend([('terms_of_service', marketing_link('TOS'), _('Terms of Service')), ('honor_code', marketing_link('HONOR'), _('Honor Code'))]) return [{'name': link_name, 'title': link_title, 'url': link_url} for (link_name, link_url, link_title) in links if (link_url and (link_url != '#'))]
[ "def", "_footer_legal_links", "(", ")", ":", "links", "=", "[", "(", "'terms_of_service_and_honor_code'", ",", "marketing_link", "(", "'TOS_AND_HONOR'", ")", ",", "_", "(", "'Terms of Service & Honor Code'", ")", ")", ",", "(", "'privacy_policy'", ",", "marketing_li...
return the legal footer links .
train
false
31,814
def openWebPage(webPagePath): if (webPagePath.find('#') != (-1)): redirectionText = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">\n<html>\n<head>\n' redirectionText += ('<meta http-equiv="REFRESH" content="0;url=%s"></head>\n</HTML>\n' % webPagePath) webPagePath = archive.getDocumentationPath('redirect.html') archive.writeFileText(webPagePath, redirectionText) webPagePath = ('"%s"' % webPagePath) try: os.startfile(webPagePath) return except: pass webbrowserName = webbrowser.get().name if (webbrowserName == ''): print 'Skeinforge was not able to open the documentation file in a web browser. To see the documentation, open the following file in a web browser:' print webPagePath return os.system(((webbrowserName + ' ') + webPagePath))
[ "def", "openWebPage", "(", "webPagePath", ")", ":", "if", "(", "webPagePath", ".", "find", "(", "'#'", ")", "!=", "(", "-", "1", ")", ")", ":", "redirectionText", "=", "'<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0 Transitional//EN\">\\n<html>\\n<head>\\n'", "redirect...
open a web page in a browser .
train
false
31,817
def _default_controls(ncolors): return np.linspace(0.0, 1.0, ncolors)
[ "def", "_default_controls", "(", "ncolors", ")", ":", "return", "np", ".", "linspace", "(", "0.0", ",", "1.0", ",", "ncolors", ")" ]
generate linearly spaced control points from a set of colors .
train
false
31,818
def n_subplots(ax_im): return len(ax_im.get_figure().get_axes())
[ "def", "n_subplots", "(", "ax_im", ")", ":", "return", "len", "(", "ax_im", ".", "get_figure", "(", ")", ".", "get_axes", "(", ")", ")" ]
return the number of subplots in the figure containing an axesimage .
train
false
31,820
def get_dev_bpf(): for bpf in range(0, 8): try: fd = os.open(('/dev/bpf%i' % bpf), os.O_RDWR) return (fd, bpf) except OSError as err: continue raise Scapy_Exception('No /dev/bpf handle is available !')
[ "def", "get_dev_bpf", "(", ")", ":", "for", "bpf", "in", "range", "(", "0", ",", "8", ")", ":", "try", ":", "fd", "=", "os", ".", "open", "(", "(", "'/dev/bpf%i'", "%", "bpf", ")", ",", "os", ".", "O_RDWR", ")", "return", "(", "fd", ",", "bpf...
returns an opened bpf file object .
train
true
31,821
def envdict2listdict(envdict): sep = os.path.pathsep for key in envdict: if (sep in envdict[key]): envdict[key] = [path.strip() for path in envdict[key].split(sep)] return envdict
[ "def", "envdict2listdict", "(", "envdict", ")", ":", "sep", "=", "os", ".", "path", ".", "pathsep", "for", "key", "in", "envdict", ":", "if", "(", "sep", "in", "envdict", "[", "key", "]", ")", ":", "envdict", "[", "key", "]", "=", "[", "path", "....
dict --> dict of lists .
train
true
31,822
def check_if_project_can_have_more_memberships(project, total_new_memberships): if (project.owner is None): return (False, _('Project without owner')) if project.is_private: total_memberships = (project.memberships.count() + total_new_memberships) max_memberships = project.owner.max_memberships_private_projects error_members_exceeded = _('You have reached your current limit of memberships for private projects') else: total_memberships = (project.memberships.count() + total_new_memberships) max_memberships = project.owner.max_memberships_public_projects error_members_exceeded = _('You have reached your current limit of memberships for public projects') if ((max_memberships is not None) and (total_memberships > max_memberships)): return (False, error_members_exceeded) if ((project.memberships.filter(user=None).count() + total_new_memberships) > settings.MAX_PENDING_MEMBERSHIPS): error_pending_memberships_exceeded = _('You have reached the current limit of pending memberships') return (False, error_pending_memberships_exceeded) return (True, None)
[ "def", "check_if_project_can_have_more_memberships", "(", "project", ",", "total_new_memberships", ")", ":", "if", "(", "project", ".", "owner", "is", "None", ")", ":", "return", "(", "False", ",", "_", "(", "'Project without owner'", ")", ")", "if", "project", ...
return if a project can have more n new memberships .
train
false
31,824
def AppendNodeAnnotation(node, annotation, value): attr = GetNodeAnnotation(node, annotation, set()) attr.add(value) SetNodeAnnotation(node, annotation, attr)
[ "def", "AppendNodeAnnotation", "(", "node", ",", "annotation", ",", "value", ")", ":", "attr", "=", "GetNodeAnnotation", "(", "node", ",", "annotation", ",", "set", "(", ")", ")", "attr", ".", "add", "(", "value", ")", "SetNodeAnnotation", "(", "node", "...
appends an annotation value to a list of annotations on the node .
train
false
31,826
def timings_out(reps, func, *args, **kw): reps = int(reps) assert (reps >= 1), 'reps must be >= 1' if (reps == 1): start = clock() out = func(*args, **kw) tot_time = (clock() - start) else: rng = range((reps - 1)) start = clock() for dummy in rng: func(*args, **kw) out = func(*args, **kw) tot_time = (clock() - start) av_time = (tot_time / reps) return (tot_time, av_time, out)
[ "def", "timings_out", "(", "reps", ",", "func", ",", "*", "args", ",", "**", "kw", ")", ":", "reps", "=", "int", "(", "reps", ")", "assert", "(", "reps", ">=", "1", ")", ",", "'reps must be >= 1'", "if", "(", "reps", "==", "1", ")", ":", "start",...
timings_out -> execute a function reps times .
train
true
31,828
def agent_services(consul_url=None): ret = {} if (not consul_url): consul_url = _get_config() if (not consul_url): log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'agent/services' ret = _query(consul_url=consul_url, function=function, method='GET') return ret
[ "def", "agent_services", "(", "consul_url", "=", "None", ")", ":", "ret", "=", "{", "}", "if", "(", "not", "consul_url", ")", ":", "consul_url", "=", "_get_config", "(", ")", "if", "(", "not", "consul_url", ")", ":", "log", ".", "error", "(", "'No Co...
returns the services the local agent is managing .
train
true
31,829
def create_subscription(topic_name, subscription_name): pubsub_client = pubsub.Client() topic = pubsub_client.topic(topic_name) subscription = topic.subscription(subscription_name) subscription.create() print 'Subscription {} created on topic {}.'.format(subscription.name, topic.name)
[ "def", "create_subscription", "(", "topic_name", ",", "subscription_name", ")", ":", "pubsub_client", "=", "pubsub", ".", "Client", "(", ")", "topic", "=", "pubsub_client", ".", "topic", "(", "topic_name", ")", "subscription", "=", "topic", ".", "subscription", ...
create a new pull subscription on the given topic .
train
false
31,830
@utils.arg('--limit', dest='limit', metavar='<limit>', type=int, default=None, help=_("Maximum number of server groups to display. If limit is bigger than 'CONF.api.max_limit' option of Nova API, limit 'CONF.api.max_limit' will be used instead.")) @utils.arg('--offset', dest='offset', metavar='<offset>', type=int, default=None, help=_('The offset of groups list to display; use with limit to return a slice of server groups.')) @utils.arg('--all-projects', dest='all_projects', action='store_true', default=False, help=_('Display server groups from all projects (Admin only).')) def do_server_group_list(cs, args): server_groups = cs.server_groups.list(all_projects=args.all_projects, limit=args.limit, offset=args.offset) _print_server_group_details(cs, server_groups)
[ "@", "utils", ".", "arg", "(", "'--limit'", ",", "dest", "=", "'limit'", ",", "metavar", "=", "'<limit>'", ",", "type", "=", "int", ",", "default", "=", "None", ",", "help", "=", "_", "(", "\"Maximum number of server groups to display. If limit is bigger than 'C...
print a list of all server groups .
train
false
31,831
def _write_options(name, configuration): _check_portname(name) pkg = next(iter(configuration)) conf_ptr = configuration[pkg] dirname = _options_dir(name) if (not os.path.isdir(dirname)): try: os.makedirs(dirname) except OSError as exc: raise CommandExecutionError('Unable to make {0}: {1}'.format(dirname, exc)) with salt.utils.fopen(os.path.join(dirname, 'options'), 'w') as fp_: sorted_options = list(conf_ptr.keys()) sorted_options.sort() fp_.write('# This file was auto-generated by Salt (http://saltstack.com)\n# Options for {0}\n_OPTIONS_READ={0}\n_FILE_COMPLETE_OPTIONS_LIST={1}\n'.format(pkg, ' '.join(sorted_options))) opt_tmpl = 'OPTIONS_FILE_{0}SET+={1}\n' for opt in sorted_options: fp_.write(opt_tmpl.format(('' if (conf_ptr[opt] == 'on') else 'UN'), opt))
[ "def", "_write_options", "(", "name", ",", "configuration", ")", ":", "_check_portname", "(", "name", ")", "pkg", "=", "next", "(", "iter", "(", "configuration", ")", ")", "conf_ptr", "=", "configuration", "[", "pkg", "]", "dirname", "=", "_options_dir", "...
writes a new options file .
train
true
31,833
def _model_to_entity_schema(document_class): schema = {} for (name, prop) in document_class._properties.iteritems(): _add_schema_entry(prop.__class__, name, schema) return schema
[ "def", "_model_to_entity_schema", "(", "document_class", ")", ":", "schema", "=", "{", "}", "for", "(", "name", ",", "prop", ")", "in", "document_class", ".", "_properties", ".", "iteritems", "(", ")", ":", "_add_schema_entry", "(", "prop", ".", "__class__",...
produce schema from ndb model class .
train
false
31,835
def parse_error(pkt): elen = endian_int(pkt[1:5]) pkt = pkt[20:] error = '' for idx in xrange((elen - 20)): tmp = pkt[idx] if (tmp == '00'): break error += tmp.decode('hex') return error
[ "def", "parse_error", "(", "pkt", ")", ":", "elen", "=", "endian_int", "(", "pkt", "[", "1", ":", "5", "]", ")", "pkt", "=", "pkt", "[", "20", ":", "]", "error", "=", "''", "for", "idx", "in", "xrange", "(", "(", "elen", "-", "20", ")", ")", ...
parse an error message .
train
false
31,836
def export_doc(doctype, name, module=None): from frappe.modules.export_file import write_document_file print doctype, name if (not module): module = frappe.db.get_value(u'DocType', name, u'module') write_document_file(frappe.get_doc(doctype, name), module)
[ "def", "export_doc", "(", "doctype", ",", "name", ",", "module", "=", "None", ")", ":", "from", "frappe", ".", "modules", ".", "export_file", "import", "write_document_file", "print", "doctype", ",", "name", "if", "(", "not", "module", ")", ":", "module", ...
write a doc to standard path .
train
false
31,837
def data_changed(old): data = xw.Range('A1').table.value df = pd.DataFrame(data[1:], columns=data[0]) try: assert_frame_equal(df, old) return None except AssertionError: return df
[ "def", "data_changed", "(", "old", ")", ":", "data", "=", "xw", ".", "Range", "(", "'A1'", ")", ".", "table", ".", "value", "df", "=", "pd", ".", "DataFrame", "(", "data", "[", "1", ":", "]", ",", "columns", "=", "data", "[", "0", "]", ")", "...
returns a new dataframe if data has changed on the excel workbook .
train
false
31,838
def due_followups(): def prep(r): resource = r.resource s3.crud_strings['dvr_case_activity']['title_list'] = T('Activities to follow up') if (not r.record): query = ((((FS('followup') == True) & (FS('followup_date') <= datetime.datetime.utcnow().date())) & (FS('completed') != True)) & ((FS('person_id$dvr_case.archived') == None) | (FS('person_id$dvr_case.archived') == False))) resource.add_filter(query) list_fields = ['case_id$reference', 'person_id$first_name', 'person_id$last_name', 'need_id', 'need_details', 'emergency', 'referral_details', 'followup_date', 'completed'] resource.configure(list_fields=list_fields, insertable=False, deletable=False) return True s3.prep = prep return s3_rest_controller('dvr', 'case_activity')
[ "def", "due_followups", "(", ")", ":", "def", "prep", "(", "r", ")", ":", "resource", "=", "r", ".", "resource", "s3", ".", "crud_strings", "[", "'dvr_case_activity'", "]", "[", "'title_list'", "]", "=", "T", "(", "'Activities to follow up'", ")", "if", ...
restful controller for due follow-ups .
train
false
31,840
def _find_unit_pattern(unit_id, locale=LC_NUMERIC): locale = Locale.parse(locale) unit_patterns = locale._data['unit_patterns'] if (unit_id in unit_patterns): return unit_id for unit_pattern in sorted(unit_patterns, key=len): if unit_pattern.endswith(unit_id): return unit_pattern
[ "def", "_find_unit_pattern", "(", "unit_id", ",", "locale", "=", "LC_NUMERIC", ")", ":", "locale", "=", "Locale", ".", "parse", "(", "locale", ")", "unit_patterns", "=", "locale", ".", "_data", "[", "'unit_patterns'", "]", "if", "(", "unit_id", "in", "unit...
expand a unit into a qualified form .
train
false
31,841
def rotate2D(pts, origin, ang=(pi / 4)): return (dot((pts - origin), ar([[cos(ang), sin(ang)], [(- sin(ang)), cos(ang)]])) + origin)
[ "def", "rotate2D", "(", "pts", ",", "origin", ",", "ang", "=", "(", "pi", "/", "4", ")", ")", ":", "return", "(", "dot", "(", "(", "pts", "-", "origin", ")", ",", "ar", "(", "[", "[", "cos", "(", "ang", ")", ",", "sin", "(", "ang", ")", "...
pts = {} rotates points about center cnt(2) by angle ang(1) in radian .
train
false
31,842
def HT_TRENDLINE(ds, count): return call_talib_with_ds(ds, count, talib.HT_TRENDLINE)
[ "def", "HT_TRENDLINE", "(", "ds", ",", "count", ")", ":", "return", "call_talib_with_ds", "(", "ds", ",", "count", ",", "talib", ".", "HT_TRENDLINE", ")" ]
hilbert transform - instantaneous trendline .
train
false
31,844
def _divisors(n):
    """Yield every divisor of *n* (helper for ``divisors``)."""
    factors = factorint(n)
    primes = sorted(factors)

    def emit(idx=0):
        # Recursively combine prime powers; each divisor is produced once,
        # in the same order as the original nested-generator formulation.
        if idx == len(primes):
            yield 1
            return
        p = primes[idx]
        powers = [1]
        for _ in range(factors[p]):
            powers.append(powers[-1] * p)
        for tail in emit(idx + 1):
            for power in powers:
                yield power * tail

    for d in emit():
        yield d
[ "def", "_divisors", "(", "n", ")", ":", "factordict", "=", "factorint", "(", "n", ")", "ps", "=", "sorted", "(", "factordict", ".", "keys", "(", ")", ")", "def", "rec_gen", "(", "n", "=", "0", ")", ":", "if", "(", "n", "==", "len", "(", "ps", ...
helper function for divisors which generates the divisors .
train
false
31,845
def json_pretty(func):
    """Like :func:`json`, but the returned JSON is indented and sorted."""
    pretty = True
    return json(func, pretty=pretty)
[ "def", "json_pretty", "(", "func", ")", ":", "return", "json", "(", "func", ",", "pretty", "=", "True", ")" ]
indent and sort returned json .
train
false
31,846
def _datetime_from_json(value, field):
    """Coerce a JSON *value* to ``datetime``; None when the field is null."""
    if not _not_null(value, field):
        return None
    return datetime.datetime.strptime(value, _RFC3339_NO_FRACTION)
[ "def", "_datetime_from_json", "(", "value", ",", "field", ")", ":", "if", "_not_null", "(", "value", ",", "field", ")", ":", "return", "datetime", ".", "datetime", ".", "strptime", "(", "value", ",", "_RFC3339_NO_FRACTION", ")" ]
coerce value to a datetime .
train
false
31,847
def get_indexer_absolute_numbering_for_xem(indexer_id, indexer, sceneAbsoluteNumber, scene_season=None):
    """Reverse XEM lookup: map a scene absolute number back to the indexer's
    absolute number for the given show.

    Falls back to returning *sceneAbsoluteNumber* unchanged when inputs are
    missing or no mapping row exists.
    """
    if ((indexer_id is None) or (sceneAbsoluteNumber is None)):
        return sceneAbsoluteNumber
    indexer_id = int(indexer_id)
    indexer = int(indexer)
    # Make sure the local XEM mapping cache is current before querying it.
    xem_refresh(indexer_id, indexer)
    main_db_con = db.DBConnection()
    if (scene_season is None):
        rows = main_db_con.select('SELECT absolute_number FROM tv_episodes WHERE indexer = ? and showid = ? and scene_absolute_number = ?', [indexer, indexer_id, sceneAbsoluteNumber])
    else:
        # Narrow the lookup by scene season when one is supplied.
        rows = main_db_con.select('SELECT absolute_number FROM tv_episodes WHERE indexer = ? and showid = ? and scene_absolute_number = ? and scene_season = ?', [indexer, indexer_id, sceneAbsoluteNumber, scene_season])
    if rows:
        return int(rows[0]['absolute_number'])
    # No mapping found: assume scene and indexer numbering agree.
    return sceneAbsoluteNumber
[ "def", "get_indexer_absolute_numbering_for_xem", "(", "indexer_id", ",", "indexer", ",", "sceneAbsoluteNumber", ",", "scene_season", "=", "None", ")", ":", "if", "(", "(", "indexer_id", "is", "None", ")", "or", "(", "sceneAbsoluteNumber", "is", "None", ")", ")",...
reverse of find_xem_numbering: lookup a tvdb season and episode using scene numbering .
train
false
31,849
def make_transient_to_detached(instance):
    """Make the given transient *instance* :term:`detached`.

    Computes the identity key from the instance's mapped state and commits
    its current attributes, so the object afterwards behaves as if it had
    been loaded from the database.

    Raises InvalidRequestError when the instance is already attached to a
    session or already has an identity key (i.e. is not transient).
    """
    state = attributes.instance_state(instance)
    if (state.session_id or state.key):
        raise sa_exc.InvalidRequestError('Given object must be transient')
    state.key = state.mapper._identity_key_from_state(state)
    if state._deleted:
        del state._deleted
    # Commit the loaded attributes; everything still unloaded is expired so
    # it will refresh from the database on next access.
    state._commit_all(state.dict)
    state._expire_attributes(state.dict, state.unloaded)
[ "def", "make_transient_to_detached", "(", "instance", ")", ":", "state", "=", "attributes", ".", "instance_state", "(", "instance", ")", "if", "(", "state", ".", "session_id", "or", "state", ".", "key", ")", ":", "raise", "sa_exc", ".", "InvalidRequestError", ...
make the given transient instance :term:detached .
train
false
31,850
def cgsnapshot_get_all(context, filters=None):
    """Return all cgsnapshots matching *filters* via the backing implementation."""
    backend = IMPL
    return backend.cgsnapshot_get_all(context, filters)
[ "def", "cgsnapshot_get_all", "(", "context", ",", "filters", "=", "None", ")", ":", "return", "IMPL", ".", "cgsnapshot_get_all", "(", "context", ",", "filters", ")" ]
get all cgsnapshots .
train
false
31,851
def api_enabled(request, course_key):
    """Return True when the notes API is enabled for the given course."""
    return notes_enabled_for_course(_get_course(request, course_key))
[ "def", "api_enabled", "(", "request", ",", "course_key", ")", ":", "course", "=", "_get_course", "(", "request", ",", "course_key", ")", "return", "notes_enabled_for_course", "(", "course", ")" ]
returns true if the api is enabled for the course .
train
false
31,852
def _IfdEntryFactory(stream_rdr, offset):
    """Return an |_IfdEntry| subclass instance for the directory entry at
    *offset* in *stream_rdr*, dispatched on the entry's field type."""
    dispatch = {
        TIFF_FLD.ASCII: _AsciiIfdEntry,
        TIFF_FLD.SHORT: _ShortIfdEntry,
        TIFF_FLD.LONG: _LongIfdEntry,
        TIFF_FLD.RATIONAL: _RationalIfdEntry,
    }
    field_type = stream_rdr.read_short(offset, 2)
    # Unknown field types fall back to the generic entry class.
    entry_cls = dispatch.get(field_type, _IfdEntry)
    return entry_cls.from_stream(stream_rdr, offset)
[ "def", "_IfdEntryFactory", "(", "stream_rdr", ",", "offset", ")", ":", "ifd_entry_classes", "=", "{", "TIFF_FLD", ".", "ASCII", ":", "_AsciiIfdEntry", ",", "TIFF_FLD", ".", "SHORT", ":", "_ShortIfdEntry", ",", "TIFF_FLD", ".", "LONG", ":", "_LongIfdEntry", ","...
return an |_ifdentry| subclass instance containing the value of the directory entry at *offset* in *stream_rdr* .
train
true
31,853
def addHeightsByGraymap(heights, textLines):
    """Append normalized height values parsed from graymap *textLines*.

    Line index 3 holds the maximum gray value used as divisor; every
    integer token on the remaining lines is appended to *heights* divided
    by that value.
    """
    divisor = float(textLines[3])
    heights.extend(
        float(word) / divisor
        for line in textLines[4:]
        for word in line.split())
[ "def", "addHeightsByGraymap", "(", "heights", ",", "textLines", ")", ":", "divisor", "=", "float", "(", "textLines", "[", "3", "]", ")", "for", "line", "in", "textLines", "[", "4", ":", "]", ":", "for", "integerWord", "in", "line", ".", "split", "(", ...
add heights by graymap .
train
false
31,855
def _ssh_run(ssh_bin, address, ec2_key_pair_file, cmd_args, stdin=''):
    """Run *cmd_args* on a remote node over ssh via subprocess.

    Builds the ssh invocation with the project's _ssh_args() helper, feeds
    *stdin* to the remote command, and returns the (stdout, stderr) pair
    from ``Popen.communicate``.
    """
    args = (_ssh_args(ssh_bin, address, ec2_key_pair_file) + list(cmd_args))
    log.debug(('> %s' % cmd_line(args)))
    p = Popen(args, stdout=PIPE, stderr=PIPE, stdin=PIPE)
    return p.communicate(stdin)
[ "def", "_ssh_run", "(", "ssh_bin", ",", "address", ",", "ec2_key_pair_file", ",", "cmd_args", ",", "stdin", "=", "''", ")", ":", "args", "=", "(", "_ssh_args", "(", "ssh_bin", ",", "address", ",", "ec2_key_pair_file", ")", "+", "list", "(", "cmd_args", "...
shortcut to call ssh on a hadoop node via subprocess .
train
false
31,857
def _checkbox(cls, text, tooltip, checked): widget = cls() if text: widget.setText(text) if tooltip: widget.setToolTip(tooltip) if (checked is not None): widget.setChecked(checked) return widget
[ "def", "_checkbox", "(", "cls", ",", "text", ",", "tooltip", ",", "checked", ")", ":", "widget", "=", "cls", "(", ")", "if", "text", ":", "widget", ".", "setText", "(", "text", ")", "if", "tooltip", ":", "widget", ".", "setToolTip", "(", "tooltip", ...
create a widget and apply properties .
train
false
31,859
def _measure_tables(tables, settings):
    """Compare widths of the rendered ASCII *tables* and compute padding.

    Returns (pad_sep, pad_index, max_width): per-table separator padding,
    per-table leftover padding for the index column, and the widest
    table's line length, so all tables can render at a uniform width.
    """
    simple_tables = _simple_tables(tables, settings)
    tab = [x.as_text() for x in simple_tables]
    # Each table's width is the length of its first rendered line.
    length = [len(x.splitlines()[0]) for x in tab]
    len_max = max(length)
    pad_sep = []
    pad_index = []
    for i in range(len(tab)):
        nsep = (tables[i].shape[1] - 1)
        # Spread the missing width evenly over the column separators...
        pad = int(((len_max - length[i]) / nsep))
        pad_sep.append(pad)
        len_new = (length[i] + (nsep * pad))
        # ...and assign whatever remains to the index column.
        pad_index.append((len_max - len_new))
    return (pad_sep, pad_index, max(length))
[ "def", "_measure_tables", "(", "tables", ",", "settings", ")", ":", "simple_tables", "=", "_simple_tables", "(", "tables", ",", "settings", ")", "tab", "=", "[", "x", ".", "as_text", "(", ")", "for", "x", "in", "simple_tables", "]", "length", "=", "[", ...
compare width of ascii tables in a list and calculate padding values .
train
false
31,860
def p_labeled_statement_3(t):
    # PLY (yacc) grammar rule handler. The production itself lives in the
    # function's docstring, which appears to have been stripped here —
    # presumably "labeled_statement : DEFAULT COLON statement" (TODO confirm
    # against the original grammar). No semantic action is required.
    pass
[ "def", "p_labeled_statement_3", "(", "t", ")", ":", "pass" ]
labeled_statement : default colon statement .
train
false
31,862
def get_flavor_by_name(name, ctxt=None):
    """Retrieve a single flavor by *name*.

    A None name yields the default flavor; a None context is replaced by
    an admin context.
    """
    if name is None:
        return get_default_flavor()
    effective_ctxt = ctxt if ctxt is not None else context.get_admin_context()
    return objects.Flavor.get_by_name(effective_ctxt, name)
[ "def", "get_flavor_by_name", "(", "name", ",", "ctxt", "=", "None", ")", ":", "if", "(", "name", "is", "None", ")", ":", "return", "get_default_flavor", "(", ")", "if", "(", "ctxt", "is", "None", ")", ":", "ctxt", "=", "context", ".", "get_admin_contex...
retrieves single flavor by name .
train
false
31,863
def _arg_ulen1(dvi, delta): return dvi._arg((delta + 1), False)
[ "def", "_arg_ulen1", "(", "dvi", ",", "delta", ")", ":", "return", "dvi", ".", "_arg", "(", "(", "delta", "+", "1", ")", ",", "False", ")" ]
unsigned length *delta*+1 read *delta*+1 bytes .
train
false
31,864
def get_path(*args):
    """Build an events API path, appending any *args* as path segments."""
    base = '/api/v1/events'
    if not args:
        return base
    segments = '/'.join(str(a) for a in args)
    return base + '/' + segments
[ "def", "get_path", "(", "*", "args", ")", ":", "url", "=", "'/api/v1/events'", "if", "args", ":", "url", "+=", "(", "'/'", "+", "'/'", ".", "join", "(", "map", "(", "str", ",", "args", ")", ")", ")", "return", "url" ]
return a path corresponding to the scheme .
train
false
31,865
def _extract_inventory_in_use(body):
    """Return the first capture group of the inventory-in-use regex found in
    *body*, or None when the pattern does not match."""
    found = _RE_INV_IN_USE.search(body)
    return found.group(1) if found else None
[ "def", "_extract_inventory_in_use", "(", "body", ")", ":", "match", "=", "_RE_INV_IN_USE", ".", "search", "(", "body", ")", "if", "match", ":", "return", "match", ".", "group", "(", "1", ")", "return", "None" ]
given an http response body , return the inventory-in-use value captured by the regex , or none when there is no match .
train
false
31,866
def random_code_generator():
    """Generate a random alphanumeric code whose length comes from the
    REGISTRATION_CODE_LENGTH setting (default 8)."""
    length = getattr(settings, 'REGISTRATION_CODE_LENGTH', 8)
    return generate_random_string(length)
[ "def", "random_code_generator", "(", ")", ":", "code_length", "=", "getattr", "(", "settings", ",", "'REGISTRATION_CODE_LENGTH'", ",", "8", ")", "return", "generate_random_string", "(", "code_length", ")" ]
generate a random alphanumeric code of length defined in registration_code_length settings .
train
false
31,867
def deactivate():
    """Uninstall the currently active translation object, if any, so later
    lookups fall back to the default translation."""
    try:
        del _active.value
    except AttributeError:
        # Nothing was active; that's fine.
        pass
[ "def", "deactivate", "(", ")", ":", "if", "hasattr", "(", "_active", ",", "'value'", ")", ":", "del", "_active", ".", "value" ]
deinstalls the currently active translation object so that further _ calls will resolve against the default translation object .
train
false
31,868
def randslice_from_shape(ndim, shape):
    """Create two random slice tuples for an array of the given *shape* such
    that indexing with either tuple yields results of the same shape.

    For each dimension a common slice length is drawn, then an independent
    random slice of that length is generated for the left and the right
    tuple via randslice_from_slicelen.
    """
    lslices = ([0] * ndim)
    rslices = ([0] * ndim)
    for n in range(ndim):
        l = shape[n]
        # A zero-length dimension can only take an empty slice.
        slicelen = (randrange(1, (l + 1)) if (l > 0) else 0)
        lslices[n] = randslice_from_slicelen(slicelen, l)
        rslices[n] = randslice_from_slicelen(slicelen, l)
    return (tuple(lslices), tuple(rslices))
[ "def", "randslice_from_shape", "(", "ndim", ",", "shape", ")", ":", "lslices", "=", "(", "[", "0", "]", "*", "ndim", ")", "rslices", "=", "(", "[", "0", "]", "*", "ndim", ")", "for", "n", "in", "range", "(", "ndim", ")", ":", "l", "=", "shape",...
create two sets of slices for an array x with shape shape such that shapeof == shapeof .
train
false
31,869
def auth_after_register(bot):
    """Perform services authentication right after the bot registers with the
    IRC server, dispatching on the configured auth_method
    (nickserv / authserv / Q)."""
    if (bot.config.core.auth_method == u'nickserv'):
        # Target defaults to NickServ unless explicitly overridden.
        nickserv_name = (bot.config.core.auth_target or u'NickServ')
        bot.msg(nickserv_name, (u'IDENTIFY %s' % bot.config.core.auth_password))
    elif (bot.config.core.auth_method == u'authserv'):
        account = bot.config.core.auth_username
        password = bot.config.core.auth_password
        bot.write((u'AUTHSERV auth', ((account + u' ') + password)))
    elif (bot.config.core.auth_method == u'Q'):
        # Q (as used on QuakeNet) takes a raw AUTH command.
        account = bot.config.core.auth_username
        password = bot.config.core.auth_password
        bot.write((u'AUTH', ((account + u' ') + password)))
[ "def", "auth_after_register", "(", "bot", ")", ":", "if", "(", "bot", ".", "config", ".", "core", ".", "auth_method", "==", "u'nickserv'", ")", ":", "nickserv_name", "=", "(", "bot", ".", "config", ".", "core", ".", "auth_target", "or", "u'NickServ'", ")...
do nickserv/authserv auth .
train
false
31,870
def set_microsite(domain):
    """Decorator factory: run the wrapped view with the microsite for
    *domain* activated."""
    def decorator(func):
        @wraps(func)
        def inner(request, *args, **kwargs):
            # Activate the microsite before delegating to the view.
            microsite.set_by_domain(domain)
            return func(request, *args, **kwargs)
        return inner
    return decorator
[ "def", "set_microsite", "(", "domain", ")", ":", "def", "decorator", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "inner", "(", "request", ",", "*", "args", ",", "**", "kwargs", ")", ":", "microsite", ".", "set_by_domain", "(", "doma...
returns a decorator that can be used on a test_case to set a specific microsite for the current test case .
train
false
31,872
def split_fasta(infile, seqs_per_file, outfile_prefix, working_dir=''):
    """Split *infile* (FASTA lines or an open file) into multiple FASTA files.

    Each output file holds at most *seqs_per_file* sequences and is named
    ``<working_dir><outfile_prefix>.<n>.fasta``.  Returns the list of output
    file paths.  Raises ValueError when seqs_per_file <= 0.
    """
    if (seqs_per_file <= 0):
        raise ValueError('seqs_per_file must be > 0!')
    seq_counter = 0
    out_files = []
    # Normalize the working dir to end with a slash before creating it.
    # NOTE(review): flattened source is ambiguous about whether create_dir
    # sits inside this if; placed outside so the dir is created even when
    # working_dir already ends with '/' — confirm against upstream.
    if (working_dir and (not working_dir.endswith('/'))):
        working_dir += '/'
    create_dir(working_dir)
    for (seq_id, seq) in parse_fasta(infile):
        # Counter at zero means it's time to open the next output file.
        if (seq_counter == 0):
            current_out_fp = ('%s%s.%d.fasta' % (working_dir, outfile_prefix, len(out_files)))
            current_out_file = open(current_out_fp, 'w')
            out_files.append(current_out_fp)
        current_out_file.write(('>%s\n%s\n' % (seq_id, seq)))
        seq_counter += 1
        if (seq_counter == seqs_per_file):
            current_out_file.close()
            seq_counter = 0
    # Close a trailing, partially-filled output file.
    if (not current_out_file.closed):
        current_out_file.close()
    return out_files
[ "def", "split_fasta", "(", "infile", ",", "seqs_per_file", ",", "outfile_prefix", ",", "working_dir", "=", "''", ")", ":", "if", "(", "seqs_per_file", "<=", "0", ")", ":", "raise", "ValueError", "(", "'seqs_per_file must be > 0!'", ")", "seq_counter", "=", "0"...
split infile into files with seqs_per_file sequences in each infile: list of fasta lines or open file object seqs_per_file: the number of sequences to include in each file out_fileprefix: string used to create output filepath - output filepaths are <out_prefix> .
train
false
31,874
def taskqueue_method(handler):
    """Decorator marking *handler* as a Taskqueue-only endpoint.

    In production the request must carry the X-AppEngine-TaskName header
    (set by App Engine's task queue) or come from an admin user; otherwise
    a 403 is returned.  Outside production the handler runs unrestricted,
    which allows invoking the URL directly on localhost.
    """
    def check_if_taskqueue(self, *args, **kwargs):
        """Reject non-Taskqueue callers in production; otherwise delegate."""
        if ((self.request.headers.get('X-AppEngine-TaskName') is None) and (config.get('environment') == 'production') and (not users.is_current_user_admin())):
            return self.error(403)
        else:
            return handler(self, *args, **kwargs)
    return check_if_taskqueue
[ "def", "taskqueue_method", "(", "handler", ")", ":", "def", "check_if_taskqueue", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", ":", "if", "(", "(", "self", ".", "request", ".", "headers", ".", "get", "(", "'X-AppEngine-TaskName'", ")", "is", ...
decorator to indicate that this is a taskqueue method and applies request .
train
false
31,876
@webob.dec.wsgify
@microversion.version_handler(1.2)
@util.require_content('application/json')
def create_resource_class(req):
    """POST handler creating a resource class (placement API >= 1.2).

    Validates the JSON body against the v1.2 schema, persists the new
    resource class, and returns 201 with a Location header; a name clash
    yields 409 Conflict.
    """
    context = req.environ['placement.context']
    data = util.extract_json(req.body, POST_RC_SCHEMA_V1_2)
    try:
        rc = objects.ResourceClass(context, name=data['name'])
        rc.create()
    except exception.ResourceClassExists:
        raise webob.exc.HTTPConflict((_('Conflicting resource class already exists: %(name)s') % {'name': data['name']}), json_formatter=util.json_error_formatter)
    req.response.location = util.resource_class_url(req.environ, rc)
    # 201 Created with an empty body, per the API contract.
    req.response.status = 201
    req.response.content_type = None
    return req.response
[ "@", "webob", ".", "dec", ".", "wsgify", "@", "microversion", ".", "version_handler", "(", "1.2", ")", "@", "util", ".", "require_content", "(", "'application/json'", ")", "def", "create_resource_class", "(", "req", ")", ":", "context", "=", "req", ".", "e...
post to create a resource class .
train
false
31,878
def test_image_shape():
    """Deconvolution output must keep the input image's shape (and stay
    reasonably close to the original image)."""
    # Build a 5x5 Gaussian PSF from a unit impulse.
    point = np.zeros((5, 5), np.float)
    point[(2, 2)] = 1.0
    psf = ndi.gaussian_filter(point, sigma=1.0)
    image = skimage.img_as_float(camera()[110:155, 225:270])
    image_conv = ndi.convolve(image, psf)
    deconv_sup = restoration.wiener(image_conv, psf, 1)
    deconv_un = restoration.unsupervised_wiener(image_conv, psf)[0]
    np.testing.assert_equal(image.shape, deconv_sup.shape)
    np.testing.assert_equal(image.shape, deconv_un.shape)
    # A median relative error below 10% guards against gross distortion.
    sup_relative_error = (np.abs((deconv_sup - image)) / image)
    un_relative_error = (np.abs((deconv_un - image)) / image)
    np.testing.assert_array_less(np.median(sup_relative_error), 0.1)
    np.testing.assert_array_less(np.median(un_relative_error), 0.1)
[ "def", "test_image_shape", "(", ")", ":", "point", "=", "np", ".", "zeros", "(", "(", "5", ",", "5", ")", ",", "np", ".", "float", ")", "point", "[", "(", "2", ",", "2", ")", "]", "=", "1.0", "psf", "=", "ndi", ".", "gaussian_filter", "(", "p...
test that shape of output image in deconvolution is same as input .
train
false
31,879
@register.filter
def profile_fields(user):
    """Template filter returning the given user's profile fields as a list
    of (verbose name, value) pairs.

    Skips the id, the user link field, and any field named in
    ACCOUNTS_PROFILE_FORM_EXCLUDE_FIELDS; returns an empty list when no
    profile model is configured.
    """
    fields = OrderedDict()
    try:
        profile = get_profile_for_user(user)
        user_fieldname = get_profile_user_fieldname()
        exclude = tuple(settings.ACCOUNTS_PROFILE_FORM_EXCLUDE_FIELDS)
        for field in profile._meta.fields:
            if (field.name not in ((u'id', user_fieldname) + exclude)):
                value = getattr(profile, field.name)
                fields[field.verbose_name.title()] = value
    except ProfileNotConfigured:
        # No profile model installed: expose no fields.
        pass
    return list(fields.items())
[ "@", "register", ".", "filter", "def", "profile_fields", "(", "user", ")", ":", "fields", "=", "OrderedDict", "(", ")", "try", ":", "profile", "=", "get_profile_for_user", "(", "user", ")", "user_fieldname", "=", "get_profile_user_fieldname", "(", ")", "exclud...
returns profile fields as a dict for the given user .
train
true
31,880
def random_tournament(n):
    """Return a uniformly random tournament digraph on *n* nodes.

    Each unordered pair (u, v) with u < v receives exactly one arc, its
    direction decided by an independent fair coin flip.
    """
    flips = (random.random() for _ in range(n * (n - 1) // 2))
    pairs = combinations(range(n), 2)
    arcs = ((u, v) if flip < 0.5 else (v, u)
            for (u, v), flip in zip(pairs, flips))
    return nx.DiGraph(arcs)
[ "def", "random_tournament", "(", "n", ")", ":", "coins", "=", "(", "random", ".", "random", "(", ")", "for", "i", "in", "range", "(", "(", "(", "n", "*", "(", "n", "-", "1", ")", ")", "//", "2", ")", ")", ")", "pairs", "=", "combinations", "(...
returns a random tournament graph on n nodes .
train
false
31,881
@receiver(COURSE_GRADE_CHANGED)
def listen_for_grade_calculation(sender, user, course_grade, course_key, deadline, **kwargs):
    """COURSE_GRADE_CHANGED handler updating the credit 'grade' requirement
    status for the user.

    Satisfied when the grade meets the requirement's min_grade before the
    deadline; failed when the deadline has passed or the final grade falls
    short.  Does nothing for non-credit courses or courses without a grade
    requirement.
    """
    from openedx.core.djangoapps.credit import api
    course_id = CourseKey.from_string(unicode(course_key))
    is_credit = api.is_credit_course(course_id)
    if is_credit:
        requirements = api.get_credit_requirements(course_id, namespace='grade')
        if requirements:
            criteria = requirements[0].get('criteria')
            if criteria:
                min_grade = criteria.get('min_grade')
                passing_grade = (course_grade.percent >= min_grade)
                now = timezone.now()
                status = None
                reason = None
                if ((deadline and (now < deadline)) or (not deadline)):
                    # Before the deadline (or no deadline): only a passing
                    # grade changes the status.
                    if passing_grade:
                        status = 'satisfied'
                        reason = {'final_grade': course_grade.percent}
                elif passing_grade:
                    # Passing, but the deadline has already expired.
                    status = 'failed'
                    reason = {'current_date': now, 'deadline': deadline}
                else:
                    status = 'failed'
                    reason = {'final_grade': course_grade.percent, 'minimum_grade': min_grade}
                if (status and reason):
                    api.set_credit_requirement_status(user, course_id, 'grade', 'grade', status=status, reason=reason)
[ "@", "receiver", "(", "COURSE_GRADE_CHANGED", ")", "def", "listen_for_grade_calculation", "(", "sender", ",", "user", ",", "course_grade", ",", "course_key", ",", "deadline", ",", "**", "kwargs", ")", ":", "from", "openedx", ".", "core", ".", "djangoapps", "."...
receive min_grade_requirement_status signal and update minimum grade requirement status .
train
false
31,882
def test_find_number_5():
    """find_number should locate a decimal number with a leading minus sign."""
    s = 'over the laz-91.2y dog'
    r = find_number(s)
    # The returned (start, end) span must cover exactly '-91.2'.
    assert (s[r[0]:r[1]] == '-91.2')
[ "def", "test_find_number_5", "(", ")", ":", "s", "=", "'over the laz-91.2y dog'", "r", "=", "find_number", "(", "s", ")", "assert", "(", "s", "[", "r", "[", "0", "]", ":", "r", "[", "1", "]", "]", "==", "'-91.2'", ")" ]
tests that we find decimal numbers with negative signs .
train
false
31,884
def _create_execution_log_entry(status):
    """Build an execution-log entry stamping *status* with the current UTC time."""
    return {
        'timestamp': date_utils.get_datetime_utc_now(),
        'status': status,
    }
[ "def", "_create_execution_log_entry", "(", "status", ")", ":", "return", "{", "'timestamp'", ":", "date_utils", ".", "get_datetime_utc_now", "(", ")", ",", "'status'", ":", "status", "}" ]
create execution log entry object for the provided execution status .
train
false
31,885
def load_item(context, builder, arrayty, ptr):
    """Load and unpack the item at array pointer *ptr*.

    Unaligned arrays force an explicit alignment of 1; aligned arrays let
    the context pick the natural alignment (align=None).
    """
    align = (None if arrayty.aligned else 1)
    return context.unpack_value(builder, arrayty.dtype, ptr, align=align)
[ "def", "load_item", "(", "context", ",", "builder", ",", "arrayty", ",", "ptr", ")", ":", "align", "=", "(", "None", "if", "arrayty", ".", "aligned", "else", "1", ")", "return", "context", ".", "unpack_value", "(", "builder", ",", "arrayty", ".", "dtyp...
load the item at the given array pointer .
train
false
31,886
def test_SAMPHubProxy():
    """Smoke test: SAMPHubProxy can be instantiated without arguments."""
    SAMPHubProxy()
[ "def", "test_SAMPHubProxy", "(", ")", ":", "SAMPHubProxy", "(", ")" ]
test that samphubproxy can be instantiated .
train
false
31,887
def oauth_required(decorated_function=None, scopes=None, **decorator_kwargs):
    """Decorator requiring OAuth2 credentials for a Django view.

    Usable bare (@oauth_required) or with arguments
    (@oauth_required(scopes=[...], return_url=...)).  Redirects anonymous
    users to login when a storage model is configured, redirects to the
    OAuth2 authorize URL when credentials are missing, and otherwise
    attaches the UserOAuth2 helper to the request before calling the view.
    """
    def curry_wrapper(wrapped_function):
        @wraps(wrapped_function)
        def required_wrapper(request, *args, **kwargs):
            # With the default storage model the user must be logged in first.
            if (not ((django_util.oauth2_settings.storage_model is None) or request.user.is_authenticated())):
                redirect_str = '{0}?next={1}'.format(django.conf.settings.LOGIN_URL, parse.quote(request.path))
                return shortcuts.redirect(redirect_str)
            # After the OAuth dance, come back to the current page by default.
            return_url = decorator_kwargs.pop('return_url', request.get_full_path())
            user_oauth = django_util.UserOAuth2(request, scopes, return_url)
            if (not user_oauth.has_credentials()):
                return shortcuts.redirect(user_oauth.get_authorize_redirect())
            setattr(request, django_util.oauth2_settings.request_prefix, user_oauth)
            return wrapped_function(request, *args, **kwargs)
        return required_wrapper
    # Support both @oauth_required and @oauth_required(...) usage.
    if decorated_function:
        return curry_wrapper(decorated_function)
    else:
        return curry_wrapper
[ "def", "oauth_required", "(", "decorated_function", "=", "None", ",", "scopes", "=", "None", ",", "**", "decorator_kwargs", ")", ":", "def", "curry_wrapper", "(", "wrapped_function", ")", ":", "@", "wraps", "(", "wrapped_function", ")", "def", "required_wrapper"...
decorator to require oauth2 credentials for a view .
train
true
31,888
def parse_prefs_file(prefs_string):
    """Evaluate *prefs_string* and return the prefs dict it describes.

    Raises QiimeParseError for any malformed input.  Previously only
    TypeError was trapped, so a syntactically invalid prefs file leaked a
    raw SyntaxError/NameError to the caller instead of the documented
    QiimeParseError.

    SECURITY NOTE: this uses eval() on the file contents, so a prefs file
    can execute arbitrary code — only load prefs files from trusted
    sources (ast.literal_eval would be the safe alternative, but would
    reject expressions some existing prefs files may rely on).
    """
    try:
        prefs = dict(eval(prefs_string))
    except (TypeError, ValueError, SyntaxError, NameError):
        raise QiimeParseError('Invalid prefs file. Prefs file must contain a valid prefs dictionary.')
    return prefs
[ "def", "parse_prefs_file", "(", "prefs_string", ")", ":", "try", ":", "prefs", "=", "dict", "(", "eval", "(", "prefs_string", ")", ")", "except", "TypeError", ":", "raise", "QiimeParseError", "(", "'Invalid prefs file. Prefs file must contain a valid prefs dictionary.'"...
returns prefs dict evaluated from prefs_string .
train
false
31,889
def worker_recover(name, workers=None, profile='default'):
    """Recover the given *workers* in the modjk load balancer *name*.

    A None workers argument means an empty worker list.
    """
    if workers is None:
        workers = []
    action = 'modjk.bulk_recover'
    return _bulk_state(action, name, workers, profile)
[ "def", "worker_recover", "(", "name", ",", "workers", "=", "None", ",", "profile", "=", "'default'", ")", ":", "if", "(", "workers", "is", "None", ")", ":", "workers", "=", "[", "]", "return", "_bulk_state", "(", "'modjk.bulk_recover'", ",", "name", ",",...
recover all the workers in the modjk load balancer .
train
true
31,890
def overrides_an_abstract_method(class_node, name):
    """Return True when *name* on *class_node* overrides an abstract method
    defined in one of its ancestor classes."""
    return any(
        name in ancestor
        and isinstance(ancestor[name], astroid.Function)
        and ancestor[name].is_abstract(pass_is_abstract=False)
        for ancestor in class_node.ancestors())
[ "def", "overrides_an_abstract_method", "(", "class_node", ",", "name", ")", ":", "for", "ancestor", "in", "class_node", ".", "ancestors", "(", ")", ":", "if", "(", "(", "name", "in", "ancestor", ")", "and", "isinstance", "(", "ancestor", "[", "name", "]", ...
return true if the named method overrides an abstract method defined in an ancestor of class_node .
train
false
31,892
def _make_module_names(package_dir, paths): package_dir = os.path.abspath(package_dir) package_name = os.path.split(package_dir)[1] prefix_length = len(package_dir) module_names = [] for path in paths: path = os.path.abspath(path) rel_path = path[prefix_length:] rel_path = os.path.splitext(rel_path)[0] parts = [] while True: (rel_path, tail) = os.path.split(rel_path) if (not tail): break parts.insert(0, tail) parts.insert(0, package_name) module = '.'.join(parts) module_names.append(module) return module_names
[ "def", "_make_module_names", "(", "package_dir", ",", "paths", ")", ":", "package_dir", "=", "os", ".", "path", ".", "abspath", "(", "package_dir", ")", "package_name", "=", "os", ".", "path", ".", "split", "(", "package_dir", ")", "[", "1", "]", "prefix...
return a list of fully-qualified module names given a list of module paths .
train
false
31,893
def _check_completions(model, expected):
    """Assert that completion *model* holds exactly the categories and items
    in *expected* (a dict of category name -> list of (name, desc, misc)
    triples), ignoring item order within each category."""
    assert (model.rowCount() == len(expected))
    for i in range(0, model.rowCount()):
        actual_cat = model.item(i)
        catname = actual_cat.text()
        assert (catname in expected)
        expected_cat = expected[catname]
        assert (actual_cat.rowCount() == len(expected_cat))
        for j in range(0, actual_cat.rowCount()):
            # Columns: 0 = name, 1 = description, 2 = misc.
            name = actual_cat.child(j, 0)
            desc = actual_cat.child(j, 1)
            misc = actual_cat.child(j, 2)
            actual_item = (name.text(), desc.text(), misc.text())
            assert (actual_item in expected_cat)
[ "def", "_check_completions", "(", "model", ",", "expected", ")", ":", "assert", "(", "model", ".", "rowCount", "(", ")", "==", "len", "(", "expected", ")", ")", "for", "i", "in", "range", "(", "0", ",", "model", ".", "rowCount", "(", ")", ")", ":",...
check that a model contains the expected items in any order .
train
false
31,894
def tree_selection(tree_item, items):
    """Return the entries of *items* whose corresponding child of *tree_item*
    is selected.

    Only the first min(childCount, len(items)) children are considered.
    """
    count = min(tree_item.childCount(), len(items))
    return [items[idx]
            for idx in range(count)
            if tree_item.child(idx).isSelected()]
[ "def", "tree_selection", "(", "tree_item", ",", "items", ")", ":", "selected", "=", "[", "]", "count", "=", "min", "(", "tree_item", ".", "childCount", "(", ")", ",", "len", "(", "items", ")", ")", "for", "idx", "in", "range", "(", "count", ")", ":...
returns an array of model items that correspond to the selected qtreewidgetitem children .
train
false
31,896
@receiver(post_delete, sender=UserPreference)
def post_delete_callback(sender, **kwargs):
    """Emit a settings-changed event when a UserPreference row is deleted
    (the new value is reported as None)."""
    user_preference = kwargs['instance']
    emit_setting_changed_event(user_preference.user, sender._meta.db_table, user_preference.key, user_preference.value, None)
[ "@", "receiver", "(", "post_delete", ",", "sender", "=", "UserPreference", ")", "def", "post_delete_callback", "(", "sender", ",", "**", "kwargs", ")", ":", "user_preference", "=", "kwargs", "[", "'instance'", "]", "emit_setting_changed_event", "(", "user_preferen...
emit a settings-changed event when a user preference is deleted .
train
false
31,897
def do_credentials(cs, args):
    """Print the user credentials and token returned from authentication."""
    catalog = cs.client.service_catalog.catalog
    utils.print_dict(catalog['access']['user'], 'User Credentials')
    utils.print_dict(catalog['access']['token'], 'Token')
[ "def", "do_credentials", "(", "cs", ",", "args", ")", ":", "catalog", "=", "cs", ".", "client", ".", "service_catalog", ".", "catalog", "utils", ".", "print_dict", "(", "catalog", "[", "'access'", "]", "[", "'user'", "]", ",", "'User Credentials'", ")", ...
show user credentials returned from auth .
train
false
31,898
def replication_safe(f):
    """Mark the wrapped view as safe to serve from a read replica by setting
    ``request.replication_safe`` before invoking it."""
    from functools import wraps

    @wraps(f)
    def wrapper(request, *args, **kwargs):
        request.replication_safe = True
        return f(request, *args, **kwargs)
    return wrapper
[ "def", "replication_safe", "(", "f", ")", ":", "from", "functools", "import", "wraps", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "request", ",", "*", "args", ",", "**", "kwargs", ")", ":", "request", ".", "replication_safe", "=", "True", "res...
usually views which do a post will require the next page to be read from the master database .
train
false
31,899
def merge_boundary(graph, src, dst):
    """Callback invoked before merging nodes *src* and *dst* of *graph*.

    Deliberately a no-op placeholder — presumably supplied as the pre-merge
    hook where callers need no bookkeeping (TODO confirm against callers).
    """
    pass
[ "def", "merge_boundary", "(", "graph", ",", "src", ",", "dst", ")", ":", "pass" ]
call back called before merging 2 nodes .
train
false
31,900
def get_review_by_repository_id_changeset_revision_user_id(app, repository_id, changeset_revision, user_id):
    """Fetch the RepositoryReview for the (repository, changeset revision,
    user) triple, or None when no such review exists.

    repository_id and user_id arrive security-encoded and are decoded here.
    """
    sa_session = app.model.context.current
    return sa_session.query(app.model.RepositoryReview).filter(and_((app.model.RepositoryReview.repository_id == app.security.decode_id(repository_id)), (app.model.RepositoryReview.changeset_revision == changeset_revision), (app.model.RepositoryReview.user_id == app.security.decode_id(user_id)))).first()
[ "def", "get_review_by_repository_id_changeset_revision_user_id", "(", "app", ",", "repository_id", ",", "changeset_revision", ",", "user_id", ")", ":", "sa_session", "=", "app", ".", "model", ".", "context", ".", "current", "return", "sa_session", ".", "query", "(",...
get a repository_review from the database via repository id .
train
false
31,901
def resolve_property(style_map, elem, name):
    """Resolve property *name* for *elem* using *style_map*.

    Inheritable properties walk up the ancestor chain until a value is
    found; non-inheritable ones consult only the element itself.  Falls
    back to the defaults table when nothing matches.
    """
    inheritable = name in INHERITED
    node = elem
    while node is not None:
        styles = style_map.get(node)
        if styles is not None:
            value = styles.get(name)
            if value is not None:
                return value
        # Non-inheritable properties never consult ancestors.
        node = node.getparent() if inheritable else None
    return defvals().get(name)
[ "def", "resolve_property", "(", "style_map", ",", "elem", ",", "name", ")", ":", "inheritable", "=", "(", "name", "in", "INHERITED", ")", "q", "=", "elem", "while", "(", "q", "is", "not", "None", ")", ":", "s", "=", "style_map", ".", "get", "(", "q...
given a style_map previously generated by :func:resolve_styles() and a property name .
train
false
31,902
def get_repository_in_tool_shed(app, id):
    """Fetch a tool-shed repository by its security-encoded *id*."""
    decoded_id = app.security.decode_id(id)
    return get_repository_query(app).get(decoded_id)
[ "def", "get_repository_in_tool_shed", "(", "app", ",", "id", ")", ":", "return", "get_repository_query", "(", "app", ")", ".", "get", "(", "app", ".", "security", ".", "decode_id", "(", "id", ")", ")" ]
get a repository on the tool shed side from the database via id .
train
false
31,904
def rot_axis3(theta):
    """Return the 3x3 rotation matrix for a rotation of *theta* radians
    about the 3-axis."""
    c = cos(theta)
    s = sin(theta)
    rows = ((c, s, 0),
            (-s, c, 0),
            (0, 0, 1))
    return Matrix(rows)
[ "def", "rot_axis3", "(", "theta", ")", ":", "ct", "=", "cos", "(", "theta", ")", "st", "=", "sin", "(", "theta", ")", "lil", "=", "(", "(", "ct", ",", "st", ",", "0", ")", ",", "(", "(", "-", "st", ")", ",", "ct", ",", "0", ")", ",", "(...
returns a rotation matrix for a rotation of theta about the 3-axis .
train
false
31,905
def _convert_minutes_seconds(timeout, in_seconds=False): return (timeout if in_seconds else (timeout * 60))
[ "def", "_convert_minutes_seconds", "(", "timeout", ",", "in_seconds", "=", "False", ")", ":", "return", "(", "timeout", "if", "in_seconds", "else", "(", "timeout", "*", "60", ")", ")" ]
convert timeout to seconds .
train
false
31,906
def rewrite_exception(data):
    """Run all registered error processors over the exception interface of
    *data* (an event dict), rewriting exception values in place.

    Returns True when at least one processor rewrote a value.  Processor
    failures are logged and skipped rather than propagated.
    """
    exc_data = data.get('sentry.interfaces.Exception')
    if (not exc_data):
        return False
    rv = False
    for exc in exc_data['values']:
        for processor in six.itervalues(error_processors):
            try:
                if processor.try_process(exc):
                    rv = True
                    # First successful processor wins for this exception.
                    break
            except Exception as e:
                logger.error('Failed to run processor "%s": %s', processor.vendor, e, exc_info=True)
    return rv
[ "def", "rewrite_exception", "(", "data", ")", ":", "exc_data", "=", "data", ".", "get", "(", "'sentry.interfaces.Exception'", ")", "if", "(", "not", "exc_data", ")", ":", "return", "False", "rv", "=", "False", "for", "exc", "in", "exc_data", "[", "'values'...
rewrite an exception in an event if needed .
train
false