id_within_dataset (int64): 1 to 55.5k
snippet (string): lengths 19 to 14.2k
tokens (list): lengths 6 to 1.63k
nl (string): lengths 6 to 352
split_within_dataset (string): 1 distinct value
is_duplicated (bool): 2 classes
48,428
def nestedExpr(opener='(', closer=')', content=None, ignoreExpr=quotedString): if (opener == closer): raise ValueError('opening and closing strings cannot be the same') if (content is None): if (isinstance(opener, __BASE_STRING__) and isinstance(closer, __BASE_STRING__)): content = (empty + CharsNotIn(((opener + closer) + ParserElement.DEFAULT_WHITE_CHARS)).setParseAction((lambda t: t[0].strip()))) else: raise ValueError('opening and closing arguments must be strings if no content expression is given') ret = Forward() if (ignoreExpr is not None): (ret << Group(((Suppress(opener) + ZeroOrMore(((ignoreExpr | ret) | content))) + Suppress(closer)))) else: (ret << Group(((Suppress(opener) + ZeroOrMore((ret | content))) + Suppress(closer)))) return ret
[ "def", "nestedExpr", "(", "opener", "=", "'('", ",", "closer", "=", "')'", ",", "content", "=", "None", ",", "ignoreExpr", "=", "quotedString", ")", ":", "if", "(", "opener", "==", "closer", ")", ":", "raise", "ValueError", "(", "'opening and closing strings cannot be the same'", ")", "if", "(", "content", "is", "None", ")", ":", "if", "(", "isinstance", "(", "opener", ",", "__BASE_STRING__", ")", "and", "isinstance", "(", "closer", ",", "__BASE_STRING__", ")", ")", ":", "content", "=", "(", "empty", "+", "CharsNotIn", "(", "(", "(", "opener", "+", "closer", ")", "+", "ParserElement", ".", "DEFAULT_WHITE_CHARS", ")", ")", ".", "setParseAction", "(", "(", "lambda", "t", ":", "t", "[", "0", "]", ".", "strip", "(", ")", ")", ")", ")", "else", ":", "raise", "ValueError", "(", "'opening and closing arguments must be strings if no content expression is given'", ")", "ret", "=", "Forward", "(", ")", "if", "(", "ignoreExpr", "is", "not", "None", ")", ":", "(", "ret", "<<", "Group", "(", "(", "(", "Suppress", "(", "opener", ")", "+", "ZeroOrMore", "(", "(", "(", "ignoreExpr", "|", "ret", ")", "|", "content", ")", ")", ")", "+", "Suppress", "(", "closer", ")", ")", ")", ")", "else", ":", "(", "ret", "<<", "Group", "(", "(", "(", "Suppress", "(", "opener", ")", "+", "ZeroOrMore", "(", "(", "ret", "|", "content", ")", ")", ")", "+", "Suppress", "(", "closer", ")", ")", ")", ")", "return", "ret" ]
helper method for defining nested lists enclosed in opening and closing delimiters ("(" and ")" are the default) .
train
true
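This snippet is pyparsing's nestedExpr helper; a minimal usage sketch, assuming a pyparsing install that exports nestedExpr at the top level:

from pyparsing import nestedExpr

parser = nestedExpr()                       # defaults: opener='(', closer=')'
result = parser.parseString('(a (b c) d)')
print(result.asList())                      # [['a', ['b', 'c'], 'd']]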
48,429
def contains_extractor(document): tokens = _get_document_tokens(document) features = dict(((u'contains({0})'.format(w), True) for w in tokens)) return features
[ "def", "contains_extractor", "(", "document", ")", ":", "tokens", "=", "_get_document_tokens", "(", "document", ")", "features", "=", "dict", "(", "(", "(", "u'contains({0})'", ".", "format", "(", "w", ")", ",", "True", ")", "for", "w", "in", "tokens", ")", ")", "return", "features" ]
a basic document feature extractor that returns a dict of words that the document contains .
train
true
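The extractor maps each token to a 'contains(...)' feature set to True; a rough sketch of the behaviour, with str.split standing in for the real _get_document_tokens helper (an assumption):

def contains_extractor_demo(document):
    tokens = document.split()               # stand-in tokenizer
    return {u'contains({0})'.format(w): True for w in tokens}

print(contains_extractor_demo('the cat sat'))
# {'contains(the)': True, 'contains(cat)': True, 'contains(sat)': True}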
48,431
def ctps(data, is_raw=True): if (not (data.ndim == 3)): raise ValueError(('Data must have 3 dimensions, not %i.' % data.ndim)) if is_raw: phase_angles = _compute_normalized_phase(data) else: phase_angles = data ks_dynamics = np.zeros_like(phase_angles[0]) pk_dynamics = np.zeros_like(phase_angles[0]) for (ii, source) in enumerate(np.transpose(phase_angles, [1, 0, 2])): (ks, pk) = kuiper(source) pk_dynamics[ii, :] = pk ks_dynamics[ii, :] = ks return (ks_dynamics, pk_dynamics, (phase_angles if is_raw else None))
[ "def", "ctps", "(", "data", ",", "is_raw", "=", "True", ")", ":", "if", "(", "not", "(", "data", ".", "ndim", "==", "3", ")", ")", ":", "raise", "ValueError", "(", "(", "'Data must have 3 dimensions, not %i.'", "%", "data", ".", "ndim", ")", ")", "if", "is_raw", ":", "phase_angles", "=", "_compute_normalized_phase", "(", "data", ")", "else", ":", "phase_angles", "=", "data", "ks_dynamics", "=", "np", ".", "zeros_like", "(", "phase_angles", "[", "0", "]", ")", "pk_dynamics", "=", "np", ".", "zeros_like", "(", "phase_angles", "[", "0", "]", ")", "for", "(", "ii", ",", "source", ")", "in", "enumerate", "(", "np", ".", "transpose", "(", "phase_angles", ",", "[", "1", ",", "0", ",", "2", "]", ")", ")", ":", "(", "ks", ",", "pk", ")", "=", "kuiper", "(", "source", ")", "pk_dynamics", "[", "ii", ",", ":", "]", "=", "pk", "ks_dynamics", "[", "ii", ",", ":", "]", "=", "ks", "return", "(", "ks_dynamics", ",", "pk_dynamics", ",", "(", "phase_angles", "if", "is_raw", "else", "None", ")", ")" ]
compute cross-trial-phase-statistics [1] .
train
false
48,432
def getScripts(projname, basedir=''): scriptdir = os.path.join(basedir, 'bin', projname) if (not os.path.isdir(scriptdir)): scriptdir = os.path.join(basedir, 'bin') if (not os.path.isdir(scriptdir)): return [] thingies = os.listdir(scriptdir) if ('.svn' in thingies): thingies.remove('.svn') return filter(os.path.isfile, [os.path.join(scriptdir, x) for x in thingies])
[ "def", "getScripts", "(", "projname", ",", "basedir", "=", "''", ")", ":", "scriptdir", "=", "os", ".", "path", ".", "join", "(", "basedir", ",", "'bin'", ",", "projname", ")", "if", "(", "not", "os", ".", "path", ".", "isdir", "(", "scriptdir", ")", ")", ":", "scriptdir", "=", "os", ".", "path", ".", "join", "(", "basedir", ",", "'bin'", ")", "if", "(", "not", "os", ".", "path", ".", "isdir", "(", "scriptdir", ")", ")", ":", "return", "[", "]", "thingies", "=", "os", ".", "listdir", "(", "scriptdir", ")", "if", "(", "'.svn'", "in", "thingies", ")", ":", "thingies", ".", "remove", "(", "'.svn'", ")", "return", "filter", "(", "os", ".", "path", ".", "isfile", ",", "[", "os", ".", "path", ".", "join", "(", "scriptdir", ",", "x", ")", "for", "x", "in", "thingies", "]", ")" ]
returns a list of scripts for a twisted subproject; this works in an svn checkout .
train
false
48,434
def _analyze_gens(gens): if ((len(gens) == 1) and hasattr(gens[0], '__iter__')): return tuple(gens[0]) else: return tuple(gens)
[ "def", "_analyze_gens", "(", "gens", ")", ":", "if", "(", "(", "len", "(", "gens", ")", "==", "1", ")", "and", "hasattr", "(", "gens", "[", "0", "]", ",", "'__iter__'", ")", ")", ":", "return", "tuple", "(", "gens", "[", "0", "]", ")", "else", ":", "return", "tuple", "(", "gens", ")" ]
support for passing generators as *gens and [gens] .
train
false
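The helper normalizes the two calling conventions named in the description; a quick self-contained check:

def _analyze_gens(gens):
    if (len(gens) == 1) and hasattr(gens[0], '__iter__'):
        return tuple(gens[0])
    return tuple(gens)

assert _analyze_gens(('x', 'y')) == ('x', 'y')     # f(*gens) style
assert _analyze_gens((['x', 'y'],)) == ('x', 'y')  # f([gens]) style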
48,435
def from_address(text): try: parts = list(dns.ipv6.inet_aton(text).encode('hex_codec')) origin = ipv6_reverse_domain except: parts = [('%d' % ord(byte)) for byte in dns.ipv4.inet_aton(text)] origin = ipv4_reverse_domain parts.reverse() return dns.name.from_text('.'.join(parts), origin=origin)
[ "def", "from_address", "(", "text", ")", ":", "try", ":", "parts", "=", "list", "(", "dns", ".", "ipv6", ".", "inet_aton", "(", "text", ")", ".", "encode", "(", "'hex_codec'", ")", ")", "origin", "=", "ipv6_reverse_domain", "except", ":", "parts", "=", "[", "(", "'%d'", "%", "ord", "(", "byte", ")", ")", "for", "byte", "in", "dns", ".", "ipv4", ".", "inet_aton", "(", "text", ")", "]", "origin", "=", "ipv4_reverse_domain", "parts", ".", "reverse", "(", ")", "return", "dns", ".", "name", ".", "from_text", "(", "'.'", ".", "join", "(", "parts", ")", ",", "origin", "=", "origin", ")" ]
convert an ipv4 or ipv6 address in textual form into a name object whose value is the reverse-map domain name of the address .
train
true
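This is dnspython's dns.reversename.from_address; typical usage on dnspython 1.x, where this snippet originates:

import dns.reversename

print(dns.reversename.from_address('127.0.0.1'))
# 1.0.0.127.in-addr.arpa.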
48,436
def version_match(required, candidate): if (candidate[0] != required[0]): return False if (candidate < required): return False return True
[ "def", "version_match", "(", "required", ",", "candidate", ")", ":", "if", "(", "candidate", "[", "0", "]", "!=", "required", "[", "0", "]", ")", ":", "return", "False", "if", "(", "candidate", "<", "required", ")", ":", "return", "False", "return", "True" ]
test that an available version satisfies the required version .
train
false
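Major versions must match exactly and the candidate must not be older than the requirement; with version tuples:

def version_match(required, candidate):
    if candidate[0] != required[0]:
        return False
    return candidate >= required

assert version_match((2, 1), (2, 3))      # same major, newer minor
assert not version_match((2, 1), (3, 0))  # major mismatch
assert not version_match((2, 1), (2, 0))  # candidate too old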
48,437
def get_sentences(amount, start_with_lorem=False): sentences = _GENERATOR.generate_sentences(amount, start_with_lorem) return [s[(-1)] for s in sentences]
[ "def", "get_sentences", "(", "amount", ",", "start_with_lorem", "=", "False", ")", ":", "sentences", "=", "_GENERATOR", ".", "generate_sentences", "(", "amount", ",", "start_with_lorem", ")", "return", "[", "s", "[", "(", "-", "1", ")", "]", "for", "s", "in", "sentences", "]" ]
utility function to get specified amount of random sentences .
train
false
48,438
def Verify(*args): for mock in args: mock._Verify()
[ "def", "Verify", "(", "*", "args", ")", ":", "for", "mock", "in", "args", ":", "mock", ".", "_Verify", "(", ")" ]
verify mocks .
train
false
48,440
@non_atomic_requests def search_tools(request, category=None): (APP, TYPE) = (request.APP, amo.ADDON_SEARCH) qs = Category.objects.filter(application=APP.id, type=TYPE) categories = sorted(qs, key=attrgetter('weight', 'name')) (addons, filter) = addon_listing(request, [TYPE], SearchToolsFilter, 'popular') if category: category = get_object_or_404(qs, slug=category) addons = addons.filter(categories__id=category.id) addons = amo.utils.paginate(request, addons) base = Addon.objects.listed(request.APP, amo.STATUS_PUBLIC).filter(type=amo.ADDON_EXTENSION) sidebar_ext = SearchExtensionsFilter(request, base, 'sort', 'popular') return render(request, 'browse/search_tools.html', {'categories': categories, 'category': category, 'addons': addons, 'filter': filter, 'search_extensions_filter': sidebar_ext})
[ "@", "non_atomic_requests", "def", "search_tools", "(", "request", ",", "category", "=", "None", ")", ":", "(", "APP", ",", "TYPE", ")", "=", "(", "request", ".", "APP", ",", "amo", ".", "ADDON_SEARCH", ")", "qs", "=", "Category", ".", "objects", ".", "filter", "(", "application", "=", "APP", ".", "id", ",", "type", "=", "TYPE", ")", "categories", "=", "sorted", "(", "qs", ",", "key", "=", "attrgetter", "(", "'weight'", ",", "'name'", ")", ")", "(", "addons", ",", "filter", ")", "=", "addon_listing", "(", "request", ",", "[", "TYPE", "]", ",", "SearchToolsFilter", ",", "'popular'", ")", "if", "category", ":", "category", "=", "get_object_or_404", "(", "qs", ",", "slug", "=", "category", ")", "addons", "=", "addons", ".", "filter", "(", "categories__id", "=", "category", ".", "id", ")", "addons", "=", "amo", ".", "utils", ".", "paginate", "(", "request", ",", "addons", ")", "base", "=", "Addon", ".", "objects", ".", "listed", "(", "request", ".", "APP", ",", "amo", ".", "STATUS_PUBLIC", ")", ".", "filter", "(", "type", "=", "amo", ".", "ADDON_EXTENSION", ")", "sidebar_ext", "=", "SearchExtensionsFilter", "(", "request", ",", "base", ",", "'sort'", ",", "'popular'", ")", "return", "render", "(", "request", ",", "'browse/search_tools.html'", ",", "{", "'categories'", ":", "categories", ",", "'category'", ":", "category", ",", "'addons'", ":", "addons", ",", "'filter'", ":", "filter", ",", "'search_extensions_filter'", ":", "sidebar_ext", "}", ")" ]
view the search tools page .
train
false
48,441
def _GetClientLibCallback(args, client_func=GetClientLib): (service_class_names, doc_format, language, output_path, hostname) = (args.service, args.format, args.language, args.output, args.hostname) (discovery_paths, client_paths) = client_func(service_class_names, doc_format, language, output_path, hostname=hostname) for discovery_path in discovery_paths: print ('API discovery document written to %s' % discovery_path) for client_path in client_paths: print ('API client library written to %s' % client_path)
[ "def", "_GetClientLibCallback", "(", "args", ",", "client_func", "=", "GetClientLib", ")", ":", "(", "service_class_names", ",", "doc_format", ",", "language", ",", "output_path", ",", "hostname", ")", "=", "(", "args", ".", "service", ",", "args", ".", "format", ",", "args", ".", "language", ",", "args", ".", "output", ",", "args", ".", "hostname", ")", "(", "discovery_paths", ",", "client_paths", ")", "=", "client_func", "(", "service_class_names", ",", "doc_format", ",", "language", ",", "output_path", ",", "hostname", "=", "hostname", ")", "for", "discovery_path", "in", "discovery_paths", ":", "print", "(", "'API discovery document written to %s'", "%", "discovery_path", ")", "for", "client_path", "in", "client_paths", ":", "print", "(", "'API client library written to %s'", "%", "client_path", ")" ]
generate discovery docs and client libraries to files .
train
false
48,443
def _check_fscale(fscale): if ((not isinstance(fscale, string_types)) or (fscale not in ('log', 'linear'))): raise ValueError(('fscale must be "log" or "linear", got %s' % (fscale,)))
[ "def", "_check_fscale", "(", "fscale", ")", ":", "if", "(", "(", "not", "isinstance", "(", "fscale", ",", "string_types", ")", ")", "or", "(", "fscale", "not", "in", "(", "'log'", ",", "'linear'", ")", ")", ")", ":", "raise", "ValueError", "(", "(", "'fscale must be \"log\" or \"linear\", got %s'", "%", "(", "fscale", ",", ")", ")", ")" ]
check for valid fscale .
train
false
48,444
def _label2idx(params, pos): labels = params['ax'].yaxis.get_ticklabels() offsets = (np.array(params['offsets']) + params['offsets'][0]) line_idx = np.searchsorted(offsets, pos[1]) text = labels[line_idx].get_text() if (len(text) == 0): return (None, None) ch_idx = (params['ch_start'] + line_idx) return (text, ch_idx)
[ "def", "_label2idx", "(", "params", ",", "pos", ")", ":", "labels", "=", "params", "[", "'ax'", "]", ".", "yaxis", ".", "get_ticklabels", "(", ")", "offsets", "=", "(", "np", ".", "array", "(", "params", "[", "'offsets'", "]", ")", "+", "params", "[", "'offsets'", "]", "[", "0", "]", ")", "line_idx", "=", "np", ".", "searchsorted", "(", "offsets", ",", "pos", "[", "1", "]", ")", "text", "=", "labels", "[", "line_idx", "]", ".", "get_text", "(", ")", "if", "(", "len", "(", "text", ")", "==", "0", ")", ":", "return", "(", "None", ",", "None", ")", "ch_idx", "=", "(", "params", "[", "'ch_start'", "]", "+", "line_idx", ")", "return", "(", "text", ",", "ch_idx", ")" ]
handle click on labels .
train
false
48,445
def get_project_list(config): eggs_dir = config.get('eggs_dir', 'eggs') if os.path.exists(eggs_dir): projects = os.listdir(eggs_dir) else: projects = [] try: projects += [x[0] for x in config.cp.items('settings')] except NoSectionError: pass return projects
[ "def", "get_project_list", "(", "config", ")", ":", "eggs_dir", "=", "config", ".", "get", "(", "'eggs_dir'", ",", "'eggs'", ")", "if", "os", ".", "path", ".", "exists", "(", "eggs_dir", ")", ":", "projects", "=", "os", ".", "listdir", "(", "eggs_dir", ")", "else", ":", "projects", "=", "[", "]", "try", ":", "projects", "+=", "[", "x", "[", "0", "]", "for", "x", "in", "config", ".", "cp", ".", "items", "(", "'settings'", ")", "]", "except", "NoSectionError", ":", "pass", "return", "projects" ]
get list of projects by inspecting the eggs dir and the ones defined in the scrapyd .
train
false
48,446
def _find_manifestation_and_node(deployment, dataset_id): manifestations_and_nodes = manifestations_from_deployment(deployment, dataset_id) index = 0 for (index, (manifestation, node)) in enumerate(manifestations_and_nodes): if manifestation.primary: (primary_manifestation, origin_node) = (manifestation, node) break else: if (index == 0): raise DATASET_NOT_FOUND else: raise IndexError('No primary manifestations for dataset: {!r}. See https://clusterhq.atlassian.net/browse/FLOC-1403'.format(dataset_id)) return (primary_manifestation, origin_node)
[ "def", "_find_manifestation_and_node", "(", "deployment", ",", "dataset_id", ")", ":", "manifestations_and_nodes", "=", "manifestations_from_deployment", "(", "deployment", ",", "dataset_id", ")", "index", "=", "0", "for", "(", "index", ",", "(", "manifestation", ",", "node", ")", ")", "in", "enumerate", "(", "manifestations_and_nodes", ")", ":", "if", "manifestation", ".", "primary", ":", "(", "primary_manifestation", ",", "origin_node", ")", "=", "(", "manifestation", ",", "node", ")", "break", "else", ":", "if", "(", "index", "==", "0", ")", ":", "raise", "DATASET_NOT_FOUND", "else", ":", "raise", "IndexError", "(", "'No primary manifestations for dataset: {!r}. See https://clusterhq.atlassian.net/browse/FLOC-1403'", ".", "format", "(", "dataset_id", ")", ")", "return", "(", "primary_manifestation", ",", "origin_node", ")" ]
given the id of a dataset, return its primary manifestation and the node it lives on .
train
false
48,447
def patch_collection_search_document(collection_id, update): doc = search_services.get_document_from_index(collection_id, SEARCH_INDEX_COLLECTIONS) doc.update(update) search_services.add_documents_to_index([doc], SEARCH_INDEX_COLLECTIONS)
[ "def", "patch_collection_search_document", "(", "collection_id", ",", "update", ")", ":", "doc", "=", "search_services", ".", "get_document_from_index", "(", "collection_id", ",", "SEARCH_INDEX_COLLECTIONS", ")", "doc", ".", "update", "(", "update", ")", "search_services", ".", "add_documents_to_index", "(", "[", "doc", "]", ",", "SEARCH_INDEX_COLLECTIONS", ")" ]
patches a collection's current search document .
train
false
48,448
def _build_ezid_metadata(node): doi = settings.EZID_FORMAT.format(namespace=settings.DOI_NAMESPACE, guid=node._id) metadata = {'_target': node.absolute_url, 'datacite': datacite_metadata_for_node(node=node, doi=doi)} return (doi, metadata)
[ "def", "_build_ezid_metadata", "(", "node", ")", ":", "doi", "=", "settings", ".", "EZID_FORMAT", ".", "format", "(", "namespace", "=", "settings", ".", "DOI_NAMESPACE", ",", "guid", "=", "node", ".", "_id", ")", "metadata", "=", "{", "'_target'", ":", "node", ".", "absolute_url", ",", "'datacite'", ":", "datacite_metadata_for_node", "(", "node", "=", "node", ",", "doi", "=", "doi", ")", "}", "return", "(", "doi", ",", "metadata", ")" ]
build metadata for submission to ezid using the datacite profile .
train
false
48,449
def printlist(x, width=70, indent=4): from textwrap import fill blanks = (' ' * indent) print fill(' '.join((str(elt) for elt in sorted(x))), width, initial_indent=blanks, subsequent_indent=blanks)
[ "def", "printlist", "(", "x", ",", "width", "=", "70", ",", "indent", "=", "4", ")", ":", "from", "textwrap", "import", "fill", "blanks", "=", "(", "' '", "*", "indent", ")", "print", "fill", "(", "' '", ".", "join", "(", "(", "str", "(", "elt", ")", "for", "elt", "in", "sorted", "(", "x", ")", ")", ")", ",", "width", ",", "initial_indent", "=", "blanks", ",", "subsequent_indent", "=", "blanks", ")" ]
print the elements of iterable x to stdout .
train
false
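A Python 3 rendering of the same idea (the snippet above uses the Python 2 print statement):

from textwrap import fill

def printlist(x, width=70, indent=4):
    blanks = ' ' * indent
    print(fill(' '.join(str(elt) for elt in sorted(x)), width,
               initial_indent=blanks, subsequent_indent=blanks))

printlist(range(8))  # prints '    0 1 2 3 4 5 6 7'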
48,451
def readlink(path): assert isinstance(path, basestring), path path = os.readlink(path) path = path.split('\x00')[0] if (path.endswith(' (deleted)') and (not path_exists_strict(path))): path = path[:(-10)] return path
[ "def", "readlink", "(", "path", ")", ":", "assert", "isinstance", "(", "path", ",", "basestring", ")", ",", "path", "path", "=", "os", ".", "readlink", "(", "path", ")", "path", "=", "path", ".", "split", "(", "'\\x00'", ")", "[", "0", "]", "if", "(", "path", ".", "endswith", "(", "' (deleted)'", ")", "and", "(", "not", "path_exists_strict", "(", "path", ")", ")", ")", ":", "path", "=", "path", "[", ":", "(", "-", "10", ")", "]", "return", "path" ]
wrapper around os.readlink() that strips a trailing ' (deleted)' marker when the path no longer exists .
train
false
48,452
def django_template_include(file_name, mako_context): dictionary = dict(mako_context) return loader.render_to_string(file_name, dictionary=dictionary)
[ "def", "django_template_include", "(", "file_name", ",", "mako_context", ")", ":", "dictionary", "=", "dict", "(", "mako_context", ")", "return", "loader", ".", "render_to_string", "(", "file_name", ",", "dictionary", "=", "dictionary", ")" ]
this can be used within a mako template to include a django template in the way that a django-style {% include %} does .
train
false
48,453
def get_path_info(environ, charset='utf-8', errors='replace'): path = wsgi_get_bytes(environ.get('PATH_INFO', '')) return to_unicode(path, charset, errors, allow_none_charset=True)
[ "def", "get_path_info", "(", "environ", ",", "charset", "=", "'utf-8'", ",", "errors", "=", "'replace'", ")", ":", "path", "=", "wsgi_get_bytes", "(", "environ", ".", "get", "(", "'PATH_INFO'", ",", "''", ")", ")", "return", "to_unicode", "(", "path", ",", "charset", ",", "errors", ",", "allow_none_charset", "=", "True", ")" ]
returns the path_info from the wsgi environment and properly decodes it .
train
true
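This is werkzeug's get_path_info (the charset/errors signature matches werkzeug 0.x/1.x; later releases changed it). Per PEP 3333 the WSGI PATH_INFO carries raw bytes smuggled through latin-1, which this helper decodes:

from werkzeug.wsgi import get_path_info  # signature as in werkzeug < 2.x

environ = {'PATH_INFO': '/caf\xc3\xa9'}  # utf-8 bytes stored as latin-1 text
print(get_path_info(environ))           # /café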
48,454
def action_start(context, values): return IMPL.action_start(context, values)
[ "def", "action_start", "(", "context", ",", "values", ")", ":", "return", "IMPL", ".", "action_start", "(", "context", ",", "values", ")" ]
start an action for an instance .
train
false
48,455
def update_port(port, name, admin_state_up=True, profile=None): conn = _auth(profile) return conn.update_port(port, name, admin_state_up)
[ "def", "update_port", "(", "port", ",", "name", ",", "admin_state_up", "=", "True", ",", "profile", "=", "None", ")", ":", "conn", "=", "_auth", "(", "profile", ")", "return", "conn", ".", "update_port", "(", "port", ",", "name", ",", "admin_state_up", ")" ]
updates a port .
train
true
48,457
@transaction.non_atomic_requests @ensure_csrf_cookie @require_POST def add_users_to_cohort(request, course_key_string, cohort_id): course_key = SlashSeparatedCourseKey.from_deprecated_string(course_key_string) get_course_with_access(request.user, 'staff', course_key) try: cohort = cohorts.get_cohort_by_id(course_key, cohort_id) except CourseUserGroup.DoesNotExist: raise Http404('Cohort (ID {cohort_id}) not found for {course_key_string}'.format(cohort_id=cohort_id, course_key_string=course_key_string)) users = request.POST.get('users', '') added = [] changed = [] present = [] unknown = [] for username_or_email in split_by_comma_and_whitespace(users): if (not username_or_email): continue try: (user, previous_cohort) = cohorts.add_user_to_cohort(cohort, username_or_email) info = {'username': user.username, 'email': user.email} if previous_cohort: info['previous_cohort'] = previous_cohort changed.append(info) else: added.append(info) except ValueError: present.append(username_or_email) except User.DoesNotExist: unknown.append(username_or_email) return json_http_response({'success': True, 'added': added, 'changed': changed, 'present': present, 'unknown': unknown})
[ "@", "transaction", ".", "non_atomic_requests", "@", "ensure_csrf_cookie", "@", "require_POST", "def", "add_users_to_cohort", "(", "request", ",", "course_key_string", ",", "cohort_id", ")", ":", "course_key", "=", "SlashSeparatedCourseKey", ".", "from_deprecated_string", "(", "course_key_string", ")", "get_course_with_access", "(", "request", ".", "user", ",", "'staff'", ",", "course_key", ")", "try", ":", "cohort", "=", "cohorts", ".", "get_cohort_by_id", "(", "course_key", ",", "cohort_id", ")", "except", "CourseUserGroup", ".", "DoesNotExist", ":", "raise", "Http404", "(", "'Cohort (ID {cohort_id}) not found for {course_key_string}'", ".", "format", "(", "cohort_id", "=", "cohort_id", ",", "course_key_string", "=", "course_key_string", ")", ")", "users", "=", "request", ".", "POST", ".", "get", "(", "'users'", ",", "''", ")", "added", "=", "[", "]", "changed", "=", "[", "]", "present", "=", "[", "]", "unknown", "=", "[", "]", "for", "username_or_email", "in", "split_by_comma_and_whitespace", "(", "users", ")", ":", "if", "(", "not", "username_or_email", ")", ":", "continue", "try", ":", "(", "user", ",", "previous_cohort", ")", "=", "cohorts", ".", "add_user_to_cohort", "(", "cohort", ",", "username_or_email", ")", "info", "=", "{", "'username'", ":", "user", ".", "username", ",", "'email'", ":", "user", ".", "email", "}", "if", "previous_cohort", ":", "info", "[", "'previous_cohort'", "]", "=", "previous_cohort", "changed", ".", "append", "(", "info", ")", "else", ":", "added", ".", "append", "(", "info", ")", "except", "ValueError", ":", "present", ".", "append", "(", "username_or_email", ")", "except", "User", ".", "DoesNotExist", ":", "unknown", ".", "append", "(", "username_or_email", ")", "return", "json_http_response", "(", "{", "'success'", ":", "True", ",", "'added'", ":", "added", ",", "'changed'", ":", "changed", ",", "'present'", ":", "present", ",", "'unknown'", ":", "unknown", "}", ")" ]
return json dict of: {success: true, added: [...], changed: [...], present: [...], unknown: [...]} .
train
false
48,458
def loglike_ar1(x, rho): x = np.asarray(x) u = np.r_[(x[0], (x[1:] - (rho * x[:(-1)])))] sigma_u2 = (2 * (1 - (rho ** 2))) loglik = (0.5 * ((((- (u ** 2).sum(0)) / sigma_u2) + np.log((1 - (rho ** 2)))) - (x.shape[0] * (np.log((2 * np.pi)) + np.log(sigma_u2))))) return loglik
[ "def", "loglike_ar1", "(", "x", ",", "rho", ")", ":", "x", "=", "np", ".", "asarray", "(", "x", ")", "u", "=", "np", ".", "r_", "[", "(", "x", "[", "0", "]", ",", "(", "x", "[", "1", ":", "]", "-", "(", "rho", "*", "x", "[", ":", "(", "-", "1", ")", "]", ")", ")", ")", "]", "sigma_u2", "=", "(", "2", "*", "(", "1", "-", "(", "rho", "**", "2", ")", ")", ")", "loglik", "=", "(", "0.5", "*", "(", "(", "(", "(", "-", "(", "u", "**", "2", ")", ".", "sum", "(", "0", ")", ")", "/", "sigma_u2", ")", "+", "np", ".", "log", "(", "(", "1", "-", "(", "rho", "**", "2", ")", ")", ")", ")", "-", "(", "x", ".", "shape", "[", "0", "]", "*", "(", "np", ".", "log", "(", "(", "2", "*", "np", ".", "pi", ")", ")", "+", "np", ".", "log", "(", "sigma_u2", ")", ")", ")", ")", ")", "return", "loglik" ]
loglikelihood of ar(1) process .
train
false
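In formula form, with u_1 = x_1, u_t = x_t - rho * x_{t-1}, n = len(x) and sigma_u^2 = 2 * (1 - rho^2) as defined in the code, the snippet computes

    loglik = 0.5 * ( -sum(u**2) / sigma_u^2 + log(1 - rho^2) - n * (log(2*pi) + log(sigma_u^2)) )

so a longer series or larger residuals push the value further negative.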
48,459
@pytest.mark.cmd @pytest.mark.django_db def test_test_checks_unit(capfd): units = Unit.objects.filter(state=TRANSLATED, target_f__endswith='%s.') call_command('test_checks', ('--unit=%s' % units.first().id)) (out, err) = capfd.readouterr() assert ('No errors found' in out)
[ "@", "pytest", ".", "mark", ".", "cmd", "@", "pytest", ".", "mark", ".", "django_db", "def", "test_test_checks_unit", "(", "capfd", ")", ":", "units", "=", "Unit", ".", "objects", ".", "filter", "(", "state", "=", "TRANSLATED", ",", "target_f__endswith", "=", "'%s.'", ")", "call_command", "(", "'test_checks'", ",", "(", "'--unit=%s'", "%", "units", ".", "first", "(", ")", ".", "id", ")", ")", "(", "out", ",", "err", ")", "=", "capfd", ".", "readouterr", "(", ")", "assert", "(", "'No errors found'", "in", "out", ")" ]
check the --unit option of the test_checks management command .
train
false
48,460
def cooperative_read(fd): def readfn(*args): result = fd.read(*args) sleep(0) return result return readfn
[ "def", "cooperative_read", "(", "fd", ")", ":", "def", "readfn", "(", "*", "args", ")", ":", "result", "=", "fd", ".", "read", "(", "*", "args", ")", "sleep", "(", "0", ")", "return", "result", "return", "readfn" ]
wrap a file descriptor's read with a partial function which schedules after each read .
train
false
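The sleep(0) is a cooperative yield; a self-contained sketch with time.sleep standing in for the green-thread sleep the original module presumably imports (eventlet/gevent style):

import io
from time import sleep  # stand-in for eventlet/gevent sleep

def cooperative_read(fd):
    def readfn(*args):
        result = fd.read(*args)
        sleep(0)  # yield to the scheduler between reads
        return result
    return readfn

read = cooperative_read(io.BytesIO(b'abcdef'))
print(read(3), read(3))  # b'abc' b'def'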
48,461
def _shallow_annotate(element, annotations): element = element._annotate(annotations) element._copy_internals() return element
[ "def", "_shallow_annotate", "(", "element", ",", "annotations", ")", ":", "element", "=", "element", ".", "_annotate", "(", "annotations", ")", "element", ".", "_copy_internals", "(", ")", "return", "element" ]
annotate the given clauseelement and copy its internals so that internal objects refer to the new annotated object .
train
false
48,462
@bp.route('/in/<city>') def city(city): page = force_int(request.args.get('page', 1), 0) if (not page): return abort(404) paginator = Account.query.filter_by(city=city).paginate(page) return render_template('user/city.html', paginator=paginator, city=city)
[ "@", "bp", ".", "route", "(", "'/in/<city>'", ")", "def", "city", "(", "city", ")", ":", "page", "=", "force_int", "(", "request", ".", "args", ".", "get", "(", "'page'", ",", "1", ")", ",", "0", ")", "if", "(", "not", "page", ")", ":", "return", "abort", "(", "404", ")", "paginator", "=", "Account", ".", "query", ".", "filter_by", "(", "city", "=", "city", ")", ".", "paginate", "(", "page", ")", "return", "render_template", "(", "'user/city.html'", ",", "paginator", "=", "paginator", ",", "city", "=", "city", ")" ]
users in a city .
train
false
48,463
def get_monitor_id(trans, monitor_email): monitor_user_id = None monitor_row = trans.sa_session.query(trans.model.User.table.c.id).filter((trans.model.User.table.c.email == monitor_email)).first() if (monitor_row is not None): monitor_user_id = monitor_row[0] return monitor_user_id
[ "def", "get_monitor_id", "(", "trans", ",", "monitor_email", ")", ":", "monitor_user_id", "=", "None", "monitor_row", "=", "trans", ".", "sa_session", ".", "query", "(", "trans", ".", "model", ".", "User", ".", "table", ".", "c", ".", "id", ")", ".", "filter", "(", "(", "trans", ".", "model", ".", "User", ".", "table", ".", "c", ".", "email", "==", "monitor_email", ")", ")", ".", "first", "(", ")", "if", "(", "monitor_row", "is", "not", "None", ")", ":", "monitor_user_id", "=", "monitor_row", "[", "0", "]", "return", "monitor_user_id" ]
a convenience method to obtain the monitor user id .
train
false
48,464
def install_miniconda(prefix='~/miniconda', use_sudo=False, keep_installer=False): with cd('/tmp'): if (not fabtools.files.is_file('Miniconda-latest-Linux-x86_64.sh')): download(MINICONDA_URL) command = ('bash Miniconda-latest-Linux-x86_64.sh -b -p %(prefix)s' % locals()) if use_sudo: run_as_root(command) else: run(command) files.append('~/.bash_profile', ('export PATH=%(prefix)s/bin:$PATH' % locals())) if (not keep_installer): run('rm -f Miniconda-latest-Linux-x86_64.sh')
[ "def", "install_miniconda", "(", "prefix", "=", "'~/miniconda'", ",", "use_sudo", "=", "False", ",", "keep_installer", "=", "False", ")", ":", "with", "cd", "(", "'/tmp'", ")", ":", "if", "(", "not", "fabtools", ".", "files", ".", "is_file", "(", "'Miniconda-latest-Linux-x86_64.sh'", ")", ")", ":", "download", "(", "MINICONDA_URL", ")", "command", "=", "(", "'bash Miniconda-latest-Linux-x86_64.sh -b -p %(prefix)s'", "%", "locals", "(", ")", ")", "if", "use_sudo", ":", "run_as_root", "(", "command", ")", "else", ":", "run", "(", "command", ")", "files", ".", "append", "(", "'~/.bash_profile'", ",", "(", "'export PATH=%(prefix)s/bin:$PATH'", "%", "locals", "(", ")", ")", ")", "if", "(", "not", "keep_installer", ")", ":", "run", "(", "'rm -f Miniconda-latest-Linux-x86_64.sh'", ")" ]
install the latest version of miniconda .
train
false
48,465
def get_volume_type_by_name(context, name): if (name is None): msg = _('name cannot be None') raise exception.InvalidVolumeType(reason=msg) return db.volume_type_get_by_name(context, name)
[ "def", "get_volume_type_by_name", "(", "context", ",", "name", ")", ":", "if", "(", "name", "is", "None", ")", ":", "msg", "=", "_", "(", "'name cannot be None'", ")", "raise", "exception", ".", "InvalidVolumeType", "(", "reason", "=", "msg", ")", "return", "db", ".", "volume_type_get_by_name", "(", "context", ",", "name", ")" ]
retrieves single volume type by name .
train
false
48,469
def valid(): m_data = __salt__['config.merge']('mine_functions', {}) if (not m_data): return data = {} for func in m_data: if (m_data[func] and isinstance(m_data[func], dict)): mine_func = m_data[func].pop('mine_function', func) if (not _mine_function_available(mine_func)): continue data[func] = {mine_func: m_data[func]} elif (m_data[func] and isinstance(m_data[func], list)): mine_func = func if (isinstance(m_data[func][0], dict) and ('mine_function' in m_data[func][0])): mine_func = m_data[func][0]['mine_function'] m_data[func].pop(0) if (not _mine_function_available(mine_func)): continue data[func] = {mine_func: m_data[func]} else: if (not _mine_function_available(func)): continue data[func] = m_data[func] return data
[ "def", "valid", "(", ")", ":", "m_data", "=", "__salt__", "[", "'config.merge'", "]", "(", "'mine_functions'", ",", "{", "}", ")", "if", "(", "not", "m_data", ")", ":", "return", "data", "=", "{", "}", "for", "func", "in", "m_data", ":", "if", "(", "m_data", "[", "func", "]", "and", "isinstance", "(", "m_data", "[", "func", "]", ",", "dict", ")", ")", ":", "mine_func", "=", "m_data", "[", "func", "]", ".", "pop", "(", "'mine_function'", ",", "func", ")", "if", "(", "not", "_mine_function_available", "(", "mine_func", ")", ")", ":", "continue", "data", "[", "func", "]", "=", "{", "mine_func", ":", "m_data", "[", "func", "]", "}", "elif", "(", "m_data", "[", "func", "]", "and", "isinstance", "(", "m_data", "[", "func", "]", ",", "list", ")", ")", ":", "mine_func", "=", "func", "if", "(", "isinstance", "(", "m_data", "[", "func", "]", "[", "0", "]", ",", "dict", ")", "and", "(", "'mine_function'", "in", "m_data", "[", "func", "]", "[", "0", "]", ")", ")", ":", "mine_func", "=", "m_data", "[", "func", "]", "[", "0", "]", "[", "'mine_function'", "]", "m_data", "[", "func", "]", ".", "pop", "(", "0", ")", "if", "(", "not", "_mine_function_available", "(", "mine_func", ")", ")", ":", "continue", "data", "[", "func", "]", "=", "{", "mine_func", ":", "m_data", "[", "func", "]", "}", "else", ":", "if", "(", "not", "_mine_function_available", "(", "func", ")", ")", ":", "continue", "data", "[", "func", "]", "=", "m_data", "[", "func", "]", "return", "data" ]
determines which of the configured mine_functions are valid, returning them as a dict .
train
true
48,470
def merge_paths(base_uri, relative_path): if ((base_uri.path is None) and (base_uri.authority is not None)): return ('/' + relative_path) else: path = (base_uri.path or '') index = path.rfind('/') return ((path[:index] + '/') + relative_path)
[ "def", "merge_paths", "(", "base_uri", ",", "relative_path", ")", ":", "if", "(", "(", "base_uri", ".", "path", "is", "None", ")", "and", "(", "base_uri", ".", "authority", "is", "not", "None", ")", ")", ":", "return", "(", "'/'", "+", "relative_path", ")", "else", ":", "path", "=", "(", "base_uri", ".", "path", "or", "''", ")", "index", "=", "path", ".", "rfind", "(", "'/'", ")", "return", "(", "(", "path", "[", ":", "index", "]", "+", "'/'", ")", "+", "relative_path", ")" ]
merge a base uri's path with a relative uri's path .
train
false
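This follows the path-merge rule of RFC 3986 section 5.3: everything after the base path's last '/' is replaced by the relative path. A standalone sketch with a hypothetical BaseURI stand-in for the snippet's uri type:

from collections import namedtuple

BaseURI = namedtuple('BaseURI', 'path authority')  # hypothetical stand-in

def merge_paths(base_uri, relative_path):
    if base_uri.path is None and base_uri.authority is not None:
        return '/' + relative_path
    path = base_uri.path or ''
    return path[:path.rfind('/')] + '/' + relative_path

assert merge_paths(BaseURI('/a/b/c', 'example.com'), 'd') == '/a/b/d'
assert merge_paths(BaseURI(None, 'example.com'), 'd') == '/d'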
48,472
def get_multi_product_metrics(db=db, timedelta=timedelta(days=365)): start_date = (timezone.now() - timedelta) pipeline = [{'$match': {'date': {'$gt': start_date}}}, {'$group': {'_id': '$user', 'node_id': {'$addToSet': '$params.node'}, 'action': {'$addToSet': '$action'}}}] user_nodes = db.nodelog.aggregate(pipeline)['result'] multi_product_count = 0 cross_product_count = 0 multi_action_count = 0 for user_node in user_nodes: if user_node['_id']: user_id = user_node['_id'] node_id = user_node['node_id'] products = [] nodes = db.node.find({'_id': {'$in': node_id}}) for node in nodes: products.append(get_entry_point(node['system_tags'])) if (len(set(products)) > 1): multi_product_count += 1 user = db.user.find_one({'_id': user_id}) user_entry_point = get_entry_point(user['system_tags']) for product in products: if (user_entry_point != product): cross_product_count += 1 break if (len(set(user_node['action'])) > 1): multi_action_count += 1 return {'multi_product_count': multi_product_count, 'cross_product_count': cross_product_count, 'multi_action_count': multi_action_count}
[ "def", "get_multi_product_metrics", "(", "db", "=", "db", ",", "timedelta", "=", "timedelta", "(", "days", "=", "365", ")", ")", ":", "start_date", "=", "(", "timezone", ".", "now", "(", ")", "-", "timedelta", ")", "pipeline", "=", "[", "{", "'$match'", ":", "{", "'date'", ":", "{", "'$gt'", ":", "start_date", "}", "}", "}", ",", "{", "'$group'", ":", "{", "'_id'", ":", "'$user'", ",", "'node_id'", ":", "{", "'$addToSet'", ":", "'$params.node'", "}", ",", "'action'", ":", "{", "'$addToSet'", ":", "'$action'", "}", "}", "}", "]", "user_nodes", "=", "db", ".", "nodelog", ".", "aggregate", "(", "pipeline", ")", "[", "'result'", "]", "multi_product_count", "=", "0", "cross_product_count", "=", "0", "multi_action_count", "=", "0", "for", "user_node", "in", "user_nodes", ":", "if", "user_node", "[", "'_id'", "]", ":", "user_id", "=", "user_node", "[", "'_id'", "]", "node_id", "=", "user_node", "[", "'node_id'", "]", "products", "=", "[", "]", "nodes", "=", "db", ".", "node", ".", "find", "(", "{", "'_id'", ":", "{", "'$in'", ":", "node_id", "}", "}", ")", "for", "node", "in", "nodes", ":", "products", ".", "append", "(", "get_entry_point", "(", "node", "[", "'system_tags'", "]", ")", ")", "if", "(", "len", "(", "set", "(", "products", ")", ")", ">", "1", ")", ":", "multi_product_count", "+=", "1", "user", "=", "db", ".", "user", ".", "find_one", "(", "{", "'_id'", ":", "user_id", "}", ")", "user_entry_point", "=", "get_entry_point", "(", "user", "[", "'system_tags'", "]", ")", "for", "product", "in", "products", ":", "if", "(", "user_entry_point", "!=", "product", ")", ":", "cross_product_count", "+=", "1", "break", "if", "(", "len", "(", "set", "(", "user_node", "[", "'action'", "]", ")", ")", ">", "1", ")", ":", "multi_action_count", "+=", "1", "return", "{", "'multi_product_count'", ":", "multi_product_count", ",", "'cross_product_count'", ":", "cross_product_count", ",", "'multi_action_count'", ":", "multi_action_count", "}" ]
get the number of users using 2+ products within a period of time .
train
false
48,473
def list_icmp_block(zone, permanent=True): cmd = '--zone={0} --list-icmp-blocks'.format(zone) if permanent: cmd += ' --permanent' return __firewall_cmd(cmd).split()
[ "def", "list_icmp_block", "(", "zone", ",", "permanent", "=", "True", ")", ":", "cmd", "=", "'--zone={0} --list-icmp-blocks'", ".", "format", "(", "zone", ")", "if", "permanent", ":", "cmd", "+=", "' --permanent'", "return", "__firewall_cmd", "(", "cmd", ")", ".", "split", "(", ")" ]
list icmp blocks on a zone .
train
true
48,474
def _pick_eeg_pos(c): eeg = dict(coord_frame=FIFF.FIFFV_COORD_HEAD, assign_to_chs=False, labels=list(), ids=list(), rr=list(), kinds=list(), np=0) for ch in c['chs']: if ((ch['kind'] == FIFF.FIFFV_EEG_CH) and (not _at_origin(ch['loc'][:3]))): eeg['labels'].append(ch['ch_name']) eeg['ids'].append(ch['logno']) eeg['rr'].append(ch['loc'][:3]) eeg['kinds'].append(FIFF.FIFFV_POINT_EEG) eeg['np'] += 1 if (eeg['np'] == 0): return None logger.info(('Picked positions of %d EEG channels from channel info' % eeg['np'])) return eeg
[ "def", "_pick_eeg_pos", "(", "c", ")", ":", "eeg", "=", "dict", "(", "coord_frame", "=", "FIFF", ".", "FIFFV_COORD_HEAD", ",", "assign_to_chs", "=", "False", ",", "labels", "=", "list", "(", ")", ",", "ids", "=", "list", "(", ")", ",", "rr", "=", "list", "(", ")", ",", "kinds", "=", "list", "(", ")", ",", "np", "=", "0", ")", "for", "ch", "in", "c", "[", "'chs'", "]", ":", "if", "(", "(", "ch", "[", "'kind'", "]", "==", "FIFF", ".", "FIFFV_EEG_CH", ")", "and", "(", "not", "_at_origin", "(", "ch", "[", "'loc'", "]", "[", ":", "3", "]", ")", ")", ")", ":", "eeg", "[", "'labels'", "]", ".", "append", "(", "ch", "[", "'ch_name'", "]", ")", "eeg", "[", "'ids'", "]", ".", "append", "(", "ch", "[", "'logno'", "]", ")", "eeg", "[", "'rr'", "]", ".", "append", "(", "ch", "[", "'loc'", "]", "[", ":", "3", "]", ")", "eeg", "[", "'kinds'", "]", ".", "append", "(", "FIFF", ".", "FIFFV_POINT_EEG", ")", "eeg", "[", "'np'", "]", "+=", "1", "if", "(", "eeg", "[", "'np'", "]", "==", "0", ")", ":", "return", "None", "logger", ".", "info", "(", "(", "'Picked positions of %d EEG channels from channel info'", "%", "eeg", "[", "'np'", "]", ")", ")", "return", "eeg" ]
pick eeg positions .
train
false
48,476
def is_third_party(request): req_host = request_host(request) if (not domain_match(req_host, reach(request.get_origin_req_host()))): return True else: return False
[ "def", "is_third_party", "(", "request", ")", ":", "req_host", "=", "request_host", "(", "request", ")", "if", "(", "not", "domain_match", "(", "req_host", ",", "reach", "(", "request", ".", "get_origin_req_host", "(", ")", ")", ")", ")", ":", "return", "True", "else", ":", "return", "False" ]
rfc 2965: returns true if the request is to a third-party host .
train
true
48,478
@step('There should be (\\d+) ([a-z][a-z0-9_ ]*) in the database') def model_count(step, count, model): model = get_model(model) expected = int(count) found = model.objects.count() assert (found == expected), ('Expected %d %s, found %d.' % (expected, model._meta.verbose_name_plural, found))
[ "@", "step", "(", "'There should be (\\\\d+) ([a-z][a-z0-9_ ]*) in the database'", ")", "def", "model_count", "(", "step", ",", "count", ",", "model", ")", ":", "model", "=", "get_model", "(", "model", ")", "expected", "=", "int", "(", "count", ")", "found", "=", "model", ".", "objects", ".", "count", "(", ")", "assert", "(", "found", "==", "expected", ")", ",", "(", "'Expected %d %s, found %d.'", "%", "(", "expected", ",", "model", ".", "_meta", ".", "verbose_name_plural", ",", "found", ")", ")" ]
then there should be 0 goals in the database .
train
false
48,479
def idz_id2svd(B, idx, proj): B = np.asfortranarray(B) (U, V, S, ier) = _id.idz_id2svd(B, idx, proj) if ier: raise _RETCODE_ERROR return (U, V, S)
[ "def", "idz_id2svd", "(", "B", ",", "idx", ",", "proj", ")", ":", "B", "=", "np", ".", "asfortranarray", "(", "B", ")", "(", "U", ",", "V", ",", "S", ",", "ier", ")", "=", "_id", ".", "idz_id2svd", "(", "B", ",", "idx", ",", "proj", ")", "if", "ier", ":", "raise", "_RETCODE_ERROR", "return", "(", "U", ",", "V", ",", "S", ")" ]
convert complex id to svd .
train
false
48,480
def ignore_logger(name_or_logger, allow_level=None): def handler(logger, level, msg, args, kwargs): if ((allow_level is not None) and (level >= allow_level)): return False return True register_special_log_handler(name_or_logger, handler)
[ "def", "ignore_logger", "(", "name_or_logger", ",", "allow_level", "=", "None", ")", ":", "def", "handler", "(", "logger", ",", "level", ",", "msg", ",", "args", ",", "kwargs", ")", ":", "if", "(", "(", "allow_level", "is", "not", "None", ")", "and", "(", "level", ">=", "allow_level", ")", ")", ":", "return", "False", "return", "True", "register_special_log_handler", "(", "name_or_logger", ",", "handler", ")" ]
ignores a logger for the regular breadcrumb code .
train
true
48,481
def images2neibs(ten4, neib_shape, neib_step=None, mode='valid'): return Images2Neibs(mode)(ten4, neib_shape, neib_step)
[ "def", "images2neibs", "(", "ten4", ",", "neib_shape", ",", "neib_step", "=", "None", ",", "mode", "=", "'valid'", ")", ":", "return", "Images2Neibs", "(", "mode", ")", "(", "ten4", ",", "neib_shape", ",", "neib_step", ")" ]
function images2neibs allows to apply a sliding window operation to a tensor containing images or other two-dimensional objects .
train
false
48,482
def set_course_tag(user, course_id, key, value): (record, _) = UserCourseTag.objects.get_or_create(user=user, course_id=course_id, key=key) record.value = value record.save()
[ "def", "set_course_tag", "(", "user", ",", "course_id", ",", "key", ",", "value", ")", ":", "(", "record", ",", "_", ")", "=", "UserCourseTag", ".", "objects", ".", "get_or_create", "(", "user", "=", "user", ",", "course_id", "=", "course_id", ",", "key", "=", "key", ")", "record", ".", "value", "=", "value", "record", ".", "save", "(", ")" ]
sets the value of the users course tag for the specified key in the specified course_id .
train
false
48,484
def rstrip(s, chars=None): return s.rstrip(chars)
[ "def", "rstrip", "(", "s", ",", "chars", "=", "None", ")", ":", "return", "s", ".", "rstrip", "(", "chars", ")" ]
rstrip(s[, chars]) -> string: return a copy of the string s with trailing whitespace removed; if chars is given, remove characters in chars instead .
train
false
48,485
def is_id_pair_list(lst): (a, b) = list(zip(*lst)) return is_id_list((a + b))
[ "def", "is_id_pair_list", "(", "lst", ")", ":", "(", "a", ",", "b", ")", "=", "list", "(", "zip", "(", "*", "lst", ")", ")", "return", "is_id_list", "(", "(", "a", "+", "b", ")", ")" ]
returns true if the given list is made up of pairs whose elements are all ids .
train
false
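zip(*lst) unzips the pairs into two tuples, which are concatenated so every element is validated in one is_id_list call:

pairs = [(1, 2), (3, 4)]
a, b = zip(*pairs)  # a == (1, 3), b == (2, 4)
print(a + b)        # (1, 3, 2, 4) -- one flat sequence of ids to check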
48,486
@register_useless @register_canonicalize @register_specialize @gof.local_optimizer([T.Split]) def local_useless_split(node): if isinstance(node.op, T.Split): if (node.op.len_splits == 1): (x, axis, splits) = node.inputs out = assert_op(x, T.eq(splits.shape[0], 1)) copy_stack_trace(node.outputs, out) out2 = assert_op(out, T.eq(x.shape[axis], splits[0])) copy_stack_trace(out, out2) return [out2]
[ "@", "register_useless", "@", "register_canonicalize", "@", "register_specialize", "@", "gof", ".", "local_optimizer", "(", "[", "T", ".", "Split", "]", ")", "def", "local_useless_split", "(", "node", ")", ":", "if", "isinstance", "(", "node", ".", "op", ",", "T", ".", "Split", ")", ":", "if", "(", "node", ".", "op", ".", "len_splits", "==", "1", ")", ":", "(", "x", ",", "axis", ",", "splits", ")", "=", "node", ".", "inputs", "out", "=", "assert_op", "(", "x", ",", "T", ".", "eq", "(", "splits", ".", "shape", "[", "0", "]", ",", "1", ")", ")", "copy_stack_trace", "(", "node", ".", "outputs", ",", "out", ")", "out2", "=", "assert_op", "(", "out", ",", "T", ".", "eq", "(", "x", ".", "shape", "[", "axis", "]", ",", "splits", "[", "0", "]", ")", ")", "copy_stack_trace", "(", "out", ",", "out2", ")", "return", "[", "out2", "]" ]
split{n_splits=1} -> x remove split with only 1 split .
train
false
48,487
def test_scenario_matches_tags(): scenario = Scenario.from_string(SCENARIO1, original_string=SCENARIO1.strip(), tags=['onetag', 'another-one']) expect(scenario.tags).to.equal(['onetag', 'another-one']) assert scenario.matches_tags(['onetag']) assert scenario.matches_tags(['another-one'])
[ "def", "test_scenario_matches_tags", "(", ")", ":", "scenario", "=", "Scenario", ".", "from_string", "(", "SCENARIO1", ",", "original_string", "=", "SCENARIO1", ".", "strip", "(", ")", ",", "tags", "=", "[", "'onetag'", ",", "'another-one'", "]", ")", "expect", "(", "scenario", ".", "tags", ")", ".", "to", ".", "equal", "(", "[", "'onetag'", ",", "'another-one'", "]", ")", "assert", "scenario", ".", "matches_tags", "(", "[", "'onetag'", "]", ")", "assert", "scenario", ".", "matches_tags", "(", "[", "'another-one'", "]", ")" ]
a scenario with tags should respond with true when matches_tags() is called with one of its tags .
train
false
48,488
def compile_subscriptions(node, event_type, event=None, level=0): subscriptions = check_node(node, event_type) if event: subscriptions = check_node(node, event) parent_subscriptions = compile_subscriptions(node, event_type, level=(level + 1)) elif node.parent_id: parent_subscriptions = compile_subscriptions(AbstractNode.load(node.parent_id), event_type, level=(level + 1)) else: parent_subscriptions = check_node(None, event_type) for notification_type in parent_subscriptions: p_sub_n = parent_subscriptions[notification_type] p_sub_n.extend(subscriptions[notification_type]) for nt in subscriptions: if (notification_type != nt): p_sub_n = list(set(p_sub_n).difference(set(subscriptions[nt]))) if (level == 0): (p_sub_n, removed) = utils.separate_users(node, p_sub_n) parent_subscriptions[notification_type] = p_sub_n return parent_subscriptions
[ "def", "compile_subscriptions", "(", "node", ",", "event_type", ",", "event", "=", "None", ",", "level", "=", "0", ")", ":", "subscriptions", "=", "check_node", "(", "node", ",", "event_type", ")", "if", "event", ":", "subscriptions", "=", "check_node", "(", "node", ",", "event", ")", "parent_subscriptions", "=", "compile_subscriptions", "(", "node", ",", "event_type", ",", "level", "=", "(", "level", "+", "1", ")", ")", "elif", "node", ".", "parent_id", ":", "parent_subscriptions", "=", "compile_subscriptions", "(", "AbstractNode", ".", "load", "(", "node", ".", "parent_id", ")", ",", "event_type", ",", "level", "=", "(", "level", "+", "1", ")", ")", "else", ":", "parent_subscriptions", "=", "check_node", "(", "None", ",", "event_type", ")", "for", "notification_type", "in", "parent_subscriptions", ":", "p_sub_n", "=", "parent_subscriptions", "[", "notification_type", "]", "p_sub_n", ".", "extend", "(", "subscriptions", "[", "notification_type", "]", ")", "for", "nt", "in", "subscriptions", ":", "if", "(", "notification_type", "!=", "nt", ")", ":", "p_sub_n", "=", "list", "(", "set", "(", "p_sub_n", ")", ".", "difference", "(", "set", "(", "subscriptions", "[", "nt", "]", ")", ")", ")", "if", "(", "level", "==", "0", ")", ":", "(", "p_sub_n", ",", "removed", ")", "=", "utils", ".", "separate_users", "(", "node", ",", "p_sub_n", ")", "parent_subscriptions", "[", "notification_type", "]", "=", "p_sub_n", "return", "parent_subscriptions" ]
recurse through node and parents for subscriptions .
train
false
48,490
def check_skip_travis(): if (os.environ.get('TRAVIS') == 'true'): raise SkipTest('This test needs to be skipped on Travis')
[ "def", "check_skip_travis", "(", ")", ":", "if", "(", "os", ".", "environ", ".", "get", "(", "'TRAVIS'", ")", "==", "'true'", ")", ":", "raise", "SkipTest", "(", "'This test needs to be skipped on Travis'", ")" ]
skip test if being run on travis .
train
false
48,492
def _ca_exists(ca_name, cacert_path=None): return ca_exists(ca_name, cacert_path)
[ "def", "_ca_exists", "(", "ca_name", ",", "cacert_path", "=", "None", ")", ":", "return", "ca_exists", "(", "ca_name", ",", "cacert_path", ")" ]
backwards-compatible wrapper around ca_exists .
train
false
48,493
def _Execute(statements, context, callback): for (i, statement) in enumerate(statements): if isinstance(statement, basestring): callback(statement) else: try: (func, args) = statement func(args, context, callback) except UndefinedVariable as e: start = max(0, (i - 3)) end = (i + 3) e.near = statements[start:end] raise
[ "def", "_Execute", "(", "statements", ",", "context", ",", "callback", ")", ":", "for", "(", "i", ",", "statement", ")", "in", "enumerate", "(", "statements", ")", ":", "if", "isinstance", "(", "statement", ",", "basestring", ")", ":", "callback", "(", "statement", ")", "else", ":", "try", ":", "(", "func", ",", "args", ")", "=", "statement", "func", "(", "args", ",", "context", ",", "callback", ")", "except", "UndefinedVariable", "as", "e", ":", "start", "=", "max", "(", "0", ",", "(", "i", "-", "3", ")", ")", "end", "=", "(", "i", "+", "3", ")", "e", ".", "near", "=", "statements", "[", "start", ":", "end", "]", "raise" ]
execute a bunch of template statements in a scopedcontext .
train
false
48,494
@step('the problem display name is "(.*)"$') def verify_problem_display_name(step, name): assert_equal(name, world.browser.find_by_css('.problem-header').text)
[ "@", "step", "(", "'the problem display name is \"(.*)\"$'", ")", "def", "verify_problem_display_name", "(", "step", ",", "name", ")", ":", "assert_equal", "(", "name", ",", "world", ".", "browser", ".", "find_by_css", "(", "'.problem-header'", ")", ".", "text", ")" ]
name is uppercased because the heading styles are uppercase in css .
train
false
48,496
def create_topic(topic_name): pubsub_client = pubsub.Client() topic = pubsub_client.topic(topic_name) topic.create() print 'Topic {} created.'.format(topic.name)
[ "def", "create_topic", "(", "topic_name", ")", ":", "pubsub_client", "=", "pubsub", ".", "Client", "(", ")", "topic", "=", "pubsub_client", ".", "topic", "(", "topic_name", ")", "topic", ".", "create", "(", ")", "print", "'Topic {} created.'", ".", "format", "(", "topic", ".", "name", ")" ]
create a new pub/sub topic .
train
false
48,497
def single_client(h=client_context.host, p=client_context.port, **kwargs): return _mongo_client(h, p, direct=True, **kwargs)
[ "def", "single_client", "(", "h", "=", "client_context", ".", "host", ",", "p", "=", "client_context", ".", "port", ",", "**", "kwargs", ")", ":", "return", "_mongo_client", "(", "h", ",", "p", ",", "direct", "=", "True", ",", "**", "kwargs", ")" ]
make a direct connection .
train
false
48,498
def traversal_path(path): if isinstance(path, text_type): path = path.encode('ascii') path = unquote_bytes_to_wsgi(path) return traversal_path_info(path)
[ "def", "traversal_path", "(", "path", ")", ":", "if", "isinstance", "(", "path", ",", "text_type", ")", ":", "path", "=", "path", ".", "encode", "(", "'ascii'", ")", "path", "=", "unquote_bytes_to_wsgi", "(", "path", ")", "return", "traversal_path_info", "(", "path", ")" ]
variant of pyramid's traversal_path_info that percent-decodes the path first .
train
false
48,499
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None): with tf.variable_scope(scope, 'Block17', [net], reuse=reuse): with tf.variable_scope('Branch_0'): tower_conv = slim.conv2d(net, 128, 1, scope='Conv2d_1x1') with tf.variable_scope('Branch_1'): tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1') tower_conv1_1 = slim.conv2d(tower_conv1_0, 128, [1, 7], scope='Conv2d_0b_1x7') tower_conv1_2 = slim.conv2d(tower_conv1_1, 128, [7, 1], scope='Conv2d_0c_7x1') mixed = tf.concat(3, [tower_conv, tower_conv1_2]) up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None, activation_fn=None, scope='Conv2d_1x1') net += (scale * up) if activation_fn: net = activation_fn(net) return net
[ "def", "block17", "(", "net", ",", "scale", "=", "1.0", ",", "activation_fn", "=", "tf", ".", "nn", ".", "relu", ",", "scope", "=", "None", ",", "reuse", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "scope", ",", "'Block17'", ",", "[", "net", "]", ",", "reuse", "=", "reuse", ")", ":", "with", "tf", ".", "variable_scope", "(", "'Branch_0'", ")", ":", "tower_conv", "=", "slim", ".", "conv2d", "(", "net", ",", "128", ",", "1", ",", "scope", "=", "'Conv2d_1x1'", ")", "with", "tf", ".", "variable_scope", "(", "'Branch_1'", ")", ":", "tower_conv1_0", "=", "slim", ".", "conv2d", "(", "net", ",", "128", ",", "1", ",", "scope", "=", "'Conv2d_0a_1x1'", ")", "tower_conv1_1", "=", "slim", ".", "conv2d", "(", "tower_conv1_0", ",", "128", ",", "[", "1", ",", "7", "]", ",", "scope", "=", "'Conv2d_0b_1x7'", ")", "tower_conv1_2", "=", "slim", ".", "conv2d", "(", "tower_conv1_1", ",", "128", ",", "[", "7", ",", "1", "]", ",", "scope", "=", "'Conv2d_0c_7x1'", ")", "mixed", "=", "tf", ".", "concat", "(", "3", ",", "[", "tower_conv", ",", "tower_conv1_2", "]", ")", "up", "=", "slim", ".", "conv2d", "(", "mixed", ",", "net", ".", "get_shape", "(", ")", "[", "3", "]", ",", "1", ",", "normalizer_fn", "=", "None", ",", "activation_fn", "=", "None", ",", "scope", "=", "'Conv2d_1x1'", ")", "net", "+=", "(", "scale", "*", "up", ")", "if", "activation_fn", ":", "net", "=", "activation_fn", "(", "net", ")", "return", "net" ]
builds the 17x17 resnet block .
train
true
48,500
def OAuthGetAccessTokenCGI(outfile): outfile.write('Status: 200\r\n') outfile.write('Content-Type: text/plain\r\n') outfile.write('\r\n') outfile.write('oauth_token=ACCESS_TOKEN') outfile.write('&') outfile.write('oauth_token_secret=ACCESS_TOKEN_SECRET')
[ "def", "OAuthGetAccessTokenCGI", "(", "outfile", ")", ":", "outfile", ".", "write", "(", "'Status: 200\\r\\n'", ")", "outfile", ".", "write", "(", "'Content-Type: text/plain\\r\\n'", ")", "outfile", ".", "write", "(", "'\\r\\n'", ")", "outfile", ".", "write", "(", "'oauth_token=ACCESS_TOKEN'", ")", "outfile", ".", "write", "(", "'&'", ")", "outfile", ".", "write", "(", "'oauth_token_secret=ACCESS_TOKEN_SECRET'", ")" ]
runs the oauthgetaccesstoken cgi .
train
false
48,501
def on_closing(): pass
[ "def", "on_closing", "(", ")", ":", "pass" ]
called at the end .
train
false
48,502
def systemInformationType16(): a = L2PseudoLength(l2pLength=1) b = TpPd(pd=6) c = MessageType(mesType=61) d = Si16RestOctets() packet = (((a / b) / c) / d) return packet
[ "def", "systemInformationType16", "(", ")", ":", "a", "=", "L2PseudoLength", "(", "l2pLength", "=", "1", ")", "b", "=", "TpPd", "(", "pd", "=", "6", ")", "c", "=", "MessageType", "(", "mesType", "=", "61", ")", "d", "=", "Si16RestOctets", "(", ")", "packet", "=", "(", "(", "(", "a", "/", "b", ")", "/", "c", ")", "/", "d", ")", "return", "packet" ]
system information type 16 section 9 .
train
true
48,505
def decode_network_number(ptype, plen, buf): return number.unpack_from(buf, header.size)[0]
[ "def", "decode_network_number", "(", "ptype", ",", "plen", ",", "buf", ")", ":", "return", "number", ".", "unpack_from", "(", "buf", ",", "header", ".", "size", ")", "[", "0", "]" ]
decodes a number from collectd network format .
train
false
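collectd's binary protocol prefixes each part with a type/length header and encodes numeric payloads as big-endian unsigned 64-bit integers; a sketch with explicit Struct objects standing in for the snippet's module-level header and number (assumed layout):

import struct

header = struct.Struct('!HH')  # 2-byte part type, 2-byte part length
number = struct.Struct('!Q')   # big-endian uint64 payload

buf = header.pack(0x0001, 12) + number.pack(42)
print(number.unpack_from(buf, header.size)[0])  # 42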
48,506
@pytest.mark.parametrize('inp, exp', [('foo', 'foo'), ('$foo $bar', 'bar $bar'), ('$foobar', '$foobar'), ('$foo $spam', 'bar eggs'), ('$an_int$spam$a_bool', '42eggsTrue'), ('bar$foo$spam$foo $an_int $none', 'barbareggsbar 42 None'), ('$foo/bar', 'bar/bar'), ("${'foo'} $spam", 'bar eggs'), ("${'foo'} ${'a_bool'}", 'bar True'), ("${'foo'}bar", 'barbar'), ("${'foo'}/bar", 'bar/bar'), ('${"foo\'}', '${"foo\'}'), ('$?bar', '$?bar'), ('$foo}bar', 'bar}bar'), ("${'foo", "${'foo"), skip_if_on_unix(('%foo%bar', 'barbar')), skip_if_on_unix(('%foo% %a_bool%', 'bar True')), skip_if_on_unix(('%foo%%an_int%', 'bar42')), skip_if_on_unix(("%foo% $spam ${'a_bool'}", 'bar eggs True')), ('foo', 'foo'), ('$foo bar', 'bar bar'), ("${'foo'}bar", 'barbar'), skip_if_on_unix(('%foo%bar', 'barbar'))]) def test_expandvars(inp, exp, xonsh_builtins): env = Env({'foo': 'bar', 'spam': 'eggs', 'a_bool': True, 'an_int': 42, 'none': None}) xonsh_builtins.__xonsh_env__ = env assert (expandvars(inp) == exp)
[ "@", "pytest", ".", "mark", ".", "parametrize", "(", "'inp, exp'", ",", "[", "(", "'foo'", ",", "'foo'", ")", ",", "(", "'$foo $bar'", ",", "'bar $bar'", ")", ",", "(", "'$foobar'", ",", "'$foobar'", ")", ",", "(", "'$foo $spam'", ",", "'bar eggs'", ")", ",", "(", "'$an_int$spam$a_bool'", ",", "'42eggsTrue'", ")", ",", "(", "'bar$foo$spam$foo $an_int $none'", ",", "'barbareggsbar 42 None'", ")", ",", "(", "'$foo/bar'", ",", "'bar/bar'", ")", ",", "(", "\"${'foo'} $spam\"", ",", "'bar eggs'", ")", ",", "(", "\"${'foo'} ${'a_bool'}\"", ",", "'bar True'", ")", ",", "(", "\"${'foo'}bar\"", ",", "'barbar'", ")", ",", "(", "\"${'foo'}/bar\"", ",", "'bar/bar'", ")", ",", "(", "'${\"foo\\'}'", ",", "'${\"foo\\'}'", ")", ",", "(", "'$?bar'", ",", "'$?bar'", ")", ",", "(", "'$foo}bar'", ",", "'bar}bar'", ")", ",", "(", "\"${'foo\"", ",", "\"${'foo\"", ")", ",", "skip_if_on_unix", "(", "(", "'%foo%bar'", ",", "'barbar'", ")", ")", ",", "skip_if_on_unix", "(", "(", "'%foo% %a_bool%'", ",", "'bar True'", ")", ")", ",", "skip_if_on_unix", "(", "(", "'%foo%%an_int%'", ",", "'bar42'", ")", ")", ",", "skip_if_on_unix", "(", "(", "\"%foo% $spam ${'a_bool'}\"", ",", "'bar eggs True'", ")", ")", ",", "(", "'foo'", ",", "'foo'", ")", ",", "(", "'$foo bar'", ",", "'bar bar'", ")", ",", "(", "\"${'foo'}bar\"", ",", "'barbar'", ")", ",", "skip_if_on_unix", "(", "(", "'%foo%bar'", ",", "'barbar'", ")", ")", "]", ")", "def", "test_expandvars", "(", "inp", ",", "exp", ",", "xonsh_builtins", ")", ":", "env", "=", "Env", "(", "{", "'foo'", ":", "'bar'", ",", "'spam'", ":", "'eggs'", ",", "'a_bool'", ":", "True", ",", "'an_int'", ":", "42", ",", "'none'", ":", "None", "}", ")", "xonsh_builtins", ".", "__xonsh_env__", "=", "env", "assert", "(", "expandvars", "(", "inp", ")", "==", "exp", ")" ]
test cases tweaked for xonsh, adapted from cpython's test_genericpath .
train
false
48,507
def test_mapnode_crash2(tmpdir): cwd = os.getcwd() node = pe.MapNode(niu.Function(input_names=[u'WRONG'], output_names=[u'newstring'], function=dummy_func), iterfield=[u'WRONG'], name=u'myfunc') node.inputs.WRONG = [u'string{}'.format(i) for i in range(3)] node.base_dir = str(tmpdir) with pytest.raises(Exception): node.run() os.chdir(cwd)
[ "def", "test_mapnode_crash2", "(", "tmpdir", ")", ":", "cwd", "=", "os", ".", "getcwd", "(", ")", "node", "=", "pe", ".", "MapNode", "(", "niu", ".", "Function", "(", "input_names", "=", "[", "u'WRONG'", "]", ",", "output_names", "=", "[", "u'newstring'", "]", ",", "function", "=", "dummy_func", ")", ",", "iterfield", "=", "[", "u'WRONG'", "]", ",", "name", "=", "u'myfunc'", ")", "node", ".", "inputs", ".", "WRONG", "=", "[", "u'string{}'", ".", "format", "(", "i", ")", "for", "i", "in", "range", "(", "3", ")", "]", "node", ".", "base_dir", "=", "str", "(", "tmpdir", ")", "with", "pytest", ".", "raises", "(", "Exception", ")", ":", "node", ".", "run", "(", ")", "os", ".", "chdir", "(", "cwd", ")" ]
test mapnode crash when stop_on_first_crash is false .
train
false
48,508
def is_valid_state(state): return (state in const.BGP_FSM_VALID_STATES)
[ "def", "is_valid_state", "(", "state", ")", ":", "return", "(", "state", "in", "const", ".", "BGP_FSM_VALID_STATES", ")" ]
returns true if the given state is a valid bgp finite state machine state .
train
false
48,509
def _get_boolean(data, position, dummy0, dummy1, dummy2): end = (position + 1) boolean_byte = data[position:end] if (boolean_byte == '\x00'): return (False, end) elif (boolean_byte == '\x01'): return (True, end) raise InvalidBSON(('invalid boolean value: %r' % boolean_byte))
[ "def", "_get_boolean", "(", "data", ",", "position", ",", "dummy0", ",", "dummy1", ",", "dummy2", ")", ":", "end", "=", "(", "position", "+", "1", ")", "boolean_byte", "=", "data", "[", "position", ":", "end", "]", "if", "(", "boolean_byte", "==", "'\\x00'", ")", ":", "return", "(", "False", ",", "end", ")", "elif", "(", "boolean_byte", "==", "'\\x01'", ")", ":", "return", "(", "True", ",", "end", ")", "raise", "InvalidBSON", "(", "(", "'invalid boolean value: %r'", "%", "boolean_byte", ")", ")" ]
decode a bson true/false to python true/false .
train
true
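A worked example derived directly from the snippet above; the three dummy arguments are ignored by the decoder, so None is fine for each.

data = '\x01\x00'
print(_get_boolean(data, 0, None, None, None))  # -> (True, 1)
print(_get_boolean(data, 1, None, None, None))  # -> (False, 2)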
48,512
def atomic_benchmark_estimator(estimator, X_test, verbose=False): n_instances = X_test.shape[0] runtimes = np.zeros(n_instances, dtype=np.float) for i in range(n_instances): instance = X_test[[i], :] start = time.time() estimator.predict(instance) runtimes[i] = (time.time() - start) if verbose: print('atomic_benchmark runtimes:', min(runtimes), scoreatpercentile(runtimes, 50), max(runtimes)) return runtimes
[ "def", "atomic_benchmark_estimator", "(", "estimator", ",", "X_test", ",", "verbose", "=", "False", ")", ":", "n_instances", "=", "X_test", ".", "shape", "[", "0", "]", "runtimes", "=", "np", ".", "zeros", "(", "n_instances", ",", "dtype", "=", "np", ".", "float", ")", "for", "i", "in", "range", "(", "n_instances", ")", ":", "instance", "=", "X_test", "[", "[", "i", "]", ",", ":", "]", "start", "=", "time", ".", "time", "(", ")", "estimator", ".", "predict", "(", "instance", ")", "runtimes", "[", "i", "]", "=", "(", "time", ".", "time", "(", ")", "-", "start", ")", "if", "verbose", ":", "print", "(", "'atomic_benchmark runtimes:'", ",", "min", "(", "runtimes", ")", ",", "scoreatpercentile", "(", "runtimes", ",", "50", ")", ",", "max", "(", "runtimes", ")", ")", "return", "runtimes" ]
measure the prediction runtime of each instance, one prediction at a time .
train
false
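A usage sketch, assuming a fitted scikit-learn estimator; the model and data below are placeholders, not from the original benchmark.

import numpy as np
from sklearn.linear_model import Ridge

X = np.random.rand(100, 5)
y = np.random.rand(100)
model = Ridge().fit(X, y)

runtimes = atomic_benchmark_estimator(model, X[:20], verbose=True)
print(runtimes.mean())  # mean per-instance prediction latency in seconds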
48,513
def ensure_uint255(arr): if (arr.dtype == 'uint8'): return arr elif (arr.dtype in ('float32', 'float64')): return np.array((arr * 255), dtype='uint8') else: raise Exception(('ensure_uint255 expects uint8 or float input but got %s with range [%g,%g,].' % (arr.dtype, arr.min(), arr.max())))
[ "def", "ensure_uint255", "(", "arr", ")", ":", "if", "(", "arr", ".", "dtype", "==", "'uint8'", ")", ":", "return", "arr", "elif", "(", "arr", ".", "dtype", "in", "(", "'float32'", ",", "'float64'", ")", ")", ":", "return", "np", ".", "array", "(", "(", "arr", "*", "255", ")", ",", "dtype", "=", "'uint8'", ")", "else", ":", "raise", "Exception", "(", "(", "'ensure_uint255 expects uint8 or float input but got %s with range [%g,%g,].'", "%", "(", "arr", ".", "dtype", ",", "arr", ".", "min", "(", ")", ",", "arr", ".", "max", "(", ")", ")", ")", ")" ]
if the data is float, scale it by 255 and convert to uint8; if it is already uint8, return it unchanged .
train
false
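A small demonstration of the conversion rule above: floats are scaled by 255, uint8 passes through.

import numpy as np

floats = np.array([0.0, 0.5, 1.0], dtype='float32')
print(ensure_uint255(floats))               # -> [  0 127 255]
print(ensure_uint255(np.uint8([7])))        # -> [7], returned unchanged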
48,514
def bingify(s): return "'{}'".format(s)
[ "def", "bingify", "(", "s", ")", ":", "return", "\"'{}'\"", ".", "format", "(", "s", ")" ]
because bing has to be an asshole and require special params .
train
false
48,516
def qualities(quality_ids): def q(qid): try: return quality_ids.index(qid) except ValueError: return (-1) return q
[ "def", "qualities", "(", "quality_ids", ")", ":", "def", "q", "(", "qid", ")", ":", "try", ":", "return", "quality_ids", ".", "index", "(", "qid", ")", "except", "ValueError", ":", "return", "(", "-", "1", ")", "return", "q" ]
get a numeric quality value out of a list of possible values .
train
false
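Example usage derived from the snippet: known ids rank by list position, unknown ids rank lowest, which makes the result handy as a sort key.

q = qualities(['240p', '360p', '720p', '1080p'])
print(q('720p'))   # -> 2
print(q('4320p'))  # -> -1, unknown qualities sort last
print(max(['360p', '1080p', '240p'], key=q))  # -> '1080p'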
48,518
def _validate_cert_path(name): cmd = "Test-Path -Path '{0}'".format(name) if (not ast.literal_eval(_cmd_run(cmd=cmd))): raise SaltInvocationError('Invalid path specified: {0}'.format(name))
[ "def", "_validate_cert_path", "(", "name", ")", ":", "cmd", "=", "\"Test-Path -Path '{0}'\"", ".", "format", "(", "name", ")", "if", "(", "not", "ast", ".", "literal_eval", "(", "_cmd_run", "(", "cmd", "=", "cmd", ")", ")", ")", ":", "raise", "SaltInvocationError", "(", "'Invalid path specified: {0}'", ".", "format", "(", "name", ")", ")" ]
ensure that the certificate path exists .
train
true
48,519
def blocknum(ch): cp = ord(ch) i = (bisect_right(_starts, cp) - 1) end = _ends[i] if (cp > end): return None return i
[ "def", "blocknum", "(", "ch", ")", ":", "cp", "=", "ord", "(", "ch", ")", "i", "=", "(", "bisect_right", "(", "_starts", ",", "cp", ")", "-", "1", ")", "end", "=", "_ends", "[", "i", "]", "if", "(", "cp", ">", "end", ")", ":", "return", "None", "return", "i" ]
returns the unicode block number for ch .
train
false
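A self-contained sketch of the bisect technique the snippet relies on. The _starts/_ends tables here are a tiny made-up subset, not the real unicode block data the original module ships with.

from bisect import bisect_right

_starts = [0x0000, 0x0080, 0x0100]  # hypothetical block start code points
_ends = [0x007F, 0x00FF, 0x017F]    # matching block end code points

def blocknum(ch):
    cp = ord(ch)
    i = bisect_right(_starts, cp) - 1
    return i if cp <= _ends[i] else None

print(blocknum('A'))        # -> 0
print(blocknum(u'\u00e9'))  # -> 1
print(blocknum(u'\u0200'))  # -> None, past the last block end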
48,520
def findAcceptablePatterns(tp, t, whichSequence, trainingSequences, nAcceptable=1): upTo = (t + 2) if tp.doPooling: upTo += min(tp.segUpdateValidDuration, nAcceptable) assert (upTo <= len(trainingSequences[whichSequence])) acceptablePatterns = [] if ((len(trainingSequences) == 2) and (trainingSequences[0][0] == trainingSequences[1][0]).all()): if ((trainingSequences[0][t] == trainingSequences[1][t]).all() and (trainingSequences[0][(t + 1)] != trainingSequences[1][(t + 1)]).any()): acceptablePatterns.append(trainingSequences[0][(t + 1)]) acceptablePatterns.append(trainingSequences[1][(t + 1)]) acceptablePatterns += [trainingSequences[whichSequence][t] for t in xrange(t, upTo)] return acceptablePatterns
[ "def", "findAcceptablePatterns", "(", "tp", ",", "t", ",", "whichSequence", ",", "trainingSequences", ",", "nAcceptable", "=", "1", ")", ":", "upTo", "=", "(", "t", "+", "2", ")", "if", "tp", ".", "doPooling", ":", "upTo", "+=", "min", "(", "tp", ".", "segUpdateValidDuration", ",", "nAcceptable", ")", "assert", "(", "upTo", "<=", "len", "(", "trainingSequences", "[", "whichSequence", "]", ")", ")", "acceptablePatterns", "=", "[", "]", "if", "(", "(", "len", "(", "trainingSequences", ")", "==", "2", ")", "and", "(", "trainingSequences", "[", "0", "]", "[", "0", "]", "==", "trainingSequences", "[", "1", "]", "[", "0", "]", ")", ".", "all", "(", ")", ")", ":", "if", "(", "(", "trainingSequences", "[", "0", "]", "[", "t", "]", "==", "trainingSequences", "[", "1", "]", "[", "t", "]", ")", ".", "all", "(", ")", "and", "(", "trainingSequences", "[", "0", "]", "[", "(", "t", "+", "1", ")", "]", "!=", "trainingSequences", "[", "1", "]", "[", "(", "t", "+", "1", ")", "]", ")", ".", "any", "(", ")", ")", ":", "acceptablePatterns", ".", "append", "(", "trainingSequences", "[", "0", "]", "[", "(", "t", "+", "1", ")", "]", ")", "acceptablePatterns", ".", "append", "(", "trainingSequences", "[", "1", "]", "[", "(", "t", "+", "1", ")", "]", ")", "acceptablePatterns", "+=", "[", "trainingSequences", "[", "whichSequence", "]", "[", "t", "]", "for", "t", "in", "xrange", "(", "t", ",", "upTo", ")", "]", "return", "acceptablePatterns" ]
tries to infer the set of acceptable patterns for prediction at the given time step and for the given sequence .
train
false
48,521
def driver_initiator_data_insert_by_key(context, initiator, namespace, key, value): return IMPL.driver_initiator_data_insert_by_key(context, initiator, namespace, key, value)
[ "def", "driver_initiator_data_insert_by_key", "(", "context", ",", "initiator", ",", "namespace", ",", "key", ",", "value", ")", ":", "return", "IMPL", ".", "driver_initiator_data_insert_by_key", "(", "context", ",", "initiator", ",", "namespace", ",", "key", ",", "value", ")" ]
updates a driverinitiatordata entry .
train
false
48,522
def feature_clustering(data, distance=PearsonR, linkage=AVERAGE): matrix = distance(data, axis=0) return dist_matrix_clustering(matrix, linkage=linkage)
[ "def", "feature_clustering", "(", "data", ",", "distance", "=", "PearsonR", ",", "linkage", "=", "AVERAGE", ")", ":", "matrix", "=", "distance", "(", "data", ",", "axis", "=", "0", ")", "return", "dist_matrix_clustering", "(", "matrix", ",", "linkage", "=", "linkage", ")" ]
return the hierarchical clustering of the data set's columns .
train
false
48,523
def elgamal_public_key(key): (p, r, e) = key return (p, r, pow(r, e, p))
[ "def", "elgamal_public_key", "(", "key", ")", ":", "(", "p", ",", "r", ",", "e", ")", "=", "key", "return", "(", "p", ",", "r", ",", "pow", "(", "r", ",", "e", ",", "p", ")", ")" ]
return a three-number tuple as the public key .
train
false
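A worked toy example (real elgamal keys use large primes): for the private key (p, r, e) = (23, 5, 6), the public key is (23, 5, pow(5, 6, 23)) = (23, 5, 8).

print(elgamal_public_key((23, 5, 6)))  # -> (23, 5, 8)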
48,524
def remove_objects(code, count=1): replacements = {} br = bracket_split(code, ['{}', '[]']) res = '' last = '' for e in br: if (e[0] == '{'): (n, temp_rep, cand_count) = remove_objects(e[1:(-1)], count) if is_object(n, last): res += (' ' + (OBJECT_LVAL % count)) replacements[(OBJECT_LVAL % count)] = e count += 1 else: res += ('{%s}' % n) count = cand_count replacements.update(temp_rep) elif (e[0] == '['): if is_array(last): res += e else: (n, rep, count) = remove_objects(e[1:(-1)], count) res += ('[%s]' % n) replacements.update(rep) else: res += e last = e return (res, replacements, count)
[ "def", "remove_objects", "(", "code", ",", "count", "=", "1", ")", ":", "replacements", "=", "{", "}", "br", "=", "bracket_split", "(", "code", ",", "[", "'{}'", ",", "'[]'", "]", ")", "res", "=", "''", "last", "=", "''", "for", "e", "in", "br", ":", "if", "(", "e", "[", "0", "]", "==", "'{'", ")", ":", "(", "n", ",", "temp_rep", ",", "cand_count", ")", "=", "remove_objects", "(", "e", "[", "1", ":", "(", "-", "1", ")", "]", ",", "count", ")", "if", "is_object", "(", "n", ",", "last", ")", ":", "res", "+=", "(", "' '", "+", "(", "OBJECT_LVAL", "%", "count", ")", ")", "replacements", "[", "(", "OBJECT_LVAL", "%", "count", ")", "]", "=", "e", "count", "+=", "1", "else", ":", "res", "+=", "(", "'{%s}'", "%", "n", ")", "count", "=", "cand_count", "replacements", ".", "update", "(", "temp_rep", ")", "elif", "(", "e", "[", "0", "]", "==", "'['", ")", ":", "if", "is_array", "(", "last", ")", ":", "res", "+=", "e", "else", ":", "(", "n", ",", "rep", ",", "count", ")", "=", "remove_objects", "(", "e", "[", "1", ":", "(", "-", "1", ")", "]", ",", "count", ")", "res", "+=", "(", "'[%s]'", "%", "n", ")", "replacements", ".", "update", "(", "rep", ")", "else", ":", "res", "+=", "e", "last", "=", "e", "return", "(", "res", ",", "replacements", ",", "count", ")" ]
this function replaces object literals with OBJECT_LVAL placeholders .
train
true
48,525
def natsortKey(string_): return [(int(s) if s.isdigit() else s) for s in re.split('(\\d+)', string_)]
[ "def", "natsortKey", "(", "string_", ")", ":", "return", "[", "(", "int", "(", "s", ")", "if", "s", ".", "isdigit", "(", ")", "else", "s", ")", "for", "s", "in", "re", ".", "split", "(", "'(\\\\d+)'", ",", "string_", ")", "]" ]
natural sort key where digit runs compare as integers; see URL .
train
false
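Example usage derived from the snippet: digit runs compare as integers, so 'a2' sorts before 'a10'.

print(sorted(['a10', 'a2', 'a1'], key=natsortKey))  # -> ['a1', 'a2', 'a10']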
48,526
def ispackage(m): try: name = m.__file__ except AttributeError: return 0 return re.match('.*__init__.py[co]?$', name)
[ "def", "ispackage", "(", "m", ")", ":", "try", ":", "name", "=", "m", ".", "__file__", "except", "AttributeError", ":", "return", "0", "return", "re", ".", "match", "(", "'.*__init__.py[co]?$'", ",", "name", ")" ]
determine if a module is a package - that means its __file__ points to an __init__.py[co] file .
train
false
48,527
def break_long_words(value, max_word_length=8): tagless = django.utils.html.strip_tags(value) re_capitalized_word = re.compile('([A-Z][a-z][a-z]+)', re.UNICODE) words = re_capitalized_word.split(tagless) re_too_many_letters_in_a_row = re.compile(('([\\w]{%d}|[.\\_^/])' % max_word_length), re.UNICODE) broken_words = [] for word in words: if word: broken_words += re_too_many_letters_in_a_row.split(word) broken_words = filter((lambda x: x), broken_words) return u'\u200b'.join(broken_words)
[ "def", "break_long_words", "(", "value", ",", "max_word_length", "=", "8", ")", ":", "tagless", "=", "django", ".", "utils", ".", "html", ".", "strip_tags", "(", "value", ")", "re_capitalized_word", "=", "re", ".", "compile", "(", "'([A-Z][a-z][a-z]+)'", ",", "re", ".", "UNICODE", ")", "words", "=", "re_capitalized_word", ".", "split", "(", "tagless", ")", "re_too_many_letters_in_a_row", "=", "re", ".", "compile", "(", "(", "'([\\\\w]{%d}|[.\\\\_^/])'", "%", "max_word_length", ")", ",", "re", ".", "UNICODE", ")", "broken_words", "=", "[", "]", "for", "word", "in", "words", ":", "if", "word", ":", "broken_words", "+=", "re_too_many_letters_in_a_row", ".", "split", "(", "word", ")", "broken_words", "=", "filter", "(", "(", "lambda", "x", ":", "x", ")", ",", "broken_words", ")", "return", "u'\\u200b'", ".", "join", "(", "broken_words", ")" ]
this filter does two things: * it removes all html tags from the input * it inserts zero-width spaces so that words longer than max_word_length can break .
train
false
48,529
def clean_groupname(string): string = string.strip(groupname_seps) if ((not (string.endswith(tuple(groupname_ignore_seps)) and string.startswith(tuple(groupname_ignore_seps)))) and (not any(((i in string.strip(groupname_ignore_seps)) for i in groupname_ignore_seps)))): string = string.strip(groupname_ignore_seps) for forbidden in forbidden_groupnames: if string.lower().startswith(forbidden): string = string[len(forbidden):] string = string.strip(groupname_seps) if string.lower().endswith(forbidden): string = string[:len(forbidden)] string = string.strip(groupname_seps) return string
[ "def", "clean_groupname", "(", "string", ")", ":", "string", "=", "string", ".", "strip", "(", "groupname_seps", ")", "if", "(", "(", "not", "(", "string", ".", "endswith", "(", "tuple", "(", "groupname_ignore_seps", ")", ")", "and", "string", ".", "startswith", "(", "tuple", "(", "groupname_ignore_seps", ")", ")", ")", ")", "and", "(", "not", "any", "(", "(", "(", "i", "in", "string", ".", "strip", "(", "groupname_ignore_seps", ")", ")", "for", "i", "in", "groupname_ignore_seps", ")", ")", ")", ")", ":", "string", "=", "string", ".", "strip", "(", "groupname_ignore_seps", ")", "for", "forbidden", "in", "forbidden_groupnames", ":", "if", "string", ".", "lower", "(", ")", ".", "startswith", "(", "forbidden", ")", ":", "string", "=", "string", "[", "len", "(", "forbidden", ")", ":", "]", "string", "=", "string", ".", "strip", "(", "groupname_seps", ")", "if", "string", ".", "lower", "(", ")", ".", "endswith", "(", "forbidden", ")", ":", "string", "=", "string", "[", ":", "len", "(", "forbidden", ")", "]", "string", "=", "string", ".", "strip", "(", "groupname_seps", ")", "return", "string" ]
removes and strips separators from the input string .
train
false
48,531
def _convert_input(x, y, z=None, n_models=1, model_set_axis=0): x = np.asarray(x, dtype=np.float) y = np.asarray(y, dtype=np.float) if (z is not None): z = np.asarray(z, dtype=np.float) if (n_models > 1): if (z is None): if (y.shape[model_set_axis] != n_models): raise ValueError(u'Number of data sets (y array is expected to equal the number of parameter sets)') y = np.rollaxis(y, model_set_axis, y.ndim) else: z_shape = (z.shape[:model_set_axis] + z.shape[(model_set_axis + 1):]) if (not (x.shape == y.shape == z_shape)): raise ValueError(u'x, y and z should have the same shape') if (z is None): farg = (x, y) else: farg = (x, y, z) return farg
[ "def", "_convert_input", "(", "x", ",", "y", ",", "z", "=", "None", ",", "n_models", "=", "1", ",", "model_set_axis", "=", "0", ")", ":", "x", "=", "np", ".", "asarray", "(", "x", ",", "dtype", "=", "np", ".", "float", ")", "y", "=", "np", ".", "asarray", "(", "y", ",", "dtype", "=", "np", ".", "float", ")", "if", "(", "z", "is", "not", "None", ")", ":", "z", "=", "np", ".", "asarray", "(", "z", ",", "dtype", "=", "np", ".", "float", ")", "if", "(", "n_models", ">", "1", ")", ":", "if", "(", "z", "is", "None", ")", ":", "if", "(", "y", ".", "shape", "[", "model_set_axis", "]", "!=", "n_models", ")", ":", "raise", "ValueError", "(", "u'Number of data sets (y array is expected to equal the number of parameter sets)'", ")", "y", "=", "np", ".", "rollaxis", "(", "y", ",", "model_set_axis", ",", "y", ".", "ndim", ")", "else", ":", "z_shape", "=", "(", "z", ".", "shape", "[", ":", "model_set_axis", "]", "+", "z", ".", "shape", "[", "(", "model_set_axis", "+", "1", ")", ":", "]", ")", "if", "(", "not", "(", "x", ".", "shape", "==", "y", ".", "shape", "==", "z_shape", ")", ")", ":", "raise", "ValueError", "(", "u'x, y and z should have the same shape'", ")", "if", "(", "z", "is", "None", ")", ":", "farg", "=", "(", "x", ",", "y", ")", "else", ":", "farg", "=", "(", "x", ",", "y", ",", "z", ")", "return", "farg" ]
convert inputs to float arrays .
train
false
48,532
def str_to_unicode(s, encoding=None): if (not (type(s) == str)): return s if (not encoding): encoding = ENCODING for c in [encoding, 'utf-8', 'latin-1']: try: return s.decode(c) except UnicodeDecodeError: pass return s.decode(encoding, 'replace')
[ "def", "str_to_unicode", "(", "s", ",", "encoding", "=", "None", ")", ":", "if", "(", "not", "(", "type", "(", "s", ")", "==", "str", ")", ")", ":", "return", "s", "if", "(", "not", "encoding", ")", ":", "encoding", "=", "ENCODING", "for", "c", "in", "[", "encoding", ",", "'utf-8'", ",", "'latin-1'", "]", ":", "try", ":", "return", "s", ".", "decode", "(", "c", ")", "except", "UnicodeDecodeError", ":", "pass", "return", "s", ".", "decode", "(", "encoding", ",", "'replace'", ")" ]
attempts to convert a string of unknown character set to a unicode string .
train
false
48,533
def as_sparse_or_tensor_variable(x, name=None): try: return as_sparse_variable(x, name) except (ValueError, TypeError): return theano.tensor.as_tensor_variable(x, name)
[ "def", "as_sparse_or_tensor_variable", "(", "x", ",", "name", "=", "None", ")", ":", "try", ":", "return", "as_sparse_variable", "(", "x", ",", "name", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "return", "theano", ".", "tensor", ".", "as_tensor_variable", "(", "x", ",", "name", ")" ]
same as as_sparse_variable but if we can't make a sparse variable, fall back to a tensor variable .
train
false
48,534
def load_hdf5(filename, obj): _check_available() with h5py.File(filename, 'r') as f: d = HDF5Deserializer(f) d.load(obj)
[ "def", "load_hdf5", "(", "filename", ",", "obj", ")", ":", "_check_available", "(", ")", "with", "h5py", ".", "File", "(", "filename", ",", "'r'", ")", "as", "f", ":", "d", "=", "HDF5Deserializer", "(", "f", ")", "d", ".", "load", "(", "obj", ")" ]
loads an object from the file in hdf5 format .
train
false
48,535
def pretty_ts(timestamp, tz=True): dt = timestamp if (not isinstance(timestamp, datetime.datetime)): dt = ts_to_dt(timestamp) if tz: dt = dt.astimezone(dateutil.tz.tzlocal()) return dt.strftime('%Y-%m-%d %H:%M %Z')
[ "def", "pretty_ts", "(", "timestamp", ",", "tz", "=", "True", ")", ":", "dt", "=", "timestamp", "if", "(", "not", "isinstance", "(", "timestamp", ",", "datetime", ".", "datetime", ")", ")", ":", "dt", "=", "ts_to_dt", "(", "timestamp", ")", "if", "tz", ":", "dt", "=", "dt", ".", "astimezone", "(", "dateutil", ".", "tz", ".", "tzlocal", "(", ")", ")", "return", "dt", ".", "strftime", "(", "'%Y-%m-%d %H:%M %Z'", ")" ]
pretty-format the given timestamp .
train
false
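A quick example with tz=False to keep the output independent of the local timezone; with a naive datetime the %Z field renders empty.

import datetime

ts = datetime.datetime(2024, 1, 2, 3, 4, 5)
print(pretty_ts(ts, tz=False))  # -> '2024-01-02 03:04 '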
48,536
@pytest.mark.skipif(u'not six.PY2') def test_sharedmethod_imfunc(): def foo(self): pass actual_foo = foo class Bar(object, ): foo = sharedmethod(actual_foo) assert (Bar.foo.im_func is actual_foo) assert (Bar().foo.im_func is actual_foo) def foo(cls): pass actual_foo_2 = foo class MetaBar(type, ): foo = actual_foo_2 class Bar(object, ): __metaclass__ = MetaBar foo = sharedmethod(actual_foo) assert (Bar.foo.im_func is actual_foo_2) assert (Bar().foo.im_func is actual_foo) class MetaBar(type, ): foo = None class Bar(object, ): __metaclass__ = MetaBar foo = sharedmethod(actual_foo) assert (Bar.foo.im_func is actual_foo) assert (Bar().foo.im_func is actual_foo)
[ "@", "pytest", ".", "mark", ".", "skipif", "(", "u'not six.PY2'", ")", "def", "test_sharedmethod_imfunc", "(", ")", ":", "def", "foo", "(", "self", ")", ":", "pass", "actual_foo", "=", "foo", "class", "Bar", "(", "object", ",", ")", ":", "foo", "=", "sharedmethod", "(", "actual_foo", ")", "assert", "(", "Bar", ".", "foo", ".", "im_func", "is", "actual_foo", ")", "assert", "(", "Bar", "(", ")", ".", "foo", ".", "im_func", "is", "actual_foo", ")", "def", "foo", "(", "cls", ")", ":", "pass", "actual_foo_2", "=", "foo", "class", "MetaBar", "(", "type", ",", ")", ":", "foo", "=", "actual_foo_2", "class", "Bar", "(", "object", ",", ")", ":", "__metaclass__", "=", "MetaBar", "foo", "=", "sharedmethod", "(", "actual_foo", ")", "assert", "(", "Bar", ".", "foo", ".", "im_func", "is", "actual_foo_2", ")", "assert", "(", "Bar", "(", ")", ".", "foo", ".", "im_func", "is", "actual_foo", ")", "class", "MetaBar", "(", "type", ",", ")", ":", "foo", "=", "None", "class", "Bar", "(", "object", ",", ")", ":", "__metaclass__", "=", "MetaBar", "foo", "=", "sharedmethod", "(", "actual_foo", ")", "assert", "(", "Bar", ".", "foo", ".", "im_func", "is", "actual_foo", ")", "assert", "(", "Bar", "(", ")", ".", "foo", ".", "im_func", "is", "actual_foo", ")" ]
test that the im_func of a sharedmethod always points to the correct underlying function .
train
false
48,538
def mkdirs(newdir, mode=511): if six.PY3: os.makedirs(newdir, mode=mode, exist_ok=True) else: try: os.makedirs(newdir, mode=mode) except OSError as exception: if (exception.errno != errno.EEXIST): raise
[ "def", "mkdirs", "(", "newdir", ",", "mode", "=", "511", ")", ":", "if", "six", ".", "PY3", ":", "os", ".", "makedirs", "(", "newdir", ",", "mode", "=", "mode", ",", "exist_ok", "=", "True", ")", "else", ":", "try", ":", "os", ".", "makedirs", "(", "newdir", ",", "mode", "=", "mode", ")", "except", "OSError", "as", "exception", ":", "if", "(", "exception", ".", "errno", "!=", "errno", ".", "EEXIST", ")", ":", "raise" ]
creates the directory specified by newdir, without failing if it already exists .
train
false
48,539
def _gen_vol_xml(vmname, diskname, size, hypervisor, **kwargs): size = (int(size) * 1024) disk_info = _get_image_info(hypervisor, vmname, **kwargs) context = {'name': vmname, 'filename': '{0}.{1}'.format(diskname, disk_info['disktype']), 'volname': diskname, 'disktype': disk_info['disktype'], 'size': str(size), 'pool': disk_info['pool']} fn_ = 'libvirt_volume.jinja' try: template = JINJA.get_template(fn_) except jinja2.exceptions.TemplateNotFound: log.error('Could not load template {0}'.format(fn_)) return '' return template.render(**context)
[ "def", "_gen_vol_xml", "(", "vmname", ",", "diskname", ",", "size", ",", "hypervisor", ",", "**", "kwargs", ")", ":", "size", "=", "(", "int", "(", "size", ")", "*", "1024", ")", "disk_info", "=", "_get_image_info", "(", "hypervisor", ",", "vmname", ",", "**", "kwargs", ")", "context", "=", "{", "'name'", ":", "vmname", ",", "'filename'", ":", "'{0}.{1}'", ".", "format", "(", "diskname", ",", "disk_info", "[", "'disktype'", "]", ")", ",", "'volname'", ":", "diskname", ",", "'disktype'", ":", "disk_info", "[", "'disktype'", "]", ",", "'size'", ":", "str", "(", "size", ")", ",", "'pool'", ":", "disk_info", "[", "'pool'", "]", "}", "fn_", "=", "'libvirt_volume.jinja'", "try", ":", "template", "=", "JINJA", ".", "get_template", "(", "fn_", ")", "except", "jinja2", ".", "exceptions", ".", "TemplateNotFound", ":", "log", ".", "error", "(", "'Could not load template {0}'", ".", "format", "(", "fn_", ")", ")", "return", "''", "return", "template", ".", "render", "(", "**", "context", ")" ]
generate the xml string to define a libvirt storage volume .
train
false
48,540
def run_cmds(cmds, fail_sequential=False): to_return = True for (cmd, tries) in cmds: if ((not to_return) and fail_sequential): return False elif run_cmd(cmd, attempts=tries): continue else: to_return = False return to_return
[ "def", "run_cmds", "(", "cmds", ",", "fail_sequential", "=", "False", ")", ":", "to_return", "=", "True", "for", "(", "cmd", ",", "tries", ")", "in", "cmds", ":", "if", "(", "(", "not", "to_return", ")", "and", "fail_sequential", ")", ":", "return", "False", "elif", "run_cmd", "(", "cmd", ",", "attempts", "=", "tries", ")", ":", "continue", "else", ":", "to_return", "=", "False", "return", "to_return" ]
run a list of (command, attempts) tuples; if fail_sequential is true, stop issuing commands after the first failure .
train
false
48,542
def connect_signals(): review_request_published.connect(review_request_published_cb, sender=ReviewRequest) review_published.connect(review_published_cb, sender=Review) reply_published.connect(reply_published_cb, sender=Review) review_request_closed.connect(review_request_closed_cb, sender=ReviewRequest) user_registered.connect(user_registered_cb) post_save.connect(webapi_token_saved_cb, sender=WebAPIToken) post_delete.connect(webapi_token_deleted_cb, sender=WebAPIToken)
[ "def", "connect_signals", "(", ")", ":", "review_request_published", ".", "connect", "(", "review_request_published_cb", ",", "sender", "=", "ReviewRequest", ")", "review_published", ".", "connect", "(", "review_published_cb", ",", "sender", "=", "Review", ")", "reply_published", ".", "connect", "(", "reply_published_cb", ",", "sender", "=", "Review", ")", "review_request_closed", ".", "connect", "(", "review_request_closed_cb", ",", "sender", "=", "ReviewRequest", ")", "user_registered", ".", "connect", "(", "user_registered_cb", ")", "post_save", ".", "connect", "(", "webapi_token_saved_cb", ",", "sender", "=", "WebAPIToken", ")", "post_delete", ".", "connect", "(", "webapi_token_deleted_cb", ",", "sender", "=", "WebAPIToken", ")" ]
connect e-mail callbacks to signals .
train
false
48,543
def is_linked(prefix, dist): return load_meta(prefix, dist)
[ "def", "is_linked", "(", "prefix", ",", "dist", ")", ":", "return", "load_meta", "(", "prefix", ",", "dist", ")" ]
return the install metadata for a linked package in a prefix .
train
false
48,545
def expand_empty(tensor_var, size): if (size == 0): return tensor_var shapes = [tensor_var.shape[x] for x in xrange(tensor_var.ndim)] new_shape = ([(size + shapes[0])] + shapes[1:]) empty = tensor.AllocEmpty(tensor_var.dtype)(*new_shape) ret = tensor.set_subtensor(empty[:shapes[0]], tensor_var) ret.tag.nan_guard_mode_check = False return ret
[ "def", "expand_empty", "(", "tensor_var", ",", "size", ")", ":", "if", "(", "size", "==", "0", ")", ":", "return", "tensor_var", "shapes", "=", "[", "tensor_var", ".", "shape", "[", "x", "]", "for", "x", "in", "xrange", "(", "tensor_var", ".", "ndim", ")", "]", "new_shape", "=", "(", "[", "(", "size", "+", "shapes", "[", "0", "]", ")", "]", "+", "shapes", "[", "1", ":", "]", ")", "empty", "=", "tensor", ".", "AllocEmpty", "(", "tensor_var", ".", "dtype", ")", "(", "*", "new_shape", ")", "ret", "=", "tensor", ".", "set_subtensor", "(", "empty", "[", ":", "shapes", "[", "0", "]", "]", ",", "tensor_var", ")", "ret", ".", "tag", ".", "nan_guard_mode_check", "=", "False", "return", "ret" ]
transforms the shape of a tensor from (d1, d2, ...) to (d1 + size, d2, ...) by adding uninitialized memory at the end of the tensor .
train
false
48,548
def parse_signed_request(signed_request, app_secret): try: l = signed_request.split('.', 2) encoded_sig = str(l[0]) payload = str(l[1]) sig = base64.urlsafe_b64decode((encoded_sig + ('=' * ((4 - (len(encoded_sig) % 4)) % 4)))) data = base64.urlsafe_b64decode((payload + ('=' * ((4 - (len(payload) % 4)) % 4)))) except IndexError: return False except TypeError: return False data = _parse_json(data) if (data.get('algorithm', '').upper() != 'HMAC-SHA256'): return False app_secret = app_secret.encode('ascii') payload = payload.encode('ascii') expected_sig = hmac.new(app_secret, msg=payload, digestmod=hashlib.sha256).digest() if (sig != expected_sig): return False return data
[ "def", "parse_signed_request", "(", "signed_request", ",", "app_secret", ")", ":", "try", ":", "l", "=", "signed_request", ".", "split", "(", "'.'", ",", "2", ")", "encoded_sig", "=", "str", "(", "l", "[", "0", "]", ")", "payload", "=", "str", "(", "l", "[", "1", "]", ")", "sig", "=", "base64", ".", "urlsafe_b64decode", "(", "(", "encoded_sig", "+", "(", "'='", "*", "(", "(", "4", "-", "(", "len", "(", "encoded_sig", ")", "%", "4", ")", ")", "%", "4", ")", ")", ")", ")", "data", "=", "base64", ".", "urlsafe_b64decode", "(", "(", "payload", "+", "(", "'='", "*", "(", "(", "4", "-", "(", "len", "(", "payload", ")", "%", "4", ")", ")", "%", "4", ")", ")", ")", ")", "except", "IndexError", ":", "return", "False", "except", "TypeError", ":", "return", "False", "data", "=", "_parse_json", "(", "data", ")", "if", "(", "data", ".", "get", "(", "'algorithm'", ",", "''", ")", ".", "upper", "(", ")", "!=", "'HMAC-SHA256'", ")", ":", "return", "False", "app_secret", "=", "app_secret", ".", "encode", "(", "'ascii'", ")", "payload", "=", "payload", ".", "encode", "(", "'ascii'", ")", "expected_sig", "=", "hmac", ".", "new", "(", "app_secret", ",", "msg", "=", "payload", ",", "digestmod", "=", "hashlib", ".", "sha256", ")", ".", "digest", "(", ")", "if", "(", "sig", "!=", "expected_sig", ")", ":", "return", "False", "return", "data" ]
return a dictionary with the signed request data, or False if parsing or signature verification fails .
train
true
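A round-trip sketch, assuming _parse_json is json.loads; it builds a facebook-style signed request (urlsafe base64 without padding, HMAC-SHA256 over the payload segment) and feeds it back to the parser.

import base64, hashlib, hmac, json

secret = 'app_secret'  # placeholder secret
payload = base64.urlsafe_b64encode(
    json.dumps({'algorithm': 'HMAC-SHA256', 'user_id': '42'}).encode('ascii')
).decode('ascii').rstrip('=')
sig = base64.urlsafe_b64encode(
    hmac.new(secret.encode('ascii'), msg=payload.encode('ascii'),
             digestmod=hashlib.sha256).digest()
).decode('ascii').rstrip('=')

print(parse_signed_request(sig + '.' + payload, secret))
# -> {'algorithm': 'HMAC-SHA256', 'user_id': '42'} when the signature checks out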
48,549
def setUnjellyableForClass(classname, unjellyable): global unjellyableRegistry classname = _maybeClass(classname) unjellyableRegistry[classname] = unjellyable globalSecurity.allowTypes(classname)
[ "def", "setUnjellyableForClass", "(", "classname", ",", "unjellyable", ")", ":", "global", "unjellyableRegistry", "classname", "=", "_maybeClass", "(", "classname", ")", "unjellyableRegistry", "[", "classname", "]", "=", "unjellyable", "globalSecurity", ".", "allowTypes", "(", "classname", ")" ]
set which local class will represent a remote type .
train
false
48,550
def ParseHostPort(address): host_port_re = re.match('([a-zA-Z0-9-\\.]+):([0-9]{1,5})$', address) if (not host_port_re): raise TypeError(('bad host:port: %s' % address)) host = host_port_re.group(1) port = int(host_port_re.group(2)) if (port >= 65536): raise TypeError(('invalid port: %d' % port)) return (host, port)
[ "def", "ParseHostPort", "(", "address", ")", ":", "host_port_re", "=", "re", ".", "match", "(", "'([a-zA-Z0-9-\\\\.]+):([0-9]{1,5})$'", ",", "address", ")", "if", "(", "not", "host_port_re", ")", ":", "raise", "TypeError", "(", "(", "'bad host:port: %s'", "%", "address", ")", ")", "host", "=", "host_port_re", ".", "group", "(", "1", ")", "port", "=", "int", "(", "host_port_re", ".", "group", "(", "2", ")", ")", "if", "(", "port", ">=", "65536", ")", ":", "raise", "TypeError", "(", "(", "'invalid port: %d'", "%", "port", ")", ")", "return", "(", "host", ",", "port", ")" ]
parses the provided address string as host:port and returns a (host, port) tuple .
train
false
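Straightforward usage derived from the snippet; malformed input raises TypeError rather than returning a value.

print(ParseHostPort('localhost:8080'))  # -> ('localhost', 8080)
ParseHostPort('localhost:99999')        # raises TypeError: invalid port: 99999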
48,552
def cross_below(values1, values2, start=(-2), end=None): return _cross_impl(values1, values2, start, end, (lambda x: (x < 0)))
[ "def", "cross_below", "(", "values1", ",", "values2", ",", "start", "=", "(", "-", "2", ")", ",", "end", "=", "None", ")", ":", "return", "_cross_impl", "(", "values1", ",", "values2", ",", "start", ",", "end", ",", "(", "lambda", "x", ":", "(", "x", "<", "0", ")", ")", ")" ]
checks for cross-below conditions over the specified period between two dataseries objects .
train
false
48,553
def _display(s): if (not isinstance(s, unicode)): s = s.decode('utf-8') s = _indent(_escaped_text_from_text(s, 'whitespace'), 4) if (not s.endswith('\n')): s += '\n' return s
[ "def", "_display", "(", "s", ")", ":", "if", "(", "not", "isinstance", "(", "s", ",", "unicode", ")", ")", ":", "s", "=", "s", ".", "decode", "(", "'utf-8'", ")", "s", "=", "_indent", "(", "_escaped_text_from_text", "(", "s", ",", "'whitespace'", ")", ",", "4", ")", "if", "(", "not", "s", ".", "endswith", "(", "'\\n'", ")", ")", ":", "s", "+=", "'\\n'", "return", "s" ]
mark up the given string for useful display .
train
false
48,554
def _get_csr_extensions(csr): ret = OrderedDict() csrtempfile = tempfile.NamedTemporaryFile() csrtempfile.write(csr.as_pem()) csrtempfile.flush() csryaml = _parse_openssl_req(csrtempfile.name) csrtempfile.close() if (csryaml and ('Requested Extensions' in csryaml['Certificate Request']['Data'])): csrexts = csryaml['Certificate Request']['Data']['Requested Extensions'] if (not csrexts): return ret for (short_name, long_name) in six.iteritems(EXT_NAME_MAPPINGS): if (csrexts and (long_name in csrexts)): ret[short_name] = csrexts[long_name] return ret
[ "def", "_get_csr_extensions", "(", "csr", ")", ":", "ret", "=", "OrderedDict", "(", ")", "csrtempfile", "=", "tempfile", ".", "NamedTemporaryFile", "(", ")", "csrtempfile", ".", "write", "(", "csr", ".", "as_pem", "(", ")", ")", "csrtempfile", ".", "flush", "(", ")", "csryaml", "=", "_parse_openssl_req", "(", "csrtempfile", ".", "name", ")", "csrtempfile", ".", "close", "(", ")", "if", "(", "csryaml", "and", "(", "'Requested Extensions'", "in", "csryaml", "[", "'Certificate Request'", "]", "[", "'Data'", "]", ")", ")", ":", "csrexts", "=", "csryaml", "[", "'Certificate Request'", "]", "[", "'Data'", "]", "[", "'Requested Extensions'", "]", "if", "(", "not", "csrexts", ")", ":", "return", "ret", "for", "(", "short_name", ",", "long_name", ")", "in", "six", ".", "iteritems", "(", "EXT_NAME_MAPPINGS", ")", ":", "if", "(", "csrexts", "and", "(", "long_name", "in", "csrexts", ")", ")", ":", "ret", "[", "short_name", "]", "=", "csrexts", "[", "long_name", "]", "return", "ret" ]
returns an ordered dict mapping extension short names to the values of the extensions requested in the csr .
train
true
48,555
def align_check(device, part_type, partition): _validate_device(device) if (part_type not in set(['minimal', 'optimal'])): raise CommandExecutionError('Invalid part_type passed to partition.align_check') try: int(partition) except Exception: raise CommandExecutionError('Invalid partition passed to partition.align_check') cmd = 'parted -m -s {0} align-check {1} {2}'.format(device, part_type, partition) out = __salt__['cmd.run'](cmd).splitlines() return out
[ "def", "align_check", "(", "device", ",", "part_type", ",", "partition", ")", ":", "_validate_device", "(", "device", ")", "if", "(", "part_type", "not", "in", "set", "(", "[", "'minimal'", ",", "'optimal'", "]", ")", ")", ":", "raise", "CommandExecutionError", "(", "'Invalid part_type passed to partition.align_check'", ")", "try", ":", "int", "(", "partition", ")", "except", "Exception", ":", "raise", "CommandExecutionError", "(", "'Invalid partition passed to partition.align_check'", ")", "cmd", "=", "'parted -m -s {0} align-check {1} {2}'", ".", "format", "(", "device", ",", "part_type", ",", "partition", ")", "out", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ")", ".", "splitlines", "(", ")", "return", "out" ]
check if partition satisfies the alignment constraint of part_type .
train
true
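This is a salt execution module function, so it is normally driven from the salt CLI; a representative call, with a placeholder device, might look like the sketch below.

# CLI equivalent (hypothetical target and device): salt '*' partition.align_check /dev/sda optimal 1
out = align_check('/dev/sda', 'optimal', 1)
print(out)  # parted's alignment report as a list of output lines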
48,556
def create_custom_token(uid, valid_minutes=60): client_email = app_identity.get_service_account_name() now = int(time.time()) payload = base64.b64encode(json.dumps({'iss': client_email, 'sub': client_email, 'aud': _IDENTITY_ENDPOINT, 'uid': uid, 'iat': now, 'exp': (now + (valid_minutes * 60))})) header = base64.b64encode(json.dumps({'typ': 'JWT', 'alg': 'RS256'})) to_sign = '{}.{}'.format(header, payload) return '{}.{}'.format(to_sign, base64.b64encode(app_identity.sign_blob(to_sign)[1]))
[ "def", "create_custom_token", "(", "uid", ",", "valid_minutes", "=", "60", ")", ":", "client_email", "=", "app_identity", ".", "get_service_account_name", "(", ")", "now", "=", "int", "(", "time", ".", "time", "(", ")", ")", "payload", "=", "base64", ".", "b64encode", "(", "json", ".", "dumps", "(", "{", "'iss'", ":", "client_email", ",", "'sub'", ":", "client_email", ",", "'aud'", ":", "_IDENTITY_ENDPOINT", ",", "'uid'", ":", "uid", ",", "'iat'", ":", "now", ",", "'exp'", ":", "(", "now", "+", "(", "valid_minutes", "*", "60", ")", ")", "}", ")", ")", "header", "=", "base64", ".", "b64encode", "(", "json", ".", "dumps", "(", "{", "'typ'", ":", "'JWT'", ",", "'alg'", ":", "'RS256'", "}", ")", ")", "to_sign", "=", "'{}.{}'", ".", "format", "(", "header", ",", "payload", ")", "return", "'{}.{}'", ".", "format", "(", "to_sign", ",", "base64", ".", "b64encode", "(", "app_identity", ".", "sign_blob", "(", "to_sign", ")", "[", "1", "]", ")", ")" ]
create a secure token for the given id .
train
false
48,558
def tensor_product(G, H): GH = _init_product_graph(G, H) GH.add_nodes_from(_node_product(G, H)) GH.add_edges_from(_directed_edges_cross_edges(G, H)) if (not GH.is_directed()): GH.add_edges_from(_undirected_edges_cross_edges(G, H)) GH.name = (((('Tensor product(' + G.name) + ',') + H.name) + ')') return GH
[ "def", "tensor_product", "(", "G", ",", "H", ")", ":", "GH", "=", "_init_product_graph", "(", "G", ",", "H", ")", "GH", ".", "add_nodes_from", "(", "_node_product", "(", "G", ",", "H", ")", ")", "GH", ".", "add_edges_from", "(", "_directed_edges_cross_edges", "(", "G", ",", "H", ")", ")", "if", "(", "not", "GH", ".", "is_directed", "(", ")", ")", ":", "GH", ".", "add_edges_from", "(", "_undirected_edges_cross_edges", "(", "G", ",", "H", ")", ")", "GH", ".", "name", "=", "(", "(", "(", "(", "'Tensor product('", "+", "G", ".", "name", ")", "+", "','", ")", "+", "H", ".", "name", ")", "+", "')'", ")", "return", "GH" ]
return the tensor product of g and h .
train
false
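Example usage, assuming this is networkx's public tensor_product (it matches the library's documented behavior): nodes are pairs, and an edge exists only where both factors have an edge.

import networkx as nx

P = nx.tensor_product(nx.path_graph(2), nx.path_graph(2))
print(sorted(P.nodes()))  # -> [(0, 0), (0, 1), (1, 0), (1, 1)]
print(sorted(P.edges()))  # two diagonal edges: (0, 0)-(1, 1) and (0, 1)-(1, 0)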