_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q45000
scoreatpercentile
train
def scoreatpercentile(inlist, percent):
    """
    Returns the score at a given percentile relative to the
    distribution given by inlist.

    Usage:   lscoreatpercentile(inlist,percent)
    """
    # Accept percentiles given as 0-100 as well as 0-1.
    if percent > 1:
        print("\nDividing percent>1 by 100 in lscoreatpercentile().\n")
        percent = percent / 100.0
    # Cumulative count that the requested percentile corresponds to.
    targetcf = percent * len(inlist)
    h, lrl, binsize, extras = histogram(inlist)
    cumhist = cumsum(copy.deepcopy(h))
    # Find the first bin whose cumulative count reaches the target.
    # The loop variable 'i' deliberately leaks out for use below.
    for i in range(len(cumhist)):
        if cumhist[i] >= targetcf:
            break
    # Linear interpolation inside bin i.
    # NOTE(review): when i == 0 this reads cumhist[-1] (the last bin) --
    # confirm callers guarantee the first bin never satisfies the target.
    score = binsize * ((targetcf - cumhist[i - 1]) / float(h[i])) + (lrl + binsize * i)
    return score
python
{ "resource": "" }
q45001
cumfreq
train
def cumfreq(inlist, numbins=10, defaultreallimits=None):
    """
    Returns a cumulative frequency histogram, using the histogram function.

    Usage:   lcumfreq(inlist,numbins=10,defaultreallimits=None)
    Returns: list of cumfreq bin values, lowerreallimit, binsize, extrapoints
    """
    counts, lowerreallimit, binsize, extrapoints = histogram(
        inlist, numbins, defaultreallimits)
    # Accumulate on a deep copy so the raw histogram is left untouched.
    running_totals = cumsum(copy.deepcopy(counts))
    return running_totals, lowerreallimit, binsize, extrapoints
python
{ "resource": "" }
q45002
relfreq
train
def relfreq(inlist, numbins=10, defaultreallimits=None):
    """
    Returns a relative frequency histogram, using the histogram function.

    Usage:   lrelfreq(inlist,numbins=10,defaultreallimits=None)
    Returns: list of cumfreq bin values, lowerreallimit, binsize, extrapoints
    """
    counts, lowerreallimit, binsize, extrapoints = histogram(
        inlist, numbins, defaultreallimits)
    # Normalise each bin count by the total number of observations.
    total = float(len(inlist))
    counts = [count / total for count in counts]
    return counts, lowerreallimit, binsize, extrapoints
python
{ "resource": "" }
q45003
lincc
train
def lincc(x, y): """ Calculates Lin's concordance correlation coefficient. Usage: alincc(x,y) where x, y are equal-length arrays Returns: Lin's CC """ covar = cov(x, y) * (len(x) - 1) / float(len(x)) # correct denom to n xvar = var(x) * (len(x) - 1) / float(len(x)) # correct denom to n yvar = var(y) * (len(y) - 1) / float(len(y)) # correct denom to n lincc = (2 * covar) / ((xvar + yvar) + ((mean(x) - mean(y)) ** 2)) return lincc
python
{ "resource": "" }
q45004
wilcoxont
train
def wilcoxont(x, y): """ Calculates the Wilcoxon T-test for related samples and returns the result. A non-parametric T-test. Usage: lwilcoxont(x,y) Returns: a t-statistic, two-tail probability estimate """ if len(x) != len(y): raise ValueError('Unequal N in wilcoxont. Aborting.') d = [] for i in range(len(x)): diff = x[i] - y[i] if diff != 0: d.append(diff) count = len(d) absd = map(abs, d) absranked = rankdata(absd) r_plus = 0.0 r_minus = 0.0 for i in range(len(absd)): if d[i] < 0: r_minus = r_minus + absranked[i] else: r_plus = r_plus + absranked[i] wt = min(r_plus, r_minus) mn = count * (count + 1) * 0.25 se = math.sqrt(count * (count + 1) * (2.0 * count + 1.0) / 24.0) z = math.fabs(wt - mn) / se prob = 2 * (1.0 - zprob(abs(z))) return wt, prob
python
{ "resource": "" }
q45005
shellsort
train
def shellsort(inlist): """ Shellsort algorithm. Sorts a 1D-list. Usage: lshellsort(inlist) Returns: sorted-inlist, sorting-index-vector (for original list) """ n = len(inlist) svec = copy.deepcopy(inlist) ivec = range(n) gap = n / 2 # integer division needed while gap > 0: for i in range(gap, n): for j in range(i - gap, -1, -gap): while j >= 0 and svec[j] > svec[j + gap]: temp = svec[j] svec[j] = svec[j + gap] svec[j + gap] = temp itemp = ivec[j] ivec[j] = ivec[j + gap] ivec[j + gap] = itemp gap = gap / 2 # integer division needed # svec is now sorted inlist, and ivec has the order svec[i] = vec[ivec[i]] return svec, ivec
python
{ "resource": "" }
q45006
rankdata
train
def rankdata(inlist): """ Ranks the data in inlist, dealing with ties appropritely. Assumes a 1D inlist. Adapted from Gary Perlman's |Stat ranksort. Usage: rankdata(inlist) Returns: a list of length equal to inlist, containing rank scores """ n = len(inlist) svec, ivec = shellsort(inlist) sumranks = 0 dupcount = 0 newlist = [0] * n for i in range(n): sumranks = sumranks + i dupcount = dupcount + 1 if i == n - 1 or svec[i] != svec[i + 1]: averank = sumranks / float(dupcount) + 1 for j in range(i - dupcount + 1, i + 1): newlist[ivec[j]] = averank sumranks = 0 dupcount = 0 return newlist
python
{ "resource": "" }
q45007
AdminSite.register_model
train
def register_model(self, model, bundle):
    """
    Registers a bundle as the main bundle for a model.  Used
    when we need to lookup urls by a model.
    """
    # A model can only ever have one primary bundle.
    if model in self._model_registry:
        raise AlreadyRegistered('The model %s is already registered' % model)
    # Primary bundles must be addressable without extra url parameters.
    if bundle.url_params:
        raise Exception("A primary model bundle cannot have dynamic url_parameters")
    self._model_registry[model] = bundle
python
{ "resource": "" }
q45008
AdminSite.unregister_model
train
def unregister_model(self, model):
    """
    Unregisters the given model.
    """
    registered = model in self._model_registry
    if not registered:
        raise NotRegistered('The model %s is not registered' % model)
    self._model_registry.pop(model)
python
{ "resource": "" }
q45009
AdminSite.register
train
def register(self, slug, bundle, order=1, title=None):
    """
    Registers the bundle for a certain slug. If a slug is
    already registered, this will raise AlreadyRegistered.

    :param slug: The slug to register.
    :param bundle: The bundle instance being registered.
    :param order: An integer that controls where this bundle's
        dashboard links appear in relation to others.
    """
    if slug in self._registry:
        raise AlreadyRegistered('The url %s is already registered' % slug)
    # A title is optional; only record it when given.
    if title:
        self._titles[slug] = title
    # Store the bundle and its dashboard ordering under this slug.
    self._registry[slug] = bundle
    self._order[slug] = order
    # Tell the bundle which admin site now owns it.
    bundle.set_admin_site(self)
python
{ "resource": "" }
q45010
AdminSite.unregister
train
def unregister(self, slug):
    """
    Unregisters the given url.

    If a slug isn't already registered, this will raise NotRegistered.
    """
    if slug not in self._registry:
        raise NotRegistered('The slug %s is not registered' % slug)
    bundle = self._registry[slug]
    meta = bundle._meta
    # Primary model bundles also own an entry in the model registry.
    if meta.model and meta.primary_model_bundle:
        self.unregister_model(meta.model)
    self._registry.pop(slug)
    self._order.pop(slug)
python
{ "resource": "" }
q45011
AdminSite.password_change_done
train
def password_change_done(self, request, extra_context=None):
    """
    Displays the "success" page after a password change.
    """
    from django.contrib.auth.views import password_change_done
    # Fall back to the stock cms template unless the site overrides it.
    template = self.password_change_done_template
    if template is None:
        template = 'cms/password_change_done.html'
    return password_change_done(request,
                                extra_context=extra_context or {},
                                template_name=template)
python
{ "resource": "" }
q45012
AdminSite.logout
train
def logout(self, request, extra_context=None):
    """
    Logs out the user for the given HttpRequest.

    This should *not* assume the user is already logged in.
    """
    from django.contrib.auth.views import logout
    # Fall back to the stock cms template unless the site overrides it.
    template = self.logout_template
    if template is None:
        template = 'cms/logged_out.html'
    return logout(request,
                  extra_context=extra_context or {},
                  template_name=template)
python
{ "resource": "" }
q45013
AdminSite._get_allowed_sections
train
def _get_allowed_sections(self, dashboard): """ Get the sections to display based on dashboard """ allowed_titles = [x[0] for x in dashboard] allowed_sections = [x[2] for x in dashboard] return tuple(allowed_sections), tuple(allowed_titles)
python
{ "resource": "" }
q45014
AdminSite.index
train
def index(self, request, extra_context=None):
    """
    Displays the dashboard. Includes the main navigation that
    the user has permission for as well as the cms log
    for those sections.

    The log list can be filtered by those same sections and is
    paginated.
    """
    dashboard = self.get_dashboard_urls(request)
    dash_blocks = self.get_dashboard_blocks(request)
    sections, titles = self._get_allowed_sections(dashboard)
    # BUGFIX: zip() returns a one-shot iterator in Python 3, which has no
    # .sort()/.insert(); materialize and sort in one step instead.
    choices = sorted(zip(sections, titles), key=lambda tup: tup[1])
    choices.insert(0, ('', 'All'))

    class SectionFilterForm(BaseFilterForm):
        section = forms.ChoiceField(required=False, choices=choices)

    form = SectionFilterForm(request.GET)
    filter_kwargs = form.get_filter_kwargs()
    # Non-superusers only ever see logs for sections they can access.
    if not filter_kwargs and not request.user.is_superuser:
        filter_kwargs['section__in'] = sections
    cms_logs = models.CMSLog.objects.filter(**filter_kwargs
                                            ).order_by('-when')
    template = self.dashboard_template or 'cms/dashboard.html'
    # Cap at 100 pages of 20 entries to bound the queryset.
    paginator = Paginator(cms_logs[:20 * 100], 20,
                          allow_empty_first_page=True)
    page_number = request.GET.get('page') or 1
    try:
        page_number = int(page_number)
    except ValueError:
        page_number = 1
    page = paginator.page(page_number)
    return TemplateResponse(request, [template], {
        'dashboard': dashboard,
        'blocks': dash_blocks,
        'page': page,
        # BUGFIX: dict views are not subscriptable in Python 3; take the
        # first registered bundle via an iterator instead of values()[0].
        'bundle': next(iter(self._registry.values())),
        'form': form},)
python
{ "resource": "" }
q45015
CacheGroup.register_models
train
def register_models(self, *models, **kwargs):
    """
    Register multiple models with the same arguments.

    Delegates to register() once per model, forwarding every
    keyword argument unchanged.
    """
    for single_model in models:
        self.register(single_model, **kwargs)
python
{ "resource": "" }
q45016
CacheGroup.register
train
def register(self, model, values=None, instance_values=None):
    """
    Registers a model with this group.

    :param values: A list of values that should be incremented
        whenever invalidate_cache is called for a instance or class
        of this type.
    :param instance_values: A list of attribute names that will
        be looked up on the instance of this model that is passed to
        invalidate_cache. The value resulting from that lookup
        will then be incremented.
    """
    # Each model may only be registered once per group.
    already_known = model in self._models
    if already_known:
        raise Exception("%s is already registered" % model)
    self._models[model] = CacheConfig(values, instance_values)
python
{ "resource": "" }
q45017
CacheGroup.get_version
train
def get_version(self, extra=None):
    """
    This will return a string that can be used as a prefix
    for django's cache key.  Something like key.1 or key.1.2

    If a version was not found '1' will be stored and returned
    as the number for that key.

    If extra is given a version will be returned for that value.
    Otherwise the major version will be returned.

    :param extra: the minor version to get. Defaults to None.
    """
    # Minor versions live under their own derived cache key.
    if extra:
        key = self._get_extra_key(extra)
    else:
        key = self.key
    v = self._get_cache(key).get(key)
    # IDIOM FIX: identity comparison is the correct None test
    # (the original used 'v == None').
    if v is None:
        v = self._increment_version(extra=extra)
    return "%s.%s" % (key, v)
python
{ "resource": "" }
q45018
_ancestors
train
def _ancestors(collection): """Get the ancestors of the collection.""" for index, c in enumerate(collection.path_to_root()): if index > 0 and c.dbquery is not None: raise StopIteration yield c.name raise StopIteration
python
{ "resource": "" }
q45019
_build_cache
train
def _build_cache(): """Preprocess collection queries.""" query = current_app.config['COLLECTIONS_DELETED_RECORDS'] for collection in Collection.query.filter( Collection.dbquery.isnot(None)).all(): yield collection.name, dict( query=query.format(dbquery=collection.dbquery), ancestors=set(_ancestors(collection)), ) raise StopIteration
python
{ "resource": "" }
q45020
_find_matching_collections_internally
train
def _find_matching_collections_internally(collections, record):
    """Find matching collections with internal engine.

    :param collections: set of collections where search
    :param record: record to match
    """
    for name, data in iteritems(collections):
        if _build_query(data['query']).match(record):
            yield data['ancestors']
    # BUGFIX (PEP 479): the original ended with 'raise StopIteration',
    # which becomes RuntimeError on Python 3.7+; returning normally ends
    # the generator.
python
{ "resource": "" }
q45021
get_record_collections
train
def get_record_collections(record, matcher):
    """Return list of collections to which record belongs to.

    :param record: Record instance.
    :param matcher: Function used to check if a record belongs to a collection.
    :return: list of collection names.
    """
    collections = current_collections.collections
    if collections is None:
        # build collections cache
        collections = current_collections.collections = dict(_build_cache())

    output = set()
    # READABILITY FIX: the original reused the name 'collections' as the
    # loop variable, shadowing the cache dict above; a distinct name makes
    # the flow clear without changing behaviour.
    for ancestors in matcher(collections, record):
        output |= ancestors
    return list(output)
python
{ "resource": "" }
q45022
dfs
train
def dfs(graph, func, head, reverse=None):
    """
    DEPTH FIRST SEARCH

    IF func RETURNS FALSE, THEN PATH IS NO LONGER TAKEN

    IT'S EXPECTED func TAKES 3 ARGUMENTS
    node - THE CURRENT NODE IN THE
    path - PATH FROM head TO node
    graph - THE WHOLE GRAPH

    When reverse is truthy, parent edges are followed instead of
    child edges.
    """
    # Nodes waiting to be visited, seeded with the start node.
    todo = deque()
    todo.append(head)
    # Chain of nodes from head to the node currently being visited.
    path = deque()
    # Nodes already visited (cycle guard).
    done = set()

    while todo:
        node = todo.popleft()
        if node in done:
            # NOTE(review): popping one path entry whenever an
            # already-visited node is dequeued assumes each revisit marks
            # exactly one backtrack step -- confirm against callers.
            # popleft() combined with extend() also gives a FIFO order
            # despite the DFS name; verify this is intended.
            path.pop()
            continue

        done.add(node)
        path.append(node)
        # func decides whether the search keeps expanding below this node.
        result = func(node, path, graph)
        if result:
            if reverse:
                children = graph.get_parents(node)
            else:
                children = graph.get_children(node)
            todo.extend(children)
python
{ "resource": "" }
q45023
bfs
train
def bfs(graph, func, head, reverse=None):
    """
    BREADTH FIRST SEARCH

    IF func RETURNS FALSE, THEN NO MORE PATHS DOWN THE BRANCH ARE TAKEN

    IT'S EXPECTED func TAKES THESE ARGUMENTS:
    node - THE CURRENT NODE IN THE
    path - PATH FROM head TO node
    graph - THE WHOLE GRAPH
    todo - WHAT'S IN THE QUEUE TO BE DONE

    NOTE(review): 'reverse' is accepted but never used here, unlike dfs
    -- confirm intent.
    """
    # Frontier of Step objects; each step links back to its predecessor.
    frontier = deque([Step(None, head)])
    while frontier:
        step = frontier.popleft()
        keep_going = func(step.node, Path(step), graph, frontier)
        if keep_going:
            for child in graph.get_children(step.node):
                frontier.append(Step(step, child))
python
{ "resource": "" }
q45024
dominator_tree
train
def dominator_tree(graph):
    """
    RETURN DOMINATOR FOREST

    THERE ARE TWO TREES, "ROOTS" and "LOOPS"
    ROOTS HAVE NO PARENTS
    LOOPS ARE NODES THAT ARE A MEMBER OF A CYCLE THAT HAS NO EXTRNAL PARENT

    roots = dominator_tree(graph).get_children(ROOTS)
    """
    todo = Queue()
    done = set()
    dominator = Tree(None)
    nodes = list(graph.nodes)

    while True:
        # FIGURE OUT NET ITEM TO WORK ON
        if todo:
            node = todo.pop()
        elif nodes:
            node = nodes.pop()
            if len(nodes) % 1000 == 0:
                Log.note("{{num}} nodes remaining", num=len(nodes))
        else:
            break

        if node in done:
            continue

        # Self-loops are ignored when collecting parents.
        parents = graph.get_parents(node) - {node}
        if not parents:
            # node WITHOUT parents IS A ROOT
            done.add(node)
            dominator.add_edge(Edge(ROOTS, node))
            continue

        not_done = parents - done
        if not_done:
            # THERE ARE MORE parents TO DO FIRST
            # NOTE(review): set-minus against 'todo' assumes the Queue type
            # supports set subtraction -- confirm against its implementation.
            more_todo = not_done - todo
            if not more_todo:
                # ALL PARENTS ARE PART OF A CYCLE, MAKE node A ROOT
                done.add(node)
                dominator.add_edge(Edge(LOOPS, node))
            else:
                # DO THE PARENTS BEFORE node
                todo.push(node)
                for p in more_todo:
                    todo.push(p)
            continue

        # WE CAN GET THE DOMINATORS FOR ALL parents
        if len(parents) == 1:
            # SHORTCUT
            dominator.add_edge(Edge(list(parents)[0], node))
            done.add(node)
            continue

        # Root-to-parent path for every parent; the dominator is the last
        # node the paths share.
        paths_from_roots = [
            list(reversed(dominator.get_path_to_root(p)))
            for p in parents
        ]

        if any(p[0] is ROOTS for p in paths_from_roots):
            # THIS OBJECT CAN BE REACHED FROM A ROOT, IGNORE PATHS FROM LOOPS
            paths_from_roots = [p for p in paths_from_roots if p[0] is ROOTS]
        if len(paths_from_roots) == 1:
            # SHORTCUT
            dom = paths_from_roots[0][-1]
            dominator.add_edge(Edge(dom, node))
            done.add(node)
            continue

        # FIND COMMON PATH FROM root
        num_paths = len(paths_from_roots)
        for i, x in enumerate(zip_longest(*paths_from_roots)):
            if x.count(x[0]) != num_paths:
                # First position where the paths diverge: the common
                # prefix ends one step earlier.
                dom = paths_from_roots[0][i-1]
                if dom is LOOPS:
                    # CAN BE REACHED FROM MORE THAN ONE LOOP, PICK ONE TO BLAME
                    dom = paths_from_roots[0][-1]
                break
        else:
            # ALL PATHS IDENTICAL
            dom = paths_from_roots[0][-1]
        dominator.add_edge(Edge(dom, node))
        done.add(node)

    return dominator
python
{ "resource": "" }
q45025
get_schema_from_list
train
def get_schema_from_list(table_name, frum):
    """
    SCAN THE LIST FOR COLUMN TYPES
    """
    # Collect columns (unique by name) by recursively walking the list.
    column_index = UniqueIndex(keys=("name",))
    _get_schema_from_list(
        frum, ".", parent=".", nested_path=ROOT_PATH, columns=column_index
    )
    return Schema(table_name=table_name, columns=list(column_index))
python
{ "resource": "" }
q45026
ColumnList.denormalized
train
def denormalized(self): """ THE INTERNAL STRUCTURE FOR THE COLUMN METADATA IS VERY DIFFERENT FROM THE DENORMALIZED PERSPECITVE. THIS PROVIDES THAT PERSPECTIVE FOR QUERIES """ with self.locker: self._update_meta() output = [ { "table": c.es_index, "name": untype_path(c.name), "cardinality": c.cardinality, "es_column": c.es_column, "es_index": c.es_index, "last_updated": c.last_updated, "count": c.count, "nested_path": [unnest_path(n) for n in c.nested_path], "es_type": c.es_type, "type": c.jx_type, } for tname, css in self.data.items() for cname, cs in css.items() for c in cs if c.jx_type not in STRUCT # and c.es_column != "_id" ] from jx_python.containers.list_usingPythonList import ListContainer return ListContainer( self.name, data=output, schema=jx_base.Schema("meta.columns", SIMPLE_METADATA_COLUMNS), )
python
{ "resource": "" }
q45027
parse_tibiadata_datetime
train
def parse_tibiadata_datetime(date_dict) -> Optional[datetime.datetime]:
    """Parses time objects from the TibiaData API.

    Time objects are made of a dictionary with three keys:
        date: contains a string representation of the time
        timezone: a string representation of the timezone the date time is based on
        timezone_type: the type of representation used in the timezone key

    Parameters
    ----------
    date_dict: :class:`dict`
        Dictionary representing the time object.

    Returns
    -------
    :class:`datetime.date`, optional
        The represented datetime, in UTC, or ``None`` when the input is
        malformed or uses an unknown timezone.
    """
    try:
        t = datetime.datetime.strptime(date_dict["date"], "%Y-%m-%d %H:%M:%S.%f")
    except (KeyError, ValueError, TypeError):
        return None

    # BUGFIX: .get() keeps a missing "timezone" key on the documented
    # "return None" path; the original raised KeyError here.
    timezone_name = date_dict.get("timezone")
    if timezone_name == "CET":
        timezone_offset = 1
    elif timezone_name == "CEST":
        timezone_offset = 2
    else:
        return None
    # We subtract the offset to convert the time to UTC
    t = t - datetime.timedelta(hours=timezone_offset)
    return t.replace(tzinfo=datetime.timezone.utc)
python
{ "resource": "" }
q45028
try_datetime
train
def try_datetime(obj) -> Optional[datetime.datetime]:
    """Attempts to convert an object into a datetime.

    If the date format is known, it's recommended to use the corresponding function
    This is meant to be used in constructors.

    Parameters
    ----------
    obj: :class:`str`, :class:`dict`, :class:`datetime.datetime`
        The object to convert.

    Returns
    -------
    :class:`datetime.datetime`, optional
        The represented datetime, or ``None`` if conversion wasn't possible.
    """
    if obj is None:
        return None
    if isinstance(obj, datetime.datetime):
        return obj
    # Try the Tibia.com format first, then fall back to TibiaData's.
    parsed = parse_tibia_datetime(obj)
    if parsed is not None:
        return parsed
    return parse_tibiadata_datetime(obj)
python
{ "resource": "" }
q45029
try_date
train
def try_date(obj) -> Optional[datetime.date]:
    """Attempts to convert an object into a date.

    If the date format is known, it's recommended to use the corresponding function
    This is meant to be used in constructors.

    Parameters
    ----------
    obj: :class:`str`, :class:`datetime.datetime`, :class:`datetime.date`
        The object to convert.

    Returns
    -------
    :class:`datetime.date`, optional
        The represented date.
    """
    if obj is None:
        return None
    if isinstance(obj, datetime.datetime):
        return obj.date()
    if isinstance(obj, datetime.date):
        return obj
    # Try each known string format in order; first hit wins.
    for parser in (parse_tibia_date, parse_tibia_full_date, parse_tibiadata_date):
        parsed = parser(obj)
        if parsed is not None:
            return parsed
    return None
python
{ "resource": "" }
q45030
try_enum
train
def try_enum(cls: Type[T], val, default: D = None) -> Union[T, D]:
    """Attempts to convert a value into their enum value

    Parameters
    ----------
    cls: :class:`Enum`
        The enum to convert to.
    val:
        The value to try to convert to Enum
    default: optional
        The value to return if no enum value is found.

    Returns
    -------
    obj:
        The enum value if found, otherwise None.
    """
    # Already the right type: hand it straight back.
    if isinstance(val, cls):
        return val
    try:
        converted = cls(val)
    except ValueError:
        return default
    return converted
python
{ "resource": "" }
q45031
parse_json
train
def parse_json(content):
    """Tries to parse a string into a json object.

    This also performs a trim of all values, recursively removing leading and trailing whitespace.

    Parameters
    ----------
    content:
        A JSON format string.

    Returns
    -------
    obj:
        The object represented by the json string.

    Raises
    ------
    InvalidContent
        If the content is not a valid json string.
    """
    try:
        return _recursive_strip(json.loads(content))
    except json.JSONDecodeError:
        raise InvalidContent("content is not a json string.")
python
{ "resource": "" }
q45032
get_social_share_link
train
def get_social_share_link(context, share_link, object_url, object_title):
    """
    Construct the social share link for the request object.

    Relative urls are made absolute using the request's scheme and host;
    urls that already contain 'http' are passed through unchanged.
    """
    request = context['request']
    # BUGFIX: str() replaces the Python 2-only unicode() builtin.
    url = str(object_url)
    if 'http' not in object_url.lower():
        full_path = ''.join(('http', ('', 's')[request.is_secure()], '://',
                             request.META['HTTP_HOST'], url))
    else:
        full_path = url
    return share_link.get_share_url(full_path, object_title)
python
{ "resource": "" }
q45033
NaiveGraph.get_family
train
def get_family(self, node):
    """
    RETURN ALL ADJACENT NODES
    """
    # For each incident edge, keep whichever endpoint is not 'node'.
    neighbours = set()
    for parent, child in self.get_edges(node):
        neighbours.add(parent if child == node else child)
    return neighbours
python
{ "resource": "" }
q45034
ChoicesFieldListFilter.choices
train
def choices(self, cl): """ Take choices from field's 'choices' attribute for 'ChoicesField' and use 'flatchoices' as usual for other fields. """ #: Just tidy up standard implementation for the sake of DRY principle. def _choice_item(is_selected, query_string, title): return { 'selected': is_selected, 'query_string': query_string, 'display': force_text(title), } yield _choice_item( self.lookup_val is None, cl.get_query_string({}, [self.lookup_kwarg]), _('All')) container = (self.field.choices if isinstance(self.field, ChoicesField) else self.field.flatchoices) for lookup, title in container: yield _choice_item( smart_text(lookup) == self.lookup_val, cl.get_query_string({self.lookup_kwarg: lookup}), title)
python
{ "resource": "" }
q45035
page_factory
train
def page_factory(request):
    """ Page factory.

    Config models example:

    .. code-block:: python

        models = {
            '': [WebPage, CatalogResource],
            'catalogue': CatalogResource,
            'news': NewsResource,
        }
    """
    prefix = request.matchdict['prefix']  # /{prefix}/page1/page2/page3...
    settings = request.registry.settings
    dbsession = settings[CONFIG_DBSESSION]
    config = settings[CONFIG_MODELS]
    if prefix not in config:
        # prepend {prefix} to *traverse
        request.matchdict['traverse'] =\
            tuple([prefix] + list(request.matchdict['traverse']))
        prefix = None

    # Get all resources and models from config with the same prefix.
    resources = config.get(
        prefix, config.get(      # 1. get resources with prefix same as URL prefix
            '', config.get(      # 2. if not, then try to get empty prefix
                '/', None)))     # 3. else try to get prefix '/' otherwise None

    # Normalise a single resource to a one-element tuple.
    if not hasattr(resources, '__iter__'):
        resources = (resources, )

    tree = {}

    if not resources:
        return tree

    # Add top level nodes of resources in the tree
    for resource in resources:
        table = None
        # A wrapping resource exposes its model via .model; otherwise the
        # entry itself is assumed to be the model/table.
        if not hasattr(resource, '__table__')\
                and hasattr(resource, 'model'):
            table = resource.model
        else:
            table = resource
        if not hasattr(table, 'slug'):
            continue
        nodes = dbsession.query(table)
        if hasattr(table, 'parent_id'):
            # Restrict to root nodes: no parent, or the '/' pseudo-root.
            nodes = nodes.filter(or_(
                table.parent_id == None,  # noqa
                table.parent.has(table.slug == '/')
            ))
        for node in nodes:
            if not node.slug:
                continue
            # NOTE(review): 'resource' is rebound here, shadowing the
            # outer loop variable -- verify this is intended.
            resource = resource_of_node(resources, node)
            tree[node.slug] = resource(node, prefix=prefix)
    return tree
python
{ "resource": "" }
q45036
register_views
train
def register_views(*args):
    """ Registration view for each resource from config.

    Expects the pyramid Configurator as the first positional argument.
    """
    config = args[0]
    settings = config.get_settings()
    pages_config = settings[CONFIG_MODELS]
    resources = resources_of_config(pages_config)
    for resource in resources:
        # Skip bare model classes (they have __table__ but no wrapping
        # resource carrying a .model attribute).
        if hasattr(resource, '__table__')\
                and not hasattr(resource, 'model'):
            continue
        # Remember which template renders this resource's pages.
        resource.model.pyramid_pages_template = resource.template
        config.add_view(resource.view,
                        attr=resource.attr,
                        route_name=PREFIX_PAGE,
                        renderer=resource.template,
                        context=resource,
                        permission=PREFIX_PAGE)
python
{ "resource": "" }
q45037
Tracker.add_phase
train
def add_phase(self): """Context manager for when adding all the tokens""" # add stuff yield self # Make sure we output eveything self.finish_hanging() # Remove trailing indents and dedents while len(self.result) > 1 and self.result[-2][0] in (INDENT, ERRORTOKEN, NEWLINE): self.result.pop(-2)
python
{ "resource": "" }
q45038
Tracker.next_token
train
def next_token(self, tokenum, value, scol): """Determine what to do with the next token""" # Make self.current reflect these values self.current.set(tokenum, value, scol) # Determine indent_type based on this token if self.current.tokenum == INDENT and self.current.value: self.indent_type = self.current.value[0] # Only proceed if we shouldn't ignore this token if not self.ignore_token(): # Determining if this token is whitespace self.determine_if_whitespace() # Determine if inside a container self.determine_inside_container() # Change indentation as necessary self.determine_indentation() # See if we are force inserting this token if self.forced_insert(): return # If we have a newline after an inserted line, then we don't need to worry about semicolons if self.inserted_line and self.current.tokenum == NEWLINE: self.inserted_line = False # If we have a non space, non comment after an inserted line, then insert a semicolon if self.result and not self.is_space and self.inserted_line: if self.current.tokenum != COMMENT: self.result.append((OP, ';')) self.inserted_line = False # Progress the tracker self.progress() # Add a newline if we just skipped a single if self.single and self.single.skipped: self.single.skipped = False self.result.append((NEWLINE, '\n')) # Set after_space so next line knows if it is after space self.after_space = self.is_space
python
{ "resource": "" }
q45039
Tracker.progress
train
def progress(self): """ Deal with next token Used to create, fillout and end groups and singles As well as just append everything else """ tokenum, value, scol = self.current.values() # Default to not appending anything just_append = False # Prevent from group having automatic pass given to it # If it already has a pass if tokenum == NAME and value == 'pass': self.groups.empty = False # Set variables to be used later on to determine if this will likely make group not empty created_group = False found_content = False if not self.groups.starting_group and not self.is_space: found_content = True if self.groups.starting_group: # Inside a group signature, add to it if tokenum == STRING: self.groups.name = value elif tokenum == NAME or (tokenum == OP and value == '.'): # Modify super class for group self.groups.modify_kls(value) elif tokenum == NEWLINE: # Premature end of group self.add_tokens_for_group(with_pass=True) elif tokenum == OP and value == ":": # Proper end of group self.add_tokens_for_group() elif self.groups.starting_single: # Inside single signature, add to it if tokenum == STRING: self.single.name = value elif tokenum == NEWLINE and not self.in_container: # Premature end of single self.add_tokens_for_single(ignore=True) elif tokenum == OP and value == ":": # Proper end of single self.add_tokens_for_single() elif value and self.single.name: # Only want to add args after the name for the single has been specified self.single.add_to_arg(tokenum, value) elif self.after_space or self.after_an_async or scol == 0 and tokenum == NAME: # set after_an_async if we found an async by itself # So that we can just have that prepended and still be able to interpret our special blocks with_async = self.after_an_async if not self.after_an_async and value == "async": self.after_an_async = True else: self.after_an_async = False if value in ('describe', 'context'): created_group = True # add pass to previous group if nothing added between then and now if self.groups.empty and 
not self.groups.root: self.add_tokens_for_pass() # Start new group self.groups = self.groups.start_group(scol, value) self.all_groups.append(self.groups) elif value in ('it', 'ignore'): self.single = self.groups.start_single(value, scol) elif value in ('before_each', 'after_each'): setattr(self.groups, "has_%s" % value, True) if with_async: setattr(self.groups, "async_%s" % value, True) self.add_tokens_for_test_helpers(value, with_async=with_async) else: just_append = True else: # Don't care about it, append! just_append = True # Found something that isn't whitespace or a new group # Hence current group isn't empty ! if found_content and not created_group: self.groups.empty = False # Just append if token should be if just_append: self.result.append([tokenum, value])
python
{ "resource": "" }
q45040
Tracker.reset_indentation
train
def reset_indentation(self, amount):
    """Replace previous indentation with desired amount"""
    # Strip any INDENT tokens already queued at the tail of the result,
    # then push a single INDENT with the requested amount.
    result = self.result
    while result and result[-1][0] == INDENT:
        result.pop()
    result.append((INDENT, amount))
python
{ "resource": "" }
q45041
Tracker.ignore_token
train
def ignore_token(self): """Determine if we should ignore current token""" def get_next_ignore(remove=False): """Get next ignore from ignore_next and remove from ignore_next""" next_ignore = self.ignore_next # Just want to return it, don't want to remove yet if not remove: if type(self.ignore_next) in (list, tuple): next_ignore = self.ignore_next[0] return next_ignore # Want to remove it from ignore_next if type(next_ignore) in (list, tuple) and next_ignore: next_ignore = self.ignore_next.pop(0) elif not next_ignore: self.next_ignore = None next_ignore = None else: self.next_ignore = None return next_ignore # If we have tokens to be ignored and we're not just inserting till some token if not self.insert_till and self.ignore_next: # Determine what the next ignore is next_ignore = get_next_ignore() if next_ignore == (self.current.tokenum, self.current.value): # Found the next ignore token, remove it from the stack # So that the next ignorable token can be considered get_next_ignore(remove=True) return True else: # If not a wildcard, then return now if type(next_ignore) is not WildCard: return False # Go through tokens untill we find one that isn't a wildcard while type(next_ignore) == WildCard: next_ignore = get_next_ignore(remove=True) # If the next token is next ignore then we're done here! if next_ignore == (self.current.tokenum, self.current.value): get_next_ignore(remove=True) return True else: # If there is another token to ignore, then consider the wildcard # And keep inserting till we reach this next ignorable token if next_ignore: self.insert_till = next_ignore return False
python
{ "resource": "" }
q45042
Tracker.make_describe_attrs
train
def make_describe_attrs(self):
    """Create tokens for setting is_noy_spec on describes"""
    attrs = []
    if self.all_groups:
        # Start a fresh, unindented line before the attribute assignments.
        attrs.append((NEWLINE, '\n'))
        attrs.append((INDENT, ''))
        for group in self.all_groups:
            if group.name:
                attrs.extend(self.tokens.make_describe_attr(group.kls_name))
    return attrs
python
{ "resource": "" }
q45043
Tracker.forced_insert
train
def forced_insert(self): """ Insert tokens if self.insert_till hasn't been reached yet Will respect self.inserted_line and make sure token is inserted before it Returns True if it appends anything or if it reached the insert_till token """ # If we have any tokens we are waiting for if self.insert_till: # Determine where to append this token append_at = -1 if self.inserted_line: append_at = -self.inserted_line+1 # Reset insert_till if we found it if self.current.tokenum == self.insert_till[0] and self.current.value == self.insert_till[1]: self.insert_till = None else: # Adjust self.adjust_indent_at to take into account the new token for index, value in enumerate(self.adjust_indent_at): if value < len(self.result) - append_at: self.adjust_indent_at[index] = value + 1 # Insert the new token self.result.insert(append_at, (self.current.tokenum, self.current.value)) # We appended the token return True
python
{ "resource": "" }
q45044
Tracker.add_tokens_for_pass
train
def add_tokens_for_pass(self):
    """Add tokens for a pass to result"""
    # The group now has a body, so no automatic pass is needed later.
    self.groups.empty = False
    # Strip any trailing newline/indent tokens before inserting the pass.
    while self.result[-1][0] in (INDENT, NEWLINE):
        self.result.pop()
    # Emit the pass plus a newline and the indentation of the next line.
    replacement = [
        (NAME, 'pass'),
        (NEWLINE, '\n'),
        (INDENT, self.indent_type * self.current.scol),
    ]
    self.add_tokens(replacement)
python
{ "resource": "" }
q45045
Tracker.add_tokens_for_group
train
def add_tokens_for_group(self, with_pass=False):
    """Emit the tokens of the current group's class signature.

    When `with_pass` is True an empty body (`pass`) is emitted as well.
    """
    # Group signatures always start back at column zero
    self.reset_indentation('')
    self.result.extend(
        self.tokens.make_describe(self.groups.super_kls, self.groups.kls_name)
    )

    if with_pass:
        self.add_tokens_for_pass()

    self.groups.finish_signature()
python
{ "resource": "" }
q45046
Tracker.add_tokens_for_single
train
def add_tokens_for_single(self, ignore=False):
    """Emit the tokens of the current single's (test/method) signature.

    When `ignore` is True the single is marked skipped and skip tokens
    are appended after the signature.
    """
    single = self.single

    # Indent the signature to the single's own level
    self.reset_indentation(self.indent_type * single.indent)
    self.result.extend(self.tokens.make_single(single.python_name, single.args))

    if ignore:
        single.skipped = True
        self.result.extend(self.tokens.test_skip)

    self.groups.finish_signature()
python
{ "resource": "" }
q45047
Tracker.finish_hanging
train
def finish_hanging(self):
    """Close off a signature that is still waiting for its body, if any."""
    groups = self.groups
    if not groups.starting_signature:
        return
    if groups.starting_group:
        # A describe signature with no body yet: emit it with a `pass`
        self.add_tokens_for_group(with_pass=True)
    elif groups.starting_single:
        # A test signature with no body yet: emit it as skipped
        self.add_tokens_for_single(ignore=True)
python
{ "resource": "" }
q45048
Tracker.determine_indentation
train
def determine_indentation(self): """Reset indentation for current token and in self.result to be consistent and normalized""" # Ensuring NEWLINE tokens are actually specified as such if self.current.tokenum != NEWLINE and self.current.value == '\n': self.current.tokenum = NEWLINE # I want to change dedents into indents, because they seem to screw nesting up if self.current.tokenum == DEDENT: self.current.tokenum, self.current.value = self.convert_dedent() if self.after_space and not self.is_space and (not self.in_container or self.just_started_container): # Record current indentation level if not self.indent_amounts or self.current.scol > self.indent_amounts[-1]: self.indent_amounts.append(self.current.scol) # Adjust indent as necessary while self.adjust_indent_at: self.result[self.adjust_indent_at.pop()] = (INDENT, self.indent_type * (self.current.scol - self.groups.level)) # Roll back groups as necessary if not self.is_space and not self.in_container: while not self.groups.root and self.groups.level >= self.current.scol: self.finish_hanging() self.groups = self.groups.parent # Reset indentation to deal with nesting if self.current.tokenum == INDENT and not self.groups.root: self.current.value = self.current.value[self.groups.level:]
python
{ "resource": "" }
q45049
Tracker.convert_dedent
train
def convert_dedent(self):
    """Turn a DEDENT token into an equivalent INDENT token.

    Returns the (tokenum, value) pair for the replacement INDENT.
    """
    # A dedent pops us back to the previous recorded indentation level
    if self.indent_amounts:
        self.indent_amounts.pop()
    last_indent = self.indent_amounts[-1] if self.indent_amounts else 0

    # Collapse any run of INDENT tokens already at the end of the result
    while self.result[-1][0] == INDENT:
        self.result.pop()

    return INDENT, self.indent_type * last_indent
python
{ "resource": "" }
q45050
lookupAll
train
def lookupAll(data, configFields, lookupType, db, histObj=None):
    """
    Return a record after having cleaning rules of specified type applied to all fields in the config

    :param dict data: single record (dictionary) to which cleaning rules should be applied
    :param dict configFields: "fields" object from DWM config (see DataDictionary)
    :param string lookupType: Type of lookup to perform/MongoDB collection name. One of 'genericLookup', 'fieldSpecificLookup', 'normLookup', 'genericRegex', 'fieldSpecificRegex', 'normRegex', 'normIncludes'
    :param MongoClient db: MongoClient instance connected to MongoDB
    :param dict histObj: History object to which changes should be appended
    """
    # BUGFIX: the default used to be a mutable `{}`, shared across calls, so
    # history from one record could leak into the next; create per-call dict
    if histObj is None:
        histObj = {}

    for field in data.keys():
        # Skip fields that are empty or not configured for this lookup type
        if field in configFields.keys() and data[field] != '':
            if lookupType in configFields[field]["lookup"]:
                if lookupType in ['genericLookup', 'fieldSpecificLookup', 'normLookup']:
                    fieldValNew, histObj = DataLookup(fieldVal=data[field], db=db, lookupType=lookupType, fieldName=field, histObj=histObj)
                elif lookupType in ['genericRegex', 'fieldSpecificRegex', 'normRegex']:
                    fieldValNew, histObj = RegexLookup(fieldVal=data[field], db=db, fieldName=field, lookupType=lookupType, histObj=histObj)
                elif lookupType == 'normIncludes':
                    fieldValNew, histObj, checkMatch = IncludesLookup(fieldVal=data[field], lookupType='normIncludes', db=db, fieldName=field, histObj=histObj)
                data[field] = fieldValNew
    return data, histObj
python
{ "resource": "" }
q45051
DeriveDataLookupAll
train
def DeriveDataLookupAll(data, configFields, db, histObj=None):
    """
    Return a record after performing derive rules for all fields, based on config

    :param dict data: single record (dictionary) to which cleaning rules should be applied
    :param dict configFields: "fields" object from DWM config (see DataDictionary)
    :param MongoClient db: MongoClient instance connected to MongoDB
    :param dict histObj: History object to which changes should be appended
    """
    # BUGFIX: the default used to be a mutable `{}`, shared across calls, so
    # history from one record could leak into the next; create per-call dict
    if histObj is None:
        histObj = {}

    def checkDeriveOptions(option, derive_set_config):
        """
        Check derive option is exist into options list and return relevant flag.

        :param option: drive options value
        :param derive_set_config: options list
        :return: boolean True or False based on option exist into options list
        """
        return option in derive_set_config

    for field in configFields.keys():
        if field in data.keys():
            fieldVal = data[field]
            fieldValNew = fieldVal
            for deriveSet in configFields[field]['derive'].keys():
                deriveSetConfig = configFields[field]['derive'][deriveSet]
                checkMatch = False
                # Only attempt this derive set when every input field is present
                if set.issubset(set(deriveSetConfig['fieldSet']), data.keys()):
                    deriveInput = {}
                    # sorting here to ensure subdocument match from query
                    for val in deriveSetConfig['fieldSet']:
                        deriveInput[val] = data[val]
                    if deriveSetConfig['type'] == 'deriveValue':
                        fieldValNew, histObj, checkMatch = DeriveDataLookup(
                            fieldName=field,
                            db=db,
                            deriveInput=deriveInput,
                            overwrite=checkDeriveOptions('overwrite', deriveSetConfig["options"]),
                            fieldVal=fieldVal,
                            histObj=histObj,
                            blankIfNoMatch=checkDeriveOptions('blankIfNoMatch', deriveSetConfig["options"]))
                    elif deriveSetConfig['type'] == 'copyValue':
                        fieldValNew, histObj, checkMatch = DeriveDataCopyValue(
                            fieldName=field,
                            deriveInput=deriveInput,
                            overwrite=checkDeriveOptions('overwrite', deriveSetConfig["options"]),
                            fieldVal=fieldVal,
                            histObj=histObj)
                    elif deriveSetConfig['type'] == 'deriveRegex':
                        fieldValNew, histObj, checkMatch = DeriveDataRegex(
                            fieldName=field,
                            db=db,
                            deriveInput=deriveInput,
                            overwrite=checkDeriveOptions('overwrite', deriveSetConfig["options"]),
                            fieldVal=fieldVal,
                            histObj=histObj,
                            blankIfNoMatch=checkDeriveOptions('blankIfNoMatch', deriveSetConfig["options"]))
                    elif deriveSetConfig['type'] == 'deriveIncludes':
                        fieldValNew, histObj, checkMatch = IncludesLookup(
                            fieldVal=data[field],
                            lookupType='deriveIncludes',
                            deriveFieldName=deriveSetConfig['fieldSet'][0],
                            deriveInput=deriveInput,
                            db=db,
                            fieldName=field,
                            histObj=histObj,
                            overwrite=checkDeriveOptions('overwrite', deriveSetConfig["options"]),
                            blankIfNoMatch=checkDeriveOptions('blankIfNoMatch', deriveSetConfig["options"]))
                    # The first derive set that matched (or changed the value) wins
                    if checkMatch or fieldValNew != fieldVal:
                        data[field] = fieldValNew
                        break
    return data, histObj
python
{ "resource": "" }
q45052
Router._get_generators
train
def _get_generators(self):
    """Get installed banana plugins.

    :return: list of installed generator entry-point names
    """
    # on using entrypoints:
    # http://stackoverflow.com/questions/774824/explain-python-entry-points
    # TODO: make sure we do not have conflicting generators installed!
    return [entry_point.name for entry_point in pkg_resources.iter_entry_points(self.group)]
python
{ "resource": "" }
q45053
Router._get_generator
train
def _get_generator(self, name):
    """Load and return the generator plugin registered under `name`.

    Returns None when no matching entry point is installed.

    :param name: entry-point name of the generator to load
    """
    # Let pkg_resources filter by name instead of scanning every entry point
    # (the old code passed name=None and compared ep.name manually)
    for ep in pkg_resources.iter_entry_points(self.group, name=name):
        return ep.load()
    return None
python
{ "resource": "" }
q45054
Router.parse_args
train
def parse_args(self, doc, argv): """Parse ba arguments :param args: sys.argv[1:] :return: arguments """ # first a little sneak peak if we have a generator arguments = docopt(doc, argv=argv, help=False) if arguments.get('<generator>'): name = arguments['<generator>'] generator = self._get_generator(name) if hasattr(generator, 'DOC'): # register help for generator! # this runs after the generator was loaded so we have to # prepend the cmd! def _banana_help(args, router): print(doc) cmd.register(lambda args: args.get('--help'), name, _banana_help) doc = generator.DOC arguments = docopt(doc, argv=argv, help=False) # register generator '--version' cmd version = 'not provided by %s generator' % name if hasattr(generator, '__version__'): version = generator.__version__ def _banana_version(args, router): print(version) cmd.register(lambda args: args.get('--version'), name, _banana_version) # register generator interactive mode (last cmd for this generator) def _banana_run(args, router): router.navigate('run', name) router.navigate('exit') cmd.register(lambda args: True, name, _banana_run) return arguments
python
{ "resource": "" }
q45055
Router.register_route
train
def register_route(self, name, route):
    """Register a route handler under `name`.

    :param name: Name of the route
    :param route: Route handler object exposing a `handle` callable
    """
    try:
        self.routes[name] = route.handle
    except Exception as error:
        # Most likely the route module failed to define `handle`
        print('could not import handle, maybe something wrong ', 'with your code?')
        print(error)
python
{ "resource": "" }
q45056
Serializable.to_json
train
def to_json(self, *, indent=None, sort_keys=False):
    """Gets the object's JSON representation, omitting None-valued entries.

    Parameters
    ----------
    indent: :class:`int`, optional
        Number of spaces used as indentation, ``None`` will return the shortest
        possible string.
    sort_keys: :class:`bool`, optional
        Whether keys should be sorted alphabetically or preserve the order
        defined by the object.

    Returns
    -------
    :class:`str`
        JSON representation of the object.
    """
    content = {key: value for key, value in dict(self).items() if value is not None}
    return json.dumps(content, indent=indent, sort_keys=sort_keys, default=self._try_dict)
python
{ "resource": "" }
q45057
get_game
train
def get_game(site, description="", create=False):
    """
    get the current game, if its still active, else creates a new game,
    if the current time is inside the GAME_START_TIMES interval and create=True

    @param create: create a game, if there is no active game
    @returns: None if there is no active Game, and none shoul be created
        or the (new) active Game.
    @raises TimeRangeError: when create=True but the current time is outside
        the valid start interval
    """
    game = None
    # Most recently created game for this site, if any
    games = Game.objects.filter(site=site).order_by("-created")
    try:
        game = games[0]
    except IndexError:
        game = None

    # no game, yet, or game expired
    if game is None or game.is_expired() or is_after_endtime():
        if create:
            if is_starttime():
                game = Game(site=site, description=description)
                game.save()
            else:
                raise TimeRangeError(
                    _(u"game start outside of the valid timerange"))
        else:
            game = None
    # game exists and its not after the GAME_END_TIME
    elif not is_after_endtime():
        game = games[0]
    return game
python
{ "resource": "" }
q45058
Game.words_with_votes
train
def words_with_votes(self, only_topics=True):
    """
    returns a list with words ordered by the number of votes
    annotated with the number of votes in the "votes" property.

    Each item also gets a "percent" key: its (clamped, non-negative) vote
    count relative to the top-voted word, or 0 when the top count is 0.

    :param only_topics: when True, META-typed words are excluded as well
    """
    result = Word.objects.filter(
        bingofield__board__game__id=self.id).exclude(
            type=WORD_TYPE_MIDDLE)
    if only_topics:
        result = result.exclude(bingofield__word__type=WORD_TYPE_META)
    result = result.annotate(
        votes=Sum("bingofield__vote")).order_by("-votes").values()
    for item in result:
        # Negative sums are clamped to zero before computing percentages
        item['votes'] = max(0, item['votes'])
        # result[0] is the top-voted word (descending order above); its count
        # was clamped on the first loop iteration
        if result[0]['votes'] != 0:
            item['percent'] = float(item['votes']) / result[0]['votes'] * 100
        else:
            item['percent'] = 0
    return result
python
{ "resource": "" }
q45059
literals
train
def literals(choices, prefix="", suffix=""):
    """Build an alternation regex from a space-separated list of literal `choices`.

    If provided, `prefix` and `suffix` are attached to each escaped choice
    individually before joining with `|`.
    """
    parts = [prefix + re.escape(choice) + suffix for choice in choices.split()]
    return "|".join(parts)
python
{ "resource": "" }
q45060
Lexer.lex
train
def lex(self, text, start=0):
    """Lexically analyze `text` from offset `start`.

    Yields (token_name, matched_text) pairs; the lexer state is saved back
    onto the instance once the text is exhausted.
    """
    end = len(text)
    pos = start
    state = self.state
    patterns = self.regexes
    table = self.toks
    while pos < end:
        # Only the first match at/after `pos` is consumed per iteration
        for m in patterns[state].finditer(text, pos):
            group = m.lastgroup
            tok = table[group]
            piece = m.group(group)
            pos += len(piece)
            yield (tok.name, piece)
            if tok.next:
                # Token may switch the lexer into a new state
                state = tok.next
            break
    self.state = state
python
{ "resource": "" }
q45061
new_instance
train
def new_instance(settings):
    """
    MAKE A PYTHON INSTANCE

    `settings` HAS ALL THE `kwargs`, PLUS `class` ATTRIBUTE TO INDICATE THE CLASS TO CREATE
    """
    settings = set_default({}, settings)
    if not settings["class"]:
        Log.error("Expecting 'class' attribute with fully qualified class name")

    # IMPORT MODULE FOR HANDLER
    path = settings["class"].split(".")
    class_name = path[-1]
    path = ".".join(path[:-1])
    constructor = None
    try:
        temp = __import__(path, globals(), locals(), [class_name], 0)
        constructor = object.__getattribute__(temp, class_name)
    except Exception as e:
        Log.error("Can not find class {{class}}", {"class": path}, cause=e)

    # DO NOT PASS THE class PARAMETER TO THE CONSTRUCTOR
    settings['class'] = None
    try:
        return constructor(kwargs=settings)  # MAYBE IT TAKES A KWARGS OBJECT
    except Exception as e:
        pass
    try:
        return constructor(**settings)
    except Exception as e:
        # BUGFIX: `path` is a plain string here, so the old ".".join(path)
        # interleaved a dot between every CHARACTER of the module path;
        # report the real dotted class name instead
        Log.error("Can not create instance of {{name}}", name=path + "." + class_name, cause=e)
python
{ "resource": "" }
q45062
normalize_fieldsets
train
def normalize_fieldsets(fieldsets):
    """Normalize the options dict of every (name, options) fieldset pair.

    Makes sure the keys in fieldset dictionaries are strings; returns the
    normalized data as a new list.
    """
    return [(name, normalize_dictionary(options)) for name, options in fieldsets]
python
{ "resource": "" }
q45063
get_sort_field
train
def get_sort_field(attr, model):
    """
    Get's the field to sort on for the given attr. Currently returns attr
    if it is a field on the given model. If the models has an attribute
    matching that name and that value has an attribute 'sort_field' than
    that value is used.

    TODO: Provide a way to sort based on a non field attribute.
    """
    try:
        # Real model field: sort directly on the attribute name
        if model._meta.get_field(attr):
            return attr
    except FieldDoesNotExist:
        # Not a field; fall back to a class attribute carrying `sort_field`
        # NOTE: `basestring` is Python 2 only
        if isinstance(attr, basestring):
            val = getattr(model, attr, None)
            if val and hasattr(val, 'sort_field'):
                return getattr(model, attr).sort_field
    return None
python
{ "resource": "" }
q45064
AdminList.labels
train
def labels(self):
    """
    Get field label for fields

    Yields one AdminListLabel(name, field, sort_type, currently_sorted,
    sortable) per visible field.
    """
    # Plain lists have no .model; fall back to the formset's model
    if type(self.object_list) == type([]):
        model = self.formset.model
    else:
        model = self.object_list.model
    for field in self.visible_fields:
        name = None
        # Prefer the label declared on the formset's form field, if any
        if self.formset:
            f = self.formset.empty_form.fields.get(field, None)
            if f:
                name = f.label
        if name is None:
            name = label_for_field(field, model)
        if name == model._meta.verbose_name:
            name = self.model_name and self.model_name or \
                model._meta.verbose_name
        stype = None
        cur_sorted = False
        sortable = False
        if self.order_type:
            sortable = get_sort_field(field, model)
            stype = self.ASC
            # change order_type so that next sorting on the same
            # field will give reversed results
            if sortable and field == self.sort_field:
                cur_sorted = True
                if self.order_type == self.ASC:
                    stype = self.DESC
                elif self.order_type == self.DESC:
                    stype = self.ASC
        else:
            stype = self.ASC
        yield AdminListLabel(name, field, stype, cur_sorted, bool(sortable))
python
{ "resource": "" }
q45065
password_change
train
def password_change(request, username, template_name='accounts/password_form.html',
                    pass_form=PasswordChangeForm, success_url=None, extra_context=None):
    """
    Change password of user.

    This view is almost a mirror of the view supplied in
    :func:`contrib.auth.views.password_change`, with the minor change that in
    this view we also use the username to change the password. This was needed
    to keep our URLs logical (and REST) across the entire application. And
    that in a later stadium administrators can also change the users password
    through the web application itself.

    :param username:
        String supplying the username of the user who's password is about to be
        changed.

    :param template_name:
        String of the name of the template that is used to display the password
        change form. Defaults to ``accounts/password_form.html``.

    :param pass_form:
        Form used to change password. Default is the form supplied by Django
        itself named ``PasswordChangeForm``.

    :param success_url:
        Named URL that is passed onto a :func:`reverse` function with
        ``username`` of the active user. Defaults to the
        ``accounts_password_complete`` URL.

    :param extra_context:
        Dictionary of extra variables that are passed on to the template. The
        ``form`` key is always used by the form supplied by ``pass_form``.

    **Context**

    ``form``
        Form used to change the password.
    """
    # Username lookup is case-insensitive
    user = get_object_or_404(get_user_model(), username__iexact=username)

    form = pass_form(user=user)

    if request.method == "POST":
        form = pass_form(user=user, data=request.POST)
        if form.is_valid():
            form.save()

            # Send a signal that the password has changed
            accounts_signals.password_complete.send(sender=None, user=user)

            if success_url:
                redirect_to = success_url
            else:
                redirect_to = reverse('accounts_password_change_complete',
                                      kwargs={'username': user.username})
            return redirect(redirect_to)

    # GET request, or invalid POST: re-render the form
    if not extra_context:
        extra_context = dict()
    extra_context['form'] = form
    extra_context['profile'] = user.get_profile()
    return ExtraContextTemplateView.as_view(template_name=template_name,
                                            extra_context=extra_context)(request)
python
{ "resource": "" }
q45066
account_delete
train
def account_delete(request, username,
                   template_name=accounts_settings.ACCOUNTS_PROFILE_DETAIL_TEMPLATE,
                   extra_context=None, **kwargs):
    """
    Deactivate an account (soft delete).

    The user matching `username` (case-insensitive) is marked inactive rather
    than removed, then the browser is redirected to the accounts admin page.
    `template_name`, `extra_context` and `**kwargs` are accepted for URLconf
    compatibility but are not used here.
    """
    account = get_object_or_404(get_user_model(), username__iexact=username)
    account.is_active = False
    account.save()
    return redirect(reverse('accounts_admin'))
python
{ "resource": "" }
q45067
parse_properties
train
def parse_properties(parent_index_name, parent_name, nested_path, esProperties):
    """
    RETURN THE COLUMN DEFINITIONS IN THE GIVEN esProperties OBJECT

    Recursively walks an Elasticsearch mapping's `properties`, emitting one
    Column per leaf/object/nested field found.
    """
    columns = FlatList()
    for name, property in esProperties.items():
        index_name = parent_index_name
        column_name = concat_field(parent_name, name)
        jx_name = column_name
        if property.type == "nested" and property.properties:
            # NESTED TYPE IS A NEW TYPE DEFINITION
            # MARKUP CHILD COLUMNS WITH THE EXTRA DEPTH
            self_columns = parse_properties(index_name, column_name, [column_name] + nested_path, property.properties)
            columns.extend(self_columns)
            columns.append(Column(
                name=jx_name,
                es_index=index_name,
                es_column=column_name,
                es_type="nested",
                jx_type=NESTED,
                last_updated=Date.now(),
                nested_path=nested_path
            ))
            continue

        if property.properties:
            # Plain object: recurse at the SAME nesting depth
            child_columns = parse_properties(index_name, column_name, nested_path, property.properties)
            columns.extend(child_columns)
            columns.append(Column(
                name=jx_name,
                es_index=index_name,
                es_column=column_name,
                es_type="source" if property.enabled == False else "object",
                jx_type=OBJECT,
                last_updated=Date.now(),
                nested_path=nested_path
            ))

        if property.dynamic:
            continue
        if not property.type:
            continue

        # Unstored/disabled columns (except _id) are known to have no values
        cardinality = 0 if not (property.store or property.enabled) and name != '_id' else None

        if property.fields:
            # Multi-fields (sub-fields of the same source value)
            child_columns = parse_properties(index_name, column_name, nested_path, property.fields)
            if cardinality is None:
                for cc in child_columns:
                    cc.cardinality = None
            columns.extend(child_columns)

        if property.type in es_type_to_json_type.keys():
            columns.append(Column(
                name=jx_name,
                es_index=index_name,
                es_column=column_name,
                es_type=property.type,
                jx_type=es_type_to_json_type[property.type],
                cardinality=cardinality,
                last_updated=Date.now(),
                nested_path=nested_path
            ))
            if property.index_name and name != property.index_name:
                # Legacy index_name alias gets its own column entry
                columns.append(Column(
                    name=jx_name,
                    es_index=index_name,
                    es_column=column_name,
                    es_type=property.type,
                    jx_type=es_type_to_json_type[property.type],
                    cardinality=0 if property.store else None,
                    last_updated=Date.now(),
                    nested_path=nested_path
                ))
        elif property.enabled == None or property.enabled == False:
            columns.append(Column(
                name=jx_name,
                es_index=index_name,
                es_column=column_name,
                es_type="source" if property.enabled == False else "object",
                jx_type=OBJECT,
                cardinality=0 if property.store else None,
                last_updated=Date.now(),
                nested_path=nested_path
            ))
        else:
            Log.warning("unknown type {{type}} for property {{path}}", type=property.type, path=parent_name)

    return columns
python
{ "resource": "" }
q45068
_merge_mapping
train
def _merge_mapping(a, b):
    """
    MERGE TWO MAPPINGS, a TAKES PRECEDENCE

    Mutates and returns `a`; entries only present in `b` are deep-copied in.
    """
    for name, b_details in b.items():
        a_details = a[literal_field(name)]
        # Mappings with sub-properties but no explicit type are objects
        if a_details.properties and not a_details.type:
            a_details.type = "object"
        if b_details.properties and not b_details.type:
            b_details.type = "object"

        if a_details:
            # Both sides exist: reconcile types via the _merge_type table,
            # then recurse into structured types
            a_details.type = _merge_type[a_details.type][b_details.type]
            if b_details.type in ES_STRUCT:
                _merge_mapping(a_details.properties, b_details.properties)
        else:
            # Only b has this entry: copy it wholesale
            a[literal_field(name)] = deepcopy(b_details)
    return a
python
{ "resource": "" }
q45069
Index.delete_all_but_self
train
def delete_all_but_self(self):
    """Delete every index matching this index's alias prefix, except itself.

    Matching indexes follow the <prefix>YYYYMMDD_HHMMSS naming convention.
    """
    prefix = self.settings.alias
    name = self.settings.index

    if prefix == name:
        Log.note("{{index_name}} will not be deleted", index_name=prefix)

    pattern = re.escape(prefix) + "\\d{8}_\\d{6}"
    for alias in self.cluster.get_aliases():
        if alias.index != name and re.match(pattern, alias.index):
            self.cluster.delete_index(alias.index)
python
{ "resource": "" }
q45070
Index.is_proto
train
def is_proto(self, index):
    """
    RETURN True IF THIS INDEX HAS NOT BEEN ASSIGNED ITS ALIAS
    """
    for candidate in self.cluster.get_aliases():
        if candidate.index == index and candidate.alias:
            # Alias already assigned: not a prototype
            return False
    return True
python
{ "resource": "" }
q45071
Cluster.get_index
train
def get_index(self, index, type, alias=None, typed=None, read_only=True, kwargs=None):
    """
    TESTS THAT THE INDEX EXISTS BEFORE RETURNING A HANDLE

    Read-only mode requires an exact index or alias match; otherwise the
    best-matching index (including unfinished prototypes) is used.
    """
    # Guard against the old parameter name
    if kwargs.tjson != None:
        Log.error("used `typed` parameter, not `tjson`")
    if read_only:
        # GET EXACT MATCH, OR ALIAS
        aliases = wrap(self.get_aliases())
        if index in aliases.index:
            pass
        elif index in aliases.alias:
            # Caller named an alias; resolve it to its concrete index
            match = [a for a in aliases if a.alias == index][0]
            kwargs.alias = match.alias
            kwargs.index = match.index
        else:
            Log.error("Can not find index {{index_name}}", index_name=kwargs.index)
        return Index(kwargs=kwargs, cluster=self)
    else:
        # GET BEST MATCH, INCLUDING PROTOTYPE
        best = self.get_best_matching_index(index, alias)
        if not best:
            Log.error("Can not find index {{index_name}}", index_name=kwargs.index)

        if best.alias != None:
            kwargs.alias = best.alias
            kwargs.index = best.index
        elif kwargs.alias == None:
            kwargs.alias = kwargs.index
            kwargs.index = best.index
        return Index(kwargs=kwargs, cluster=self)
python
{ "resource": "" }
q45072
Cluster.get_prototype
train
def get_prototype(self, alias):
    """
    RETURN ALL INDEXES THAT ARE INTENDED TO BE GIVEN alias, BUT HAVE NO ALIAS YET BECAUSE INCOMPLETE

    Candidates follow the <alias>YYYYMMDD_HHMMSS naming convention.
    """
    pattern = re.escape(alias) + "\\d{8}_\\d{6}"
    candidates = [
        a.index
        for a in self.get_aliases()
        if re.match(pattern, a.index) and not a.alias
    ]
    return sort(candidates)
python
{ "resource": "" }
q45073
SQLBuilder.add_sql
train
def add_sql(self, value, clause):
    """
    Add a WHERE clause to the state; silently ignored when `value` is None.

    :param value: The unknown to bind into the state. Uses
        SQLBuilder._map_value() to map this into an appropriate database
        compatible type.
    :param clause: A SQL fragment defining the restriction on the unknown value
    """
    if value is None:
        return
    self.sql_args.append(SQLBuilder.map_value(value))
    self.where_clauses.append(clause)
python
{ "resource": "" }
q45074
SQLBuilder.add_metadata_query_properties
train
def add_metadata_query_properties(self, meta_constraints, id_table, id_column):
    """
    Construct WHERE clauses from a list of MetaConstraint objects, adding them to the query state.

    :param meta_constraints:
        A list of MetaConstraint objects, each of which defines a condition over metadata which must
        be satisfied for results to be included in the overall query.
    :param id_table: table alias carrying the `uid` to restrict
    :param id_column: metadata column linking back to that uid
    :raises:
        ValueError if an unknown meta constraint type is encountered.
    """
    for mc in meta_constraints:
        meta_key = str(mc.key)
        ct = mc.constraint_type
        # Subquery template: restrict {0}.uid to rows whose metadata value
        # ({2}, compared with operator {3}) and key both match the bound args
        sql_template = """
            {0}.uid IN (
                SELECT m.{1} FROM archive_metadata m
                INNER JOIN archive_metadataFields k ON m.fieldId=k.uid
                WHERE m.{2} {3} %s AND k.metaKey = %s
            )"""
        # Add metadata value to list of SQL arguments
        self.sql_args.append(SQLBuilder.map_value(mc.value))
        # Add metadata key to list of SQL arguments
        self.sql_args.append(meta_key)
        # Put an appropriate WHERE clause
        if ct == 'less':
            self.where_clauses.append(sql_template.format(id_table, id_column, 'floatValue', '<='))
        elif ct == 'greater':
            self.where_clauses.append(sql_template.format(id_table, id_column, 'floatValue', '>='))
        elif ct == 'number_equals':
            self.where_clauses.append(sql_template.format(id_table, id_column, 'floatValue', '='))
        elif ct == 'string_equals':
            self.where_clauses.append(sql_template.format(id_table, id_column, 'stringValue', '='))
        else:
            raise ValueError("Unknown meta constraint type!")
python
{ "resource": "" }
q45075
SQLBuilder.get_select_sql
train
def get_select_sql(self, columns, order=None, limit=0, skip=0):
    """
    Build a SELECT query based on the current state of the builder.

    :param columns: SQL fragment describing which columns to select,
        i.e. 'e.obstoryID, s.statusID'
    :param order: Optional ordering constraint, i.e. 'e.eventTime DESC'
    :param limit: Optional, used to build the 'LIMIT n' clause. If not
        specified no limit is imposed.
    :param skip: Optional, used to build the 'OFFSET n' clause. Should be
        combined with `order`, otherwise subsequent queries may return
        overlapping data.
    :returns: A SQL SELECT query, which will make use of self.sql_args when
        executed.
    """
    parts = ['SELECT ', '{0} FROM {1} '.format(columns, self.tables)]
    if self.where_clauses:
        parts.append(' WHERE ')
        parts.append(' AND '.join(self.where_clauses))
    if order is not None:
        parts.append(' ORDER BY {0}'.format(order))
    if limit > 0:
        parts.append(' LIMIT {0} '.format(limit))
    if skip > 0:
        parts.append(' OFFSET {0} '.format(skip))
    return ''.join(parts)
python
{ "resource": "" }
q45076
SQLBuilder.get_count_sql
train
def get_count_sql(self):
    """
    Build a SELECT query which returns the count of items for an unlimited SELECT.

    :return: A SQL SELECT query which returns the count of items for an
        unlimited query based on this SQLBuilder
    """
    sql = 'SELECT COUNT(*) FROM ' + self.tables
    if self.where_clauses:
        sql += ' WHERE ' + ' AND '.join(self.where_clauses)
    return sql
python
{ "resource": "" }
q45077
SinonBase.wrap2spy
train
def wrap2spy(self):
    """
    Wrapping the inspector as a spy based on the type

    Saves the original callable in self.orig_func so unwrap() can restore it.
    NOTE(review): deepcopy of a function normally returns the same object —
    presumably used here only to hold a reference; confirm before changing.
    """
    if self.args_type == "MODULE_FUNCTION":
        # Replace the named attribute on the module/class with a spy wrapper
        self.orig_func = deepcopy(getattr(self.obj, self.prop))
        setattr(self.obj, self.prop, Wrapper.wrap_spy(getattr(self.obj, self.prop)))
    elif self.args_type == "MODULE":
        # Mark the whole module as locked rather than wrapping anything
        setattr(self.obj, "__SINONLOCK__", True)
    elif self.args_type == "FUNCTION":
        # Free function: replace it in the calling scope (CPSCOPE)
        self.orig_func = deepcopy(getattr(CPSCOPE, self.obj.__name__))
        setattr(CPSCOPE, self.obj.__name__, Wrapper.wrap_spy(getattr(CPSCOPE, self.obj.__name__)))
    elif self.args_type == "PURE":
        # Pure spy object: wrap its `func` attribute
        self.orig_func = deepcopy(getattr(self.pure, "func"))
        setattr(self.pure, "func", Wrapper.wrap_spy(getattr(self.pure, "func")))
python
{ "resource": "" }
q45078
SinonBase.unwrap
train
def unwrap(self):
    """Restore whatever wrap2spy() replaced, based on the inspected type."""
    kind = self.args_type
    if kind == "MODULE_FUNCTION":
        # Put the original attribute back on the module/class
        setattr(self.obj, self.prop, self.orig_func)
    elif kind == "MODULE":
        # Remove the lock marker set when spying on a whole module
        delattr(self.obj, "__SINONLOCK__")
    elif kind == "FUNCTION":
        # Restore the free function in the calling scope
        setattr(CPSCOPE, self.obj.__name__, self.orig_func)
    elif kind == "PURE":
        setattr(self.pure, "func", self.orig_func)
python
{ "resource": "" }
q45079
jx_type
train
def jx_type(column):
    """Return the jx (JSON) type for the given column.

    Columns whose es_column carries the EXISTS_TYPE suffix are special-cased;
    everything else maps through es_type_to_json_type.
    """
    if column.es_column.endswith(EXISTS_TYPE):
        return EXISTS
    return es_type_to_json_type[column.es_type]
python
{ "resource": "" }
q45080
ElasticsearchMetadata.get_columns
train
def get_columns(self, table_name, column_name=None, after=None, timeout=None):
    """
    RETURN METADATA COLUMNS

    :param table_name: TABLE WE WANT COLUMNS FOR
    :param column_name: OPTIONAL NAME, IF INTERESTED IN ONLY ONE COLUMN
    :param after: FORCE LOAD, WAITING FOR last_updated TO BE AFTER THIS TIME
    :param timeout: Signal; True when should give up
    :return: list of Column, sorted by name (empty list on failure)
    """
    DEBUG and after and Log.note("getting columns for after {{time}}", time=after)
    table_path = split_field(table_name)
    root_table_name = table_path[0]

    # Resolve the root table to its ES alias, refreshing cluster metadata once
    # before giving up
    alias = self._find_alias(root_table_name)
    if not alias:
        self.es_cluster.get_metadata(force=True)
        alias = self._find_alias(root_table_name)
        if not alias:
            Log.error("{{table|quote}} does not exist", table=table_name)

    try:
        table = self.get_table(alias)[0]
        # LAST TIME WE GOT INFO FOR THIS TABLE
        if not table:
            # Never seen before: register a fresh TableDesc and load columns
            table = TableDesc(
                name=alias,
                url=None,
                query_path=["."],
                timestamp=Date.MIN
            )
            with self.meta.tables.locker:
                self.meta.tables.add(table)
            columns = self._reload_columns(table)
            DEBUG and Log.note("columns from reload")
        elif after or table.timestamp < self.es_cluster.metatdata_last_updated:
            # Stale (or caller forced a wait): reload from the cluster
            columns = self._reload_columns(table)
            DEBUG and Log.note("columns from reload")
        else:
            # Fresh enough: serve from the cached column store
            columns = self.meta.columns.find(alias, column_name)
            DEBUG and Log.note("columns from find()")
        DEBUG and Log.note("columns are {{ids}}", ids=[id(c) for c in columns])

        columns = jx.sort(columns, "name")

        if after is None:
            return columns  # DO NOT WAIT FOR COMPLETE COLUMNS

        # WAIT FOR THE COLUMNS TO UPDATE
        while True:
            # A column is pending until it was updated after `after` and its
            # cardinality is known (non-structured columns only)
            pending = [c for c in columns if after >= c.last_updated or (c.cardinality == None and c.jx_type not in STRUCT)]
            if not pending:
                break
            if timeout:
                Log.error("trying to gets columns timed out")
            if DEBUG:
                if len(pending) > 10:
                    Log.note("waiting for {{num}} columns to update by {{timestamp}}", num=len(pending), timestamp=after)
                else:
                    Log.note("waiting for columns to update by {{timestamp}}; {{columns|json}}", timestamp=after, columns=[c.es_index + "." + c.es_column + " id="+text_type(id(c)) for c in pending])
            Till(seconds=1).wait()
        return columns
    except Exception as e:
        Log.error("Failure to get columns for {{table}}", table=table_name, cause=e)

    return []
python
{ "resource": "" }
q45081
Snowflake.query_paths
train
def query_paths(self):
    """
    RETURN A LIST OF ALL NESTED COLUMNS
    """
    paths = self.namespace.alias_to_query_paths.get(self.name)
    if paths:
        return paths
    Log.error("Can not find index {{index|quote}}", index=self.name)
python
{ "resource": "" }
q45082
Snowflake.sorted_query_paths
train
def sorted_query_paths(self):
    """
    RETURN A LIST OF ALL SCHEMA'S IN DEPTH-FIRST TOPOLOGICAL ORDER

    Paths come back in reverse-lexicographic order (deepest first).
    """
    paths = self.namespace.alias_to_query_paths.get(self.name)
    return sorted((p[0] for p in paths), reverse=True)
python
{ "resource": "" }
q45083
Schema.values
train
def values(self, column_name, exclude_type=STRUCT):
    """
    RETURN ALL COLUMNS THAT column_name REFERS TO

    Query paths are searched in order; the first depth that yields any
    matching (non-excluded) columns wins.
    """
    target = unnest_path(column_name)
    for path in self.query_path:
        full_path = untype_path(concat_field(path, target))
        matched = [
            col
            for col in self.columns
            if col.jx_type not in exclude_type
            and untype_path(col.name) == full_path
        ]
        if matched:
            return matched
    return []
python
{ "resource": "" }
q45084
get_decoders_by_path
train
def get_decoders_by_path(query): """ RETURN MAP FROM QUERY PATH TO LIST OF DECODER ARRAYS :param query: :return: """ schema = query.frum.schema output = Data() if query.edges: if query.sort and query.format != "cube": # REORDER EDGES/GROUPBY TO MATCH THE SORT query.edges = sort_edges(query, "edges") elif query.groupby: if query.sort and query.format != "cube": query.groupby = sort_edges(query, "groupby") for edge in wrap(coalesce(query.edges, query.groupby, [])): limit = coalesce(edge.domain.limit, query.limit, DEFAULT_LIMIT) if edge.value != None and not edge.value is NULL: edge = edge.copy() vars_ = edge.value.vars() for v in vars_: if not schema.leaves(v.var): Log.error("{{var}} does not exist in schema", var=v) elif edge.range: vars_ = edge.range.min.vars() | edge.range.max.vars() for v in vars_: if not schema[v.var]: Log.error("{{var}} does not exist in schema", var=v) elif edge.domain.dimension: vars_ = edge.domain.dimension.fields edge.domain.dimension = edge.domain.dimension.copy() edge.domain.dimension.fields = [schema[v].es_column for v in vars_] elif all(edge.domain.partitions.where): vars_ = set() for p in edge.domain.partitions: vars_ |= p.where.vars() vars_ |= edge.value.vars() depths = set(c.nested_path[0] for v in vars_ for c in schema.leaves(v.var)) if not depths: Log.error( "Do not know of column {{column}}", column=unwraplist([v for v in vars_ if schema[v] == None]) ) if len(depths) > 1: Log.error("expression {{expr|quote}} spans tables, can not handle", expr=edge.value) decoder = AggsDecoder(edge, query, limit) output[literal_field(first(depths))] += [decoder] return output
python
{ "resource": "" }
q45085
make_default
train
def make_default(spec): """Create an empty document that follows spec. Any field with a default will take that value, required or not. Required fields with no default will get a value of None. If your default value does not match your type or otherwise customized Field class, this can create a spec that fails validation.""" doc = {} for key, field in spec.iteritems(): if field.default is not no_default: doc[key] = field.default return doc
python
{ "resource": "" }
q45086
validate
train
def validate(document, spec): """Validate that a document meets a specification. Returns True if validation was successful, but otherwise raises a ValueError.""" if not spec: return True missing = [] for key, field in spec.iteritems(): if field.required and key not in document: missing.append(key) failed = [] for key, field in spec.iteritems(): if key in document: try: document[key] = field.validate(document[key]) except ValueError: failed.append(key) if missing or failed: if missing and not failed: raise ValueError("Required fields missing: %s" % (missing)) if failed and not missing: raise ValueError("Keys did not match spec: %s" % (failed)) raise ValueError("Missing fields: %s, Invalid fields: %s" % (missing, failed)) # just a token of my kindness, a return for you return True
python
{ "resource": "" }
q45087
Field.typecheck
train
def typecheck(self, t): """Create a typecheck from some value ``t``. This behaves differently depending on what ``t`` is. It should take a value and return True if the typecheck passes, or False otherwise. Override ``pre_validate`` in a child class to do type coercion. * If ``t`` is a type, like basestring, int, float, *or* a tuple of base types, then a simple isinstance typecheck is returned. * If ``t`` is a list or tuple of instances, such as a tuple or list of integers or of strings, it's treated as the definition of an enum and a simple "in" check is returned. * If ``t`` is callable, ``t`` is assumed to be a valid typecheck. * If ``t`` is None, a typecheck that always passes is returned. If none of these conditions are met, a TypeError is raised. """ if t is None: return lambda x: True def _isinstance(types, value): return isinstance(value, types) def _enum(values, value): return value in values if t.__class__ is type: return partial(_isinstance, t) elif isinstance(t, (tuple, list)): if all([x.__class__ is type for x in t]): return partial(_isinstance, t) return partial(_enum, t) elif callable(t): return t raise TypeError('%r is not a valid field type' % r)
python
{ "resource": "" }
q45088
Field.validate
train
def validate(self, value): """Validate a value for this field. If the field is invalid, this will raise a ValueError. Runs ``pre_validate`` hook prior to validation, and returns value if validation passes.""" value = self.pre_validate(value) if not self._typecheck(value): raise ValueError('%r failed type check' % value) return value
python
{ "resource": "" }
q45089
TUIDService.init_db
train
def init_db(self): ''' Creates all the tables, and indexes needed for the service. :return: None ''' with self.conn.transaction() as t: t.execute(''' CREATE TABLE temporal ( tuid INTEGER, revision CHAR(12) NOT NULL, file TEXT, line INTEGER );''') t.execute(''' CREATE TABLE annotations ( revision CHAR(12) NOT NULL, file TEXT, annotation TEXT, PRIMARY KEY(revision, file) );''') # Used in frontier updating t.execute(''' CREATE TABLE latestFileMod ( file TEXT, revision CHAR(12) NOT NULL, PRIMARY KEY(file) );''') t.execute("CREATE UNIQUE INDEX temporal_rev_file ON temporal(revision, file, line)") Log.note("Tables created successfully")
python
{ "resource": "" }
q45090
TUIDService.get_tuids_from_revision
train
def get_tuids_from_revision(self, revision): """ Gets the TUIDs for the files modified by a revision. :param revision: revision to get files from :return: list of (file, list(tuids)) tuples """ result = [] URL_TO_FILES = self.hg_url / self.config.hg.branch / 'json-info' / revision try: mozobject = http.get_json(url=URL_TO_FILES, retry=RETRY) except Exception as e: Log.warning("Unexpected error trying to get file list for revision {{revision}}", cause=e) return None files = mozobject[revision]['files'] results = self.get_tuids(files, revision) return results
python
{ "resource": "" }
q45091
TUIDService._check_branch
train
def _check_branch(self, revision, branch): ''' Used to find out if the revision is in the given branch. :param revision: Revision to check. :param branch: Branch to check revision on. :return: True/False - Found it/Didn't find it ''' # Get a changelog clog_url = self.hg_url / branch / 'json-log' / revision try: Log.note("Searching through changelog {{url}}", url=clog_url) clog_obj = http.get_json(clog_url, retry=RETRY) if isinstance(clog_obj, (text_type, str)): Log.note( "Revision {{cset}} does not exist in the {{branch}} branch", cset=revision, branch=branch ) return False except Exception as e: Log.note("Unexpected error getting changset-log for {{url}}: {{error}}", url=clog_url, error=e) return False return True
python
{ "resource": "" }
q45092
TUIDService.get_tuids
train
def get_tuids(self, files, revision, commit=True, chunk=50, repo=None): ''' Wrapper for `_get_tuids` to limit the number of annotation calls to hg and separate the calls from DB transactions. Also used to simplify `_get_tuids`. :param files: :param revision: :param commit: :param chunk: :param repo: :return: ''' results = [] revision = revision[:12] # For a single file, there is no need # to put it in an array when given. if not isinstance(files, list): files = [files] if repo is None: repo = self.config.hg.branch for _, new_files in jx.groupby(files, size=chunk): for count, file in enumerate(new_files): new_files[count] = file.lstrip('/') annotations_to_get = [] for file in new_files: with self.conn.transaction() as t: already_ann = self._get_annotation(revision, file, transaction=t) if already_ann: results.append((file, self.destringify_tuids(already_ann))) elif already_ann == '': results.append((file, [])) else: annotations_to_get.append(file) if not annotations_to_get: # No new annotations to get, so get next set continue # Get all the annotations in parallel and # store in annotated_files annotated_files = [None] * len(annotations_to_get) threads = [ Thread.run( str(thread_count), self._get_hg_annotate, revision, annotations_to_get[thread_count], annotated_files, thread_count, repo ) for thread_count, _ in enumerate(annotations_to_get) ] for t in threads: t.join() # Help for memory, because `chunk` (or a lot of) # threads are started at once. del threads with self.conn.transaction() as transaction: results.extend( self._get_tuids( transaction, annotations_to_get, revision, annotated_files, commit=commit, repo=repo ) ) # Help for memory gc.collect() return results
python
{ "resource": "" }
q45093
addSubparser
train
def addSubparser(subparsers, subcommand, description): """ Add a subparser with subcommand to the subparsers object """ parser = subparsers.add_parser( subcommand, description=description, help=description) return parser
python
{ "resource": "" }
q45094
createArgumentParser
train
def createArgumentParser(description): """ Create an argument parser """ parser = argparse.ArgumentParser( description=description, formatter_class=SortedHelpFormatter) return parser
python
{ "resource": "" }
q45095
SortedHelpFormatter.add_arguments
train
def add_arguments(self, actions): """ Sort the flags alphabetically """ actions = sorted( actions, key=operator.attrgetter('option_strings')) super(SortedHelpFormatter, self).add_arguments(actions)
python
{ "resource": "" }
q45096
SortedHelpFormatter._iter_indented_subactions
train
def _iter_indented_subactions(self, action): """ Sort the subcommands alphabetically """ try: get_subactions = action._get_subactions except AttributeError: pass else: self._indent() if isinstance(action, argparse._SubParsersAction): for subaction in sorted( get_subactions(), key=lambda x: x.dest): yield subaction else: for subaction in get_subactions(): yield subaction self._dedent()
python
{ "resource": "" }
q45097
resource_of_node
train
def resource_of_node(resources, node): """ Returns resource of node. """ for resource in resources: model = getattr(resource, 'model', None) if type(node) == model: return resource return BasePageResource
python
{ "resource": "" }
q45098
resources_of_config
train
def resources_of_config(config): """ Returns all resources and models from config. """ return set( # unique values sum([ # join lists to flat list list(value) # if value is iter (ex: list of resources) if hasattr(value, '__iter__') else [value, ] # if value is not iter (ex: model or resource) for value in config.values() ], []) )
python
{ "resource": "" }
q45099
models_of_config
train
def models_of_config(config): """ Return list of models from all resources in config. """ resources = resources_of_config(config) models = [] for resource in resources: if not hasattr(resource, '__table__') and hasattr(resource, 'model'): models.append(resource.model) else: models.append(resource) return models
python
{ "resource": "" }