def syncBuffer( self ): """ I detect and correct corruption in the buffer. Corruption in the buffer is defined as the following conditions both being true: 1. The buffer contains at least one newline; 2. The text until the first newline is not a STOMP command. In this case, we heuristically try to flush bits of the buffer until one of the following conditions becomes true: 1. the buffer starts with a STOMP command; 2. the buffer does not contain a newline; 3. the buffer is empty. If the buffer is deemed corrupt, the first step is to flush the buffer up to and including the first occurrence of the string '\x00\n', which is likely to be a frame boundary. Note that this is not guaranteed to be a frame boundary, as a binary payload could contain the string '\x00\n'. That condition would get handled on the next loop iteration. If the string '\x00\n' does not occur, the entire buffer is cleared. An earlier version progressively removed strings until the next newline, but this gets complicated because the body could contain strings that look like STOMP commands. Note that we do not check "partial" strings to see if they *could* match a command; that would be too resource-intensive. In other words, a buffer containing the string 'BUNK' with no newline is clearly corrupt, but we sit and wait until the buffer contains a newline before attempting to see if it's a STOMP command. """ while True: if not self.buffer: # Buffer is empty; no need to do anything. break m = command_re.match ( self.buffer ) if m is None: # Buffer doesn't even contain a single newline, so we can't # determine whether it's corrupt or not. Assume it's OK. break cmd = m.groups()[0] if cmd in stomper.VALID_COMMANDS: # Good: the buffer starts with a command. break else: # Bad: the buffer starts with bunk, so strip it out. We try # to strip to the first occurrence of '\x00\n', which # is likely to be a frame boundary, but if this fails, we # clear the entire buffer. ( self.buffer, nsubs ) = sync_re.subn ( '', self.buffer ) if nsubs: # Good: we managed to strip something out, so restart the # loop to see if things look better. continue else: # Bad: we failed to strip anything out, so kill the # entire buffer. Since this resets the buffer to a # known good state, we can break out of the loop. self.buffer = '' break
I detect and correct corruption in the buffer. Corruption in the buffer is defined as the following conditions both being true: 1. The buffer contains at least one newline; 2. The text until the first newline is not a STOMP command. In this case, we heuristically try to flush bits of the buffer until one of the following conditions becomes true: 1. the buffer starts with a STOMP command; 2. the buffer does not contain a newline; 3. the buffer is empty. If the buffer is deemed corrupt, the first step is to flush the buffer up to and including the first occurrence of the string '\x00\n', which is likely to be a frame boundary. Note that this is not guaranteed to be a frame boundary, as a binary payload could contain the string '\x00\n'. That condition would get handled on the next loop iteration. If the string '\x00\n' does not occur, the entire buffer is cleared. An earlier version progressively removed strings until the next newline, but this gets complicated because the body could contain strings that look like STOMP commands. Note that we do not check "partial" strings to see if they *could* match a command; that would be too resource-intensive. In other words, a buffer containing the string 'BUNK' with no newline is clearly corrupt, but we sit and wait until the buffer contains a newline before attempting to see if it's a STOMP command.
entailment
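A minimal standalone sketch of this resynchronization heuristic. The two patterns below are reconstructions inferred from how command_re and sync_re are used in the method, not the module's actual definitions:

import re

command_re = re.compile(r'(.+?)\n')             # text up to the first newline (assumed)
sync_re = re.compile(r'^.*?\x00\n', re.DOTALL)  # through the first '\x00\n' boundary (assumed)

buffer = 'BUNK\njunk\x00\nSEND\ndestination:/queue/a\n\nhello\x00\n'
m = command_re.match(buffer)
print(m.group(1))                       # 'BUNK' -- not a STOMP command, so corrupt
buffer, nsubs = sync_re.subn('', buffer)
print(nsubs, buffer.split('\n', 1)[0])  # 1 SEND -- resynchronized at the frame boundary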
def connected(self, msg): """Once I've connected I want to subscribe to my message queue. """ super(MyStomp, self).connected(msg) self.log.info("connected: session %s" % msg['headers']['session']) f = stomper.Frame() f.unpack(stomper.subscribe(DESTINATION)) return f.pack()
Once I've connected I want to subscribe to my message queue.
entailment
def delete_entity_signal_handler(sender, instance, **kwargs): """ Defines a signal handler for syncing an individual entity. Called when an entity is saved or deleted. """ if instance.__class__ in entity_registry.entity_registry: Entity.all_objects.delete_for_obj(instance)
Defines a signal handler for syncing an individual entity. Called when an entity is saved or deleted.
entailment
def save_entity_signal_handler(sender, instance, **kwargs): """ Defines a signal handler for saving an entity. Syncs the entity to the entity mirror table. """ if instance.__class__ in entity_registry.entity_registry: sync_entities(instance) if instance.__class__ in entity_registry.entity_watching: sync_entities_watching(instance)
Defines a signal handler for saving an entity. Syncs the entity to the entity mirror table.
entailment
def m2m_changed_entity_signal_handler(sender, instance, action, **kwargs): """ Defines a signal handler for a manytomany changed signal. Only listens for the post actions so that entities are synced once (rather than twice for a pre and post action). """ if action == 'post_add' or action == 'post_remove' or action == 'post_clear': save_entity_signal_handler(sender, instance, **kwargs)
Defines a signal handler for a manytomany changed signal. Only listens for the post actions so that entities are synced once (rather than twice for a pre and post action).
entailment
def turn_off_syncing(for_post_save=True, for_post_delete=True, for_m2m_changed=True, for_post_bulk_operation=True): """ Disables all of the signals for syncing entities. By default, everything is turned off. If the user wants to turn off everything but one signal, for example the post_save signal, they would do: turn_off_syncing(for_post_save=False) """ if for_post_save: post_save.disconnect(save_entity_signal_handler, dispatch_uid='save_entity_signal_handler') if for_post_delete: post_delete.disconnect(delete_entity_signal_handler, dispatch_uid='delete_entity_signal_handler') if for_m2m_changed: m2m_changed.disconnect(m2m_changed_entity_signal_handler, dispatch_uid='m2m_changed_entity_signal_handler') if for_post_bulk_operation: post_bulk_operation.disconnect(bulk_operation_signal_handler, dispatch_uid='bulk_operation_signal_handler')
Disables all of the signals for syncing entities. By default, everything is turned off. If the user wants to turn off everything but one signal, for example the post_save signal, they would do: turn_off_syncing(for_post_save=False)
entailment
def turn_on_syncing(for_post_save=True, for_post_delete=True, for_m2m_changed=True, for_post_bulk_operation=False): """ Enables all of the signals for syncing entities. Everything is True by default, except for the post_bulk_operation signal. The reason for this is that when any bulk operation occurs on any mirrored entity model, it will result in every single entity being synced again. This is not the desired behavior for the majority of users, and should only be turned on explicitly. """ if for_post_save: post_save.connect(save_entity_signal_handler, dispatch_uid='save_entity_signal_handler') if for_post_delete: post_delete.connect(delete_entity_signal_handler, dispatch_uid='delete_entity_signal_handler') if for_m2m_changed: m2m_changed.connect(m2m_changed_entity_signal_handler, dispatch_uid='m2m_changed_entity_signal_handler') if for_post_bulk_operation: post_bulk_operation.connect(bulk_operation_signal_handler, dispatch_uid='bulk_operation_signal_handler')
Enables all of the signals for syncing entities. Everything is True by default, except for the post_bulk_operation signal. The reason for this is that when any bulk operation occurs on any mirrored entity model, it will result in every single entity being synced again. This is not the desired behavior for the majority of users, and should only be turned on explicitly.
entailment
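A hedged usage sketch of the intended pairing: disable per-save syncing around a bulk import, then resync once at the end. The import paths and the Account model are assumptions; sync_entities is the helper the handlers above already call:

from entity.signal_handlers import turn_off_syncing, turn_on_syncing  # assumed path
from entity.sync import sync_entities                                 # assumed path

turn_off_syncing()
try:
    for attrs in incoming_rows:          # placeholder iterable of field dicts
        Account.objects.create(**attrs)  # hypothetical mirrored model
finally:
    turn_on_syncing()
sync_entities()  # one full sync instead of one per save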
def add(self, value): """Add element *value* to the set.""" # Raise TypeError if value is not hashable hash(value) self.redis.sadd(self.key, self._pickle(value))
Add element *value* to the set.
entailment
def discard(self, value): """Remove element *value* from the set if it is present.""" # Raise TypeError if value is not hashable hash(value) self.redis.srem(self.key, self._pickle(value))
Remove element *value* from the set if it is present.
entailment
def isdisjoint(self, other): """ Return ``True`` if the set has no elements in common with *other*. Sets are disjoint if and only if their intersection is the empty set. :param other: Any kind of iterable. :rtype: boolean """ def isdisjoint_trans_pure(pipe): return not pipe.sinter(self.key, other.key) def isdisjoint_trans_mixed(pipe): self_values = set(self.__iter__(pipe)) if use_redis: other_values = set(other.__iter__(pipe)) else: other_values = set(other) return self_values.isdisjoint(other_values) if self._same_redis(other): return self._transaction(isdisjoint_trans_pure, other.key) if self._same_redis(other, RedisCollection): use_redis = True return self._transaction(isdisjoint_trans_mixed, other.key) use_redis = False return self._transaction(isdisjoint_trans_mixed)
Return ``True`` if the set has no elements in common with *other*. Sets are disjoint if and only if their intersection is the empty set. :param other: Any kind of iterable. :rtype: boolean
entailment
def pop(self): """ Remove and return an arbitrary element from the set. Raises :exc:`KeyError` if the set is empty. """ result = self.redis.spop(self.key) if result is None: raise KeyError return self._unpickle(result)
Remove and return an arbitrary element from the set. Raises :exc:`KeyError` if the set is empty.
entailment
def random_sample(self, k=1): """ Return a *k* length list of unique elements chosen from the Set. Elements are not removed. Similar to the :func:`random.sample` function from the standard library. :param k: Size of the sample, defaults to 1. :rtype: :class:`list` """ # k == 0: no work to do if k == 0: results = [] # k == 1: same behavior on all versions of Redis elif k == 1: results = [self.redis.srandmember(self.key)] # k != 1, Redis version >= 2.6: compute in Redis else: results = self.redis.srandmember(self.key, k) return [self._unpickle(x) for x in results]
Return a *k* length list of unique elements chosen from the Set. Elements are not removed. Similar to the :func:`random.sample` function from the standard library. :param k: Size of the sample, defaults to 1. :rtype: :class:`list`
entailment
def remove(self, value): """ Remove element *value* from the set. Raises :exc:`KeyError` if it is not contained in the set. """ # Raise TypeError if value is not hashable hash(value) result = self.redis.srem(self.key, self._pickle(value)) if not result: raise KeyError(value)
Remove element *value* from the set. Raises :exc:`KeyError` if it is not contained in the set.
entailment
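Taken together, add(), discard(), remove(), and pop() mirror Python's built-in set API on top of SADD, SREM, and SPOP. A minimal usage sketch, assuming a redis-collections-style Set bound to a local Redis (the import path and constructor kwargs are assumptions):

import redis
from redis_collections import Set  # assumed import path

s = Set(redis=redis.StrictRedis(), key='demo:set')  # assumed constructor kwargs
s.add('a')
s.add('b')
s.discard('missing')        # absent value: silently ignored, like set.discard
try:
    s.remove('missing')     # absent value: raises KeyError, like set.remove
except KeyError:
    print('not a member')
print(s.pop() in {'a', 'b'})  # True; SPOP returns an arbitrary element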
def scan_elements(self): """ Yield each of the elements from the collection, without pulling them all into memory. .. warning:: This method is not available on the set collections provided by Python. This method may return the same element multiple times. See the `Redis SCAN documentation <http://redis.io/commands/scan#scan-guarantees>`_ for details. """ for x in self.redis.sscan_iter(self.key): yield self._unpickle(x)
Yield each of the elements from the collection, without pulling them all into memory. .. warning:: This method is not available on the set collections provided by Python. This method may return the same element multiple times. See the `Redis SCAN documentation <http://redis.io/commands/scan#scan-guarantees>`_ for details.
entailment
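Because SCAN can yield the same element more than once, callers that need exact-once semantics can materialize the generator into a set; continuing the sketch above:

unique = set(s.scan_elements())  # collapses any duplicates SCAN may emit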
def intersection_update(self, *others): """ Update the set, keeping only elements found in it and all *others*. :param others: Iterables, each one as a single positional argument. :rtype: None .. note:: The same behavior as at :func:`difference_update` applies. """ return self._op_update_helper( tuple(others), operator.and_, 'sinterstore', update=True )
Update the set, keeping only elements found in it and all *others*. :param others: Iterables, each one as a single positional argument. :rtype: None .. note:: The same behavior as at :func:`difference_update` applies.
entailment
def update(self, *others): """ Update the set, adding elements from all *others*. :param others: Iterables, each one as a single positional argument. :rtype: None .. note:: If all *others* are :class:`Set` instances, the operation is performed completely in Redis. Otherwise, values are retrieved from Redis and the operation is performed in Python. """ return self._op_update_helper( tuple(others), operator.or_, 'sunionstore', update=True )
Update the set, adding elements from all *others*. :param others: Iterables, each one as a single positional argument. :rtype: None .. note:: If all *others* are :class:`Set` instances, the operation is performed completely in Redis. Otherwise, values are retrieved from Redis and the operation is performed in Python.
entailment
def difference_update(self, *others): """ Update the set, removing elements found in *others*. :param others: Iterables, each one as a single positional argument. :rtype: None .. note:: The same behavior as at :func:`update` applies. """ return self._op_update_helper( tuple(others), operator.sub, 'sdiffstore', update=True )
Update the set, removing elements found in *others*. :param others: Iterables, each one as a single positional argument. :rtype: None .. note:: The same behavior as at :func:`update` applies.
entailment
def discard_member(self, member, pipe=None): """ Remove *member* from the collection, unconditionally. """ pipe = self.redis if pipe is None else pipe pipe.zrem(self.key, self._pickle(member))
Remove *member* from the collection, unconditionally.
entailment
def scan_items(self): """ Yield each of the ``(member, score)`` tuples from the collection, without pulling them all into memory. .. warning:: This method may return the same (member, score) tuple multiple times. See the `Redis SCAN documentation <http://redis.io/commands/scan#scan-guarantees>`_ for details. """ for m, s in self.redis.zscan_iter(self.key): yield self._unpickle(m), s
Yield each of the ``(member, score)`` tuples from the collection, without pulling them all into memory. .. warning:: This method may return the same (member, score) tuple multiple times. See the `Redis SCAN documentation <http://redis.io/commands/scan#scan-guarantees>`_ for details.
entailment
def update(self, other): """ Update the collection with items from *other*. Accepts other :class:`SortedSetBase` instances, dictionaries mapping members to numeric scores, or sequences of ``(member, score)`` tuples. """ def update_trans(pipe): other_items = method(pipe=pipe) if use_redis else method() pipe.multi() for member, score in other_items: pipe.zadd(self.key, {self._pickle(member): float(score)}) watches = [] if self._same_redis(other, RedisCollection): use_redis = True watches.append(other.key) else: use_redis = False if hasattr(other, 'items'): method = other.items elif hasattr(other, '__iter__'): method = other.__iter__ self._transaction(update_trans, *watches)
Update the collection with items from *other*. Accepts other :class:`SortedSetBase` instances, dictionaries mapping members to numeric scores, or sequences of ``(member, score)`` tuples.
entailment
def count_between(self, min_score=None, max_score=None): """ Returns the number of members whose score is between *min_score* and *max_score* (inclusive). """ min_score = float('-inf') if min_score is None else float(min_score) max_score = float('inf') if max_score is None else float(max_score) return self.redis.zcount(self.key, min_score, max_score)
Returns the number of members whose score is between *min_score* and *max_score* (inclusive).
entailment
def discard_between( self, min_rank=None, max_rank=None, min_score=None, max_score=None, ): """ Remove members whose ranking is between *min_rank* and *max_rank* OR whose score is between *min_score* and *max_score* (both ranges inclusive). If no bounds are specified, no members will be removed. """ no_ranks = (min_rank is None) and (max_rank is None) no_scores = (min_score is None) and (max_score is None) # Default scope: nothing if no_ranks and no_scores: return # Scope widens to given score range if no_ranks and (not no_scores): return self.discard_by_score(min_score, max_score) # Scope widens to given rank range if (not no_ranks) and no_scores: return self.discard_by_rank(min_rank, max_rank) # Scope widens to score range and then rank range with self.redis.pipeline() as pipe: self.discard_by_score(min_score, max_score, pipe) self.discard_by_rank(min_rank, max_rank, pipe) pipe.execute()
Remove members whose ranking is between *min_rank* and *max_rank* OR whose score is between *min_score* and *max_score* (both ranges inclusive). If no bounds are specified, no members will be removed.
entailment
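A sketch of the OR semantics, assuming z is an instance of the sorted-set class these methods belong to, pre-loaded with members 'a' through 'e' scored 1 through 5 (setup omitted; the instance name is an assumption):

z.discard_between()             # no bounds given: removes nothing
z.discard_between(min_score=5)  # removes 'e' (score >= 5)
z.discard_between(min_rank=0, max_rank=0, min_score=4)
# union of both ranges: removes 'a' (rank 0) and 'd' (score >= 4)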
def get_score(self, member, default=None, pipe=None): """ Return the score of *member*, or *default* if it is not in the collection. """ pipe = self.redis if pipe is None else pipe score = pipe.zscore(self.key, self._pickle(member)) if (score is None) and (default is not None): score = float(default) return score
Return the score of *member*, or *default* if it is not in the collection.
entailment
def get_or_set_score(self, member, default=0): """ If *member* is in the collection, return its score. If not, store it with a score of *default* and return *default*. *default* defaults to 0. """ default = float(default) def get_or_set_score_trans(pipe): pickled_member = self._pickle(member) score = pipe.zscore(self.key, pickled_member) if score is None: pipe.zadd(self.key, {pickled_member: default}) return default return score return self._transaction(get_or_set_score_trans)
If *member* is in the collection, return its score. If not, store it with a score of *default* and return *default*. *default* defaults to 0.
entailment
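A hedged usage sketch on the same assumed sorted-set instance z. Because the read-or-initialize runs inside one transaction, concurrent callers agree on the stored default:

z.set_score('hits', 2.5)
print(z.get_or_set_score('hits'))       # 2.5 -- already present, left unchanged
print(z.get_or_set_score('misses', 7))  # 7.0 -- stored atomically, then returned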
def get_rank(self, member, reverse=False, pipe=None): """ Return the rank of *member* in the collection. By default, the member with the lowest score has rank 0. If *reverse* is ``True``, the member with the highest score has rank 0. """ pipe = self.redis if pipe is None else pipe method = getattr(pipe, 'zrevrank' if reverse else 'zrank') rank = method(self.key, self._pickle(member)) return rank
Return the rank of *member* in the collection. By default, the member with the lowest score has rank 0. If *reverse* is ``True``, the member with the highest score has rank 0.
entailment
def increment_score(self, member, amount=1): """ Adjust the score of *member* by *amount*. If *member* is not in the collection it will be stored with a score of *amount*. """ return self.redis.zincrby( self.key, float(amount), self._pickle(member) )
Adjust the score of *member* by *amount*. If *member* is not in the collection it will be stored with a score of *amount*.
entailment
def items( self, min_rank=None, max_rank=None, min_score=None, max_score=None, reverse=False, pipe=None, ): """ Return a list of ``(member, score)`` tuples whose ranking is between *min_rank* and *max_rank* AND whose score is between *min_score* and *max_score* (both ranges inclusive). If no bounds are specified, all items will be returned. """ pipe = self.redis if pipe is None else pipe no_ranks = (min_rank is None) and (max_rank is None) no_scores = (min_score is None) and (max_score is None) # Default scope: everything if no_ranks and no_scores: ret = self.items_by_score(min_score, max_score, reverse, pipe) # Scope narrows to given score range elif no_ranks and (not no_scores): ret = self.items_by_score(min_score, max_score, reverse, pipe) # Scope narrows to given rank range elif (not no_ranks) and no_scores: ret = self.items_by_rank(min_rank, max_rank, reverse, pipe) # Scope narrows twice - once by rank and once by score else: results = self.items_by_rank(min_rank, max_rank, reverse, pipe) ret = [] for member, score in results: if (min_score is not None) and (score < min_score): continue if (max_score is not None) and (score > max_score): continue ret.append((member, score)) return ret
Return a list of ``(member, score)`` tuples whose ranking is between *min_rank* and *max_rank* AND whose score is between *min_score* and *max_score* (both ranges inclusive). If no bounds are specified, all items will be returned.
entailment
def set_score(self, member, score, pipe=None): """ Set the score of *member* to *score*. """ pipe = self.redis if pipe is None else pipe pipe.zadd(self.key, {self._pickle(member): float(score)})
Set the score of *member* to *score*.
entailment
def distance_between(self, place_1, place_2, unit='km'): """ Return the great-circle distance between *place_1* and *place_2*, in the *unit* specified. The default unit is ``'km'``, but ``'m'``, ``'mi'``, and ``'ft'`` can also be specified. """ pickled_place_1 = self._pickle(place_1) pickled_place_2 = self._pickle(place_2) try: return self.redis.geodist( self.key, pickled_place_1, pickled_place_2, unit=unit ) except TypeError: return None
Return the great-circle distance between *place_1* and *place_2*, in the *unit* specified. The default unit is ``'km'``, but ``'m'``, ``'mi'``, and ``'ft'`` can also be specified.
entailment
def get_hash(self, place): """ Return the Geohash of *place*. If it's not present in the collection, ``None`` will be returned instead. """ pickled_place = self._pickle(place) try: return self.redis.geohash(self.key, pickled_place)[0] except (AttributeError, TypeError): return None
Return the Geohash of *place*. If it's not present in the collection, ``None`` will be returned instead.
entailment
def get_location(self, place): """ Return a dict with the coordinates of *place*. The dict's keys are ``'latitude'`` and ``'longitude'``. If it's not present in the collection, ``None`` will be returned instead. """ pickled_place = self._pickle(place) try: longitude, latitude = self.redis.geopos(self.key, pickled_place)[0] except (AttributeError, TypeError): return None return {'latitude': latitude, 'longitude': longitude}
Return a dict with the coordinates of *place*. The dict's keys are ``'latitude'`` and ``'longitude'``. If it's not present in the collection, ``None`` will be returned instead.
entailment
def places_within_radius( self, place=None, latitude=None, longitude=None, radius=0, **kwargs ): """ Return descriptions of the places stored in the collection that are within the circle specified by the given location and radius. A list of dicts will be returned. The center of the circle can be specified by the identifier of another place in the collection with the *place* keyword argument. Or, it can be specified by using both the *latitude* and *longitude* keyword arguments. By default the *radius* is given in kilometers, but you may also set the *unit* keyword argument to ``'m'``, ``'mi'``, or ``'ft'``. Limit the number of results returned with the *count* keyword argument. Change the sorted order by setting the *sort* keyword argument to ``b'DESC'``. """ kwargs['withdist'] = True kwargs['withcoord'] = True kwargs['withhash'] = False kwargs.setdefault('sort', 'ASC') unit = kwargs.setdefault('unit', 'km') # Make the query if place is not None: response = self.redis.georadiusbymember( self.key, self._pickle(place), radius, **kwargs ) elif (latitude is not None) and (longitude is not None): response = self.redis.georadius( self.key, longitude, latitude, radius, **kwargs ) else: raise ValueError( 'Must specify place, or both latitude and longitude' ) # Assemble the result ret = [] for item in response: ret.append( { 'place': self._unpickle(item[0]), 'distance': item[1], 'unit': unit, 'latitude': item[2][1], 'longitude': item[2][0], } ) return ret
Return descriptions of the places stored in the collection that are within the circle specified by the given location and radius. A list of dicts will be returned. The center of the circle can be specified by the identifier of another place in the collection with the *place* keyword argument. Or, it can be specified by using both the *latitude* and *longitude* keyword arguments. By default the *radius* is given in kilometers, but you may also set the *unit* keyword argument to ``'m'``, ``'mi'``, or ``'ft'``. Limit the number of results returned with the *count* keyword argument. Change the sorted order by setting the *sort* keyword argument to ``b'DESC'``.
entailment
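A hedged usage sketch. The GeoDB class name, import path, and constructor kwargs are assumptions; the two method calls and the result keys come from the code above:

import redis
from redis_collections import GeoDB  # assumed import path

g = GeoDB(redis=redis.StrictRedis(), key='demo:geo')  # assumed constructor kwargs
g.set_location('office', 51.5074, -0.1278)
g.set_location('pub', 51.5101, -0.1340)
for hit in g.places_within_radius(place='office', radius=2, unit='km', count=10):
    print(hit['place'], round(hit['distance'], 3), hit['unit'])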
def set_location(self, place, latitude, longitude, pipe=None): """ Set the location of *place* to the location specified by *latitude* and *longitude*. *place* can be any pickle-able Python object. """ pipe = self.redis if pipe is None else pipe pipe.geoadd(self.key, longitude, latitude, self._pickle(place))
Set the location of *place* to the location specified by *latitude* and *longitude*. *place* can be any pickle-able Python object.
entailment
def update(self, other): """ Update the collection with items from *other*. Accepts other :class:`GeoDB` instances, dictionaries mapping places to ``{'latitude': latitude, 'longitude': longitude}`` dicts, or sequences of ``(place, latitude, longitude)`` tuples. """ # other is another Sorted Set def update_sortedset_trans(pipe): items = other._data(pipe=pipe) if use_redis else other._data() pipe.multi() for member, score in items: pipe.zadd(self.key, {self._pickle(member): float(score)}) # other is dict-like def update_mapping_trans(pipe): items = other.items(pipe=pipe) if use_redis else other.items() pipe.multi() for place, value in items: self.set_location( place, value['latitude'], value['longitude'], pipe=pipe ) # other is a list of tuples def update_tuples_trans(pipe): items = ( other.__iter__(pipe=pipe) if use_redis else other.__iter__() ) pipe.multi() for place, latitude, longitude in items: self.set_location(place, latitude, longitude, pipe=pipe) watches = [] if self._same_redis(other, RedisCollection): use_redis = True watches.append(other.key) else: use_redis = False if isinstance(other, SortedSetBase): func = update_sortedset_trans elif hasattr(other, 'items'): func = update_mapping_trans elif hasattr(other, '__iter__'): func = update_tuples_trans self._transaction(func, *watches)
Update the collection with items from *other*. Accepts other :class:`GeoDB` instances, dictionaries mapping places to ``{'latitude': latitude, 'longitude': longitude}`` dicts, or sequences of ``(place, latitude, longitude)`` tuples.
entailment
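Per the docstring, update() dispatches on the shape of *other*; a short sketch of the three accepted forms, reusing the assumed GeoDB instance g from above:

g.update({'cafe': {'latitude': 48.8566, 'longitude': 2.3522}})  # mapping form
g.update([('museum', 40.7794, -73.9632)])                       # (place, lat, lon) tuples
g2 = GeoDB(redis=redis.StrictRedis(), key='demo:geo2')
g2.update(g)  # another GeoDB: copied member by member inside a transaction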
def copy(self, key=None): """ Create another collection with the same items and maxsize, stored at the given *key*. """ other = self.__class__( maxsize=self.maxsize, redis=self.persistence.redis, key=key ) other.update(self) return other
Create another collection with the same items and maxsize, stored at the given *key*.
entailment
def fromkeys(cls, seq, value=None, **kwargs): """ Create a new collection with keys from *seq* and values set to *value*. The keyword arguments are passed to the persistent ``Dict``. """ other = cls(**kwargs) other.update(((key, value) for key in seq)) return other
Create a new collection with keys from *seq* and values set to *value*. The keyword arguments are passed to the persistent ``Dict``.
entailment
def sync(self, clear_cache=False): """ Copy items from the local cache to the persistent Dict. If *clear_cache* is ``True``, clear out the local cache after pushing its items to Redis. """ self.persistence.update(self) if clear_cache: self.cache.clear()
Copy items from the local cache to the persistent Dict. If *clear_cache* is ``True``, clear out the local cache after pushing its items to Redis.
entailment
def _data(self, pipe=None): """ Return a :obj:`list` of all values from Redis (without checking the local cache). """ pipe = self.redis if pipe is None else pipe return [self._unpickle(v) for v in pipe.lrange(self.key, 0, -1)]
Return a :obj:`list` of all values from Redis (without checking the local cache).
entailment
def append(self, value): """Insert *value* at the end of this collection.""" len_self = self.redis.rpush(self.key, self._pickle(value)) if self.writeback: self.cache[len_self - 1] = value
Insert *value* at the end of this collection.
entailment
def copy(self, key=None): """ Return a new collection with the same items as this one. If *key* is specified, create the new collection with the given Redis key. """ other = self.__class__( redis=self.redis, key=key, writeback=self.writeback ) other.extend(self) return other
Return a new collection with the same items as this one. If *key* is specified, create the new collection with the given Redis key.
entailment
def extend(self, other): """ Adds the values from the iterable *other* to the end of this collection. """ def extend_trans(pipe): values = list(other.__iter__(pipe)) if use_redis else other len_self = pipe.rpush(self.key, *(self._pickle(v) for v in values)) if self.writeback: for i, v in enumerate(values, len_self - len(values)): self.cache[i] = v if self._same_redis(other, RedisCollection): use_redis = True self._transaction(extend_trans, other.key) else: use_redis = False self._transaction(extend_trans)
Adds the values from the iterable *other* to the end of this collection.
entailment
def index(self, value, start=None, stop=None): """ Return the index of the first occurrence of *value*. If *start* or *stop* are provided, return the smallest index such that ``s[index] == value`` and ``start <= index < stop``. """ def index_trans(pipe): len_self, normal_start = self._normalize_index(start or 0, pipe) __, normal_stop = self._normalize_index(stop or len_self, pipe) for i, v in enumerate(self.__iter__(pipe=pipe)): if v == value: if i < normal_start: continue if i >= normal_stop: break return i raise ValueError return self._transaction(index_trans)
Return the index of the first occurrence of *value*. If *start* or *stop* are provided, return the smallest index such that ``s[index] == value`` and ``start <= index < stop``.
entailment
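The semantics match list.index, including the start/stop bounds; a pure-Python reference for the expected behavior:

values = ['a', 'b', 'a']
assert values.index('a') == 0     # first occurrence wins
assert values.index('a', 1) == 2  # a start bound skips earlier matches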
def insert(self, index, value): """ Insert *value* into the collection at *index*. """ if index == 0: return self._insert_left(value) def insert_middle_trans(pipe): self._insert_middle(index, value, pipe=pipe) return self._transaction(insert_middle_trans)
Insert *value* into the collection at *index*.
entailment
def pop(self, index=-1): """ Retrieve the value at *index*, remove it from the collection, and return it. """ if index == 0: return self._pop_left() elif index == -1: return self._pop_right() else: return self._pop_middle(index)
Retrieve the value at *index*, remove it from the collection, and return it.
entailment
def remove(self, value): """Remove the first occurrence of *value*.""" def remove_trans(pipe): # If we're caching, we'll need to synchronize before removing. if self.writeback: self._sync_helper(pipe) delete_count = pipe.lrem(self.key, 1, self._pickle(value)) if delete_count == 0: raise ValueError self._transaction(remove_trans)
Remove the first occurrence of *value*.
entailment
def reverse(self): """ Reverses the items of this collection "in place" (only two values are retrieved from Redis at a time). """ def reverse_trans(pipe): if self.writeback: self._sync_helper(pipe) n = self.__len__(pipe) for i in range(n // 2): left = pipe.lindex(self.key, i) right = pipe.lindex(self.key, n - i - 1) pipe.lset(self.key, i, right) pipe.lset(self.key, n - i - 1, left) self._transaction(reverse_trans)
Reverses the items of this collection "in place" (only two values are retrieved from Redis at a time).
entailment
def sort(self, key=None, reverse=False): """ Sort the items of this collection according to the optional callable *key*. If *reverse* is set then the sort order is reversed. .. note:: This sort requires all items to be retrieved from Redis and stored in memory. """ def sort_trans(pipe): values = list(self.__iter__(pipe)) values.sort(key=key, reverse=reverse) pipe.multi() pipe.delete(self.key) pipe.rpush(self.key, *(self._pickle(v) for v in values)) if self.writeback: self.cache = {} return self._transaction(sort_trans)
Sort the items of this collection according to the optional callable *key*. If *reverse* is set then the sort order is reversed. .. note:: This sort requires all items to be retrieved from Redis and stored in memory.
entailment
def append(self, value): """Add *value* to the right side of the collection.""" def append_trans(pipe): self._append_helper(value, pipe) self._transaction(append_trans)
Add *value* to the right side of the collection.
entailment
def appendleft(self, value): """Add *value* to the left side of the collection.""" def appendleft_trans(pipe): self._appendleft_helper(value, pipe) self._transaction(appendleft_trans)
Add *value* to the left side of the collection.
entailment
def copy(self, key=None): """ Return a new collection with the same items as this one. If *key* is specified, create the new collection with the given Redis key. """ other = self.__class__( self.__iter__(), self.maxlen, redis=self.redis, key=key, writeback=self.writeback, ) return other
Return a new collection with the same items as this one. If *key* is specified, create the new collection with the given Redis key.
entailment
def extend(self, other): """ Extend the right side of the collection by appending values from the iterable *other*. """ def extend_trans(pipe): values = list(other.__iter__(pipe)) if use_redis else other for v in values: self._append_helper(v, pipe) if self._same_redis(other, RedisCollection): use_redis = True self._transaction(extend_trans, other.key) else: use_redis = False self._transaction(extend_trans)
Extend the right side of the collection by appending values from the iterable *other*.
entailment
def extendleft(self, other): """ Extend the left side of the collection by appending values from the iterable *other*. Note that the appends will reverse the order of the given values. """ def extendleft_trans(pipe): values = list(other.__iter__(pipe)) if use_redis else other for v in values: self._appendleft_helper(v, pipe) if self._same_redis(other, RedisCollection): use_redis = True self._transaction(extendleft_trans, other.key) else: use_redis = False self._transaction(extendleft_trans)
Extend the left side of the collection by appending values from the iterable *other*. Note that the appends will reverse the order of the given values.
entailment
def insert(self, index, value): """ Insert *value* into the collection at *index*. If the insertion would cause the collection to grow beyond ``maxlen``, raise ``IndexError``. """ def insert_trans(pipe): len_self = self.__len__(pipe) if (self.maxlen is not None) and (len_self >= self.maxlen): raise IndexError if index == 0: self._insert_left(value, pipe) else: self._insert_middle(index, value, pipe=pipe) self._transaction(insert_trans)
Insert *value* into the collection at *index*. If the insertion would cause the collection to grow beyond ``maxlen``, raise ``IndexError``.
entailment
def rotate(self, n=1): """ Rotate the deque n steps to the right. If n is negative, rotate to the left. """ # No work to do for a 0-step rotate if n == 0: return def rotate_trans(pipe): # Synchronize the cache before rotating if self.writeback: self._sync_helper(pipe) # Rotating len(self) times has no effect. len_self = self.__len__(pipe) steps = abs_n % len_self # When n is positive we can use the built-in Redis command if forward: pipe.multi() for __ in range(steps): pipe.rpoplpush(self.key, self.key) # When n is negative we must use Python else: for __ in range(steps): pickled_value = pipe.lpop(self.key) pipe.rpush(self.key, pickled_value) forward = n >= 0 abs_n = abs(n) self._transaction(rotate_trans)
Rotate the deque n steps to the right. If n is negative, rotate to the left.
entailment
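The sign convention matches collections.deque: positive n rotates right (handled by RPOPLPUSH above), negative n rotates left. A pure-Python reference for the expected results:

from collections import deque

d = deque([1, 2, 3, 4])
d.rotate(1)
print(d)   # deque([4, 1, 2, 3]) -- one step to the right
d.rotate(-1)
print(d)   # deque([1, 2, 3, 4]) -- one step back to the left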
def get_entities_by_kind(membership_cache=None, is_active=True): """ Builds a dict whose keys are entity kind ids and whose values are another dict. Each of these dicts is keyed off of a super entity id and optionally has an 'all' key for any group that has a null super entity. Example structure: { entity_kind_id: { entity1_id: [1, 2, 3], entity2_id: [4, 5, 6], 'all': [1, 2, 3, 4, 5, 6] } } :rtype: dict """ # Accept an existing cache or build a new one if membership_cache is None: membership_cache = EntityGroup.objects.get_membership_cache(is_active=is_active) entities_by_kind = {} kinds_with_all = set() kinds_with_supers = set() super_ids = set() # Loop over each group for group_id, memberships in membership_cache.items(): # Look at each membership for entity_id, entity_kind_id in memberships: # Only care about memberships with entity kind if entity_kind_id: # Make sure a dict exists for this kind entities_by_kind.setdefault(entity_kind_id, {}) # Check if this is all entities of a kind under a specific entity if entity_id: entities_by_kind[entity_kind_id][entity_id] = [] kinds_with_supers.add(entity_kind_id) super_ids.add(entity_id) else: # This is all entities of this kind entities_by_kind[entity_kind_id]['all'] = [] kinds_with_all.add(entity_kind_id) # Get entities for 'all' all_entities_for_types = Entity.objects.filter( entity_kind_id__in=kinds_with_all ).values_list('id', 'entity_kind_id') # Add entity ids to entity kind's all list for id, entity_kind_id in all_entities_for_types: entities_by_kind[entity_kind_id]['all'].append(id) # Get relationships relationships = EntityRelationship.objects.filter( super_entity_id__in=super_ids, sub_entity__entity_kind_id__in=kinds_with_supers ).values_list( 'super_entity_id', 'sub_entity_id', 'sub_entity__entity_kind_id' ) # Add entity ids to each super entity's list for super_entity_id, sub_entity_id, sub_entity__entity_kind_id in relationships: entities_by_kind[sub_entity__entity_kind_id].setdefault(super_entity_id, []) entities_by_kind[sub_entity__entity_kind_id][super_entity_id].append(sub_entity_id) return entities_by_kind
Builds a dict whose keys are entity kind ids and whose values are another dict. Each of these dicts is keyed off of a super entity id and optionally has an 'all' key for any group that has a null super entity. Example structure: { entity_kind_id: { entity1_id: [1, 2, 3], entity2_id: [4, 5, 6], 'all': [1, 2, 3, 4, 5, 6] } } :rtype: dict
entailment
def is_sub_to_all(self, *super_entities): """ Given a list of super entities, return the entities that have those as a subset of their super entities. """ if super_entities: if len(super_entities) == 1: # Optimize for the case of just one super entity since this is a much less intensive query has_subset = EntityRelationship.objects.filter( super_entity=super_entities[0]).values_list('sub_entity', flat=True) else: # Get a list of entities that have super entities with all types has_subset = EntityRelationship.objects.filter( super_entity__in=super_entities).values('sub_entity').annotate(Count('super_entity')).filter( super_entity__count=len(set(super_entities))).values_list('sub_entity', flat=True) return self.filter(id__in=has_subset) else: return self
Given a list of super entities, return the entities that have those as a subset of their super entities.
entailment
def is_sub_to_any(self, *super_entities): """ Given a list of super entities, return the entities that have super entities that intersect with those provided. """ if super_entities: return self.filter(id__in=EntityRelationship.objects.filter( super_entity__in=super_entities).values_list('sub_entity', flat=True)) else: return self
Given a list of super entities, return the entities that have super entities that intersect with those provided.
entailment
def is_sub_to_all_kinds(self, *super_entity_kinds): """ Each returned entity will have super entities whose combined entity kinds include *super_entity_kinds*. """ if super_entity_kinds: if len(super_entity_kinds) == 1: # Optimize for the case of just one has_subset = EntityRelationship.objects.filter( super_entity__entity_kind=super_entity_kinds[0]).values_list('sub_entity', flat=True) else: # Get a list of entities that have super entities with all types has_subset = EntityRelationship.objects.filter( super_entity__entity_kind__in=super_entity_kinds).values('sub_entity').annotate( Count('super_entity')).filter(super_entity__count=len(set(super_entity_kinds))).values_list( 'sub_entity', flat=True) return self.filter(pk__in=has_subset) else: return self
Each returned entity will have super entities whose combined entity kinds include *super_entity_kinds*.
entailment
def is_sub_to_any_kind(self, *super_entity_kinds): """ Find all entities that have super_entities of any of the specified kinds """ if super_entity_kinds: # get the pks of the desired subs from the relationships table if len(super_entity_kinds) == 1: entity_pks = EntityRelationship.objects.filter( super_entity__entity_kind=super_entity_kinds[0] ).select_related('entity_kind', 'sub_entity').values_list('sub_entity', flat=True) else: entity_pks = EntityRelationship.objects.filter( super_entity__entity_kind__in=super_entity_kinds ).select_related('entity_kind', 'sub_entity').values_list('sub_entity', flat=True) # return a queryset limited to only those pks return self.filter(pk__in=entity_pks) else: return self
Find all entities that have super_entities of any of the specified kinds
entailment
def cache_relationships(self, cache_super=True, cache_sub=True): """ Caches the super and sub relationships by doing a prefetch_related. """ relationships_to_cache = compress( ['super_relationships__super_entity', 'sub_relationships__sub_entity'], [cache_super, cache_sub]) return self.prefetch_related(*relationships_to_cache)
Caches the super and sub relationships by doing a prefetch_related.
entailment
def get_for_obj(self, entity_model_obj): """ Given a saved entity model object, return the associated entity. """ return self.get(entity_type=ContentType.objects.get_for_model( entity_model_obj, for_concrete_model=False), entity_id=entity_model_obj.id)
Given a saved entity model object, return the associated entity.
entailment
def delete_for_obj(self, entity_model_obj): """ Delete the entities associated with a model object. """ return self.filter( entity_type=ContentType.objects.get_for_model( entity_model_obj, for_concrete_model=False), entity_id=entity_model_obj.id).delete( force=True)
Delete the entities associated with a model object.
entailment
def cache_relationships(self, cache_super=True, cache_sub=True): """ Caches the super and sub relationships by doing a prefetch_related. """ return self.get_queryset().cache_relationships(cache_super=cache_super, cache_sub=cache_sub)
Caches the super and sub relationships by doing a prefetch_related.
entailment
def get_membership_cache(self, group_ids=None, is_active=True): """ Build a dict cache with the group membership info. Keyed off the group id and the values are a 2 element list of entity id and entity kind id (same values as the membership model). If no group ids are passed, then all groups will be fetched :param is_active: Flag indicating whether to filter on entity active status. None will not filter. :rtype: dict """ membership_queryset = EntityGroupMembership.objects.filter( Q(entity__isnull=True) | (Q(entity__isnull=False) & Q(entity__is_active=is_active)) ) if is_active is None: membership_queryset = EntityGroupMembership.objects.all() if group_ids: membership_queryset = membership_queryset.filter(entity_group_id__in=group_ids) membership_queryset = membership_queryset.values_list('entity_group_id', 'entity_id', 'sub_entity_kind_id') # Iterate over the query results and build the cache dict membership_cache = {} for entity_group_id, entity_id, sub_entity_kind_id in membership_queryset: membership_cache.setdefault(entity_group_id, []) membership_cache[entity_group_id].append([entity_id, sub_entity_kind_id]) return membership_cache
Build a dict cache with the group membership info. Keyed off the group id and the values are a 2 element list of entity id and entity kind id (same values as the membership model). If no group ids are passed, then all groups will be fetched :param is_active: Flag indicating whether to filter on entity active status. None will not filter. :rtype: dict
entailment
def all_entities(self, is_active=True): """ Return all the entities in the group. Because groups can contain both individual entities, as well as whole groups of entities, this method acts as a convenient way to get a queryset of all the entities in the group. """ return self.get_all_entities(return_models=True, is_active=is_active)
Return all the entities in the group. Because groups can contain both individual entities, as well as whole groups of entities, this method acts as a convenient way to get a queryset of all the entities in the group.
entailment
def get_all_entities(self, membership_cache=None, entities_by_kind=None, return_models=False, is_active=True): """ Returns a set of all entity ids in this group, or optionally returns a queryset of all entity models. In order to reduce queries for multiple group lookups, it is expected that the membership_cache and entities_by_kind are built outside of this method and passed in as arguments. :param membership_cache: A group cache dict generated from `EntityGroup.objects.get_membership_cache()` :type membership_cache: dict :param entities_by_kind: An entities by kind dict generated from the `get_entities_by_kind` function :type entities_by_kind: dict :param return_models: If True, returns an Entity queryset, if False, returns a set of entity ids :type return_models: bool :param is_active: Flag to control entities being returned. Defaults to True for active entities only :type is_active: bool """ # If cache args were not passed, generate the cache if membership_cache is None: membership_cache = EntityGroup.objects.get_membership_cache([self.id], is_active=is_active) if entities_by_kind is None: entities_by_kind = get_entities_by_kind(membership_cache=membership_cache) # Build set of all entity ids for this group entity_ids = set() # This group does have entities if membership_cache.get(self.id): # Loop over each membership in this group for entity_id, entity_kind_id in membership_cache[self.id]: if entity_id: if entity_kind_id: # All sub entities of this kind under this entity entity_ids.update(entities_by_kind[entity_kind_id][entity_id]) else: # Individual entity entity_ids.add(entity_id) else: # All entities of this kind entity_ids.update(entities_by_kind[entity_kind_id]['all']) # Check if a queryset needs to be returned if return_models: return Entity.objects.filter(id__in=entity_ids) return entity_ids
Returns a set of all entity ids in this group, or optionally returns a queryset of all entity models. In order to reduce queries for multiple group lookups, it is expected that the membership_cache and entities_by_kind are built outside of this method and passed in as arguments. :param membership_cache: A group cache dict generated from `EntityGroup.objects.get_membership_cache()` :type membership_cache: dict :param entities_by_kind: An entities by kind dict generated from the `get_entities_by_kind` function :type entities_by_kind: dict :param return_models: If True, returns an Entity queryset, if False, returns a set of entity ids :type return_models: bool :param is_active: Flag to control entities being returned. Defaults to True for active entities only :type is_active: bool
entailment
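A hedged sketch of the batching pattern the docstring recommends: build both caches once, then share them across every group lookup. The model and helper names come from the code above; the surrounding Django setup is assumed:

membership_cache = EntityGroup.objects.get_membership_cache()
entities_by_kind = get_entities_by_kind(membership_cache=membership_cache)

for group in EntityGroup.objects.all():
    entity_ids = group.get_all_entities(
        membership_cache=membership_cache,
        entities_by_kind=entities_by_kind,
    )  # no per-group cache rebuilds, just set lookups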
def add_entity(self, entity, sub_entity_kind=None): """ Add an entity, or sub-entity group to this EntityGroup. :type entity: Entity :param entity: The entity to add. :type sub_entity_kind: Optional EntityKind :param sub_entity_kind: If a sub_entity_kind is given, all sub_entities of the entity will be added to this EntityGroup. """ membership = EntityGroupMembership.objects.create( entity_group=self, entity=entity, sub_entity_kind=sub_entity_kind, ) return membership
Add an entity, or sub-entity group to this EntityGroup. :type entity: Entity :param entity: The entity to add. :type sub_entity_kind: Optional EntityKind :param sub_entity_kind: If a sub_entity_kind is given, all sub_entities of the entity will be added to this EntityGroup.
entailment
def bulk_add_entities(self, entities_and_kinds): """ Add many entities and sub-entity groups to this EntityGroup. :type entities_and_kinds: List of (Entity, EntityKind) pairs. :param entities_and_kinds: A list of entity, entity-kind pairs to add to the group. In the pairs the entity-kind can be ``None``, to add a single entity, or some entity kind to add all sub-entities of that kind. """ memberships = [EntityGroupMembership( entity_group=self, entity=entity, sub_entity_kind=sub_entity_kind, ) for entity, sub_entity_kind in entities_and_kinds] created = EntityGroupMembership.objects.bulk_create(memberships) return created
Add many entities and sub-entity groups to this EntityGroup. :type entities_and_kinds: List of (Entity, EntityKind) pairs. :param entities_and_kinds: A list of entity, entity-kind pairs to add to the group. In the pairs the entity-kind can be ``None``, to add a single entity, or some entity kind to add all sub-entities of that kind.
entailment
def remove_entity(self, entity, sub_entity_kind=None): """ Remove an entity, or sub-entity group from this EntityGroup. :type entity: Entity :param entity: The entity to remove. :type sub_entity_kind: Optional EntityKind :param sub_entity_kind: If a sub_entity_kind is given, all sub_entities of the entity will be removed from this EntityGroup. """ EntityGroupMembership.objects.get( entity_group=self, entity=entity, sub_entity_kind=sub_entity_kind, ).delete()
Remove an entity, or sub-entity group from this EntityGroup. :type entity: Entity :param entity: The entity to remove. :type sub_entity_kind: Optional EntityKind :param sub_entity_kind: If a sub_entity_kind is given, all sub_entities of the entity will be removed from this EntityGroup.
entailment
def bulk_remove_entities(self, entities_and_kinds): """ Remove many entities and sub-entity groups from this EntityGroup. :type entities_and_kinds: List of (Entity, EntityKind) pairs. :param entities_and_kinds: A list of entity, entity-kind pairs to remove from the group. In the pairs, the entity-kind can be ``None``, to remove a single entity, or some entity kind to remove all sub-entities of that kind. """ criteria = [ Q(entity=entity, sub_entity_kind=entity_kind) for entity, entity_kind in entities_and_kinds ] criteria = reduce(lambda q1, q2: q1 | q2, criteria, Q()) EntityGroupMembership.objects.filter( criteria, entity_group=self).delete()
Remove many entities and sub-entity groups from this EntityGroup. :type entities_and_kinds: List of (Entity, EntityKind) pairs. :param entities_and_kinds: A list of entity, entity-kind pairs to remove from the group. In the pairs, the entity-kind can be ``None``, to remove a single entity, or some entity kind to remove all sub-entities of that kind.
entailment
def bulk_overwrite(self, entities_and_kinds): """ Update the group to the given entities and sub-entity groups. After this operation, the only members of this EntityGroup will be the given entities, and sub-entity groups. :type entities_and_kinds: List of (Entity, EntityKind) pairs. :param entities_and_kinds: A list of entity, entity-kind pairs to set to the EntityGroup. In the pairs the entity-kind can be ``None``, to add a single entity, or some entity kind to add all sub-entities of that kind. """ EntityGroupMembership.objects.filter(entity_group=self).delete() return self.bulk_add_entities(entities_and_kinds)
Update the group to the given entities and sub-entity groups. After this operation, the only members of this EntityGroup will be the given entities, and sub-entity groups. :type entities_and_kinds: List of (Entity, EntityKind) pairs. :param entities_and_kinds: A list of entity, entity-kind pairs to set to the EntityGroup. In the pairs the entity-kind can be ``None``, to add a single entity, or some entity kind to add all sub-entities of that kind.
entailment
def generate_slug(value): "A copy of spectator.core.models.SluggedModelMixin._generate_slug()" alphabet = 'abcdefghijkmnopqrstuvwxyz23456789' salt = 'Django Spectator' if hasattr(settings, 'SPECTATOR_SLUG_ALPHABET'): alphabet = settings.SPECTATOR_SLUG_ALPHABET if hasattr(settings, 'SPECTATOR_SLUG_SALT'): salt = settings.SPECTATOR_SLUG_SALT hashids = Hashids(alphabet=alphabet, salt=salt, min_length=5) return hashids.encode(value)
A copy of spectator.core.models.SluggedModelMixin._generate_slug()
entailment
def set_slug(apps, schema_editor, class_name): """ Create a slug for each object of the given model already in the DB. """ Cls = apps.get_model('spectator_events', class_name) for obj in Cls.objects.all(): obj.slug = generate_slug(obj.pk) obj.save(update_fields=['slug'])
Create a slug for each object of the given model already in the DB.
entailment
def kind_name(self): "e.g. 'Gig' or 'Movie'." return {k:v for (k,v) in self.KIND_CHOICES}[self.kind]
e.g. 'Gig' or 'Movie'.
entailment
def get_kind_name_plural(kind): "e.g. 'Gigs' or 'Movies'." if kind in ['comedy', 'cinema', 'dance', 'theatre']: return kind.title() elif kind == 'museum': return 'Galleries/Museums' else: return '{}s'.format(Event.get_kind_name(kind))
e.g. 'Gigs' or 'Movies'.
entailment
def get_kinds_data(): """ Returns a dict of all the data about the kinds, keyed to the kind value. e.g: { 'gig': { 'name': 'Gig', 'slug': 'gigs', 'name_plural': 'Gigs', }, # etc } """ kinds = {k:{'name':v} for k,v in Event.KIND_CHOICES} for k,data in kinds.items(): kinds[k]['slug'] = Event.KIND_SLUGS[k] kinds[k]['name_plural'] = Event.get_kind_name_plural(k) return kinds
Returns a dict of all the data about the kinds, keyed to the kind value. e.g: { 'gig': { 'name': 'Gig', 'slug': 'gigs', 'name_plural': 'Gigs', }, # etc }
entailment
def get_list_url(self, kind_slug=None): """ Get the list URL for this Work. You can also pass a kind_slug in (e.g. 'movies') and it will use that instead of the Work's kind_slug. (Why? Useful in views. Or tests of views, at least.) """ if kind_slug is None: kind_slug = self.KIND_SLUGS[self.kind] return reverse('spectator:events:work_list', kwargs={'kind_slug': kind_slug})
Get the list URL for this Work. You can also pass a kind_slug in (e.g. 'movies') and it will use that instead of the Work's kind_slug. (Why? Useful in views. Or tests of views, at least.)
entailment
def convert_descriptor_and_rows(self, descriptor, rows): """Convert descriptor and rows to Pandas """ # Prepare primary_key = None schema = tableschema.Schema(descriptor) if len(schema.primary_key) == 1: primary_key = schema.primary_key[0] elif len(schema.primary_key) > 1: message = 'Multi-column primary keys are not supported' raise tableschema.exceptions.StorageError(message) # Get data/index data_rows = [] index_rows = [] jtstypes_map = {} for row in rows: values = [] index = None for field, value in zip(schema.fields, row): try: if isinstance(value, float) and np.isnan(value): value = None if value and field.type == 'integer': value = int(value) value = field.cast_value(value) except tableschema.exceptions.CastError: value = json.loads(value) # http://pandas.pydata.org/pandas-docs/stable/gotchas.html#support-for-integer-na if value is None and field.type in ('number', 'integer'): jtstypes_map[field.name] = 'number' value = np.NaN if field.name == primary_key: index = value else: values.append(value) data_rows.append(tuple(values)) index_rows.append(index) # Get dtypes dtypes = [] for field in schema.fields: if field.name != primary_key: field_name = field.name if six.PY2: field_name = field.name.encode('utf-8') dtype = self.convert_type(jtstypes_map.get(field.name, field.type)) dtypes.append((field_name, dtype)) # Create dataframe index = None columns = schema.headers array = np.array(data_rows, dtype=dtypes) if primary_key: index_field = schema.get_field(primary_key) index_dtype = self.convert_type(index_field.type) index_class = pd.Index if index_field.type in ['datetime', 'date']: index_class = pd.DatetimeIndex index = index_class(index_rows, name=primary_key, dtype=index_dtype) columns = filter(lambda column: column != primary_key, schema.headers) dataframe = pd.DataFrame(array, index=index, columns=columns) return dataframe
Convert descriptor and rows to Pandas
entailment
def convert_type(self, type): """Convert type to Pandas """ # Mapping mapping = { 'any': np.dtype('O'), 'array': np.dtype(list), 'boolean': np.dtype(bool), 'date': np.dtype('O'), 'datetime': np.dtype('datetime64[ns]'), 'duration': np.dtype('O'), 'geojson': np.dtype('O'), 'geopoint': np.dtype('O'), 'integer': np.dtype(int), 'number': np.dtype(float), 'object': np.dtype(dict), 'string': np.dtype('O'), 'time': np.dtype('O'), 'year': np.dtype(int), 'yearmonth': np.dtype('O'), } # Get type if type not in mapping: message = 'Type "%s" is not supported' % type raise tableschema.exceptions.StorageError(message) return mapping[type]
Convert type to Pandas
entailment
def restore_descriptor(self, dataframe): """Restore descriptor from Pandas """ # Prepare fields = [] primary_key = None # Primary key if dataframe.index.name: field_type = self.restore_type(dataframe.index.dtype) field = { 'name': dataframe.index.name, 'type': field_type, 'constraints': {'required': True}, } fields.append(field) primary_key = dataframe.index.name # Fields for column, dtype in dataframe.dtypes.iteritems(): sample = dataframe[column].iloc[0] if len(dataframe) else None field_type = self.restore_type(dtype, sample=sample) field = {'name': column, 'type': field_type} # TODO: provide better required indication # if dataframe[column].isnull().sum() == 0: # field['constraints'] = {'required': True} fields.append(field) # Descriptor descriptor = {} descriptor['fields'] = fields if primary_key: descriptor['primaryKey'] = primary_key return descriptor
Restore descriptor from Pandas
entailment
def restore_row(self, row, schema, pk):
    """Restore row from Pandas
    """
    result = []
    for field in schema.fields:
        if schema.primary_key and schema.primary_key[0] == field.name:
            if field.type == 'number' and np.isnan(pk):
                pk = None
            if pk and field.type == 'integer':
                pk = int(pk)
            result.append(field.cast_value(pk))
        else:
            value = row[field.name]
            if field.type == 'number' and np.isnan(value):
                value = None
            if value and field.type == 'integer':
                value = int(value)
            elif field.type == 'datetime':
                value = value.to_pydatetime()
            result.append(field.cast_value(value))
    return result
Restore row from Pandas
entailment
def restore_type(self, dtype, sample=None):
    """Restore type from Pandas
    """

    # Pandas types
    if pdc.is_bool_dtype(dtype):
        return 'boolean'
    elif pdc.is_datetime64_any_dtype(dtype):
        return 'datetime'
    elif pdc.is_integer_dtype(dtype):
        return 'integer'
    elif pdc.is_numeric_dtype(dtype):
        return 'number'

    # Python types
    if sample is not None:
        if isinstance(sample, (list, tuple)):
            return 'array'
        elif isinstance(sample, datetime.date):
            return 'date'
        elif isinstance(sample, isodate.Duration):
            return 'duration'
        elif isinstance(sample, dict):
            return 'object'
        elif isinstance(sample, six.string_types):
            return 'string'
        elif isinstance(sample, datetime.time):
            return 'time'

    return 'string'
Restore type from Pandas
entailment
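The first branch inspects the column dtype alone; only object-dtype columns fall through to the sample-based checks. A small sketch of those dtype helpers, assuming `pdc` is an alias for `pandas.api.types` (the public home of these functions; the alias itself is an assumption about the module's imports):

import pandas as pd
import pandas.api.types as pdc

s = pd.Series([1, 2, 3])
print(pdc.is_integer_dtype(s.dtype))         # True  -> restored as 'integer'
print(pdc.is_datetime64_any_dtype(s.dtype))  # False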
def change_object_link_card(obj, perms):
    """
    If the user has permission to change `obj`, show a link to its Admin page.

    obj -- An object like Movie, Play, ClassicalWork, Publication, etc.
    perms -- The `perms` object that's in the template.
    """
    # eg: 'movie' or 'classicalwork':
    name = obj.__class__.__name__.lower()

    permission = 'spectator.can_edit_{}'.format(name)

    # eg: 'admin:events_classicalwork_change':
    change_url_name = 'admin:{}_{}_change'.format(obj._meta.app_label, name)

    return {
        'display_link': (permission in perms),
        'change_url': reverse(change_url_name, args=[obj.id])
    }
If the user has permission to change `obj`, show a link to its Admin page. obj -- An object like Movie, Play, ClassicalWork, Publication, etc. perms -- The `perms` object that's in the template.
entailment
def domain_urlize(value):
    """
    Returns an HTML link to the supplied URL, but only using the domain as the
    text. Strips 'www.' from the start of the domain, if present.

    e.g. if `my_url` is 'http://www.example.org/foo/' then:

        {{ my_url|domain_urlize }}

    returns:

        <a href="http://www.example.org/foo/" rel="nofollow">example.org</a>
    """
    parsed_uri = urlparse(value)
    domain = '{uri.netloc}'.format(uri=parsed_uri)

    if domain.startswith('www.'):
        domain = domain[4:]

    return format_html('<a href="{}" rel="nofollow">{}</a>',
                       value,
                       domain)
Returns an HTML link to the supplied URL, but only using the domain as the text. Strips 'www.' from the start of the domain, if present. e.g. if `my_url` is 'http://www.example.org/foo/' then: {{ my_url|domain_urlize }} returns: <a href="http://www.example.org/foo/" rel="nofollow">example.org</a>
entailment
def current_url_name(context):
    """
    Returns the name of the current URL, namespaced, or False.

    Example usage:

        {% current_url_name as url_name %}

        <a href="#"{% if url_name == 'myapp:home' %} class="active"{% endif %}">Home</a>
    """
    url_name = False
    if context.request.resolver_match:
        url_name = "{}:{}".format(
            context.request.resolver_match.namespace,
            context.request.resolver_match.url_name
        )
    return url_name
Returns the name of the current URL, namespaced, or False. Example usage: {% current_url_name as url_name %} <a href="#"{% if url_name == 'myapp:home' %} class="active"{% endif %}">Home</a>
entailment
def query_string(context, key, value):
    """
    For adding/replacing a key=value pair to the GET string for a URL.

    eg, if we're viewing ?p=3 and we do {% query_string order 'taken' %}
    then this returns "p=3&order=taken"

    And, if we're viewing ?p=3&order=uploaded and we do the same thing, we get
    the same result (ie, the existing "order=uploaded" is replaced).

    Expects the request object in context to do the above; otherwise it will
    just return a query string with the supplied key=value pair.
    """
    try:
        request = context['request']
        args = request.GET.copy()
    except KeyError:
        args = QueryDict('').copy()
    args[key] = value
    return args.urlencode()
For adding/replacing a key=value pair to the GET string for a URL. eg, if we're viewing ?p=3 and we do {% query_string order 'taken' %} then this returns "p=3&order=taken" And, if we're viewing ?p=3&order=uploaded and we do the same thing, we get the same result (ie, the existing "order=uploaded" is replaced). Expects the request object in context to do the above; otherwise it will just return a query string with the supplied key=value pair.
entailment
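The replace-or-add behaviour comes straight from Django's QueryDict: assigning to an existing key overwrites its value list. A minimal sketch of that, assuming Django is installed (encoding is passed explicitly so it runs without configured settings):

from django.http import QueryDict

# mutable=True because QueryDicts are immutable by default.
args = QueryDict('p=3&order=uploaded', mutable=True, encoding='utf-8')
args['order'] = 'taken'
print(args.urlencode())  # p=3&order=taken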
def most_read_creators_card(num=10):
    """
    Displays a card showing the Creators who have the most Readings associated
    with their Publications.

    In spectator_core tags, rather than spectator_reading so it can still be
    used on core pages, even if spectator_reading isn't installed.
    """
    if spectator_apps.is_enabled('reading'):
        object_list = most_read_creators(num=num)

        object_list = chartify(object_list, 'num_readings', cutoff=1)

        return {
            'card_title': 'Most read authors',
            'score_attr': 'num_readings',
            'object_list': object_list,
        }
Displays a card showing the Creators who have the most Readings associated with their Publications. In spectator_core tags, rather than spectator_reading so it can still be used on core pages, even if spectator_reading isn't installed.
entailment
def most_visited_venues_card(num=10):
    """
    Displays a card showing the Venues that have the most Events.

    In spectator_core tags, rather than spectator_events so it can still be
    used on core pages, even if spectator_events isn't installed.
    """
    if spectator_apps.is_enabled('events'):
        object_list = most_visited_venues(num=num)

        object_list = chartify(object_list, 'num_visits', cutoff=1)

        return {
            'card_title': 'Most visited venues',
            'score_attr': 'num_visits',
            'object_list': object_list,
        }
Displays a card showing the Venues that have the most Events. In spectator_core tags, rather than spectator_events so it can still be used on core pages, even if spectator_events isn't installed.
entailment
def has_urls(self):
    "Handy for templates."
    if self.isbn_uk or self.isbn_us or self.official_url or self.notes_url:
        return True
    else:
        return False
Handy for templates.
entailment
def get_entity(package, entity):
    """
    eg, get_entity('spectator', 'version') returns `__version__` value in
    `__init__.py`.
    """
    init_py = open(os.path.join(package, '__init__.py')).read()
    find = "__%s__ = ['\"]([^'\"]+)['\"]" % entity
    return re.search(find, init_py).group(1)
eg, get_entity('spectator', 'version') returns `__version__` value in `__init__.py`.
entailment
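The regex simply matches a dunder assignment and captures whatever sits between the quotes. Running it in isolation against a typical `__init__.py` line shows the idea:

import re

init_py = '__version__ = "1.2.3"\n'
find = "__%s__ = ['\"]([^'\"]+)['\"]" % 'version'
print(re.search(find, init_py).group(1))  # 1.2.3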
def get_queryset(self):
    "Reduce the number of queries and speed things up."
    qs = super().get_queryset()
    qs = qs.select_related('publication__series') \
           .prefetch_related('publication__roles__creator')
    return qs
Reduce the number of queries and speed things up.
entailment
def set_slug(apps, schema_editor):
    """
    Create a slug for each Creator already in the DB.
    """
    Creator = apps.get_model('spectator_core', 'Creator')

    for c in Creator.objects.all():
        c.slug = generate_slug(c.pk)
        c.save(update_fields=['slug'])
Create a slug for each Creator already in the DB.
entailment
def forwards(apps, schema_editor):
    """
    Copy the ClassicalWork and DancePiece data to use the new through models.
    """
    Event = apps.get_model('spectator_events', 'Event')
    ClassicalWorkSelection = apps.get_model(
        'spectator_events', 'ClassicalWorkSelection')
    DancePieceSelection = apps.get_model(
        'spectator_events', 'DancePieceSelection')

    for event in Event.objects.all():
        for work in event.classicalworks.all():
            selection = ClassicalWorkSelection(
                classical_work=work, event=event)
            selection.save()
        for piece in event.dancepieces.all():
            selection = DancePieceSelection(
                dance_piece=piece, event=event)
            selection.save()
Copy the ClassicalWork and DancePiece data to use the new through models.
entailment
def forwards(apps, schema_editor):
    """
    Set the venue_name field of all Events that have a Venue.
    """
    Event = apps.get_model('spectator_events', 'Event')

    for event in Event.objects.all():
        if event.venue is not None:
            event.venue_name = event.venue.name
            event.save()
Set the venue_name field of all Events that have a Venue.
entailment
def forwards(apps, schema_editor):
    """
    Migrate all 'exhibition' Events to the new 'museum' Event kind.
    """
    Event = apps.get_model('spectator_events', 'Event')

    for ev in Event.objects.filter(kind='exhibition'):
        ev.kind = 'museum'
        ev.save()
Migrate all 'exhibition' Events to the new 'museum' Event kind.
entailment
def truncate_string(text, strip_html=True, chars=255, truncate='…', at_word_boundary=False):
    """Truncate a string to a certain length, removing line breaks and
    multiple spaces, optionally removing HTML, and appending a 'truncate'
    string.

    Keyword arguments:
    strip_html -- boolean.
    chars -- Number of characters to return.
    at_word_boundary -- Only truncate at a word boundary, which will probably
        result in a string shorter than chars.
    truncate -- String to add to the end.
    """
    if strip_html:
        text = strip_tags(text)
    text = text.replace('\n', ' ').replace('\r', '')
    text = ' '.join(text.split())
    if at_word_boundary:
        if len(text) > chars:
            text = text[:chars].rsplit(' ', 1)[0] + truncate
    else:
        text = Truncator(text).chars(chars, html=False, truncate=truncate)
    return text
Truncate a string to a certain length, removing line breaks and multiple spaces, optionally removing HTML, and appending a 'truncate' string. Keyword arguments: strip_html -- boolean. chars -- Number of characters to return. at_word_boundary -- Only truncate at a word boundary, which will probably result in a string shorter than chars. truncate -- String to add to the end.
entailment
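The `at_word_boundary` branch is the only non-obvious part: it cuts at `chars` and then drops the trailing partial word. That core logic, as plain Python with no Django dependencies:

text = 'The quick brown fox jumps over the lazy dog'
chars = 20
if len(text) > chars:
    # Cut to chars, then drop whatever fragment follows the last space.
    text = text[:chars].rsplit(' ', 1)[0] + '…'
print(text)  # The quick brown fox…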
def chartify(qs, score_field, cutoff=0, ensure_chartiness=True):
    """
    Given a QuerySet it will go through and add a `chart_position` property to
    each object returning a list of the objects.

    If adjacent objects have the same 'score' (based on `score_field`) then
    they will have the same `chart_position`. This can then be used in
    templates for the `value` of <li> elements in an <ol>.

    By default any objects with a score of 0 or less will be removed.

    By default, if all the items in the chart have the same position, no
    items will be returned (it's not much of a chart).

    Keyword arguments:
    qs -- The QuerySet
    score_field -- The name of the numeric field that each object in the
                   QuerySet has, that will be used to compare their positions.
    cutoff -- Any objects with a score of this value or below will be removed
              from the list. Set to None to disable this.
    ensure_chartiness -- If True, then if all items in the list have the same
                         score, an empty list will be returned.
    """
    chart = []
    position = 0
    prev_obj = None

    for counter, obj in enumerate(qs):
        score = getattr(obj, score_field)
        if score != getattr(prev_obj, score_field, None):
            position = counter + 1

        if cutoff is None or score > cutoff:
            obj.chart_position = position
            chart.append(obj)

        prev_obj = obj

    if ensure_chartiness and len(chart) > 0:
        if getattr(chart[0], score_field) == getattr(chart[-1], score_field):
            chart = []

    return chart
Given a QuerySet it will go through and add a `chart_position` property to each object returning a list of the objects. If adjacent objects have the same 'score' (based on `score_field`) then they will have the same `chart_position`. This can then be used in templates for the `value` of <li> elements in an <ol>. By default any objects with a score of 0 or less will be removed. By default, if all the items in the chart have the same position, no items will be returned (it's not much of a chart). Keyword arguments: qs -- The QuerySet score_field -- The name of the numeric field that each object in the QuerySet has, that will be used to compare their positions. cutoff -- Any objects with a score of this value or below will be removed from the list. Set to None to disable this. ensure_chartiness -- If True, then if all items in the list have the same score, an empty list will be returned.
entailment
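Because the function only uses getattr and iteration, anything iterable with the right attributes works, which makes the tie-handling easy to demonstrate. A worked example using SimpleNamespace as a stand-in for model instances (assuming `chartify` from above is importable):

from types import SimpleNamespace

venues = [SimpleNamespace(name=n, num_visits=v)
          for n, v in (('A', 5), ('B', 3), ('C', 3), ('D', 1))]
for obj in chartify(venues, 'num_visits'):
    print(obj.chart_position, obj.name)
# 1 A
# 2 B
# 2 C   (tied with B, so it shares position 2)
# 4 D   (positions skip to 4 after the tie)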
def by_visits(self, event_kind=None):
    """
    Gets Venues in order of how many Events have been held there.
    Adds a `num_visits` field to each one.

    event_kind filters by kind of Event, e.g. 'theatre', 'cinema', etc.
    """
    qs = self.get_queryset()

    if event_kind is not None:
        qs = qs.filter(event__kind=event_kind)

    qs = qs.annotate(num_visits=Count('event')) \
           .order_by('-num_visits', 'name_sort')

    return qs
Gets Venues in order of how many Events have been held there. Adds a `num_visits` field to each one. event_kind filters by kind of Event, e.g. 'theatre', 'cinema', etc.
entailment
def by_views(self, kind=None):
    """
    Gets Works in order of how many times they've been attached to Events.

    kind is the kind of Work, e.g. 'play', 'movie', etc.
    """
    qs = self.get_queryset()

    if kind is not None:
        qs = qs.filter(kind=kind)

    qs = qs.annotate(num_views=Count('event')) \
           .order_by('-num_views', 'title_sort')

    return qs
Gets Works in order of how many times they've been attached to Events. kind is the kind of Work, e.g. 'play', 'movie', etc.
entailment
def naturalize_thing(self, string):
    """
    Make a naturalized version of a general string, not a person's name.
    e.g., title of a book, a band's name, etc.

    string -- a lowercase string.
    """
    # Things we want to move to the back of the string:
    articles = [
        'a', 'an', 'the',
        'un', 'une', 'le', 'la', 'les', "l'", "l’",
        'ein', 'eine', 'der', 'die', 'das',
        'una', 'el', 'los', 'las',
    ]

    sort_string = string
    parts = string.split(' ')

    if len(parts) > 1 and parts[0] in articles:
        if parts[0] != parts[1]:
            # Don't do this if the name is 'The The' or 'La La Land'.
            # Makes 'long blondes, the':
            sort_string = '{}, {}'.format(' '.join(parts[1:]), parts[0])

    sort_string = self._naturalize_numbers(sort_string)

    return sort_string
Make a naturalized version of a general string, not a person's name. e.g., title of a book, a band's name, etc. string -- a lowercase string.
entailment
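The article-shuffling step can be shown in isolation (leaving out the number handling, which needs the class's `_naturalize_numbers()`; the shortened `articles` list here is just for the sketch):

articles = ['a', 'an', 'the', 'la', 'le', 'les']
for s in ('the long blondes', 'la la land', 'big star'):
    parts = s.split(' ')
    # Move a leading article to the end, unless the first two words match.
    if len(parts) > 1 and parts[0] in articles and parts[0] != parts[1]:
        s = '{}, {}'.format(' '.join(parts[1:]), parts[0])
    print(s)
# long blondes, the
# la la land   (first two words match, so it's left alone)
# big star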