Columns: sentence1 (string, 52 to 3.87M chars) — function source; sentence2 (string, 1 to 47.2k chars) — its docstring; label (1 class) — 'entailment'.
def naturalize_person(self, string):
    """
    Attempt to make a version of the string that has the surname, if any,
    at the start.

    'John Brown' to 'Brown, John'
    'Sir John Brown Jr' to 'Brown, Sir John Jr'
    'Prince' to 'Prince'

    string -- The string to change.
    """
    suffixes = ['Jr', 'Jr.', 'Sr', 'Sr.', 'I', 'II', 'III', 'IV', 'V']
    # Add lowercase versions:
    suffixes = suffixes + [s.lower() for s in suffixes]

    # If a name has a capitalised particle in we use that to sort.
    # So 'Le Carre, John' but 'Carre, John le'.
    particles = ['Le', 'La', 'Von', 'Van', 'Du', 'De']

    surname = ''  # Smith
    names = ''    # Fred James
    suffix = ''   # Jr

    sort_string = string
    parts = string.split(' ')

    if parts[-1] in suffixes:
        # Remove suffixes entirely, as we'll add them back on the end.
        suffix = parts[-1]
        parts = parts[0:-1]  # Remove suffix from parts
        sort_string = ' '.join(parts)

    if len(parts) > 1:
        if parts[-2] in particles:
            # From ['Alan', 'Barry', 'Le', 'Carré']
            # to ['Alan', 'Barry', 'Le Carré']:
            parts = parts[0:-2] + [' '.join(parts[-2:])]

        # From 'David Foster Wallace' to 'Wallace, David Foster':
        sort_string = '{}, {}'.format(parts[-1], ' '.join(parts[:-1]))

    if suffix:
        # Add it back on.
        sort_string = '{} {}'.format(sort_string, suffix)

    # In case this name has any numbers in it.
    sort_string = self._naturalize_numbers(sort_string)

    return sort_string
Attempt to make a version of the string that has the surname, if any, at the start. 'John Brown' to 'Brown, John' 'Sir John Brown Jr' to 'Brown, Sir John Jr' 'Prince' to 'Prince' string -- The string to change.
entailment
def _naturalize_numbers(self, string):
    """
    Makes any integers into very zero-padded numbers.
    e.g. '1' becomes '00000001'.
    """
    def naturalize_int_match(match):
        return '%08d' % (int(match.group(0)),)

    string = re.sub(r'\d+', naturalize_int_match, string)

    return string
Makes any integers into very zero-padded numbers. e.g. '1' becomes '00000001'.
entailment
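A minimal standalone sketch of the zero-padding trick above, outside the class, showing how re.sub applies a callable to each match (the function name and sample strings here are illustrative):

import re

def naturalize_numbers(string):
    # Replace every run of digits with an 8-digit zero-padded version,
    # so plain string sorting orders the numbers numerically.
    return re.sub(r'\d+', lambda m: '%08d' % int(m.group(0)), string)

# 'Apollo 9' now sorts before 'Apollo 11' under plain string comparison:
assert naturalize_numbers('Apollo 9') == 'Apollo 00000009'
assert naturalize_numbers('Apollo 11') == 'Apollo 00000011'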
def annual_reading_counts(kind='all'):
    """
    Returns a list of dicts, one per year of reading. In year order.
    Each dict is like this (if kind is 'all'):
        {'year': datetime.date(2003, 1, 1),
         'book': 12,        # only included if kind is 'all' or 'book'
         'periodical': 18,  # only included if kind is 'all' or 'periodical'
         'total': 30,       # only included if kind is 'all'
        }
    We use the end_date of a Reading to count when that thing was read.

    kind is one of 'book', 'periodical' or 'all', for both.
    """
    if kind == 'all':
        kinds = ['book', 'periodical']
    else:
        kinds = [kind]

    # This will have keys of years (strings) and dicts of data:
    # {
    #   '2003': {'books': 12, 'periodicals': 18},
    # }
    counts = OrderedDict()

    for k in kinds:
        qs = Reading.objects.exclude(end_date__isnull=True) \
                            .filter(publication__kind=k) \
                            .annotate(year=TruncYear('end_date')) \
                            .values('year') \
                            .annotate(count=Count('id')) \
                            .order_by('year')

        for year_data in qs:
            year_str = year_data['year'].strftime('%Y')
            if year_str not in counts:
                counts[year_str] = {'year': year_data['year']}
            counts[year_str][k] = year_data['count']

    # Now translate counts into our final list, with totals, and 0s for kinds
    # when they have no Readings for that year.
    counts_list = []

    for year_str, data in counts.items():
        year_data = {'year': data['year']}
        if kind == 'all':
            year_data['total'] = 0
        for k in kinds:
            if k in data:
                year_data[k] = data[k]
                if kind == 'all':
                    year_data['total'] += data[k]
            else:
                year_data[k] = 0
        counts_list.append(year_data)

    return counts_list
Returns a list of dicts, one per year of reading. In year order. Each dict is like this (if kind is 'all'): {'year': datetime.date(2003, 1, 1), 'book': 12, # only included if kind is 'all' or 'book' 'periodical': 18, # only included if kind is 'all' or 'periodical' 'total': 30, # only included if kind is 'all' } We use the end_date of a Reading to count when that thing was read. kind is one of 'book', 'periodical' or 'all', for both.
entailment
def lookups(self, request, model_admin):
    """
    Returns a list of tuples like:
        [
            ('AU', 'Australia'),
            ('GB', 'UK'),
            ('US', 'USA'),
        ]
    One for each country that has at least one Venue.
    Sorted by the label names.
    """
    list_of_countries = []

    # We don't need the country_count but we need to annotate them in order
    # to group the results.
    qs = Venue.objects.exclude(country='') \
                      .values('country') \
                      .annotate(country_count=Count('country')) \
                      .order_by('country')

    for obj in qs:
        country = obj['country']
        list_of_countries.append(
            (country, Venue.COUNTRIES[country])
        )

    return sorted(list_of_countries, key=lambda c: c[1])
Returns a list of tuples like: [ ('AU', 'Australia'), ('GB', 'UK'), ('US', 'USA'), ] One for each country that has at least one Venue. Sorted by the label names.
entailment
def queryset(self, request, queryset):
    """
    Returns the filtered queryset based on the value provided in the
    query string and retrievable via `self.value()`.
    """
    if self.value():
        return queryset.filter(country=self.value())
    else:
        return queryset
Returns the filtered queryset based on the value provided in the query string and retrievable via `self.value()`.
entailment
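For context, a hedged sketch of how the lookups() and queryset() methods above sit inside a Django admin SimpleListFilter; the class name, title, and lookup values here are illustrative, not from the original source:

from django.contrib import admin

class CountryListFilter(admin.SimpleListFilter):
    # 'title' appears in the admin sidebar; 'parameter_name' is the
    # query-string key, e.g. ?country=GB
    title = 'country'
    parameter_name = 'country'

    def lookups(self, request, model_admin):
        # ('value-in-querystring', 'human label') pairs, as above:
        return [('AU', 'Australia'), ('GB', 'UK'), ('US', 'USA')]

    def queryset(self, request, queryset):
        if self.value():
            return queryset.filter(country=self.value())
        return queryset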
def forward(apps, schema_editor):
    """
    Copying data from the old `Event.movie` and `Event.play` ForeignKey
    fields into the new `Event.movies` and `Event.plays` ManyToManyFields.
    """
    Event = apps.get_model('spectator_events', 'Event')
    MovieSelection = apps.get_model('spectator_events', 'MovieSelection')
    PlaySelection = apps.get_model('spectator_events', 'PlaySelection')

    for event in Event.objects.all():
        if event.movie is not None:
            selection = MovieSelection(event=event, movie=event.movie)
            selection.save()
        if event.play is not None:
            selection = PlaySelection(event=event, play=event.play)
            selection.save()
Copying data from the old `Event.movie` and `Event.play` ForeignKey fields into the new `Event.movies` and `Event.plays` ManyToManyFields.
entailment
def set_slug(apps, schema_editor):
    """
    Create a slug for each Event already in the DB.
    """
    Event = apps.get_model('spectator_events', 'Event')

    for e in Event.objects.all():
        e.slug = generate_slug(e.pk)
        e.save(update_fields=['slug'])
Create a slug for each Event already in the DB.
entailment
def page(self, number, *args, **kwargs):
    """Return a standard ``Page`` instance with custom, digg-specific
    page ranges attached.
    """
    page = super().page(number, *args, **kwargs)
    number = int(number)  # we know this will work

    # easier access
    num_pages, body, tail, padding, margin = \
        self.num_pages, self.body, self.tail, self.padding, self.margin

    # put active page in middle of main range
    main_range = list(map(int, [
        math.floor(number - body / 2.0) + 1,  # +1 = shift odd body to right
        math.floor(number + body / 2.0)]))

    # adjust bounds
    if main_range[0] < 1:
        main_range = list(map(abs(main_range[0] - 1).__add__, main_range))
    if main_range[1] > num_pages:
        main_range = list(map((num_pages - main_range[1]).__add__,
                              main_range))

    # Determine leading and trailing ranges; if possible and appropriate,
    # combine them with the main range, in which case the resulting main
    # block might end up considerably larger than requested. While we
    # can't guarantee the exact size in those cases, we can at least try
    # to come as close as possible: we can reduce the other boundary to
    # max padding, instead of using half the body size, which would
    # otherwise be the case. If the padding is large enough, this will
    # of course have no effect.
    # Example:
    #     total pages=100, page=4, body=5, (default padding=2)
    #     1 2 3 [4] 5 6 ... 99 100
    #     total pages=100, page=4, body=5, padding=1
    #     1 2 3 [4] 5 ... 99 100
    # If it were not for this adjustment, both cases would result in the
    # first output, regardless of the padding value.
    if main_range[0] <= tail + margin:
        leading = []
        main_range = [1, max(body, min(number + padding, main_range[1]))]
        main_range[0] = 1
    else:
        leading = list(range(1, tail + 1))

    # basically same for trailing range, but not in ``align_left`` mode
    if self.align_left:
        trailing = []
    else:
        if main_range[1] >= num_pages - (tail + margin) + 1:
            trailing = []
            if not leading:
                # ... but handle the special case of neither leading nor
                # trailing ranges; otherwise, we would now modify the
                # main range low bound, which we just set in the previous
                # section, again.
                main_range = [1, num_pages]
            else:
                main_range = [min(num_pages - body + 1,
                                  max(number - padding, main_range[0])),
                              num_pages]
        else:
            trailing = list(range(num_pages - tail + 1, num_pages + 1))

    # finally, normalize values that are out of bound; this basically
    # fixes all the things the above code screwed up in the simple case
    # of few enough pages where one range would suffice.
    main_range = [max(main_range[0], 1), min(main_range[1], num_pages)]

    # make the result of our calculations available as custom ranges
    # on the ``Page`` instance.
    page.main_range = list(range(main_range[0], main_range[1] + 1))
    page.leading_range = leading
    page.trailing_range = trailing
    page.page_range = reduce(
        lambda x, y: x + ((x and y) and [False]) + y,
        [page.leading_range, page.main_range, page.trailing_range])

    page.__class__ = DiggPage
    return page
Return a standard ``Page`` instance with custom, digg-specific page ranges attached.
entailment
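A hedged usage sketch, assuming the surrounding DiggPaginator class takes the standard Django Paginator arguments plus the body/tail/padding/margin keywords read by the method above; the exact range values are illustrative:

# 999 items at 10 per page -> 100 pages. False marks the '...' gap
# in the combined page_range.
paginator = DiggPaginator(range(1, 1000), 10, body=5, tail=2)
page = paginator.page(1)
print(page.page_range)  # e.g. [1, 2, 3, 4, 5, False, 99, 100]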
def version():
    """Get the version number without importing the mrcfile package."""
    namespace = {}
    with open(os.path.join('mrcfile', 'version.py')) as f:
        exec(f.read(), namespace)
    return namespace['__version__']
Get the version number without importing the mrcfile package.
entailment
def get_event_counts(self):
    """
    Returns a dict like:
        {'counts': {
            'all': 30,
            'movie': 12,
            'gig': 10,
        }}
    """
    counts = {'all': Event.objects.count()}

    for k, v in Event.KIND_CHOICES:
        # e.g. 'movie_count':
        counts[k] = Event.objects.filter(kind=k).count()

    return {'counts': counts}
Returns a dict like: {'counts': { 'all': 30, 'movie': 12, 'gig': 10, }}
entailment
def get_event_kind(self):
    """
    Unless we're on the front page we'll have a kind_slug like 'movies'.
    We need to translate that into an event `kind` like 'movie'.
    """
    slug = self.kwargs.get('kind_slug', None)
    if slug is None:
        return None  # Front page; showing all Event kinds.
    else:
        slugs_to_kinds = {v: k for k, v in Event.KIND_SLUGS.items()}
        return slugs_to_kinds.get(slug, None)
Unless we're on the front page we'll have a kind_slug like 'movies'. We need to translate that into an event `kind` like 'movie'.
entailment
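The slug-to-kind translation above (and in get_work_kind() below) is a plain dict inversion; a tiny standalone sketch, with invented KIND_SLUGS values:

# KIND_SLUGS maps kind -> slug; invert it to go slug -> kind.
KIND_SLUGS = {'movie': 'movies', 'gig': 'gigs'}  # illustrative values
slugs_to_kinds = {v: k for k, v in KIND_SLUGS.items()}
assert slugs_to_kinds['movies'] == 'movie'
assert slugs_to_kinds.get('unknown') is None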
def get_queryset(self):
    "Restrict to a single kind of event, if any, and include Venue data."
    qs = super().get_queryset()

    kind = self.get_event_kind()
    if kind is not None:
        qs = qs.filter(kind=kind)

    qs = qs.select_related('venue')

    return qs
Restrict to a single kind of event, if any, and include Venue data.
entailment
def get_work_kind(self):
    """
    We'll have a kind_slug like 'movies'. We need to translate that into
    a work `kind` like 'movie'.
    """
    slugs_to_kinds = {v: k for k, v in Work.KIND_SLUGS.items()}
    return slugs_to_kinds.get(self.kind_slug, None)
We'll have a kind_slug like 'movies'. We need to translate that into a work `kind` like 'movie'.
entailment
def get_countries(self):
    """
    Returns a list of dicts, one per country that has at least one Venue
    in it. Each dict has 'code' and 'name' elements.
    The list is sorted by the country 'name's.
    """
    qs = Venue.objects.values('country') \
                      .exclude(country='') \
                      .distinct() \
                      .order_by('country')

    countries = []

    for c in qs:
        countries.append({
            'code': c['country'],
            'name': Venue.get_country_name(c['country'])
        })

    return sorted(countries, key=lambda k: k['name'])
Returns a list of dicts, one per country that has at least one Venue in it. Each dict has 'code' and 'name' elements. The list is sorted by the country 'name's.
entailment
def forwards(apps, schema_editor):
    """
    Re-save all the Works because something earlier didn't create their
    slugs.
    """
    Work = apps.get_model('spectator_events', 'Work')

    for work in Work.objects.all():
        if not work.slug:
            work.slug = generate_slug(work.pk)
            work.save()
Re-save all the Works because something earlier didn't create their slugs.
entailment
def annual_event_counts(kind='all'):
    """
    Returns a QuerySet of dicts, each one with these keys:
        * year - a date object representing the year
        * total - the number of events of `kind` that year

    kind - The Event `kind`, or 'all' for all kinds (default).
    """
    qs = Event.objects

    if kind != 'all':
        qs = qs.filter(kind=kind)

    qs = qs.annotate(year=TruncYear('date')) \
           .values('year') \
           .annotate(total=Count('id')) \
           .order_by('year')

    return qs
Returns a QuerySet of dicts, each one with these keys: * year - a date object representing the year * total - the number of events of `kind` that year kind - The Event `kind`, or 'all' for all kinds (default).
entailment
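A hedged sketch of consuming the QuerySet above; the 'gig' kind is one of the kinds mentioned elsewhere in this code, and the printed fields follow the docstring's stated keys:

# Each row is a dict like {'year': datetime.date(2017, 1, 1), 'total': 30}.
for row in annual_event_counts(kind='gig'):
    print(row['year'].year, row['total'])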
def annual_event_counts_card(kind='all', current_year=None):
    """
    Displays years and the number of events per year.

    kind is an Event kind (like 'cinema', 'gig', etc.) or 'all' (default).
    current_year is an optional date object representing the year we're
        already showing information about.
    """
    if kind == 'all':
        card_title = 'Events per year'
    else:
        card_title = '{} per year'.format(Event.get_kind_name_plural(kind))

    return {
        'card_title': card_title,
        'kind': kind,
        'years': annual_event_counts(kind=kind),
        'current_year': current_year,
    }
Displays years and the number of events per year. kind is an Event kind (like 'cinema', 'gig', etc.) or 'all' (default). current_year is an optional date object representing the year we're already showing information about.
entailment
def display_date(d):
    """
    Render a date/datetime (d) as a date, using the SPECTATOR_DATE_FORMAT
    setting. Wrap the output in a <time> tag.

    Time tags: http://www.brucelawson.co.uk/2012/best-of-time/
    """
    stamp = d.strftime('%Y-%m-%d')
    visible_date = d.strftime(app_settings.DATE_FORMAT)

    return format_html('<time datetime="%(stamp)s">%(visible)s</time>' % {
        'stamp': stamp,
        'visible': visible_date,
    })
Render a date/datetime (d) as a date, using the SPECTATOR_DATE_FORMAT setting. Wrap the output in a <time> tag. Time tags: http://www.brucelawson.co.uk/2012/best-of-time/
entailment
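A self-contained sketch of the markup this produces, assuming (for illustration) that the DATE_FORMAT setting is '%d %b %Y':

import datetime

d = datetime.date(2017, 2, 1)
stamp = d.strftime('%Y-%m-%d')
visible = d.strftime('%d %b %Y')  # stand-in for app_settings.DATE_FORMAT
html = '<time datetime="%(stamp)s">%(visible)s</time>' % {
    'stamp': stamp, 'visible': visible}
assert html == '<time datetime="2017-02-01">01 Feb 2017</time>'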
def event_list_tabs(counts, current_kind, page_number=1):
    """
    Displays the tabs to different event_list pages.

    `counts` is a dict of number of events for each kind, like:
        {'all': 30, 'gig': 12, 'movie': 18,}

    `current_kind` is the event kind that's active, if any. e.g. 'gig',
        'movie', etc.

    `page_number` is the current page of this kind of events we're on.
    """
    return {
        'counts': counts,
        'current_kind': current_kind,
        'page_number': page_number,
        # A list of all the kinds we might show tabs for, like
        # ['gig', 'movie', 'play', ...]
        'event_kinds': Event.get_kinds(),
        # A dict of data about each kind, keyed by kind ('gig') including
        # data about 'name', 'name_plural' and 'slug':
        'event_kinds_data': Event.get_kinds_data(),
    }
Displays the tabs to different event_list pages. `counts` is a dict of number of events for each kind, like: {'all': 30, 'gig': 12, 'movie': 18,} `current_kind` is the event kind that's active, if any. e.g. 'gig', 'movie', etc. `page_number` is the current page of this kind of events we're on.
entailment
def day_events_card(date):
    """
    Displays Events that happened on the supplied date.
    `date` is a date object.
    """
    d = date.strftime(app_settings.DATE_FORMAT)
    card_title = 'Events on {}'.format(d)
    return {
        'card_title': card_title,
        'event_list': day_events(date=date),
    }
Displays Events that happened on the supplied date. `date` is a date object.
entailment
def most_seen_creators_card(event_kind=None, num=10):
    """
    Displays a card showing the Creators that are associated with the most
    Events.
    """
    object_list = most_seen_creators(event_kind=event_kind, num=num)
    object_list = chartify(object_list, 'num_events', cutoff=1)

    return {
        'card_title': 'Most seen people/groups',
        'score_attr': 'num_events',
        'object_list': object_list,
    }
Displays a card showing the Creators that are associated with the most Events.
entailment
def most_seen_creators_by_works(work_kind=None, role_name=None, num=10):
    """
    Returns a QuerySet of the Creators that are associated with the most
    Works.
    """
    return Creator.objects.by_works(kind=work_kind,
                                    role_name=role_name)[:num]
Returns a QuerySet of the Creators that are associated with the most Works.
entailment
def most_seen_creators_by_works_card(work_kind=None, role_name=None, num=10):
    """
    Displays a card showing the Creators that are associated with the most
    Works.

    e.g.:
        {% most_seen_creators_by_works_card work_kind='movie' role_name='Director' num=5 %}
    """
    object_list = most_seen_creators_by_works(
        work_kind=work_kind, role_name=role_name, num=num)
    object_list = chartify(object_list, 'num_works', cutoff=1)

    # Attempt to create a sensible card title...

    if role_name:
        # Yes, this pluralization is going to break at some point:
        creators_name = '{}s'.format(role_name.capitalize())
    else:
        creators_name = 'People/groups'

    if work_kind:
        works_name = Work.get_kind_name_plural(work_kind).lower()
    else:
        works_name = 'works'

    card_title = '{} with most {}'.format(creators_name, works_name)

    return {
        'card_title': card_title,
        'score_attr': 'num_works',
        'object_list': object_list,
    }
Displays a card showing the Creators that are associated with the most Works. e.g.: {% most_seen_creators_by_works_card work_kind='movie' role_name='Director' num=5 %}
entailment
def most_seen_works_card(kind=None, num=10):
    """
    Displays a card showing the Works that are associated with the most
    Events.
    """
    object_list = most_seen_works(kind=kind, num=num)
    object_list = chartify(object_list, 'num_views', cutoff=1)

    if kind:
        card_title = 'Most seen {}'.format(
            Work.get_kind_name_plural(kind).lower())
    else:
        card_title = 'Most seen works'

    return {
        'card_title': card_title,
        'score_attr': 'num_views',
        'object_list': object_list,
        'name_attr': 'title',
        'use_cite': True,
    }
Displays a card showing the Works that are associated with the most Events.
entailment
def _generate_slug(self, value):
    """
    Generates a slug using a Hashid of `value`.
    """
    alphabet = app_settings.SLUG_ALPHABET
    salt = app_settings.SLUG_SALT
    hashids = Hashids(alphabet=alphabet, salt=salt, min_length=5)
    return hashids.encode(value)
Generates a slug using a Hashid of `value`.
entailment
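For illustration, a minimal standalone use of the hashids library's API as called above; the salt is a made-up stand-in for the app setting, and the default alphabet is used:

from hashids import Hashids

hashids = Hashids(salt='example-salt', min_length=5)
slug = hashids.encode(42)             # a short opaque string, e.g. 'x9rgb'
assert hashids.decode(slug) == (42,)  # reversible with the same salt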
def create(self, bucket, descriptor, force=False):
    """https://github.com/frictionlessdata/tableschema-pandas-py#storage
    """

    # Make lists
    buckets = bucket
    if isinstance(bucket, six.string_types):
        buckets = [bucket]
    descriptors = descriptor
    if isinstance(descriptor, dict):
        descriptors = [descriptor]

    # Check buckets for existence
    for bucket in buckets:
        if bucket in self.buckets:
            if not force:
                message = 'Bucket "%s" already exists' % bucket
                raise tableschema.exceptions.StorageError(message)
            self.delete(bucket)

    # Define dataframes
    for bucket, descriptor in zip(buckets, descriptors):
        tableschema.validate(descriptor)
        self.__descriptors[bucket] = descriptor
        self.__dataframes[bucket] = pd.DataFrame()
https://github.com/frictionlessdata/tableschema-pandas-py#storage
entailment
def delete(self, bucket=None, ignore=False):
    """https://github.com/frictionlessdata/tableschema-pandas-py#storage
    """

    # Make lists
    buckets = bucket
    if isinstance(bucket, six.string_types):
        buckets = [bucket]
    elif bucket is None:
        buckets = reversed(self.buckets)

    # Iterate over buckets
    for bucket in buckets:

        # Non existent bucket
        if bucket not in self.buckets:
            if not ignore:
                message = 'Bucket "%s" doesn\'t exist' % bucket
                raise tableschema.exceptions.StorageError(message)
            return

        # Remove from descriptors
        if bucket in self.__descriptors:
            del self.__descriptors[bucket]

        # Remove from dataframes
        if bucket in self.__dataframes:
            del self.__dataframes[bucket]
https://github.com/frictionlessdata/tableschema-pandas-py#storage
entailment
def describe(self, bucket, descriptor=None):
    """https://github.com/frictionlessdata/tableschema-pandas-py#storage
    """

    # Set descriptor
    if descriptor is not None:
        self.__descriptors[bucket] = descriptor

    # Get descriptor
    else:
        descriptor = self.__descriptors.get(bucket)
        if descriptor is None:
            dataframe = self.__dataframes[bucket]
            descriptor = self.__mapper.restore_descriptor(dataframe)

    return descriptor
https://github.com/frictionlessdata/tableschema-pandas-py#storage
entailment
def iter(self, bucket):
    """https://github.com/frictionlessdata/tableschema-pandas-py#storage
    """

    # Check existence
    if bucket not in self.buckets:
        message = 'Bucket "%s" doesn\'t exist.' % bucket
        raise tableschema.exceptions.StorageError(message)

    # Prepare
    descriptor = self.describe(bucket)
    schema = tableschema.Schema(descriptor)

    # Yield rows
    for pk, row in self.__dataframes[bucket].iterrows():
        row = self.__mapper.restore_row(row, schema=schema, pk=pk)
        yield row
https://github.com/frictionlessdata/tableschema-pandas-py#storage
entailment
def write(self, bucket, rows):
    """https://github.com/frictionlessdata/tableschema-pandas-py#storage
    """

    # Prepare
    descriptor = self.describe(bucket)
    new_data_frame = self.__mapper.convert_descriptor_and_rows(
        descriptor, rows)

    # Just set new DataFrame if current is empty
    if self.__dataframes[bucket].size == 0:
        self.__dataframes[bucket] = new_data_frame

    # Append new data frame to the old one setting new data frame
    # containing data from both old and new data frames
    else:
        self.__dataframes[bucket] = pd.concat([
            self.__dataframes[bucket],
            new_data_frame,
        ])
https://github.com/frictionlessdata/tableschema-pandas-py#storage
entailment
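The docstrings above only link to the project README, so here is a hedged sketch of driving this pandas-backed Storage end to end; the import path is assumed from the package name, and the bucket name and schema are invented for the example:

from tableschema_pandas import Storage  # assumed import path

storage = Storage()
descriptor = {'fields': [{'name': 'id', 'type': 'integer'},
                         {'name': 'name', 'type': 'string'}]}
storage.create('people', descriptor)                 # new empty DataFrame bucket
storage.write('people', [[1, 'Alice'], [2, 'Bob']])  # rows appended to the frame
for row in storage.iter('people'):                   # rows restored via the schema
    print(row)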
def forwards(apps, schema_editor):
    """
    Change all Movie objects into Work objects, and their associated
    data into WorkRole and WorkSelection models, then delete the Movie.
    """
    Movie = apps.get_model('spectator_events', 'Movie')
    Work = apps.get_model('spectator_events', 'Work')
    WorkRole = apps.get_model('spectator_events', 'WorkRole')
    WorkSelection = apps.get_model('spectator_events', 'WorkSelection')

    for m in Movie.objects.all():

        work = Work.objects.create(
            kind='movie',
            title=m.title,
            title_sort=m.title_sort,
            year=m.year,
            imdb_id=m.imdb_id
        )

        for role in m.roles.all():
            WorkRole.objects.create(
                creator=role.creator,
                work=work,
                role_name=role.role_name,
                role_order=role.role_order
            )

        for selection in m.events.all():
            WorkSelection.objects.create(
                event=selection.event,
                work=work,
                order=selection.order
            )

        m.delete()
Change all Movie objects into Work objects, and their associated data into WorkRole and WorkSelection models, then delete the Movie.
entailment
def paginate_queryset(self, queryset, page_size):
    """
    Paginate the queryset, if needed.

    This is EXACTLY the same as the standard ListView.paginate_queryset()
    except for this line:
        page = paginator.page(page_number, softlimit=True)
    Because we want to use the DiggPaginator's softlimit option.
    So that if you're viewing a page of, say, Flickr photos, and you switch
    from viewing by Uploaded Time to viewing by Taken Time, the new
    ordering might have fewer pages. In that case we want to see the final
    page, not a 404. The softlimit does that, but I can't see how to use
    it without copying all of this...
    """
    paginator = self.get_paginator(
        queryset, page_size,
        orphans=self.get_paginate_orphans(),
        allow_empty_first_page=self.get_allow_empty(),
        body=self.paginator_body,
        margin=self.paginator_margin,
        padding=self.paginator_padding,
        tail=self.paginator_tail,
    )
    page_kwarg = self.page_kwarg
    page = self.kwargs.get(page_kwarg) \
        or self.request.GET.get(page_kwarg) or 1
    try:
        page_number = int(page)
    except ValueError:
        if page == 'last':
            page_number = paginator.num_pages
        else:
            raise Http404(_(
                "Page is not 'last', nor can it be converted to an int."))
    try:
        page = paginator.page(page_number, softlimit=True)
        return (paginator, page, page.object_list, page.has_other_pages())
    except InvalidPage as e:
        raise Http404(_('Invalid page (%(page_number)s): %(message)s') % {
            'page_number': page_number,
            'message': str(e),
        })
Paginate the queryset, if needed. This is EXACTLY the same as the standard ListView.paginate_queryset() except for this line: page = paginator.page(page_number, softlimit=True) Because we want to use the DiggPaginator's softlimit option. So that if you're viewing a page of, say, Flickr photos, and you switch from viewing by Uploaded Time to viewing by Taken Time, the new ordering might have fewer pages. In that case we want to see the final page, not a 404. The softlimit does that, but I can't see how to use it without copying all of this...
entailment
def annual_reading_counts_card(kind='all', current_year=None):
    """
    Displays years and the number of books/periodicals read per year.

    kind is one of 'book', 'periodical', 'all' (default).
    current_year is an optional date object representing the year we're
        already showing information about.
    """
    if kind == 'book':
        card_title = 'Books per year'
    elif kind == 'periodical':
        card_title = 'Periodicals per year'
    else:
        card_title = 'Reading per year'

    return {
        'card_title': card_title,
        'kind': kind,
        'years': utils.annual_reading_counts(kind),
        'current_year': current_year,
    }
Displays years and the number of books/periodicals read per year. kind is one of 'book', 'periodical', 'all' (default). current_year is an optional date object representing the year we're already showing information about.
entailment
def day_publications(date):
    """
    Returns a QuerySet of Publications that were being read on `date`.
    `date` is a date object.
    """
    readings = Reading.objects \
        .filter(start_date__lte=date) \
        .filter(
            Q(end_date__gte=date)
            |
            Q(end_date__isnull=True)
        )
    if readings:
        return Publication.objects.filter(reading__in=readings) \
            .select_related('series') \
            .prefetch_related('roles__creator') \
            .distinct()
    else:
        return Publication.objects.none()
Returns a QuerySet of Publications that were being read on `date`. `date` is a date object.
entailment
def day_publications_card(date):
    """
    Displays Publications that were being read on `date`.
    `date` is a date object.
    """
    d = date.strftime(app_settings.DATE_FORMAT)
    card_title = 'Reading on {}'.format(d)
    return {
        'card_title': card_title,
        'publication_list': day_publications(date=date),
    }
Displays Publications that were being read on `date`. `date` is a date object.
entailment
def reading_dates(reading):
    """
    Given a Reading, with start and end dates and granularities[1] it
    returns an HTML string representing that period. eg:

        * '1–6 Feb 2017'
        * '1 Feb to 3 Mar 2017'
        * 'Feb 2017 to Mar 2018'
        * '2017–2018'
    etc.

    [1] https://www.flickr.com/services/api/misc.dates.html
    """

    # 3 September 2017
    full_format = '<time datetime="%Y-%m-%d">{}</time>'.format('%-d %B %Y')
    # September 2017
    month_year_format = '<time datetime="%Y-%m">{}</time>'.format('%B %Y')
    # 2017
    year_format = '<time datetime="%Y">{}</time>'.format('%Y')
    # 3
    day_format = '<time datetime="%Y-%m-%d">{}</time>'.format('%-d')
    # 3 September
    day_month_format = '<time datetime="%Y-%m-%d">{}</time>'.format('%-d %B')
    # September
    month_format = '<time datetime="%Y-%m">{}</time>'.format('%B')

    period_format_short = '{}–{}'
    period_format_long = '{} to {}'

    # For brevity:
    start_date = reading.start_date
    end_date = reading.end_date
    start_gran = reading.start_granularity
    end_gran = reading.end_granularity

    # Are start and end in the same day, year or month?
    same_day = False
    same_month = False
    same_year = False

    if start_date and end_date:
        if start_date.strftime('%Y') == end_date.strftime('%Y'):
            same_year = True
            if start_date.strftime('%m%Y') == end_date.strftime('%m%Y'):
                same_month = True
                if start_date.strftime('%d%m%Y') == \
                        end_date.strftime('%d%m%Y'):
                    same_day = True

    start_str = ''
    end_str = ''
    output = ''

    # Make some basic start and end strings, which we might use...

    if start_date:
        if start_gran == 3:
            start_str = start_date.strftime(full_format)
        elif start_gran == 4:
            start_str = start_date.strftime(month_year_format)
        else:
            start_str = start_date.strftime(year_format)

    if end_date:
        if end_gran == 3:
            end_str = end_date.strftime(full_format)
        elif end_gran == 4:
            end_str = end_date.strftime(month_year_format)
        else:
            end_str = end_date.strftime(year_format)

    # Now make the final strings we'll return:

    if start_date and end_date:

        # A default which will be overridden in many cases. This covers:
        # 1 February 2017 to 3 March 2018
        # 1 February 2017 to March 2018
        # 1 February 2017 to 2018
        # February 2017 to 3 March 2018
        # February 2017 to March 2018
        # February 2017 to 2018
        # 2017 to 3 March 2018
        # 2017 to March 2018
        # 2017 to 2018
        output = period_format_long.format(start_str, end_str)

        if (start_gran == 4 or end_gran == 4) and same_month:
            # Only have enough to output 'February 2017'.
            output = start_str

        elif (start_gran == 6 or end_gran == 6) and same_year:
            # Only have enough to output '2017'.
            output = start_str

        elif start_gran == 3:
            if end_gran == 3:
                if same_day:
                    # 1 February 2017
                    output = start_str
                elif same_month:
                    # 1–6 February 2017
                    output = period_format_short.format(
                        start_date.strftime(day_format), end_str)
                elif same_year:
                    # 1 February to 3 March 2017
                    output = period_format_long.format(
                        start_date.strftime(day_month_format), end_str)
            elif end_gran == 4:
                if same_year:
                    # 1 February to March 2017
                    output = period_format_long.format(
                        start_date.strftime(day_month_format), end_str)

        elif start_gran == 4:
            if end_gran == 3:
                if same_year:
                    # February to 3 March 2017
                    output = period_format_long.format(
                        start_date.strftime(month_format), end_str)
            elif end_gran == 4:
                if same_year:
                    # February to March 2017
                    output = period_format_long.format(
                        start_date.strftime(month_format), end_str)

    elif end_date:
        # Only an end_date.
        if end_gran == 3:
            # Finished on 1 February 2017
            output = "Finished on {}".format(end_str)
        else:
            # Finished in February 2017
            # Finished in 2017
            output = "Finished in {}".format(end_str)

    else:
        # No end_date: the reading has started, but not ended.
        if start_gran == 3:
            # Started on 1 February 2017
            output = "Started on {}".format(start_str)
        else:
            # Started in February 2017
            # Started in 2017
            output = "Started in {}".format(start_str)

    return format_html(output)
Given a Reading, with start and end dates and granularities[1] it returns an HTML string representing that period. eg: * '1–6 Feb 2017' * '1 Feb to 3 Mar 2017' * 'Feb 2017 to Mar 2018' * '2017–2018' etc. [1] https://www.flickr.com/services/api/misc.dates.html
entailment
def forwards(apps, schema_editor):
    """
    Change Events with kind 'movie' to 'cinema' and Events with kind
    'play' to 'theatre'. Purely for more consistency.
    """
    Event = apps.get_model('spectator_events', 'Event')

    for ev in Event.objects.filter(kind='movie'):
        ev.kind = 'cinema'
        ev.save()

    for ev in Event.objects.filter(kind='play'):
        ev.kind = 'theatre'
        ev.save()
Change Events with kind 'movie' to 'cinema' and Events with kind 'play' to 'theatre'. Purely for more consistency.
entailment
def get_env_variable(var_name, default=None):
    """Get the environment variable, or the default if given, or raise
    an exception."""
    try:
        return os.environ[var_name]
    except KeyError:
        if default is None:
            error_msg = "Set the %s environment variable" % var_name
            raise ImproperlyConfigured(error_msg)
        else:
            return default
Get the environment variable, or the default if given, or raise an exception.
entailment
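A quick usage sketch of the helper above; the variable name is made up for the example:

import os

os.environ.pop('EXAMPLE_API_KEY', None)  # hypothetical variable name
# With a default, a missing variable falls back instead of raising:
assert get_env_variable('EXAMPLE_API_KEY', default='dev-key') == 'dev-key'
os.environ['EXAMPLE_API_KEY'] = 'secret'
assert get_env_variable('EXAMPLE_API_KEY') == 'secret'
# Without a default, the same lookup would raise ImproperlyConfigured.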
def generate_slug(value):
    """
    Generates a slug using a Hashid of `value`.

    COPIED from spectator.core.models.SluggedModelMixin() because
    migrations don't make this happen automatically and perhaps the
    least bad thing is to copy the method here, ugh.
    """
    alphabet = app_settings.SLUG_ALPHABET
    salt = app_settings.SLUG_SALT
    hashids = Hashids(alphabet=alphabet, salt=salt, min_length=5)
    return hashids.encode(value)
Generates a slug using a Hashid of `value`. COPIED from spectator.core.models.SluggedModelMixin() because migrations don't make this happen automatically and perhaps the least bad thing is to copy the method here, ugh.
entailment
def forwards(apps, schema_editor):
    """
    Having added the new 'exhibition' Work type, we're going to assume
    that every Event of type 'museum' should actually have one Exhibition
    attached. So, we'll add one, with the same title as the Event.
    And we'll move all Creators from the Event to the Exhibition.
    """
    Event = apps.get_model('spectator_events', 'Event')
    Work = apps.get_model('spectator_events', 'Work')
    WorkRole = apps.get_model('spectator_events', 'WorkRole')
    WorkSelection = apps.get_model('spectator_events', 'WorkSelection')

    for event in Event.objects.filter(kind='museum'):

        # Create a new Work based on this Event's details.
        work = Work.objects.create(
            kind='exhibition',
            title=event.title,
            title_sort=event.title_sort
        )
        # This doesn't generate the slug field automatically because Django.
        # So we'll have to do it manually. Graarhhh.
        work.slug = generate_slug(work.pk)
        work.save()

        # Associate the new Work with the Event.
        WorkSelection.objects.create(
            event=event,
            work=work
        )

        # Associate any Creators on the Event with the new Work.
        for role in event.roles.all():
            WorkRole.objects.create(
                creator=role.creator,
                work=work,
                role_name=role.role_name,
                role_order=role.role_order
            )

            # Remove Creators from the Event.
            role.delete()
Having added the new 'exhibition' Work type, we're going to assume that every Event of type 'museum' should actually have one Exhibition attached. So, we'll add one, with the same title as the Event. And we'll move all Creators from the Event to the Exhibition.
entailment
def by_publications(self):
    """
    The Creators who have been most-read, ordered by number of read
    publications (ignoring if any of those publications have been read
    multiple times.)

    Each Creator will have a `num_publications` attribute.
    """
    if not spectator_apps.is_enabled('reading'):
        raise ImproperlyConfigured(
            "To use the CreatorManager.by_publications() method, "
            "'spectator.reading' must be in INSTALLED_APPS.")

    qs = self.get_queryset()

    qs = qs.exclude(publications__reading__isnull=True) \
           .annotate(num_publications=Count('publications')) \
           .order_by('-num_publications', 'name_sort')

    return qs
The Creators who have been most-read, ordered by number of read publications (ignoring if any of those publications have been read multiple times.) Each Creator will have a `num_publications` attribute.
entailment
def by_readings(self, role_names=['', 'Author']):
    """
    The Creators who have been most-read, ordered by number of readings.

    By default it will only include Creators whose role was left empty,
    or is 'Author'.

    Each Creator will have a `num_readings` attribute.
    """
    if not spectator_apps.is_enabled('reading'):
        raise ImproperlyConfigured(
            "To use the CreatorManager.by_readings() method, "
            "'spectator.reading' must be in INSTALLED_APPS.")

    qs = self.get_queryset()

    qs = qs.filter(publication_roles__role_name__in=role_names) \
           .exclude(publications__reading__isnull=True) \
           .annotate(num_readings=Count('publications__reading')) \
           .order_by('-num_readings', 'name_sort')

    return qs
The Creators who have been most-read, ordered by number of readings. By default it will only include Creators whose role was left empty, or is 'Author'. Each Creator will have a `num_readings` attribute.
entailment
def by_events(self, kind=None):
    """
    Get the Creators involved in the most Events.

    This only counts Creators directly involved in an Event.
    i.e. if a Creator is the director of a movie Work, and an Event was
    a viewing of that movie, that Event wouldn't count. Unless they were
    also directly involved in the Event (e.g. speaking after the movie).

    kind - If supplied, only Events with that `kind` value will be
        counted.
    """
    if not spectator_apps.is_enabled('events'):
        raise ImproperlyConfigured(
            "To use the CreatorManager.by_events() method, "
            "'spectator.events' must be in INSTALLED_APPS.")

    qs = self.get_queryset()

    if kind is not None:
        qs = qs.filter(events__kind=kind)

    qs = qs.annotate(num_events=Count('events', distinct=True)) \
           .order_by('-num_events', 'name_sort')

    return qs
Get the Creators involved in the most Events. This only counts Creators directly involved in an Event. i.e. if a Creator is the director of a movie Work, and an Event was a viewing of that movie, that Event wouldn't count. Unless they were also directly involved in the Event (e.g. speaking after the movie). kind - If supplied, only Events with that `kind` value will be counted.
entailment
def by_works(self, kind=None, role_name=None):
    """
    Get the Creators involved in the most Works.

    kind - If supplied, only Works with that `kind` value will be
        counted.
    role_name - If supplied, only Works on which the role is that will
        be counted.

    e.g. To get all 'movie' Works on which the Creators had the role
    'Director':
        Creator.objects.by_works(kind='movie', role_name='Director')
    """
    if not spectator_apps.is_enabled('events'):
        raise ImproperlyConfigured(
            "To use the CreatorManager.by_works() method, "
            "'spectator.events' must be in INSTALLED_APPS.")

    qs = self.get_queryset()

    filter_kwargs = {}
    if kind is not None:
        filter_kwargs['works__kind'] = kind
    if role_name is not None:
        filter_kwargs['work_roles__role_name'] = role_name
    if filter_kwargs:
        qs = qs.filter(**filter_kwargs)

    qs = qs.annotate(num_works=Count('works', distinct=True)) \
           .order_by('-num_works', 'name_sort')

    return qs
Get the Creators involved in the most Works. kind - If supplied, only Works with that `kind` value will be counted. role_name - If supplied, only Works on which the role is that will be counted. e.g. To get all 'movie' Works on which the Creators had the role 'Director': Creator.objects.by_works(kind='movie', role_name='Director')
entailment
def index():
    """Query Elasticsearch using Invenio query syntax."""
    page = request.values.get('page', 1, type=int)
    size = request.values.get('size', 2, type=int)

    search = ExampleSearch()[(page - 1) * size:page * size]
    if 'q' in request.values:
        search = search.query(QueryString(query=request.values.get('q')))
    search = search.sort(request.values.get('sort', 'title'))
    search = ExampleSearch.faceted_search(search=search)

    results = search.execute().to_dict()
    return jsonify({'hits': results.get('hits')})
Query Elasticsearch using Invenio query syntax.
entailment
def clean_options(self,
                  using_keytab=False, principal=None,
                  keytab_file=None, ccache_file=None,
                  password=None):
    """Clean arguments to related objects

    :param bool using_keytab: refer to ``krbContext.__init__``.
    :param str principal: refer to ``krbContext.__init__``.
    :param str keytab_file: refer to ``krbContext.__init__``.
    :param str ccache_file: refer to ``krbContext.__init__``.
    :param str password: refer to ``krbContext.__init__``.
    :return: a mapping containing cleaned names and values, which are
        used internally.
    :rtype: dict
    :raises ValueError: principal is missing or given keytab file does
        not exist, when initializing from a keytab.
    """
    cleaned = {}

    if using_keytab:
        if principal is None:
            raise ValueError('Principal is required when using key table.')
        princ_name = gssapi.names.Name(
            principal, gssapi.names.NameType.kerberos_principal)

        if keytab_file is None:
            cleaned['keytab'] = DEFAULT_KEYTAB
        elif not os.path.exists(keytab_file):
            raise ValueError(
                'Keytab file {0} does not exist.'.format(keytab_file))
        else:
            cleaned['keytab'] = keytab_file
    else:
        if principal is None:
            principal = get_login()
        princ_name = gssapi.names.Name(
            principal, gssapi.names.NameType.user)

    cleaned['using_keytab'] = using_keytab
    cleaned['principal'] = princ_name
    cleaned['ccache'] = ccache_file or DEFAULT_CCACHE
    cleaned['password'] = password
    return cleaned
Clean arguments to related objects :param bool using_keytab: refer to ``krbContext.__init__``. :param str principal: refer to ``krbContext.__init__``. :param str keytab_file: refer to ``krbContext.__init__``. :param str ccache_file: refer to ``krbContext.__init__``. :param str password: refer to ``krbContext.__init__``. :return: a mapping containing cleaned names and values, which are used internally. :rtype: dict :raises ValueError: principal is missing or given keytab file does not exist, when initializing from a keytab.
entailment
def init_with_keytab(self):
    """Initialize credential cache with keytab"""
    creds_opts = {
        'usage': 'initiate',
        'name': self._cleaned_options['principal'],
    }

    store = {}
    if self._cleaned_options['keytab'] != DEFAULT_KEYTAB:
        store['client_keytab'] = self._cleaned_options['keytab']
    if self._cleaned_options['ccache'] != DEFAULT_CCACHE:
        store['ccache'] = self._cleaned_options['ccache']
    if store:
        creds_opts['store'] = store

    creds = gssapi.creds.Credentials(**creds_opts)
    try:
        creds.lifetime
    except gssapi.exceptions.ExpiredCredentialsError:
        new_creds_opts = copy.deepcopy(creds_opts)
        # Get new credential and put it into a temporary ccache
        if 'store' in new_creds_opts:
            new_creds_opts['store']['ccache'] = _get_temp_ccache()
        else:
            new_creds_opts['store'] = {'ccache': _get_temp_ccache()}
        creds = gssapi.creds.Credentials(**new_creds_opts)
        # Then, store new credential back to original specified ccache,
        # whatever a given ccache file or the default one.
        _store = None
        # If the default ccache is used, no need to specify ccache in the
        # store parameter passed to ``creds.store``.
        if self._cleaned_options['ccache'] != DEFAULT_CCACHE:
            _store = {'ccache': store['ccache']}
        creds.store(usage='initiate', store=_store, overwrite=True)
Initialize credential cache with keytab
entailment
def init_with_password(self):
    """Initialize credential cache with password

    **Caution:** once you enter a password from the command line, or pass
    it to the API directly, the given password is not always encrypted.
    Although getting credential with password works, from a security point
    of view, it is strongly recommended **NOT** to use it in any formal
    production environment. If you need to initialize credential in an
    application to application Kerberos authentication context, a keytab
    has to be used.

    :raises IOError: when trying to prompt to input password from command
        line but no tty is available.
    """
    creds_opts = {
        'usage': 'initiate',
        'name': self._cleaned_options['principal'],
    }

    if self._cleaned_options['ccache'] != DEFAULT_CCACHE:
        creds_opts['store'] = {'ccache': self._cleaned_options['ccache']}

    cred = gssapi.creds.Credentials(**creds_opts)
    try:
        cred.lifetime
    except gssapi.exceptions.ExpiredCredentialsError:
        password = self._cleaned_options['password']
        if not password:
            if not sys.stdin.isatty():
                raise IOError(
                    'krbContext is not running from a terminal. So, you '
                    'need to run kinit with your principal manually before'
                    ' anything goes.')

            # If there is no password specified via API call, prompt to
            # enter one in order to continue to get credential. BUT, in
            # some cases, blocking program and waiting for input of
            # password is really bad, which may be only suitable for some
            # simple use cases, for example, writing some scripts to test
            # something that need Kerberos authentication. Anyway, whether
            # it is really to enter a password from command line, it
            # depends on concrete use cases totally.
            password = getpass.getpass()

        cred = gssapi.raw.acquire_cred_with_password(
            self._cleaned_options['principal'], password)

        ccache = self._cleaned_options['ccache']
        if ccache == DEFAULT_CCACHE:
            gssapi.raw.store_cred(
                cred.creds, usage='initiate', overwrite=True)
        else:
            gssapi.raw.store_cred_into(
                {'ccache': ccache}, cred.creds,
                usage='initiate', overwrite=True)
Initialize credential cache with password **Caution:** once you enter a password from the command line, or pass it to the API directly, the given password is not always encrypted. Although getting credential with password works, from a security point of view, it is strongly recommended **NOT** to use it in any formal production environment. If you need to initialize credential in an application to application Kerberos authentication context, a keytab has to be used. :raises IOError: when trying to prompt to input password from command line but no tty is available.
entailment
def _prepare_context(self):
    """Prepare context

    Initialize credential cache with keytab or password according to
    ``using_keytab`` parameter. Then, ``KRB5CCNAME`` is set properly so
    that Kerberos library called in current context is able to get
    credential from correct cache.

    Internal use only.
    """
    ccache = self._cleaned_options['ccache']

    # Whether or not KRB5CCNAME was set in the current process,
    # original_krb5ccname will hold its current value (None if the
    # variable wasn't set), so it can be restored properly when we
    # leave krbcontext.
    self._original_krb5ccname = os.environ.get(ENV_KRB5CCNAME)

    if ccache == DEFAULT_CCACHE:
        # If user wants to use default ccache, existing KRB5CCNAME in
        # current environment variable should be removed.
        if self._original_krb5ccname:
            del os.environ[ENV_KRB5CCNAME]
    else:
        # When not using default ccache to initialize new credential, let
        # us point to the given ccache by KRB5CCNAME.
        os.environ[ENV_KRB5CCNAME] = ccache

    if self._cleaned_options['using_keytab']:
        self.init_with_keytab()
    else:
        self.init_with_password()
Prepare context Initialize credential cache with keytab or password according to ``using_keytab`` parameter. Then, ``KRB5CCNAME`` is set properly so that Kerberos library called in current context is able to get credential from correct cache. Internal use only.
entailment
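A hedged sketch of driving the context manager these methods support; the import path is assumed from the krbcontext project's README, and the principal and keytab path are placeholders:

from krbcontext.context import krbContext  # assumed import path

with krbContext(using_keytab=True,
                principal='service/host.example.com@EXAMPLE.COM',
                keytab_file='/etc/app/app.keytab'):
    # Inside the block, KRB5CCNAME points at a ccache holding a valid
    # credential, so any Kerberos-aware client library can be used here.
    pass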
def templates(self):
    """Generate a dictionary with template names and file paths."""
    templates = {}
    result = []
    if self.entry_point_group_templates:
        result = self.load_entry_point_group_templates(
            self.entry_point_group_templates) or []

    for template in result:
        for name, path in template.items():
            templates[name] = path

    return templates
Generate a dictionary with template names and file paths.
entailment
def register_mappings(self, alias, package_name):
    """Register mappings from a package under given alias.

    :param alias: The alias.
    :param package_name: The package name.
    """
    # For backwards compatibility, we also allow for ES2 mappings to be
    # placed at the root level of the specified package path, and not in
    # the `<package-path>/v2` directory.
    if ES_VERSION[0] == 2:
        try:
            resource_listdir(package_name, 'v2')
            package_name += '.v2'
        except (OSError, IOError) as ex:
            if getattr(ex, 'errno', 0) != errno.ENOENT:
                raise
            warnings.warn(
                "Having mappings in a path which doesn't specify the "
                "Elasticsearch version is deprecated. Please move your "
                "mappings to a subfolder named according to the "
                "Elasticsearch version which your mappings are intended "
                "for. (e.g. '{}/v2/{}')".format(package_name, alias),
                PendingDeprecationWarning)
    else:
        package_name = '{}.v{}'.format(package_name, ES_VERSION[0])

    def _walk_dir(aliases, *parts):
        root_name = build_index_name(self.app, *parts)
        resource_name = os.path.join(*parts)

        if root_name not in aliases:
            self.number_of_indexes += 1

        data = aliases.get(root_name, {})

        for filename in resource_listdir(package_name, resource_name):
            index_name = build_index_name(
                self.app, *(parts + (filename, )))
            file_path = os.path.join(resource_name, filename)

            if resource_isdir(package_name, file_path):
                _walk_dir(data, *(parts + (filename, )))
                continue

            ext = os.path.splitext(filename)[1]
            if ext not in {'.json', }:
                continue

            assert index_name not in data, 'Duplicate index'
            data[index_name] = self.mappings[index_name] = \
                resource_filename(
                    package_name, os.path.join(resource_name, filename))
            self.number_of_indexes += 1

        aliases[root_name] = data

    # Start the recursion here:
    _walk_dir(self.aliases, alias)
Register mappings from a package under given alias. :param alias: The alias. :param package_name: The package name.
entailment
def register_templates(self, directory):
    """Register templates from the provided directory.

    :param directory: The templates directory.
    """
    try:
        resource_listdir(directory, 'v{}'.format(ES_VERSION[0]))
        directory = '{}/v{}'.format(directory, ES_VERSION[0])
    except (OSError, IOError) as ex:
        if getattr(ex, 'errno', 0) == errno.ENOENT:
            raise OSError(
                "Please move your templates to a subfolder named "
                "according to the Elasticsearch version "
                "which your templates are intended "
                "for. (e.g. '{}.v{}')".format(directory, ES_VERSION[0]))

    result = {}
    module_name, parts = directory.split('.')[0], directory.split('.')[1:]
    parts = tuple(parts)

    def _walk_dir(parts):
        resource_name = os.path.join(*parts)

        for filename in resource_listdir(module_name, resource_name):
            template_name = build_index_name(
                self.app, *(parts[1:] + (filename, )))
            file_path = os.path.join(resource_name, filename)

            if resource_isdir(module_name, file_path):
                _walk_dir((parts + (filename, )))
                continue

            ext = os.path.splitext(filename)[1]
            if ext not in {'.json', }:
                continue

            result[template_name] = resource_filename(
                module_name, os.path.join(resource_name, filename))

    # Start the recursion here:
    _walk_dir(parts)

    return result
Register templates from the provided directory. :param directory: The templates directory.
entailment
def load_entry_point_group_mappings(self, entry_point_group_mappings):
    """Load actions from an entry point group."""
    for ep in iter_entry_points(group=entry_point_group_mappings):
        self.register_mappings(ep.name, ep.module_name)
Load actions from an entry point group.
entailment
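For context, a sketch of how a package typically advertises its mappings through the entry point group consumed above ('invenio_search.mappings' is the default group named in init_app() below; the package and alias names are invented):

# setup.py of a package shipping Elasticsearch mappings:
from setuptools import setup

setup(
    name='mypackage',
    packages=['mypackage'],
    entry_points={
        'invenio_search.mappings': [
            # entry point name = alias, value = package holding mappings
            'records = mypackage.mappings',
        ],
    },
)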
def load_entry_point_group_templates(self, entry_point_group_templates):
    """Load actions from an entry point group."""
    result = []
    for ep in iter_entry_points(group=entry_point_group_templates):
        with self.app.app_context():
            for template_dir in ep.load()():
                result.append(self.register_templates(template_dir))
    return result
Load actions from an entry point group.
entailment
def _client_builder(self):
    """Build Elasticsearch client."""
    client_config = self.app.config.get('SEARCH_CLIENT_CONFIG') or {}
    client_config.setdefault(
        'hosts', self.app.config.get('SEARCH_ELASTIC_HOSTS'))
    client_config.setdefault('connection_class', RequestsHttpConnection)
    return Elasticsearch(**client_config)
Build Elasticsearch client.
entailment
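Since SEARCH_CLIENT_CONFIG is passed straight through to Elasticsearch() above, any keyword the client constructor accepts can be set there; a hedged configuration sketch (hosts and timeout values are placeholders):

app.config['SEARCH_CLIENT_CONFIG'] = {
    # Overrides SEARCH_ELASTIC_HOSTS, since setdefault() won't touch it:
    'hosts': ['es1.example.org:9200', 'es2.example.org:9200'],
    'timeout': 30,  # any Elasticsearch() kwarg is forwarded as-is
}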
def client(self):
    """Return client for current application."""
    if self._client is None:
        self._client = self._client_builder()
    return self._client
Return client for current application.
entailment
def flush_and_refresh(self, index):
    """Flush and refresh one or more indices.

    .. warning::

       Do not call this method unless you know what you are doing. This
       method is only intended to be called during tests.
    """
    self.client.indices.flush(wait_if_ongoing=True, index=index)
    self.client.indices.refresh(index=index)
    self.client.cluster.health(
        wait_for_status='yellow', request_timeout=30)
    return True
Flush and refresh one or more indices. .. warning:: Do not call this method unless you know what you are doing. This method is only intended to be called during tests.
entailment
def cluster_version(self):
    """Get version of Elasticsearch running on the cluster."""
    versionstr = self.client.info()['version']['number']
    return [int(x) for x in versionstr.split('.')]
Get version of Elasticsearch running on the cluster.
entailment
def active_aliases(self):
    """Get a filtered list of aliases based on configuration.

    Returns aliases and their mappings that are defined in the
    `SEARCH_MAPPINGS` config variable. If the `SEARCH_MAPPINGS` is set
    to `None` (the default), all aliases are included.
    """
    whitelisted_aliases = self.app.config.get('SEARCH_MAPPINGS')
    if whitelisted_aliases is None:
        return self.aliases
    else:
        return {k: v for k, v in self.aliases.items()
                if k in whitelisted_aliases}
Get a filtered list of aliases based on configuration. Returns aliases and their mappings that are defined in the `SEARCH_MAPPINGS` config variable. If the `SEARCH_MAPPINGS` is set to `None` (the default), all aliases are included.
entailment
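A small configuration sketch for the filtering described above; the 'records' alias is illustrative:

# Only the listed aliases take part in create()/delete() below;
# leaving SEARCH_MAPPINGS as None means "all registered aliases".
app.config['SEARCH_MAPPINGS'] = ['records']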
def create(self, ignore=None):
    """Yield tuple with created index name and responses from a client."""
    ignore = ignore or []

    def _create(tree_or_filename, alias=None):
        """Create indices and aliases by walking DFS."""
        # Iterate over aliases:
        for name, value in tree_or_filename.items():
            if isinstance(value, dict):
                for result in _create(value, alias=name):
                    yield result
            else:
                with open(value, 'r') as body:
                    yield name, self.client.indices.create(
                        index=name,
                        body=json.load(body),
                        ignore=ignore,
                    )

        if alias:
            yield alias, self.client.indices.put_alias(
                index=list(_get_indices(tree_or_filename)),
                name=alias,
                ignore=ignore,
            )

    for result in _create(self.active_aliases):
        yield result
Yield tuple with created index name and responses from a client.
entailment
def put_templates(self, ignore=None):
    """Yield tuple with registered template and response from client."""
    ignore = ignore or []

    def _replace_prefix(template_path, body):
        """Replace index prefix in template request body."""
        pattern = '__SEARCH_INDEX_PREFIX__'

        prefix = self.app.config['SEARCH_INDEX_PREFIX'] or ''
        if prefix:
            assert pattern in body, \
                "You are using the prefix `{0}`, " \
                "but the template `{1}` does not contain the " \
                "pattern `{2}`.".format(prefix, template_path, pattern)

        return body.replace(pattern, prefix)

    def _put_template(template):
        """Put template in search client."""
        with open(self.templates[template], 'r') as fp:
            body = fp.read()
            replaced_body = _replace_prefix(self.templates[template], body)
            return self.templates[template], \
                current_search_client.indices.put_template(
                    name=template,
                    body=json.loads(replaced_body),
                    ignore=ignore,
                )

    for template in self.templates:
        yield _put_template(template)
Yield tuple with registered template and response from client.
entailment
def delete(self, ignore=None):
    """Yield tuple with deleted index name and responses from a client."""
    ignore = ignore or []

    def _delete(tree_or_filename, alias=None):
        """Delete indexes and aliases by walking DFS."""
        if alias:
            yield alias, self.client.indices.delete_alias(
                index=list(_get_indices(tree_or_filename)),
                name=alias,
                ignore=ignore,
            )

        # Iterate over aliases:
        for name, value in tree_or_filename.items():
            if isinstance(value, dict):
                for result in _delete(value, alias=name):
                    yield result
            else:
                yield name, self.client.indices.delete(
                    index=name,
                    ignore=ignore,
                )

    for result in _delete(self.active_aliases):
        yield result
Yield tuple with deleted index name and responses from a client.
entailment
def init_app(self, app,
             entry_point_group_mappings='invenio_search.mappings',
             entry_point_group_templates='invenio_search.templates',
             **kwargs):
    """Flask application initialization.

    :param app: An instance of :class:`~flask.app.Flask`.
    """
    self.init_config(app)
    app.cli.add_command(index_cmd)

    state = _SearchState(
        app,
        entry_point_group_mappings=entry_point_group_mappings,
        entry_point_group_templates=entry_point_group_templates,
        **kwargs
    )
    self._state = app.extensions['invenio-search'] = state
Flask application initialization. :param app: An instance of :class:`~flask.app.Flask`.
entailment
def main():
    """Start the poor_consumer."""
    try:
        opts, args = getopt.getopt(sys.argv[1:], "h:v",
                                   ["help", "nack=", "servers=", "queues="])
    except getopt.GetoptError as err:
        print str(err)
        usage()
        sys.exit()

    # defaults
    nack = 0.0
    verbose = False
    servers = "localhost:7712,localhost:7711"
    queues = "test"

    for o, a in opts:
        if o == "-v":
            verbose = True
        elif o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("--nack",):
            nack = float(a)
        elif o in ("--servers",):
            servers = a
        elif o in ("--queues",):
            queues = a
        else:
            assert False, "unhandled option"

    # prepare servers and queues for pydisque
    servers = servers.split(",")
    queues = queues.split(",")

    c = Client(servers)
    c.connect()

    while True:
        jobs = c.get_job(queues)
        for queue_name, job_id, job in jobs:
            rnd = random.random()

            # as this is a test processor, we don't do any validation on
            # the actual job body, so lets just pay attention to id's
            if rnd >= nack:
                print ">>> received job:", job_id
                c.ack_job(job_id)
            else:
                print ">>> bouncing job:", job_id
                c.nack_job(job_id)
Start the poor_consumer.
entailment
def connect(self):
    """
    Connect to one of the Disque nodes.

    You can get the current connection with the connected_node property.

    :returns: nothing
    """
    self.connected_node = None

    for i, node in self.nodes.items():
        host, port = i.split(':')
        port = int(port)
        redis_client = redis.Redis(host, port, **self.client_kw_args)
        try:
            ret = redis_client.execute_command('HELLO')
            format_version, node_id = ret[0], ret[1]
            others = ret[2:]
            self.nodes[i] = Node(node_id, host, port, redis_client)
            self.connected_node = self.nodes[i]
        except redis.exceptions.ConnectionError:
            pass

    if not self.connected_node:
        raise ConnectionError('couldnt connect to any nodes')

    logger.info("connected to node %s" % self.connected_node)
Connect to one of the Disque nodes. You can get the current connection with the connected_node property. :returns: nothing
entailment
def execute_command(self, *args, **kwargs):
    """Execute a command on the connected server."""
    try:
        return self.get_connection().execute_command(*args, **kwargs)
    except ConnectionError as e:
        logger.warn('trying to reconnect')
        self.connect()
        logger.warn('connected')
        raise
Execute a command on the connected server.
entailment
def add_job(self, queue_name, job, timeout=200, replicate=None,
            delay=None, retry=None, ttl=None, maxlen=None,
            asynchronous=None):
    """
    Add a job to a queue.

    ADDJOB queue_name job <ms-timeout>
        [REPLICATE <count>] [DELAY <sec>] [RETRY <sec>] [TTL <sec>]
        [MAXLEN <count>] [ASYNC]

    :param queue_name: is the name of the queue, any string, basically.
    :param job: is a string representing the job.
    :param timeout: is the command timeout in milliseconds.
    :param replicate: count is the number of nodes the job should be
        replicated to.
    :param delay: sec is the number of seconds that should elapse
        before the job is queued by any server.
    :param retry: sec period after which, if no ACK is received, the
        job is put again into the queue for delivery. If RETRY is 0,
        the job has an at-most-once delivery semantics.
    :param ttl: sec is the max job life in seconds. After this time,
        the job is deleted even if it was not successfully delivered.
    :param maxlen: count specifies that if there are already count
        messages queued for the specified queue name, the message is
        refused and an error reported to the client.
    :param asynchronous: asks the server to let the command return ASAP
        and replicate the job to other nodes in the background. The job
        gets queued ASAP, while normally the job is put into the queue
        only when the client gets a positive reply. (This argument is
        named `asynchronous` rather than `async` because `async` is a
        reserved keyword in Python 3.7.)
    :returns: job_id
    """
    command = ['ADDJOB', queue_name, job, timeout]
    if replicate:
        command += ['REPLICATE', replicate]
    if delay:
        command += ['DELAY', delay]
    if retry is not None:
        command += ['RETRY', retry]
    if ttl:
        command += ['TTL', ttl]
    if maxlen:
        command += ['MAXLEN', maxlen]
    if asynchronous:
        command += ['ASYNC']

    # TODO(canardleteer): we need to handle "-PAUSE" messages more
    # appropriately, for now it's up to the person using the library
    # to handle a generic ResponseError on their own.
    logger.debug("sending job - %s", command)
    job_id = self.execute_command(*command)
    logger.debug("sent job - %s", command)
    logger.debug("job_id: %s " % job_id)
    return job_id
Add a job to a queue. ADDJOB queue_name job <ms-timeout> [REPLICATE <count>] [DELAY <sec>] [RETRY <sec>] [TTL <sec>] [MAXLEN <count>] [ASYNC] :param queue_name: is the name of the queue, any string, basically. :param job: is a string representing the job. :param timeout: is the command timeout in milliseconds. :param replicate: count is the number of nodes the job should be replicated to. :param delay: sec is the number of seconds that should elapse before the job is queued by any server. :param retry: sec period after which, if no ACK is received, the job is put again into the queue for delivery. If RETRY is 0, the job has an at-most-once delivery semantics. :param ttl: sec is the max job life in seconds. After this time, the job is deleted even if it was not successfully delivered. :param maxlen: count specifies that if there are already count messages queued for the specified queue name, the message is refused and an error reported to the client. :param asynchronous: asks the server to let the command return ASAP and replicate the job to other nodes in the background. The job gets queued ASAP, while normally the job is put into the queue only when the client gets a positive reply. (This parameter is named ``asynchronous`` because ``async`` is a reserved keyword as of Python 3.7.) :returns: job_id
entailment
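A hedged usage sketch for ``add_job``, reusing the ``client`` from the previous snippet; the queue name and payload are illustrative:

import json

# Enqueue a JSON payload with a 100 ms command timeout; redeliver every
# 30 s until acked, and expire the job after one hour.
job_id = client.add_job(
    "example_queue",
    json.dumps({"action": "resize", "image": "cat.png"}),
    timeout=100,
    retry=30,
    ttl=3600,
)
print("queued as", job_id)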
def get_job(self, queues, timeout=None, count=None, nohang=False, withcounters=False):
    """
    Return some number of jobs from specified queues.

    GETJOB [NOHANG] [TIMEOUT <ms-timeout>] [COUNT <count>] [WITHCOUNTERS] FROM
    queue1 queue2 ... queueN

    :param queues: name of queues

    :returns: list of tuple(queue_name, job_id, job), tuple(queue_name,
              job_id, job, nacks, additional_deliveries) or empty list
    :rtype: list
    """
    assert queues
    command = ['GETJOB']
    if nohang:
        command += ['NOHANG']
    if timeout:
        command += ['TIMEOUT', timeout]
    if count:
        command += ['COUNT', count]
    if withcounters:
        command += ['WITHCOUNTERS']

    command += ['FROM'] + queues
    results = self.execute_command(*command)
    if not results:
        return []
    # Disque replies with the queue name first, then the job ID and body,
    # which matches how the example consumer unpacks each tuple.
    if withcounters:
        return [(queue_name, job_id, job, nacks, additional_deliveries)
                for queue_name, job_id, job, _, nacks, _,
                additional_deliveries in results]
    else:
        return [(queue_name, job_id, job)
                for queue_name, job_id, job in results]
Return some number of jobs from specified queues. GETJOB [NOHANG] [TIMEOUT <ms-timeout>] [COUNT <count>] [WITHCOUNTERS] FROM queue1 queue2 ... queueN :param queues: name of queues :returns: list of tuple(queue_name, job_id, job), tuple(queue_name, job_id, job, nacks, additional_deliveries) or empty list :rtype: list
entailment
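A consumer sketch matching the tuple order above; with ``withcounters=True`` each job also carries its nack and additional-delivery counters (assumes the ``client`` from earlier):

# Fetch up to 10 jobs without blocking; ack each one once processed.
for queue_name, job_id, body, nacks, extra in client.get_job(
        ["example_queue"], count=10, nohang=True, withcounters=True):
    if nacks or extra:
        print("job", job_id, "was redelivered", nacks + extra, "times")
    client.ack_job(job_id)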
def qstat(self, queue_name, return_dict=False):
    """
    Return the status of the queue.

    QSTAT support in Disque is still evolving; this wraps the command
    as currently specified.

    QSTAT <qname>

    Return produced ... consumed ... idle ... sources [...] ctime ...
    """
    rtn = self.execute_command('QSTAT', queue_name)

    if return_dict:
        grouped = self._grouper(rtn, 2)
        rtn = dict((a, b) for a, b in grouped)

    return rtn
Return the status of the queue. QSTAT support in Disque is still evolving; this wraps the command as currently specified. QSTAT <qname> Return produced ... consumed ... idle ... sources [...] ctime ...
entailment
def show(self, job_id, return_dict=False):
    """
    Describe the job.

    :param job_id: The ID of the job to describe.
    :param return_dict: If True, return the reply as a dict of field
        name to value instead of a flat list.
    """
    rtn = self.execute_command('SHOW', job_id)

    if return_dict:
        grouped = self._grouper(rtn, 2)
        rtn = dict((a, b) for a, b in grouped)

    return rtn
Describe the job. :param job_id: The ID of the job to describe. :param return_dict: If True, return the reply as a dict of field name to value instead of a flat list.
entailment
def pause(self, queue_name, kw_in=None, kw_out=None, kw_all=None, kw_none=None, kw_state=None, kw_bcast=None): """ Pause a queue. Unfortunately, the PAUSE keywords are mostly reserved words in Python, so I've been a little creative in the function variable names. Open to suggestions to change it (canardleteer) :param queue_name: The job queue we are modifying. :param kw_in: pause the queue in input. :param kw_out: pause the queue in output. :param kw_all: pause the queue in input and output (same as specifying both the in and out options). :param kw_none: clear the paused state in input and output. :param kw_state: just report the current queue state. :param kw_bcast: send a PAUSE command to all the reachable nodes of the cluster to set the same queue in the other nodes to the same state. """ command = ["PAUSE", queue_name] if kw_in: command += ["in"] if kw_out: command += ["out"] if kw_all: command += ["all"] if kw_none: command += ["none"] if kw_state: command += ["state"] if kw_bcast: command += ["bcast"] return self.execute_command(*command)
Pause a queue. Unfortunately, the PAUSE keywords are mostly reserved words in Python, so I've been a little creative in the function variable names. Open to suggestions to change it (canardleteer) :param queue_name: The job queue we are modifying. :param kw_in: pause the queue in input. :param kw_out: pause the queue in output. :param kw_all: pause the queue in input and output (same as specifying both the in and out options). :param kw_none: clear the paused state in input and output. :param kw_state: just report the current queue state. :param kw_bcast: send a PAUSE command to all the reachable nodes of the cluster to set the same queue in the other nodes to the same state.
entailment
def qscan(self, cursor=0, count=None, busyloop=None, minlen=None,
          maxlen=None, importrate=None):
    """
    Iterate all the existing queues in the local node.

    :param count: A hint about how much work to do per iteration.
    :param busyloop: Block and return all the elements in a busy loop.
    :param minlen: Don't return elements with fewer than count jobs queued.
    :param maxlen: Don't return elements with more than count jobs queued.
    :param importrate: Only return elements with a job import rate
        (from other nodes) >= rate.
    """
    command = ["QSCAN", cursor]
    if count:
        command += ["COUNT", count]
    if busyloop:
        command += ["BUSYLOOP"]
    if minlen:
        command += ["MINLEN", minlen]
    if maxlen:
        command += ["MAXLEN", maxlen]
    if importrate:
        command += ["IMPORTRATE", importrate]

    return self.execute_command(*command)
Iterate all the existing queues in the local node. :param count: A hint about how much work to do per iteration. :param busyloop: Block and return all the elements in a busy loop. :param minlen: Don't return elements with fewer than count jobs queued. :param maxlen: Don't return elements with more than count jobs queued. :param importrate: Only return elements with a job import rate (from other nodes) >= rate.
entailment
def jscan(self, cursor=0, count=None, busyloop=None, queue=None,
          state=None, reply=None):
    """Iterate all the existing jobs in the local node.

    :param count: A hint about how much work to do per iteration.
    :param busyloop: Block and return all the elements in a busy loop.
    :param queue: Return only jobs in the specified queue.
    :param state: Must be a list - Return jobs in the specified state.
        Can be used multiple times for a logical OR.
    :param reply: None or string {"all", "id"} - Job reply type. Type can
        be all or id. Default is to report just the job ID. If all is
        specified the full job state is returned like for the SHOW command.
    """
    command = ["JSCAN", cursor]
    if count:
        command += ["COUNT", count]
    if busyloop:
        command += ["BUSYLOOP"]
    if queue:
        command += ["QUEUE", queue]
    if isinstance(state, list):
        for s in state:
            command += ["STATE", s]
    if reply:
        command += ["REPLY", reply]

    return self.execute_command(*command)
Iterate all the existing jobs in the local node. :param count: A hint about how much work to do per iteration. :param busyloop: Block and return all the elements in a busy loop. :param queue: Return only jobs in the specified queue. :param state: Must be a list - Return jobs in the specified state. Can be used multiple times for a logical OR. :param reply: None or string {"all", "id"} - Job reply type. Type can be all or id. Default is to report just the job ID. If all is specified the full job state is returned like for the SHOW command.
entailment
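A hedged paging sketch for ``jscan``; like Redis ``SCAN``, the raw reply is assumed to be a ``(cursor, items)`` pair, so iteration stops when the cursor comes back as 0:

cursor = 0
while True:
    cursor, jobs = client.jscan(cursor=cursor, queue="example_queue",
                                state=["queued", "active"], reply="all")
    for job in jobs:
        print(job)
    if int(cursor) == 0:  # the cursor may arrive as bytes or str
        break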
def build_index_name(app, *parts):
    """Build an index name from parts.

    :param app: Flask application.
    :param parts: Parts that should be combined to make an index name.
    """
    base_index = os.path.splitext(
        '-'.join([part for part in parts if part])
    )[0]
    return prefix_index(app=app, index=base_index)
Build an index name from parts. :param app: Flask application. :param parts: Parts that should be combined to make an index name.
entailment
def schema_to_index(schema, index_names=None):
    """Get index/doc_type given a schema URL.

    :param schema: The schema name
    :param index_names: A list of index names.
    :returns: A tuple containing (index, doc_type).
    """
    parts = schema.split('/')
    doc_type = os.path.splitext(parts[-1])

    if doc_type[1] not in {'.json', }:
        return (None, None)

    if index_names is None:
        return (build_index_name(current_app, *parts), doc_type[0])

    for start in range(len(parts)):
        index_name = build_index_name(current_app, *parts[start:])
        if index_name in index_names:
            return (index_name, doc_type[0])

    return (None, None)
Get index/doc_type given a schema URL. :param schema: The schema name :param index_names: A list of index names. :returns: A tuple containing (index, doc_type).
entailment
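An illustrative sketch of the schema-to-index mapping; the import path and the empty-prefix assumption are mine, not guaranteed by the snippet above:

from flask import Flask
from invenio_search.utils import schema_to_index  # assumed import path

app = Flask(__name__)
with app.app_context():
    # With no SEARCH_INDEX_PREFIX configured, the schema path flattens
    # to ('records-record-v1.0.0', 'record-v1.0.0').
    print(schema_to_index('records/record-v1.0.0.json'))
    # Non-JSON schemas map to (None, None).
    print(schema_to_index('records/record-v1.0.0.xml'))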
def es_version_check(f): """Decorator to check Elasticsearch version.""" @wraps(f) def inner(*args, **kwargs): cluster_ver = current_search.cluster_version[0] client_ver = ES_VERSION[0] if cluster_ver != client_ver: raise click.ClickException( 'Elasticsearch version mismatch. Invenio was installed with ' 'Elasticsearch v{client_ver}.x support, but the cluster runs ' 'Elasticsearch v{cluster_ver}.x.'.format( client_ver=client_ver, cluster_ver=cluster_ver, )) return f(*args, **kwargs) return inner
Decorator to check Elasticsearch version.
entailment
def init(force): """Initialize registered aliases and mappings.""" click.secho('Creating indexes...', fg='green', bold=True, file=sys.stderr) with click.progressbar( current_search.create(ignore=[400] if force else None), length=current_search.number_of_indexes) as bar: for name, response in bar: bar.label = name click.secho('Putting templates...', fg='green', bold=True, file=sys.stderr) with click.progressbar( current_search.put_templates(ignore=[400] if force else None), length=len(current_search.templates.keys())) as bar: for response in bar: bar.label = response
Initialize registered aliases and mappings.
entailment
def destroy(force): """Destroy all indexes.""" click.secho('Destroying indexes...', fg='red', bold=True, file=sys.stderr) with click.progressbar( current_search.delete(ignore=[400, 404] if force else None), length=current_search.number_of_indexes) as bar: for name, response in bar: bar.label = name
Destroy all indexes.
entailment
def create(index_name, body, force, verbose): """Create a new index.""" result = current_search_client.indices.create( index=index_name, body=json.load(body), ignore=[400] if force else None, ) if verbose: click.echo(json.dumps(result))
Create a new index.
entailment
def list_cmd(only_active, only_aliases, verbose):
    """List indices."""
    def _tree_print(d, rec_list=None, verbose=False, indent=2):
        # Note that on every recursion rec_list is copied,
        # which might not be very efficient for very deep dictionaries.
        rec_list = rec_list or []
        for idx, key in enumerate(sorted(d)):
            line = (['│' + ' ' * indent if i == 1 else ' ' * (indent + 1)
                     for i in rec_list])
            line.append('└──' if len(d) - 1 == idx else '├──')
            click.echo(''.join(line), nl=False)
            if isinstance(d[key], dict):
                click.echo(key)
                new_rec_list = rec_list + [0 if len(d) - 1 == idx else 1]
                _tree_print(d[key], new_rec_list, verbose)
            else:
                leaf_txt = '{} -> {}'.format(key, d[key]) if verbose else key
                click.echo(leaf_txt)

    aliases = (current_search.active_aliases if only_active
               else current_search.aliases)
    active_aliases = current_search.active_aliases

    if only_aliases:
        click.echo(json.dumps(list(aliases.keys()), indent=4))
    else:
        # Mark active indices for printout
        aliases = {(k + (' *' if k in active_aliases else '')): v
                   for k, v in aliases.items()}
        # _tree_print echoes as it goes, so call it directly rather than
        # echoing its None return value.
        _tree_print(aliases, verbose=verbose)
List indices.
entailment
def delete(index_name, force, verbose): """Delete index by its name.""" result = current_search_client.indices.delete( index=index_name, ignore=[400, 404] if force else None, ) if verbose: click.echo(json.dumps(result))
Delete index by its name.
entailment
def put(index_name, doc_type, identifier, body, force, verbose): """Index input data.""" result = current_search_client.index( index=index_name, doc_type=doc_type or index_name, id=identifier, body=json.load(body), op_type='index' if force or identifier is None else 'create', ) if verbose: click.echo(json.dumps(result))
Index input data.
entailment
def get_records(self, ids):
    """Return records by their identifiers.

    :param ids: A list of record identifiers.
    :returns: A list of records.
    """
    return self.query(Ids(values=[str(id_) for id_ in ids]))
Return records by their identifiers. :param ids: A list of record identifiers. :returns: A list of records.
entailment
def faceted_search(cls, query=None, filters=None, search=None): """Return faceted search instance with defaults set. :param query: Elastic DSL query object (``Q``). :param filters: Dictionary with selected facet values. :param search: An instance of ``Search`` class. (default: ``cls()``). """ search_ = search or cls() class RecordsFacetedSearch(FacetedSearch): """Pass defaults from ``cls.Meta`` object.""" index = prefix_index(app=current_app, index=search_._index[0]) doc_types = getattr(search_.Meta, 'doc_types', ['_all']) fields = getattr(search_.Meta, 'fields', ('*', )) facets = getattr(search_.Meta, 'facets', {}) def search(self): """Use ``search`` or ``cls()`` instead of default Search.""" # Later versions of `elasticsearch-dsl` (>=5.1.0) changed the # Elasticsearch FacetedResponse class constructor signature. if ES_VERSION[0] > 2: return search_.response_class(FacetedResponse) return search_.response_class(partial(FacetedResponse, self)) return RecordsFacetedSearch(query=query, filters=filters or {})
Return faceted search instance with defaults set. :param query: Elastic DSL query object (``Q``). :param filters: Dictionary with selected facet values. :param search: An instance of ``Search`` class. (default: ``cls()``).
entailment
def with_preference_param(self):
    """Add the preference param to the ES request and return a new Search.

    The preference param avoids the bouncing effect with multiple
    replicas; see the Elasticsearch documentation for details:
    https://www.elastic.co/guide/en/elasticsearch/guide/current
    /_search_options.html#_preference
    """
    user_hash = self._get_user_hash()
    if user_hash:
        return self.params(preference=user_hash)
    return self
Add the preference param to the ES request and return a new Search. The preference param avoids the bouncing effect with multiple replicas; see the Elasticsearch documentation for details: https://www.elastic.co/guide/en/elasticsearch/guide/current /_search_options.html#_preference
entailment
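A short sketch of the preference param in use, assuming invenio-search's ``RecordsSearch`` export; inside a request, repeated queries from one user hash to the same shard copies:

from invenio_search import RecordsSearch  # assumed import path

# Within a Flask request context, pin this user's searches to the same
# replicas so result order stays stable across pages.
search = RecordsSearch().with_preference_param().query(
    'query_string', query='open access')
response = search.execute()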
def _get_user_agent(self): """Retrieve the request's User-Agent, if available. Taken from Flask Login utils.py. """ user_agent = request.headers.get('User-Agent') if user_agent: user_agent = user_agent.encode('utf-8') return user_agent or ''
Retrieve the request's User-Agent, if available. Taken from Flask Login utils.py.
entailment
def _get_user_hash(self): """Calculate a digest based on request's User-Agent and IP address.""" if request: user_hash = '{ip}-{ua}'.format(ip=request.remote_addr, ua=self._get_user_agent()) alg = hashlib.md5() alg.update(user_hash.encode('utf8')) return alg.hexdigest() return None
Calculate a digest based on request's User-Agent and IP address.
entailment
def beautify(filename=None, json_str=None):
    """Beautify JSON string or file.

    Keyword arguments:
    :param filename: load JSON from this file instead of using the
        json_str param.
    :param json_str: parsed JSON data to beautify (it is passed straight
        to ``json.dumps``).
    """
    if filename is not None:
        with open(filename) as json_file:
            json_str = json.load(json_file)

    return json.dumps(json_str, indent=4, sort_keys=True)
Beautify JSON string or file. Keyword arguments: :param filename: load JSON from this file instead of using the json_str param. :param json_str: parsed JSON data to beautify (it is passed straight to ``json.dumps``).
entailment
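A quick usage note: despite its name, ``json_str`` is handed straight to ``json.dumps``, so pass already-parsed data (or use ``filename``):

data = {"b": 1, "a": {"d": 4, "c": 3}}
# Prints the keys sorted and indented by four spaces.
print(beautify(json_str=data))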
def replace(pretty, old_str, new_str):
    """Replace old_str with new_str, logging each line where a
    replacement was made."""
    out_str = ''
    line_number = 1
    changes = 0
    for line in pretty.splitlines(keepends=True):
        new_line = line.replace(old_str, new_str)
        if line.find(old_str) != -1:
            logging.debug('%s', line_number)
            logging.debug('< %s', line)
            logging.debug('> %s', new_line)
            changes += 1
        out_str += new_line
        line_number += 1
    logging.info('Total changes(%s): %s', old_str, changes)
    return out_str
Replace old_str with new_str, logging each line where a replacement was made.
entailment
def receive_connection():
    """Wait for and then return a connected socket.

    Opens a TCP connection on port 8080, and waits for a single client.
    """
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind(("localhost", 8080))
    server.listen(1)
    client = server.accept()[0]
    server.close()
    return client
Wait for and then return a connected socket. Opens a TCP connection on port 8080, and waits for a single client.
entailment
def send_message(client, message): """Send message to client and close the connection.""" print(message) client.send("HTTP/1.1 200 OK\r\n\r\n{}".format(message).encode("utf-8")) client.close()
Send message to client and close the connection.
entailment
def main(): """Provide the program's entry point when directly executed.""" if len(sys.argv) < 2: print("Usage: {} SCOPE...".format(sys.argv[0])) return 1 authenticator = prawcore.TrustedAuthenticator( prawcore.Requestor("prawcore_refresh_token_example"), os.environ["PRAWCORE_CLIENT_ID"], os.environ["PRAWCORE_CLIENT_SECRET"], os.environ["PRAWCORE_REDIRECT_URI"], ) state = str(random.randint(0, 65000)) url = authenticator.authorize_url("permanent", sys.argv[1:], state) print(url) client = receive_connection() data = client.recv(1024).decode("utf-8") param_tokens = data.split(" ", 2)[1].split("?", 1)[1].split("&") params = { key: value for (key, value) in [token.split("=") for token in param_tokens] } if state != params["state"]: send_message( client, "State mismatch. Expected: {} Received: {}".format( state, params["state"] ), ) return 1 elif "error" in params: send_message(client, params["error"]) return 1 authorizer = prawcore.Authorizer(authenticator) authorizer.authorize(params["code"]) send_message(client, "Refresh token: {}".format(authorizer.refresh_token)) return 0
Provide the program's entry point when directly executed.
entailment
def watch(logger_name, level=DEBUG, out=stdout):
    """ Quick wrapper for using the Watcher.

    :param logger_name: name of logger to watch
    :param level: minimum log level to show (default DEBUG)
    :param out: where to send output (default stdout)
    :return: Watcher instance
    """
    watcher = Watcher(logger_name)
    watcher.watch(level, out)
    return watcher
Quick wrapper for using the Watcher. :param logger_name: name of logger to watch :param level: minimum log level to show (default DEBUG) :param out: where to send output (default stdout) :return: Watcher instance
entailment
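A hedged usage sketch, assuming the ``watch`` helper is importable from neobolt's diagnostics module:

from logging import DEBUG
from sys import stderr

from neobolt.diagnostics import watch  # assumed import path

# Stream neobolt's internal logging to stderr while debugging a session.
watcher = watch("neobolt", level=DEBUG, out=stderr)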
def get_user_agent(): """ Obtain the default user agent string sent to the server after a successful handshake. """ from sys import platform, version_info template = "neobolt/{} Python/{}.{}.{}-{}-{} ({})" fields = (version,) + tuple(version_info) + (platform,) return template.format(*fields)
Obtain the default user agent string sent to the server after a successful handshake.
entailment
def import_best(c_module, py_module): """ Import the best available module, with C preferred to pure Python. """ from importlib import import_module from os import getenv pure_python = getenv("PURE_PYTHON", "") if pure_python: return import_module(py_module) else: try: return import_module(c_module) except ImportError: return import_module(py_module)
Import the best available module, with C preferred to pure Python.
entailment
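A self-contained illustration of ``import_best``; the module pair here (the C-backed ``ujson`` versus the stdlib ``json``) is my example, not what neobolt actually selects:

# Falls back to the stdlib json module when ujson is not installed
# (or when the PURE_PYTHON environment variable is set).
json_mod = import_best("ujson", "json")
print(json_mod.dumps({"fast": True}))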
def hydrate(self, values): """ Convert PackStream values into native values. """ def hydrate_(obj): if isinstance(obj, Structure): try: f = self.hydration_functions[obj.tag] except KeyError: # If we don't recognise the structure type, just return it as-is return obj else: return f(*map(hydrate_, obj.fields)) elif isinstance(obj, list): return list(map(hydrate_, obj)) elif isinstance(obj, dict): return {key: hydrate_(value) for key, value in obj.items()} else: return obj return tuple(map(hydrate_, values))
Convert PackStream values into native values.
entailment
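A minimal sketch of hydration, assuming ``Structure`` is importable from neobolt's packstream package and that Bolt tags node structures with ``b"N"``; the hydrator class here is a stand-in, not neobolt's own:

from neobolt.packstream import Structure  # assumed import path

class DemoHydrator:
    """Stand-in exposing hydration_functions for the hydrate() above."""
    hydrate = hydrate  # reuse the function defined above as a method

    def __init__(self):
        # Map the node structure tag to a plain tuple constructor.
        self.hydration_functions = {
            b"N": lambda i, labels, props: ("node", i, labels, props)}

node, = DemoHydrator().hydrate(
    [Structure(b"N", 1, ["Person"], {"name": "Alice"})])
print(node)  # ('node', 1, ['Person'], {'name': 'Alice'})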
def authorize_url(self, duration, scopes, state, implicit=False):
    """Return the URL used out-of-band to grant access to your application.

    :param duration: Either ``permanent`` or ``temporary``. ``temporary``
        authorizations generate access tokens that last only 1
        hour. ``permanent`` authorizations additionally generate a refresh
        token that can be indefinitely used to generate new hour-long
        access tokens. Only ``temporary`` can be specified if ``implicit``
        is set to ``True``.
    :param scopes: A list of OAuth scopes to request authorization for.
    :param state: A string that will be reflected in the callback to
        ``redirect_uri``. This value should be temporarily unique to the
        client for whom the URL was generated.
    :param implicit: (optional) Use the implicit grant flow (default:
        False). This flow is only available for UntrustedAuthenticators.

    """
    if self.redirect_uri is None:
        raise InvalidInvocation("redirect URI not provided")
    if implicit and not isinstance(self, UntrustedAuthenticator):
        raise InvalidInvocation(
            "Only UntrustedAuthenticator instances can "
            "use the implicit grant flow."
        )
    if implicit and duration != "temporary":
        raise InvalidInvocation(
            "The implicit grant flow only supports "
            "temporary access tokens."
        )

    params = {
        "client_id": self.client_id,
        "duration": duration,
        "redirect_uri": self.redirect_uri,
        "response_type": "token" if implicit else "code",
        "scope": " ".join(scopes),
        "state": state,
    }
    url = self._requestor.reddit_url + const.AUTHORIZATION_PATH
    request = Request("GET", url, params=params)
    return request.prepare().url
Return the URL used out-of-band to grant access to your application. :param duration: Either ``permanent`` or ``temporary``. ``temporary`` authorizations generate access tokens that last only 1 hour. ``permanent`` authorizations additionally generate a refresh token that can be indefinitely used to generate new hour-long access tokens. Only ``temporary`` can be specified if ``implicit`` is set to ``True``. :param scopes: A list of OAuth scopes to request authorization for. :param state: A string that will be reflected in the callback to ``redirect_uri``. This value should be temporarily unique to the client for whom the URL was generated. :param implicit: (optional) Use the implicit grant flow (default: False). This flow is only available for UntrustedAuthenticators.
entailment
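End-to-end, ``authorize_url`` slots into the flow from the earlier example; the credentials and redirect URI below are placeholders:

import random

import prawcore

authenticator = prawcore.TrustedAuthenticator(
    prawcore.Requestor("prawcore_auth_url_example"),
    "CLIENT_ID",      # placeholder
    "CLIENT_SECRET",  # placeholder
    "http://localhost:8080",
)
state = str(random.randint(0, 65000))
print(authenticator.authorize_url("permanent", ["identity", "read"], state))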
def revoke_token(self, token, token_type=None): """Ask Reddit to revoke the provided token. :param token: The access or refresh token to revoke. :param token_type: (Optional) When provided, hint to Reddit what the token type is for a possible efficiency gain. The value can be either ``access_token`` or ``refresh_token``. """ data = {"token": token} if token_type is not None: data["token_type_hint"] = token_type url = self._requestor.reddit_url + const.REVOKE_TOKEN_PATH self._post(url, success_status=codes["no_content"], **data)
Ask Reddit to revoke the provided token. :param token: The access or refresh token to revoke. :param token_type: (Optional) When provided, hint to Reddit what the token type is for a possible efficiency gain. The value can be either ``access_token`` or ``refresh_token``.
entailment
def revoke(self): """Revoke the current Authorization.""" if self.access_token is None: raise InvalidInvocation("no token available to revoke") self._authenticator.revoke_token(self.access_token, "access_token") self._clear_access_token()
Revoke the current Authorization.
entailment
def authorize(self, code): """Obtain and set authorization tokens based on ``code``. :param code: The code obtained by an out-of-band authorization request to Reddit. """ if self._authenticator.redirect_uri is None: raise InvalidInvocation("redirect URI not provided") self._request_token( code=code, grant_type="authorization_code", redirect_uri=self._authenticator.redirect_uri, )
Obtain and set authorization tokens based on ``code``. :param code: The code obtained by an out-of-band authorization request to Reddit.
entailment