def calculate_colorbar(self):
    """
    Returns the positions and colors of all intervals inside the colorbar.
    """
    self._base._process_values()
    self._base._find_range()
    X, Y = self._base._mesh()
    C = self._base._values[:, np.newaxis]
    return X, Y, C
Returns the positions and colors of all intervals inside the colorbar.
entailment
def calculate_ticks(self):
    """
    Returns the sequence of ticks (colorbar data locations),
    ticklabels (strings), and the corresponding offset string.
    """
    current_version = packaging.version.parse(matplotlib.__version__)
    critical_version = packaging.version.parse('3.0.0')
    if current_version > critical_version:
        locator, formatter = self._base._get_ticker_locator_formatter()
        return self._base._ticker(locator, formatter)
    else:
        return self._base._ticker()
Returns the sequence of ticks (colorbar data locations), ticklabels (strings), and the corresponding offset string.
entailment
def get_media_formats(self, media_id):
    """CR doesn't seem to provide the video_format and video_quality
    params through any of the APIs so we have to scrape the video page
    """
    url = (SCRAPER.API_URL + 'media-' + media_id).format(
        protocol=SCRAPER.PROTOCOL_INSECURE)
    format_pattern = re.compile(SCRAPER.VIDEO.FORMAT_PATTERN)
    formats = {}
    for format, param in iteritems(SCRAPER.VIDEO.FORMAT_PARAMS):
        resp = self._connector.get(url, params={param: '1'})
        if not resp.ok:
            continue
        try:
            match = format_pattern.search(resp.content)
        except TypeError:
            match = format_pattern.search(resp.text)
        if match:
            formats[format] = (int(match.group(1)), int(match.group(2)))
    return formats
CR doesn't seem to provide the video_format and video_quality params through any of the APIs so we have to scrape the video page
entailment
def parse_nbt(literal):
    """Parse a literal nbt string and return the resulting tag."""
    parser = Parser(tokenize(literal))
    tag = parser.parse()
    cursor = parser.token_span[1]
    leftover = literal[cursor:]
    if leftover.strip():
        parser.token_span = cursor, cursor + len(leftover)
        raise parser.error(f'Expected end of string but got {leftover!r}')
    return tag
Parse a literal nbt string and return the resulting tag.
entailment
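A quick usage sketch for parse_nbt. The literal syntax and tag behavior are assumptions based on the parser methods shown in this section (suffixed numbers go through NUMBER_SUFFIXES, and trailing input is rejected):

# Hypothetical usage; assumes parse_nbt and the tag classes from this
# module are importable.
tag = parse_nbt('{name: "hello", count: 3b, values: [1, 2, 3]}')
print(tag['count'])        # a byte-sized numeric tag, via NUMBER_SUFFIXES['b']
parse_nbt('{a: 1} extra')  # raises: Expected end of string but got ' extra'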
def tokenize(string):
    """Match and yield all the tokens of the input string."""
    for match in TOKENS_REGEX.finditer(string):
        yield Token(match.lastgroup, match.group().strip(), match.span())
Match and yield all the tokens of the input string.
entailment
def next(self):
    """Move to the next token in the token stream."""
    self.current_token = next(self.token_stream, None)
    if self.current_token is None:
        self.token_span = self.token_span[1], self.token_span[1]
        raise self.error('Unexpected end of input')
    self.token_span = self.current_token.span
    return self
Move to the next token in the token stream.
entailment
def parse(self):
    """Parse and return an nbt literal from the token stream."""
    token_type = self.current_token.type.lower()
    handler = getattr(self, f'parse_{token_type}', None)
    if handler is None:
        raise self.error(f'Invalid literal {self.current_token.value!r}')
    return handler()
Parse and return an nbt literal from the token stream.
entailment
def parse_number(self):
    """Parse a number from the token stream."""
    value = self.current_token.value
    suffix = value[-1].lower()
    try:
        if suffix in NUMBER_SUFFIXES:
            return NUMBER_SUFFIXES[suffix](value[:-1])
        return Double(value) if '.' in value else Int(value)
    except (OutOfRange, ValueError):
        return String(value)
Parse a number from the token stream.
entailment
def parse_string(self):
    """Parse a regular unquoted string from the token stream."""
    aliased_value = LITERAL_ALIASES.get(self.current_token.value.lower())
    if aliased_value is not None:
        return aliased_value
    return String(self.current_token.value)
Parse a regular unquoted string from the token stream.
entailment
def collect_tokens_until(self, token_type):
    """Yield the item tokens in a comma-separated tag collection."""
    self.next()
    if self.current_token.type == token_type:
        return
    while True:
        yield self.current_token
        self.next()
        if self.current_token.type == token_type:
            return
        if self.current_token.type != 'COMMA':
            raise self.error(f'Expected comma but got '
                             f'{self.current_token.value!r}')
        self.next()
Yield the item tokens in a comma-separated tag collection.
entailment
def parse_compound(self):
    """Parse a compound from the token stream."""
    compound_tag = Compound()
    for token in self.collect_tokens_until('CLOSE_COMPOUND'):
        item_key = token.value
        if token.type not in ('NUMBER', 'STRING', 'QUOTED_STRING'):
            raise self.error(f'Expected compound key but got {item_key!r}')
        if token.type == 'QUOTED_STRING':
            item_key = self.unquote_string(item_key)
        if self.next().current_token.type != 'COLON':
            raise self.error(f'Expected colon but got '
                             f'{self.current_token.value!r}')
        self.next()
        compound_tag[item_key] = self.parse()
    return compound_tag
Parse a compound from the token stream.
entailment
def array_items(self, number_type, *, number_suffix=''):
    """Parse and yield array items from the token stream."""
    for token in self.collect_tokens_until('CLOSE_BRACKET'):
        is_number = token.type == 'NUMBER'
        value = token.value.lower()
        if not (is_number and value.endswith(number_suffix)):
            raise self.error(f'Invalid {number_type} array element '
                             f'{token.value!r}')
        yield int(value.replace(number_suffix, ''))
Parse and yield array items from the token stream.
entailment
def parse_list(self):
    """Parse a list from the token stream."""
    try:
        return List([self.parse()
                     for _ in self.collect_tokens_until('CLOSE_BRACKET')])
    except IncompatibleItemType as exc:
        raise self.error(f'Item {str(exc.item)!r} is not a '
                         f'{exc.subtype.__name__} tag') from None
Parse a list from the token stream.
entailment
def unquote_string(self, string):
    """Return the unquoted value of a quoted string."""
    value = string[1:-1]
    forbidden_sequences = {ESCAPE_SUBS[STRING_QUOTES[string[0]]]}
    valid_sequences = set(ESCAPE_SEQUENCES) - forbidden_sequences
    for seq in ESCAPE_REGEX.findall(value):
        if seq not in valid_sequences:
            raise self.error(f'Invalid escape sequence "{seq}"')
    for seq, sub in ESCAPE_SEQUENCES.items():
        value = value.replace(seq, sub)
    return value
Return the unquoted value of a quoted string.
entailment
def opener_from_zipfile(zipfile):
    """
    Returns a function that will open a file in a zipfile by name.

    For Python3 compatibility, the raw file will be converted to text.
    """
    def opener(filename):
        inner_file = zipfile.open(filename)
        if PY3:
            from io import TextIOWrapper
            return TextIOWrapper(inner_file)
        else:
            return inner_file
    return opener
Returns a function that will open a file in a zipfile by name. For Python3 compatibility, the raw file will be converted to text.
entailment
def write_text_rows(writer, rows):
    '''Write CSV row data which may include text.'''
    for row in rows:
        try:
            writer.writerow(row)
        except UnicodeEncodeError:
            # Python 2 csv does badly with unicode outside of ASCII
            new_row = []
            for item in row:
                if isinstance(item, text_type):
                    new_row.append(item.encode('utf-8'))
                else:
                    new_row.append(item)
            writer.writerow(new_row)
Write CSV row data which may include text.
entailment
def serialize_tag(tag, *, indent=None, compact=False, quote=None):
    """Serialize an nbt tag to its literal representation."""
    serializer = Serializer(indent=indent, compact=compact, quote=quote)
    return serializer.serialize(tag)
Serialize an nbt tag to its literal representation.
entailment
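A minimal sketch of calling serialize_tag, assuming Compound, String, Int, and List are the tag classes the serializer methods below operate on; the indent keyword is what drives depth() and should_expand():

# Hypothetical usage; tag classes assumed from the same module.
data = Compound({'name': String('example'), 'scores': List([Int(1), Int(2)])})
print(serialize_tag(data))            # compact one-line literal
print(serialize_tag(data, indent=4))  # expanded form, one item per line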
def depth(self):
    """Increase the level of indentation by one."""
    if self.indentation is None:
        yield
    else:
        previous = self.previous_indent
        self.previous_indent = self.indent
        self.indent += self.indentation
        yield
        self.indent = self.previous_indent
        self.previous_indent = previous
Increase the level of indentation by one.
entailment
def should_expand(self, tag):
    """Return whether the specified tag should be expanded."""
    return self.indentation is not None and tag and (
        not self.previous_indent or (
            tag.serializer == 'list'
            and tag.subtype.serializer in ('array', 'list', 'compound')
        ) or (
            tag.serializer == 'compound'
        )
    )
Return whether the specified tag should be expanded.
entailment
def escape_string(self, string):
    """Return the escaped literal representation of an nbt string."""
    if self.quote:
        quote = self.quote
    else:
        found = QUOTE_REGEX.search(string)
        quote = (STRING_QUOTES[found.group()] if found
                 else next(iter(STRING_QUOTES)))
    for match, seq in ESCAPE_SUBS.items():
        if match == quote or match not in STRING_QUOTES:
            string = string.replace(match, seq)
    return f'{quote}{string}{quote}'
Return the escaped literal representation of an nbt string.
entailment
def stringify_compound_key(self, key):
    """Escape the compound key if it can't be represented unquoted."""
    if UNQUOTED_COMPOUND_KEY.match(key):
        return key
    return self.escape_string(key)
Escape the compound key if it can't be represented unquoted.
entailment
def serialize(self, tag):
    """Return the literal representation of a tag."""
    handler = getattr(self, f'serialize_{tag.serializer}', None)
    if handler is None:
        raise TypeError(f'Can\'t serialize {type(tag)!r} instance')
    return handler(tag)
Return the literal representation of a tag.
entailment
def serialize_numeric(self, tag):
    """Return the literal representation of a numeric tag."""
    str_func = int.__str__ if isinstance(tag, int) else float.__str__
    return str_func(tag) + tag.suffix
Return the literal representation of a numeric tag.
entailment
def serialize_array(self, tag):
    """Return the literal representation of an array tag."""
    elements = self.comma.join(f'{el}{tag.item_suffix}' for el in tag)
    return f'[{tag.array_prefix}{self.semicolon}{elements}]'
Return the literal representation of an array tag.
entailment
def serialize_list(self, tag):
    """Return the literal representation of a list tag."""
    separator, fmt = self.comma, '[{}]'
    with self.depth():
        if self.should_expand(tag):
            separator, fmt = self.expand(separator, fmt)
        return fmt.format(separator.join(map(self.serialize, tag)))
Return the literal representation of a list tag.
entailment
def serialize_compound(self, tag):
    """Return the literal representation of a compound tag."""
    separator, fmt = self.comma, '{{{}}}'
    with self.depth():
        if self.should_expand(tag):
            separator, fmt = self.expand(separator, fmt)
        return fmt.format(separator.join(
            f'{self.stringify_compound_key(key)}{self.colon}{self.serialize(value)}'
            for key, value in tag.items()
        ))
Return the literal representation of a compound tag.
entailment
def populated_column_map(self):
    '''Return the _column_map without unused optional fields'''
    column_map = []
    cls = self.model
    for csv_name, field_pattern in cls._column_map:
        # Separate the local field name from foreign columns
        if '__' in field_pattern:
            field_name = field_pattern.split('__', 1)[0]
        else:
            field_name = field_pattern
        # Handle point fields
        point_match = re_point.match(field_name)
        if point_match:
            field = None
        else:
            field = cls._meta.get_field(field_name)
        # Only add optional columns if they are used in the records
        if field and field.blank and not field.has_default():
            kwargs = {field_name: get_blank_value(field)}
            if self.exclude(**kwargs).exists():
                column_map.append((csv_name, field_pattern))
        else:
            column_map.append((csv_name, field_pattern))
    return column_map
Return the _column_map without unused optional fields
entailment
def in_feed(self, feed):
    '''Return the objects in the target feed'''
    kwargs = {self.model._rel_to_feed: feed}
    return self.filter(**kwargs)
Return the objects in the target feed
entailment
def import_txt(cls, txt_file, feed, filter_func=None):
    '''Import from the GTFS text file'''

    # Setup the conversion from GTFS to Django Format
    # Conversion functions
    def no_convert(value):
        return value

    def date_convert(value):
        return datetime.strptime(value, '%Y%m%d')

    def bool_convert(value):
        return (value == '1')

    def char_convert(value):
        return (value or '')

    def null_convert(value):
        return (value or None)

    def point_convert(value):
        """Convert latitude / longitude, strip leading +."""
        if value.startswith('+'):
            return value[1:]
        else:
            return (value or 0.0)

    cache = {}

    def default_convert(field):
        def get_value_or_default(value):
            if value == '' or value is None:
                return field.get_default()
            else:
                return value
        return get_value_or_default

    def instance_convert(field, feed, rel_name):
        def get_instance(value):
            if value.strip():
                related = field.related_model
                key1 = "{}:{}".format(related.__name__, rel_name)
                key2 = text_type(value)
                # Load existing objects
                if key1 not in cache:
                    pairs = related.objects.filter(
                        **{related._rel_to_feed: feed}).values_list(
                        rel_name, 'id')
                    cache[key1] = dict((text_type(x), i) for x, i in pairs)
                # Create new?
                if key2 not in cache[key1]:
                    kwargs = {
                        related._rel_to_feed: feed,
                        rel_name: value}
                    cache[key1][key2] = related.objects.create(
                        **kwargs).id
                return cache[key1][key2]
            else:
                return None
        return get_instance

    # Check unique fields
    column_names = [c for c, _ in cls._column_map]
    for unique_field in cls._unique_fields:
        assert unique_field in column_names, \
            '{} not in {}'.format(unique_field, column_names)

    # Map of field_name to converters from GTFS to Django format
    val_map = dict()
    name_map = dict()
    point_map = dict()
    for csv_name, field_pattern in cls._column_map:
        # Separate the local field name from foreign columns
        if '__' in field_pattern:
            field_base, rel_name = field_pattern.split('__', 1)
            field_name = field_base + '_id'
        else:
            field_name = field_base = field_pattern
        # Use the field name in the name mapping
        name_map[csv_name] = field_name
        # Is it a point field?
        point_match = re_point.match(field_name)
        if point_match:
            field = None
        else:
            field = cls._meta.get_field(field_base)
        # Pick a conversion function for the field
        if point_match:
            converter = point_convert
        elif isinstance(field, models.DateField):
            converter = date_convert
        elif isinstance(field, models.BooleanField):
            converter = bool_convert
        elif isinstance(field, models.CharField):
            converter = char_convert
        elif field.is_relation:
            converter = instance_convert(field, feed, rel_name)
            assert not isinstance(field, models.ManyToManyField)
        elif field.null:
            converter = null_convert
        elif field.has_default():
            converter = default_convert(field)
        else:
            converter = no_convert
        if point_match:
            index = int(point_match.group('index'))
            point_map[csv_name] = (index, converter)
        else:
            val_map[csv_name] = converter

    # Read and convert the source txt
    csv_reader = reader(txt_file, skipinitialspace=True)
    unique_line = dict()
    count = 0
    first = True
    extra_counts = defaultdict(int)
    new_objects = []
    for row in csv_reader:
        if first:
            # Read the columns
            columns = row
            if columns[0].startswith(CSV_BOM):
                columns[0] = columns[0][len(CSV_BOM):]
            first = False
            continue
        if filter_func and not filter_func(zip(columns, row)):
            continue
        if not row:
            continue
        # Read a data row
        fields = dict()
        point_coords = [None, None]
        ukey_values = {}
        if cls._rel_to_feed == 'feed':
            fields['feed'] = feed
        for column_name, value in zip(columns, row):
            if column_name not in name_map:
                val = null_convert(value)
                if val is not None:
                    fields.setdefault('extra_data', {})[column_name] = val
                    extra_counts[column_name] += 1
            elif column_name in val_map:
                fields[name_map[column_name]] = val_map[column_name](value)
            else:
                assert column_name in point_map
                pos, converter = point_map[column_name]
                point_coords[pos] = converter(value)
            # Is it part of the unique key?
            if column_name in cls._unique_fields:
                ukey_values[column_name] = value
        # Join the lat/long into a point
        if point_map:
            assert point_coords[0] and point_coords[1]
            fields['point'] = "POINT(%s)" % (' '.join(point_coords))
        # Is the item unique?
        ukey = tuple(ukey_values.get(u) for u in cls._unique_fields)
        if ukey in unique_line:
            logger.warning(
                '%s line %d is a duplicate of line %d, not imported.',
                cls._filename, csv_reader.line_num, unique_line[ukey])
            continue
        else:
            unique_line[ukey] = csv_reader.line_num
        # Create after accumulating a batch
        new_objects.append(cls(**fields))
        if len(new_objects) % batch_size == 0:  # pragma: no cover
            cls.objects.bulk_create(new_objects)
            count += len(new_objects)
            logger.info(
                "Imported %d %s",
                count, cls._meta.verbose_name_plural)
            new_objects = []
    # Create remaining objects
    if new_objects:
        cls.objects.bulk_create(new_objects)
    # Take note of extra fields
    if extra_counts:
        extra_columns = feed.meta.setdefault(
            'extra_columns', {}).setdefault(cls.__name__, [])
        for column in columns:
            if column in extra_counts and column not in extra_columns:
                extra_columns.append(column)
        feed.save()
    return len(unique_line)
Import from the GTFS text file
entailment
def export_txt(cls, feed):
    '''Export records as a GTFS comma-separated file'''
    objects = cls.objects.in_feed(feed)
    # If no records, return None
    if not objects.exists():
        return
    # Get the columns used in the dataset
    column_map = objects.populated_column_map()
    columns, fields = zip(*column_map)
    extra_columns = feed.meta.get(
        'extra_columns', {}).get(cls.__name__, [])
    # Get sort order
    if hasattr(cls, '_sort_order'):
        sort_fields = cls._sort_order
    else:
        sort_fields = []
        for field in fields:
            base_field = field.split('__', 1)[0]
            point_match = re_point.match(base_field)
            if point_match:
                continue
            field_type = cls._meta.get_field(base_field)
            assert not isinstance(field_type, ManyToManyField)
            sort_fields.append(field)
    # Create CSV writer
    out = StringIO()
    csv_writer = writer(out, lineterminator='\n')
    # Write header row
    header_row = [text_type(c) for c in columns]
    header_row.extend(extra_columns)
    write_text_rows(csv_writer, [header_row])
    # Report the work to be done
    total = objects.count()
    logger.info(
        '%d %s to export...',
        total, cls._meta.verbose_name_plural)
    # Populate related items cache
    model_to_field_name = {}
    cache = {}
    for field_name in fields:
        if '__' in field_name:
            local_field_name, subfield_name = field_name.split('__', 1)
            field = cls._meta.get_field(local_field_name)
            field_type = field.related_model
            model_name = field_type.__name__
            if model_name in model_to_field_name:
                # Already loaded this model under a different field name
                cache[field_name] = cache[model_to_field_name[model_name]]
            else:
                # Load all feed data for this model
                pairs = field_type.objects.in_feed(
                    feed).values_list('id', subfield_name)
                cache[field_name] = dict(
                    (i, text_type(x)) for i, x in pairs)
                cache[field_name][None] = u''
                model_to_field_name[model_name] = field_name
    # Assemble the rows, writing when we hit batch size
    count = 0
    rows = []
    for item in objects.order_by(*sort_fields).iterator():
        row = []
        for csv_name, field_name in column_map:
            obj = item
            point_match = re_point.match(field_name)
            if '__' in field_name:
                # Return relations from cache
                local_field_name = field_name.split('__', 1)[0]
                field_id = getattr(obj, local_field_name + '_id')
                row.append(cache[field_name][field_id])
            elif point_match:
                # Get the lat or long from the point
                name, index = point_match.groups()
                field = getattr(obj, name)
                row.append(field.coords[int(index)])
            else:
                # Handle other field types
                field = getattr(obj, field_name) if obj else ''
                if isinstance(field, date):
                    formatted = field.strftime(u'%Y%m%d')
                    row.append(text_type(formatted))
                elif isinstance(field, bool):
                    row.append(1 if field else 0)
                elif field is None:
                    row.append(u'')
                else:
                    row.append(text_type(field))
        for col in extra_columns:
            row.append(obj.extra_data.get(col, u''))
        rows.append(row)
        if len(rows) % batch_size == 0:  # pragma: no cover
            write_text_rows(csv_writer, rows)
            count += len(rows)
            logger.info(
                "Exported %d %s",
                count, cls._meta.verbose_name_plural)
            rows = []
    # Write rows smaller than batch size
    write_text_rows(csv_writer, rows)
    return out.getvalue()
Export records as a GTFS comma-separated file
entailment
def make_android_api_method(req_method, secure=True, version=0):
    """Turn an AndroidApi's method into a function that builds the request,
    sends it, then passes the response to the actual method. Should be
    used as a decorator.
    """
    def outer_func(func):
        def inner_func(self, **kwargs):
            req_url = self._build_request_url(secure, func.__name__, version)
            req_func = self._build_request(req_method, req_url, params=kwargs)
            response = req_func()
            func(self, response)
            return response
        return inner_func
    return outer_func
Turn an AndroidApi's method into a function that builds the request, sends it, then passes the response to the actual method. Should be used as a decorator.
entailment
def _get_base_params(self):
    """Get the params that will be included with every request
    """
    base_params = {
        'locale': self._get_locale(),
        'device_id': ANDROID.DEVICE_ID,
        'device_type': ANDROID.APP_PACKAGE,
        'access_token': ANDROID.ACCESS_TOKEN,
        'version': ANDROID.APP_CODE,
    }
    base_params.update(dict((k, v)
                            for k, v in iteritems(self._state_params)
                            if v is not None))
    return base_params
Get the params that will be included with every request
entailment
def _build_request_url(self, secure, api_method, version):
    """Build a URL for an API method request
    """
    if secure:
        proto = ANDROID.PROTOCOL_SECURE
    else:
        proto = ANDROID.PROTOCOL_INSECURE
    req_url = ANDROID.API_URL.format(
        protocol=proto,
        api_method=api_method,
        version=version
    )
    return req_url
Build a URL for an API method request
entailment
def is_premium(self, media_type):
    """Get if the session is premium for a given media type

    @param str media_type   Should be one of ANDROID.MEDIA_TYPE_*
    @return bool
    """
    if self.logged_in:
        if media_type in self._user_data['premium']:
            return True
    return False
Get if the session is premium for a given media type @param str media_type Should be one of ANDROID.MEDIA_TYPE_* @return bool
entailment
def login(self, response):
    """
    Login using email/username and password, used to get the auth token

    @param str account
    @param str password
    @param int duration (optional)
    """
    self._state_params['auth'] = response['auth']
    self._user_data = response['user']
    if not self.logged_in:
        raise ApiLoginFailure(response)
Login using email/username and password, used to get the auth token @param str account @param str password @param int duration (optional)
entailment
def read_numeric(fmt, buff, byteorder='big'):
    """Read a numeric value from a file-like object."""
    try:
        fmt = fmt[byteorder]
        return fmt.unpack(buff.read(fmt.size))[0]
    except StructError:
        return 0
    except KeyError as exc:
        raise ValueError('Invalid byte order') from exc
Read a numeric value from a file-like object.
entailment
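The fmt argument is a mapping from byte-order name to a pre-compiled struct.Struct, which is why read_numeric indexes it with byteorder before unpacking. A self-contained sketch; the exact layout of the USHORT table is an assumption:

import struct
from io import BytesIO

# Assumed shape of the format tables: one pre-compiled Struct per byte order.
USHORT = {'big': struct.Struct('>H'), 'little': struct.Struct('<H')}

buff = BytesIO(b'\x01\x00')
print(read_numeric(USHORT, buff, 'big'))     # 256
buff.seek(0)
print(read_numeric(USHORT, buff, 'little'))  # 1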
def write_numeric(fmt, value, buff, byteorder='big'):
    """Write a numeric value to a file-like object."""
    try:
        buff.write(fmt[byteorder].pack(value))
    except KeyError as exc:
        raise ValueError('Invalid byte order') from exc
Write a numeric value to a file-like object.
entailment
def read_string(buff, byteorder='big'):
    """Read a string from a file-like object."""
    length = read_numeric(USHORT, buff, byteorder)
    return buff.read(length).decode('utf-8')
Read a string from a file-like object.
entailment
def write_string(value, buff, byteorder='big'):
    """Write a string to a file-like object."""
    data = value.encode('utf-8')
    write_numeric(USHORT, len(data), buff, byteorder)
    buff.write(data)
Write a string to a file-like object.
entailment
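A round-trip sketch for the string helpers: write_string emits a USHORT length prefix followed by UTF-8 bytes, which is exactly what read_string consumes (assumes the USHORT table sketched above):

from io import BytesIO

# Assumes the USHORT struct table from the read_numeric sketch above.
buff = BytesIO()
write_string('héllo', buff)   # 2-byte length prefix, then 6 bytes of UTF-8
buff.seek(0)
assert read_string(buff) == 'héllo'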
def infer_list_subtype(items):
    """Infer a list subtype from a collection of items."""
    subtype = End
    for item in items:
        item_type = type(item)
        if not issubclass(item_type, Base):
            continue
        if subtype is End:
            subtype = item_type
            if not issubclass(subtype, List):
                return subtype
        elif subtype is not item_type:
            stype, itype = subtype, item_type
            generic = List
            while issubclass(stype, List) and issubclass(itype, List):
                stype, itype = stype.subtype, itype.subtype
                generic = List[generic]
            if stype is End:
                subtype = item_type
            elif itype is not End:
                return generic.subtype
    return subtype
Infer a list subtype from a collection of items.
entailment
def cast_item(cls, item):
    """Cast list item to the appropriate tag type."""
    if not isinstance(item, cls.subtype):
        incompatible = isinstance(item, Base) and not any(
            issubclass(cls.subtype, tag_type) and isinstance(item, tag_type)
            for tag_type in cls.all_tags.values()
        )
        if incompatible:
            raise IncompatibleItemType(item, cls.subtype)
        try:
            return cls.subtype(item)
        except EndInstantiation:
            raise ValueError('List tags without an explicit subtype must '
                             'either be empty or instantiated with '
                             'elements from which a subtype can be '
                             'inferred') from None
        except (IncompatibleItemType, CastError):
            raise
        except Exception as exc:
            raise CastError(item, cls.subtype) from exc
    return item
Cast list item to the appropriate tag type.
entailment
def merge(self, other):
    """Recursively merge tags from another compound."""
    for key, value in other.items():
        if key in self and (isinstance(self[key], Compound)
                            and isinstance(value, dict)):
            self[key].merge(value)
        else:
            self[key] = value
Recursively merge tags from another compound.
entailment
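merge recurses only when both sides are mapping-like and overwrites leaves otherwise. A self-contained re-implementation of the same semantics on plain dicts (not the library's Compound class) makes that easy to verify:

# Plain-dict sketch of the same merge semantics, for illustration only.
def merge_dicts(base, other):
    for key, value in other.items():
        if key in base and isinstance(base[key], dict) and isinstance(value, dict):
            merge_dicts(base[key], value)   # recurse into nested mappings
        else:
            base[key] = value               # leaf values are overwritten

d = {'a': {'x': 1}, 'b': 2}
merge_dicts(d, {'a': {'y': 3}, 'b': 9})
assert d == {'a': {'x': 1, 'y': 3}, 'b': 9}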
def decrypt_subtitle(self, subtitle):
    """Decrypt encrypted subtitle data in high level model object

    @param crunchyroll.models.Subtitle subtitle
    @return str
    """
    return self.decrypt(self._build_encryption_key(int(subtitle.id)),
                        subtitle['iv'][0].text.decode('base64'),
                        subtitle['data'][0].text.decode('base64'))
Decrypt encrypted subtitle data in high level model object @param crunchyroll.models.Subtitle subtitle @return str
entailment
def decrypt(self, encryption_key, iv, encrypted_data):
    """Decrypt encrypted subtitle data

    @param str encryption_key
    @param str iv
    @param str encrypted_data
    @return str
    """
    logger.info('Decrypting subtitles with length (%d bytes), key=%r',
                len(encrypted_data), encryption_key)
    return zlib.decompress(aes_decrypt(encryption_key, iv, encrypted_data))
Decrypt encrypted subtitle data @param str encryption_key @param str iv @param str encrypted_data @return str
entailment
def _build_encryption_key(self, subtitle_id, key_size=ENCRYPTION_KEY_SIZE):
    """Generate the encryption key for a given media item

    Encryption key is basically just
    sha1(<magic value based on subtitle_id> + '"#$&).6CXzPHw=2N_+isZK')
    then padded with 0s to 32 chars

    @param int subtitle_id
    @param int key_size
    @return str
    """
    # generate a 160-bit SHA1 hash
    sha1_hash = hashlib.new('sha1', self._build_hash_secret((1, 2)) +
                            self._build_hash_magic(subtitle_id)).digest()
    # pad to 256-bit hash for 32 byte key
    sha1_hash += '\x00' * max(key_size - len(sha1_hash), 0)
    return sha1_hash[:key_size]
Generate the encryption key for a given media item Encryption key is basically just sha1(<magic value based on subtitle_id> + '"#$&).6CXzPHw=2N_+isZK') then padded with 0s to 32 chars @param int subtitle_id @param int key_size @return str
entailment
def _build_hash_magic(self, subtitle_id):
    """Build the other half of the encryption key hash

    I have no idea what is going on here

    @param int subtitle_id
    @return str
    """
    media_magic = self.HASH_MAGIC_CONST ^ subtitle_id
    hash_magic = media_magic ^ media_magic >> 3 ^ media_magic * 32
    return str(hash_magic)
Build the other half of the encryption key hash I have no idea what is going on here @param int subtitle_id @return str
entailment
def _build_hash_secret(self, seq_seed, seq_len=HASH_SECRET_LENGTH,
                       mod_value=HASH_SECRET_MOD_CONST):
    """Build a seed for the hash based on the Fibonacci sequence

    Take first `seq_len` + len(`seq_seed`) characters of Fibonacci
    sequence, starting with `seq_seed`, and applying
    e % `mod_value` + `HASH_SECRET_CHAR_OFFSET` to the resulting
    sequence, then return as a string

    @param tuple|list seq_seed
    @param int seq_len
    @param int mod_value
    @return str
    """
    # make sure we use a list, tuples are immutable
    fbn_seq = list(seq_seed)
    for i in range(seq_len):
        fbn_seq.append(fbn_seq[-1] + fbn_seq[-2])
    hash_secret = list(map(
        lambda c: chr(c % mod_value + self.HASH_SECRET_CHAR_OFFSET),
        fbn_seq[2:]))
    return ''.join(hash_secret)
Build a seed for the hash based on the Fibonacci sequence Take first `seq_len` + len(`seq_seed`) characters of Fibonacci sequence, starting with `seq_seed`, and applying e % `mod_value` + `HASH_SECRET_CHAR_OFFSET` to the resulting sequence, then return as a string @param tuple|list seq_seed @param int seq_len @param int mod_value @return str
entailment
def format(self, subtitles):
    """Turn a string containing the subs xml document into the
    formatted subtitle string

    @param str|crunchyroll.models.StyledSubtitle sub_xml_text
    @return str
    """
    logger.debug('Formatting subtitles (id=%s) with %s',
                 subtitles.id, self.__class__.__name__)
    return self._format(subtitles).encode('utf-8')
Turn a string containing the subs xml document into the formatted subtitle string @param str|crunchyroll.models.StyledSubtitle sub_xml_text @return str
entailment
def require_session_started(func):
    """Check if API sessions are started and start them if not
    """
    @functools.wraps(func)
    def inner_func(self, *pargs, **kwargs):
        if not self.session_started:
            logger.info('Starting session for required meta method')
            self.start_session()
        return func(self, *pargs, **kwargs)
    return inner_func
Check if API sessions are started and start them if not
entailment
def require_android_logged_in(func):
    """Check if android API is logged in and login if not, implies
    `require_session_started`
    """
    @functools.wraps(func)
    @require_session_started
    def inner_func(self, *pargs, **kwargs):
        if not self._android_api.logged_in:
            logger.info('Logging into android API for required meta method')
            if not self.has_credentials:
                raise ApiLoginFailure(
                    'Login is required but no credentials were provided')
            self._android_api.login(account=self._state['username'],
                                    password=self._state['password'])
        return func(self, *pargs, **kwargs)
    return inner_func
Check if android API is logged in and login if not, implies `require_session_started`
entailment
def optional_manga_logged_in(func):
    """Check if android manga API is logged in and login if credentials
    were provided, implies `require_session_started`
    """
    @functools.wraps(func)
    @require_session_started
    def inner_func(self, *pargs, **kwargs):
        if not self._manga_api.logged_in and self.has_credentials:
            logger.info('Logging into android manga API for optional '
                        'meta method')
            self._manga_api.cr_login(account=self._state['username'],
                                     password=self._state['password'])
        return func(self, *pargs, **kwargs)
    return inner_func
Check if android manga API is logged in and login if credentials were provided, implies `require_session_started`
entailment
def require_ajax_logged_in(func):
    """Check if ajax API is logged in and login if not
    """
    @functools.wraps(func)
    def inner_func(self, *pargs, **kwargs):
        if not self._ajax_api.logged_in:
            logger.info('Logging into AJAX API for required meta method')
            if not self.has_credentials:
                raise ApiLoginFailure(
                    'Login is required but no credentials were provided')
            self._ajax_api.User_Login(name=self._state['username'],
                                      password=self._state['password'])
        return func(self, *pargs, **kwargs)
    return inner_func
Check if ajax API is logged in and login if not
entailment
def start_session(self):
    """Start the underlying APIs sessions

    Calling this is not required, it will be called automatically if
    a method that needs a session is called

    @return bool
    """
    self._android_api.start_session()
    self._manga_api.cr_start_session()
    return self.session_started
Start the underlying APIs sessions Calling this is not required, it will be called automatically if a method that needs a session is called @return bool
entailment
def login(self, username, password):
    """Login with the given username/email and password

    Calling this method is not required if credentials were provided
    in the constructor, but it could be used to switch users or
    something maybe

    @return bool
    """
    # we could get stuck in an inconsistent state if got an exception
    # while trying to login with different credentials than what is
    # stored so we rollback the state to prevent that
    state_snapshot = self._state.copy()
    try:
        self._ajax_api.User_Login(name=username, password=password)
        self._android_api.login(account=username, password=password)
        self._manga_api.cr_login(account=username, password=password)
    except Exception as err:
        # something went wrong, rollback
        self._state = state_snapshot
        raise err
    self._state['username'] = username
    self._state['password'] = password
    return self.logged_in
Login with the given username/email and password Calling this method is not required if credentials were provided in the constructor, but it could be used to switch users or something maybe @return bool
entailment
def list_anime_series(self, sort=META.SORT_ALPHA, limit=META.MAX_SERIES,
                      offset=0):
    """Get a list of anime series

    @param str sort     pick how results should be sorted, should be
                        one of META.SORT_*
    @param int limit    limit number of series to return, there
                        doesn't seem to be an upper bound
    @param int offset   list series starting from this offset, for
                        pagination
    @return list<crunchyroll.models.Series>
    """
    result = self._android_api.list_series(
        media_type=ANDROID.MEDIA_TYPE_ANIME,
        filter=sort,
        limit=limit,
        offset=offset)
    return result
Get a list of anime series @param str sort pick how results should be sorted, should be one of META.SORT_* @param int limit limit number of series to return, there doesn't seem to be an upper bound @param int offset list series starting from this offset, for pagination @return list<crunchyroll.models.Series>
entailment
def list_drama_series(self, sort=META.SORT_ALPHA, limit=META.MAX_SERIES,
                      offset=0):
    """Get a list of drama series

    @param str sort     pick how results should be sorted, should be
                        one of META.SORT_*
    @param int limit    limit number of series to return, there
                        doesn't seem to be an upper bound
    @param int offset   list series starting from this offset, for
                        pagination
    @return list<crunchyroll.models.Series>
    """
    result = self._android_api.list_series(
        media_type=ANDROID.MEDIA_TYPE_DRAMA,
        filter=sort,
        limit=limit,
        offset=offset)
    return result
Get a list of drama series @param str sort pick how results should be sorted, should be one of META.SORT_* @param int limit limit number of series to return, there doesn't seem to be an upper bound @param int offset list series starting from this offset, for pagination @return list<crunchyroll.models.Series>
entailment
def list_manga_series(self, filter=None, content_type='jp_manga'):
    """Get a list of manga series
    """
    result = self._manga_api.list_series(filter, content_type)
    return result
Get a list of manga series
entailment
def search_anime_series(self, query_string):
    """Search anime series list by series name, case-sensitive

    @param str query_string     string to search for, note that the
                                search is very simplistic and only
                                matches against the start of the series
                                name, ex) search for "space" matches
                                "Space Brothers" but wouldn't match
                                "Brothers Space"
    @return list<crunchyroll.models.Series>
    """
    result = self._android_api.list_series(
        media_type=ANDROID.MEDIA_TYPE_ANIME,
        filter=ANDROID.FILTER_PREFIX + query_string)
    return result
Search anime series list by series name, case-sensitive @param str query_string string to search for, note that the search is very simplistic and only matches against the start of the series name, ex) search for "space" matches "Space Brothers" but wouldn't match "Brothers Space" @return list<crunchyroll.models.Series>
entailment
def search_drama_series(self, query_string):
    """Search drama series list by series name, case-sensitive

    @param str query_string     string to search for, note that the
                                search is very simplistic and only
                                matches against the start of the series
                                name, ex) search for "space" matches
                                "Space Brothers" but wouldn't match
                                "Brothers Space"
    @return list<crunchyroll.models.Series>
    """
    result = self._android_api.list_series(
        media_type=ANDROID.MEDIA_TYPE_DRAMA,
        filter=ANDROID.FILTER_PREFIX + query_string)
    return result
Search drama series list by series name, case-sensitive @param str query_string string to search for, note that the search is very simplistic and only matches against the start of the series name, ex) search for "space" matches "Space Brothers" but wouldn't match "Brothers Space" @return list<crunchyroll.models.Series>
entailment
def search_manga_series(self, query_string):
    """Search the manga series list by name, case-insensitive

    @param str query_string
    @return list<crunchyroll.models.Series>
    """
    result = self._manga_api.list_series()
    return [series for series in result
            if series['locale']['enUS']['name'].lower().startswith(
                query_string.lower())]
Search the manga series list by name, case-insensitive @param str query_string @return list<crunchyroll.models.Series>
entailment
def list_media(self, series, sort=META.SORT_DESC, limit=META.MAX_MEDIA,
               offset=0):
    """List media for a given series or collection

    @param crunchyroll.models.Series series     the series to search for
    @param str sort     choose the ordering of the results, only
                        META.SORT_DESC is known to work
    @param int limit    limit size of results
    @param int offset   start results from this index, for pagination
    @return list<crunchyroll.models.Media>
    """
    params = {
        'sort': sort,
        'offset': offset,
        'limit': limit,
    }
    params.update(self._get_series_query_dict(series))
    result = self._android_api.list_media(**params)
    return result
List media for a given series or collection @param crunchyroll.models.Series series the series to search for @param str sort choose the ordering of the results, only META.SORT_DESC is known to work @param int limit limit size of results @param int offset start results from this index, for pagination @return list<crunchyroll.models.Media>
entailment
def search_media(self, series, query_string):
    """Search for media from a series starting with query_string,
    case-sensitive

    @param crunchyroll.models.Series series     the series to search in
    @param str query_string     the search query, same restrictions
                                as `search_anime_series`
    @return list<crunchyroll.models.Media>
    """
    params = {
        'sort': ANDROID.FILTER_PREFIX + query_string,
    }
    params.update(self._get_series_query_dict(series))
    result = self._android_api.list_media(**params)
    return result
Search for media from a series starting with query_string, case-sensitive @param crunchyroll.models.Series series the series to search in @param str query_string the search query, same restrictions as `search_anime_series` @return list<crunchyroll.models.Media>
entailment
def get_media_stream(self, media_item, format, quality):
    """Get the stream data for a given media item

    @param crunchyroll.models.Media media_item
    @param int format
    @param int quality
    @return crunchyroll.models.MediaStream
    """
    result = self._ajax_api.VideoPlayer_GetStandardConfig(
        media_id=media_item.media_id,
        video_format=format,
        video_quality=quality)
    return MediaStream(result)
Get the stream data for a given media item @param crunchyroll.models.Media media_item @param int format @param int quality @return crunchyroll.models.MediaStream
entailment
def unfold_subtitle_stub(self, subtitle_stub):
    """Turn a SubtitleStub into a full Subtitle object

    @param crunchyroll.models.SubtitleStub subtitle_stub
    @return crunchyroll.models.Subtitle
    """
    return Subtitle(self._ajax_api.Subtitle_GetXml(
        subtitle_script_id=int(subtitle_stub.id)))
Turn a SubtitleStub into a full Subtitle object @param crunchyroll.models.SubtitleStub subtitle_stub @return crunchyroll.models.Subtitle
entailment
def get_stream_formats(self, media_item):
    """Get the available media formats for a given media item

    @param crunchyroll.models.Media
    @return dict
    """
    scraper = ScraperApi(self._ajax_api._connector)
    formats = scraper.get_media_formats(media_item.media_id)
    return formats
Get the available media formats for a given media item @param crunchyroll.models.Media @return dict
entailment
def list_queue(self, media_types=[META.TYPE_ANIME, META.TYPE_DRAMA]):
    """List the series in the queue, optionally filtering by type of
    media

    @param list<str> media_types    a list of media types to filter
                                    the queue with, should be of
                                    META.TYPE_*
    @return list<crunchyroll.models.Series>
    """
    result = self._android_api.queue(media_types='|'.join(media_types))
    return [queue_item['series'] for queue_item in result]
List the series in the queue, optionally filtering by type of media @param list<str> media_types a list of media types to filter the queue with, should be of META.TYPE_* @return list<crunchyroll.models.Series>
entailment
def add_to_queue(self, series):
    """Add a series to the queue

    @param crunchyroll.models.Series series
    @return bool
    """
    result = self._android_api.add_to_queue(series_id=series.series_id)
    return result
Add a series to the queue @param crunchyroll.models.Series series @return bool
entailment
def remove_from_queue(self, series):
    """Remove a series from the queue

    @param crunchyroll.models.Series series
    @return bool
    """
    result = self._android_api.remove_from_queue(series_id=series.series_id)
    return result
Remove a series from the queue @param crunchyroll.models.Series series @return bool
entailment
def schema(name, dct, *, strict=False):
    """Create a compound tag schema.

    This function is a short convenience function that makes it easy
    to subclass the base `CompoundSchema` class.

    The `name` argument is the name of the class and `dct` should be a
    dictionary containing the actual schema. The schema should map keys
    to tag types or other compound schemas.

    If the `strict` keyword-only argument is set to True, interacting
    with keys that are not defined in the schema will raise a
    `TypeError`.
    """
    return type(name, (CompoundSchema,), {'__slots__': (), 'schema': dct,
                                          'strict': strict})
Create a compound tag schema. This function is a short convenience function that makes it easy to subclass the base `CompoundSchema` class. The `name` argument is the name of the class and `dct` should be a dictionary containing the actual schema. The schema should map keys to tag types or other compound schemas. If the `strict` keyword-only argument is set to True, interacting with keys that are not defined in the schema will raise a `TypeError`.
entailment
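A usage sketch for schema(); Int and String stand in for whatever tag types the surrounding module provides, and the nested-cast behavior is an assumption based on cast_item below:

# Hypothetical usage with assumed tag types Int and String.
Position = schema('Position', {'x': Int, 'y': Int})
Player = schema('Player', {'name': String, 'pos': Position}, strict=True)

p = Player({'name': 'steve', 'pos': {'x': 1, 'y': 2}})  # values cast per schema
p['unknown'] = 1  # raises TypeError, because strict=True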
def cast_item(cls, key, value):
    """Cast schema item to the appropriate tag type."""
    schema_type = cls.schema.get(key)
    if schema_type is None:
        if cls.strict:
            raise TypeError(f'Invalid key {key!r}')
    elif not isinstance(value, schema_type):
        try:
            return schema_type(value)
        except CastError:
            raise
        except Exception as exc:
            raise CastError(value, schema_type) from exc
    return value
Cast schema item to the appropriate tag type.
entailment
def obsolete_rename(oldname, newfunc):
    """
    Simple obsolete/removed method decorator

    Parameters
    ----------
    oldname : str
        The name of the old obsolete name
    newfunc : FunctionType
        Replacement unbound member function.
    """
    newname = newfunc.__name__

    def __obsolete(*args, **kwargs):
        warnings.warn(
            "{oldname} is obsolete and is removed in PyQt5. "
            "Use {newname} instead.".format(oldname=oldname,
                                            newname=newname),
            DeprecationWarning, stacklevel=2
        )
        return newfunc(*args, **kwargs)
    __obsolete.__name__ = oldname
    return __obsolete
Simple obsolete/removed method decorator Parameters ---------- oldname : str The name of the old obsolete name newfunc : FunctionType Replacement unbound member function.
entailment
def call(command, silent=False):
    """ Runs a bash command safely, with shell=false, catches any
    non-zero return codes. Raises slightly modified CalledProcessError
    exceptions on failures.
    Note: command is a string and cannot include pipes."""
    try:
        if silent:
            with open(os.devnull, 'w') as FNULL:
                return subprocess.check_call(command_to_array(command),
                                             stdout=FNULL)
        else:
            # Using the defaults, shell=False, no i/o redirection.
            return check_call(command_to_array(command))
    except CalledProcessError as e:
        # We are modifying the error itself for 2 reasons. 1) it WILL
        # contain login credentials when run_mongodump is run,
        # 2) CalledProcessError is slightly not-to-spec (the message
        # variable is blank), which means cronutils.ErrorHandler would
        # report unlabeled stack traces.
        e.message = "%s failed with error code %s" % (e.cmd[0], e.returncode)
        e.cmd = e.cmd[0] + " [arguments stripped for security]"
        raise e
Runs a bash command safely, with shell=false, catches any non-zero return codes. Raises slightly modified CalledProcessError exceptions on failures. Note: command is a string and cannot include pipes.
entailment
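Typical calls look like the sketch below. Because the command string is split into an argv list (command_to_array presumably shlex-splits it) and shell=False, shell features such as pipes are never interpreted:

call("ls -l /tmp")               # raises CalledProcessError on non-zero exit
call("ls -l /tmp", silent=True)  # stdout discarded via os.devnull
call("ls /tmp | wc -l")          # fails: '|' is passed to ls as a literal arg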
def tarbz(source_directory_path, output_file_full_path, silent=False):
    """ Tars and bzips a directory, preserving as much metadata as
    possible. Adds '.tbz' to the provided output file name. """
    output_directory_path = output_file_full_path.rsplit("/", 1)[0]
    create_folders(output_directory_path)
    # Note: default compression for bzip is supposed to be -9,
    # highest compression.
    full_tar_file_path = output_file_full_path + ".tbz"
    if path.exists(full_tar_file_path):
        raise Exception("%s already exists, aborting." % (full_tar_file_path))
    # preserve permissions, create file, use files (not tape devices),
    # preserve access time. tar is the only program in the universe to
    # use (dstn, src).
    tar_command = ("tar jpcfvC %s %s %s"
                   % (full_tar_file_path, source_directory_path, "./"))
    call(tar_command, silent=silent)
    return full_tar_file_path
Tars and bzips a directory, preserving as much metadata as possible. Adds '.tbz' to the provided output file name.
entailment
def untarbz(source_file_path, output_directory_path, silent=False):
    """ Restores your mongo database backup from a .tbz created using
    this library.

    This function will ensure that a directory is created at the file
    path if one does not exist already.

    If used in conjunction with this library's mongodump operation,
    the backup data will be extracted directly into the provided
    directory path.

    This command will fail if the output directory is not empty as
    existing files with identical names are not overwritten by tar. """
    if not path.exists(source_file_path):
        raise Exception("the provided tar file %s does not exist."
                        % (source_file_path))
    # note: slice [0:2] grabs the leading "./"; the original [0:1]
    # one-character slice could never match it
    if output_directory_path[0:2] == "./":
        output_directory_path = path.abspath(output_directory_path)
    if output_directory_path[0] != "/":
        raise Exception("your output directory path must start with '/' "
                        "or './'; you used: %s" % (output_directory_path))
    create_folders(output_directory_path)
    if listdir(output_directory_path):
        raise Exception("Your output directory isn't empty. Aborting as "
                        "existing files are not overwritten by tar.")
    untar_command = ("tar jxfvkCp %s %s --atime-preserve "
                     % (source_file_path, output_directory_path))
    call(untar_command, silent=silent)
Restores your mongo database backup from a .tbz created using this library. This function will ensure that a directory is created at the file path if one does not exist already. If used in conjunction with this library's mongodump operation, the backup data will be extracted directly into the provided directory path. This command will fail if the output directory is not empty as existing files with identical names are not overwritten by tar.
entailment
def value_contains(self, value, attribute):
    """
    Determine if any of the items in the value list for the given
    attribute contain value.
    """
    for item in self[attribute]:
        if value in item:
            return True
    return False
Determine if any of the items in the value list for the given attribute contain value.
entailment
def clear_search_defaults(self, args=None):
    """
    Clear all search defaults specified by the list of parameter names
    given as ``args``. If ``args`` is not given, then clear all
    existing search defaults.

    Examples::

        conn.set_search_defaults(scope=ldap.SCOPE_BASE, attrs=['cn'])
        conn.clear_search_defaults(['scope'])
        conn.clear_search_defaults()
    """
    if args is None:
        self._search_defaults.clear()
    else:
        for arg in args:
            if arg in self._search_defaults:
                del self._search_defaults[arg]
Clear all search defaults specified by the list of parameter names given as ``args``. If ``args`` is not given, then clear all existing search defaults. Examples:: conn.set_search_defaults(scope=ldap.SCOPE_BASE, attrs=['cn']) conn.clear_search_defaults(['scope']) conn.clear_search_defaults()
entailment
def search(self, filter, base_dn=None, attrs=None, scope=None,
           timeout=None, limit=None):
    """
    Search the directory.
    """
    if base_dn is None:
        base_dn = self._search_defaults.get('base_dn', '')
    if attrs is None:
        attrs = self._search_defaults.get('attrs', None)
    if scope is None:
        scope = self._search_defaults.get('scope', ldap.SCOPE_SUBTREE)
    if timeout is None:
        timeout = self._search_defaults.get('timeout', -1)
    if limit is None:
        limit = self._search_defaults.get('limit', 0)
    results = self.connection.search_ext_s(
        base_dn, scope, filter, attrs, timeout=timeout, sizelimit=limit)
    return self.to_items(results)
Search the directory.
entailment
def get(self, *args, **kwargs):
    """
    Get a single object.

    This is a convenience wrapper for the search method that checks
    that only one object was returned, and returns that single object
    instead of a list. This method takes the exact same arguments as
    search.
    """
    results = self.search(*args, **kwargs)
    num_results = len(results)
    if num_results == 1:
        return results[0]
    if num_results > 1:
        raise MultipleObjectsFound()
    raise ObjectNotFound()
Get a single object. This is a convenience wrapper for the search method that checks that only one object was returned, and returns that single object instead of a list. This method takes the exact same arguments as search.
entailment
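A sketch of the intended call pattern, with a hypothetical connection object and LDAP filter; the value of the wrapper is that exactly-one is enforced:

# Hypothetical usage: conn is an instance of the class defining get/search.
try:
    entry = conn.get('(uid=jdoe)', base_dn='ou=people,dc=example,dc=com')
except ObjectNotFound:
    entry = None   # zero matches
except MultipleObjectsFound:
    raise          # more than one match is a caller error here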
def authenticate(self, dn='', password=''):
    """
    Attempt to authenticate given dn and password using a bind
    operation. Return True if the bind is successful, and return False
    if there was an exception raised that is contained in
    self.failed_authentication_exceptions.
    """
    try:
        self.connection.simple_bind_s(dn, password)
    except tuple(self.failed_authentication_exceptions):
        return False
    else:
        return True
Attempt to authenticate given dn and password using a bind operation. Return True if the bind is successful, and return False if there was an exception raised that is contained in self.failed_authentication_exceptions.
entailment
def compare(self, dn, attr, value):
    """
    Compare the ``attr`` of the entry ``dn`` with given ``value``.

    This is a convenience wrapper for the ldap library's ``compare``
    function that returns a boolean value instead of 1 or 0.
    """
    return self.connection.compare_s(dn, attr, value) == 1
Compare the ``attr`` of the entry ``dn`` with given ``value``. This is a convenience wrapper for the ldap library's ``compare`` function that returns a boolean value instead of 1 or 0.
entailment
def get_property_func(key):
    """
    Get the accessor function for an instance to look for `key`.

    Look for it as an attribute, and if that does not work, look to
    see if it is a tag.
    """
    def get_it(obj):
        try:
            return getattr(obj, key)
        except AttributeError:
            return obj.tags.get(key)
    return get_it
Get the accessor function for an instance to look for `key`. Look for it as an attribute, and if that does not work, look to see if it is a tag.
entailment
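Because the returned closure falls back from attributes to tags, the same accessor works for real fields and user-assigned tags alike. A self-contained sketch with a stand-in object:

# Stand-in for a boto instance object, for illustration only.
class FakeInstance:
    id = 'i-1234'
    tags = {'Name': 'web-01'}

get_id = get_property_func('id')
get_name = get_property_func('Name')
print(get_id(FakeInstance()))    # 'i-1234' (found as an attribute)
print(get_name(FakeInstance()))  # 'web-01' (falls back to the tags dict)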
def list_billing(region, filter_by_kwargs):
    """List available billing metrics"""
    conn = boto.ec2.cloudwatch.connect_to_region(region)
    metrics = conn.list_metrics(metric_name='EstimatedCharges')
    # Filtering is based on metric Dimensions. Only really valuable
    # one is ServiceName.
    if filter_by_kwargs:
        filter_key = filter_by_kwargs.keys()[0]
        filter_value = filter_by_kwargs.values()[0]
        if filter_value:
            filtered_metrics = [
                x for x in metrics
                if x.dimensions.get(filter_key)
                and x.dimensions.get(filter_key)[0] == filter_value]
        else:
            # ServiceName=''
            filtered_metrics = [x for x in metrics
                                if not x.dimensions.get(filter_key)]
    else:
        filtered_metrics = metrics
    return filtered_metrics
List available billing metrics
entailment
def list_cloudfront(region, filter_by_kwargs):
    """List CloudFront distributions."""
    conn = boto.connect_cloudfront()
    instances = conn.get_all_distributions()
    return lookup(instances, filter_by=filter_by_kwargs)
List CloudFront distributions.
entailment
def list_ec2(region, filter_by_kwargs):
    """List running ec2 instances."""
    conn = boto.ec2.connect_to_region(region)
    instances = conn.get_only_instances()
    return lookup(instances, filter_by=filter_by_kwargs)
List running ec2 instances.
entailment
def list_ebs(region, filter_by_kwargs):
    """List running ebs volumes."""
    conn = boto.ec2.connect_to_region(region)
    instances = conn.get_all_volumes()
    return lookup(instances, filter_by=filter_by_kwargs)
List running ebs volumes.
entailment
def list_elb(region, filter_by_kwargs):
    """List all load balancers."""
    conn = boto.ec2.elb.connect_to_region(region)
    instances = conn.get_all_load_balancers()
    return lookup(instances, filter_by=filter_by_kwargs)
List all load balancers.
entailment
def list_rds(region, filter_by_kwargs):
    """List all RDS thingys."""
    conn = boto.rds.connect_to_region(region)
    instances = conn.get_all_dbinstances()
    return lookup(instances, filter_by=filter_by_kwargs)
List all RDS thingys.
entailment
def list_elasticache(region, filter_by_kwargs):
    """List all ElastiCache Clusters."""
    conn = boto.elasticache.connect_to_region(region)
    req = conn.describe_cache_clusters()
    data = req["DescribeCacheClustersResponse"]["DescribeCacheClustersResult"]["CacheClusters"]
    if filter_by_kwargs:
        clusters = [x['CacheClusterId'] for x in data
                    if x[filter_by_kwargs.keys()[0]] == filter_by_kwargs.values()[0]]
    else:
        clusters = [x['CacheClusterId'] for x in data]
    return clusters
List all ElastiCache Clusters.
entailment
def list_autoscaling_group(region, filter_by_kwargs):
    """List all Auto Scaling Groups."""
    conn = boto.ec2.autoscale.connect_to_region(region)
    groups = conn.get_all_groups()
    return lookup(groups, filter_by=filter_by_kwargs)
List all Auto Scaling Groups.
entailment
def list_sqs(region, filter_by_kwargs):
    """List all SQS Queues."""
    conn = boto.sqs.connect_to_region(region)
    queues = conn.get_all_queues()
    return lookup(queues, filter_by=filter_by_kwargs)
List all SQS Queues.
entailment
def list_kinesis_applications(region, filter_by_kwargs):
    """List all the kinesis applications along with the shards for
    each stream"""
    conn = boto.kinesis.connect_to_region(region)
    streams = conn.list_streams()['StreamNames']
    kinesis_streams = {}
    for stream_name in streams:
        shard_ids = []
        shards = conn.describe_stream(stream_name)['StreamDescription']['Shards']
        for shard in shards:
            shard_ids.append(shard['ShardId'])
        kinesis_streams[stream_name] = shard_ids
    return kinesis_streams
List all the kinesis applications along with the shards for each stream
entailment
def list_dynamodb(region, filter_by_kwargs):
    """List all DynamoDB tables."""
    conn = boto.dynamodb.connect_to_region(region)
    tables = conn.list_tables()
    return lookup(tables, filter_by=filter_by_kwargs)
List all DynamoDB tables.
entailment
def register(self, callback_id: str, handler: Any, name: str = "*") -> None:
    """
    Register a new handler for a specific
    :class:`slack.actions.Action` `callback_id`.

    Optional routing based on the action name too. The name argument
    is useful for actions of type `interactive_message` to provide a
    different handler for each individual action.

    Args:
        callback_id: Callback_id the handler is interested in
        handler: Callback
        name: Name of the action (optional).
    """
    LOG.info("Registering %s, %s to %s", callback_id, name, handler)
    if name not in self._routes[callback_id]:
        self._routes[callback_id][name] = []
    self._routes[callback_id][name].append(handler)
Register a new handler for a specific :class:`slack.actions.Action` `callback_id`. Optional routing based on the action name too. The name argument is useful for actions of type `interactive_message` to provide a different handler for each individual action. Args: callback_id: Callback_id the handler is interested in handler: Callback name: Name of the action (optional).
entailment
def dispatch(self, action: Action) -> Any:
    """
    Yields handlers matching the incoming
    :class:`slack.actions.Action` `callback_id`.

    Args:
        action: :class:`slack.actions.Action`

    Yields:
        handler
    """
    LOG.debug("Dispatching action %s, %s",
              action["type"], action["callback_id"])
    if action["type"] == "interactive_message":
        yield from self._dispatch_interactive_message(action)
    elif action["type"] in ("dialog_submission", "message_action"):
        yield from self._dispatch_action(action)
    else:
        raise UnknownActionType(action)
Yields handlers matching the incoming :class:`slack.actions.Action` `callback_id`. Args: action: :class:`slack.actions.Action` Yields: handler
entailment
def comittoapi(api):
    """
    Commit to the use of specified Qt api.

    Raise an error if another Qt api is already loaded in sys.modules
    """
    global USED_API
    assert USED_API is None, "committoapi called again!"
    check = ["PyQt4", "PyQt5", "PySide", "PySide2"]
    assert api in [QT_API_PYQT5, QT_API_PYQT4, QT_API_PYSIDE, QT_API_PYSIDE2]
    for name in check:
        if name.lower() != api and name in sys.modules:
            raise RuntimeError(
                "{} was already imported. Cannot commit to {}!"
                .format(name, api)
            )
    else:
        api = _intern(api)
        USED_API = api
        AnyQt.__SELECTED_API = api
        AnyQt.USED_API = api
Commit to the use of specified Qt api. Raise an error if another Qt api is already loaded in sys.modules
entailment
def get_metadata(dist):
    """
    Return dictionary of metadata for given dist

    @param dist: distribution
    @type dist: pkg_resources Distribution object

    @returns: dict of metadata or None
    """
    if not dist.has_metadata('PKG-INFO'):
        return
    msg = email.message_from_string(dist.get_metadata('PKG-INFO'))
    metadata = {}
    for header in [l for l in msg._headers]:
        metadata[header[0]] = header[1]
    return metadata
Return dictionary of metadata for given dist @param dist: distribution @type dist: pkg_resources Distribution object @returns: dict of metadata or None
entailment
def add_options(self, parser):
    """Add command-line options for this plugin.

    The base plugin class adds --with-$name by default, used to enable
    the plugin.
    """
    parser.add_option("--with-%s" % self.name,
                      action="store_true",
                      dest=self.enable_opt,
                      help="Enable plugin %s: %s" %
                      (self.__class__.__name__, self.help())
                      )
Add command-line options for this plugin. The base plugin class adds --with-$name by default, used to enable the plugin.
entailment
def configure(self, options, conf):
    """Configure the plugin and system, based on selected options.

    The base plugin class sets the plugin to enabled if the enable
    option for the plugin (self.enable_opt) is true.
    """
    self.conf = conf
    if hasattr(options, self.enable_opt):
        self.enabled = getattr(options, self.enable_opt)
Configure the plugin and system, based on selected options. The base plugin class sets the plugin to enabled if the enable option for the plugin (self.enable_opt) is true.
entailment
def help(self):
    """Return help for this plugin. This will be output as the help
    section of the --with-$name option that enables the plugin.
    """
    if self.__class__.__doc__:
        # doc sections are often indented; compress the spaces
        return textwrap.dedent(self.__class__.__doc__)
    return "(no help available)"
Return help for this plugin. This will be output as the help section of the --with-$name option that enables the plugin.
entailment
def raise_for_status(
    status: int, headers: MutableMapping, data: MutableMapping
) -> None:
    """
    Check request response status

    Args:
        status: Response status
        headers: Response headers
        data: Response data

    Raises:
        :class:`slack.exceptions.RateLimited`: For 429 status code
        :class:`slack.exceptions.HTTPException`:
    """
    if status != 200:
        if status == 429:
            if isinstance(data, str):
                error = data
            else:
                error = data.get("error", "ratelimited")
            try:
                retry_after = int(headers.get("Retry-After", 1))
            except ValueError:
                retry_after = 1
            raise exceptions.RateLimited(retry_after, error, status,
                                         headers, data)
        else:
            raise exceptions.HTTPException(status, headers, data)
Check request response status Args: status: Response status headers: Response headers data: Response data Raises: :class:`slack.exceptions.RateLimited`: For 429 status code :class:`slack.exceptions.HTTPException`:
entailment
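A sketch of the two failure paths with hand-built mappings; the exception attribute names are assumptions based on the constructor arguments shown above:

import slack.exceptions as exceptions

raise_for_status(200, {}, {})  # no-op on success

try:
    raise_for_status(429, {"Retry-After": "30"}, {"error": "ratelimited"})
except exceptions.RateLimited as exc:
    print(exc.retry_after)     # assumed attribute; 30, parsed from the header

try:
    raise_for_status(500, {}, {"error": "internal_error"})
except exceptions.HTTPException as exc:
    print(exc.status)          # assumed attribute; 500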