Dataset schema (column name, type, observed range):

    repository_name            string   7 to 55 chars
    func_path_in_repository    string   4 to 223 chars
    func_name                  string   1 to 134 chars
    whole_func_string          string   75 to 104k chars
    language                   string   1 distinct value
    func_code_string           string   75 to 104k chars
    func_code_tokens           list     19 to 28.4k items
    func_documentation_string  string   1 to 46.9k chars
    func_documentation_tokens  list     1 to 1.97k items
    split_name                 string   1 distinct value
    func_code_url              string   87 to 315 chars
ppinard/matplotlib-colorbar
matplotlib_colorbar/colorbar.py
ColorbarCalculator.calculate_colorbar
def calculate_colorbar(self):
    """
    Returns the positions and colors of all intervals inside the colorbar.
    """
    self._base._process_values()
    self._base._find_range()
    X, Y = self._base._mesh()
    C = self._base._values[:, np.newaxis]
    return X, Y, C
python
Returns the positions and colors of all intervals inside the colorbar.
train
https://github.com/ppinard/matplotlib-colorbar/blob/d53748c54dd18ba5183bff8da3f11cfd107282e6/matplotlib_colorbar/colorbar.py#L152-L160
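The only NumPy idiom in calculate_colorbar is the [:, np.newaxis] reshape, which turns the flat value array into a single-column matrix so each colorbar interval gets its own row. A minimal sketch of just that reshape (the _base calls are matplotlib internals and are not reproduced here):

import numpy as np

values = np.linspace(0.0, 1.0, 5)   # stand-in for self._base._values, shape (5,)
C = values[:, np.newaxis]           # shape (5, 1): one column entry per interval
print(values.shape, C.shape)        # (5,) (5, 1)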
ppinard/matplotlib-colorbar
matplotlib_colorbar/colorbar.py
ColorbarCalculator.calculate_ticks
def calculate_ticks(self):
    """
    Returns the sequence of ticks (colorbar data locations),
    ticklabels (strings), and the corresponding offset string.
    """
    current_version = packaging.version.parse(matplotlib.__version__)
    critical_version = packaging.version.parse('3.0.0')
    if current_version > critical_version:
        locator, formatter = self._base._get_ticker_locator_formatter()
        return self._base._ticker(locator, formatter)
    else:
        return self._base._ticker()
python
Returns the sequence of ticks (colorbar data locations), ticklabels (strings), and the corresponding offset string.
train
https://github.com/ppinard/matplotlib-colorbar/blob/d53748c54dd18ba5183bff8da3f11cfd107282e6/matplotlib_colorbar/colorbar.py#L162-L174
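calculate_ticks gates on a parsed version comparison rather than a string comparison, so e.g. '3.10' correctly sorts after '3.9'. A small sketch of the same pattern, assuming matplotlib and packaging are installed; note that with the strict > comparison, matplotlib 3.0.0 itself still takes the legacy branch:

import matplotlib
import packaging.version

current = packaging.version.parse(matplotlib.__version__)
critical = packaging.version.parse('3.0.0')
if current > critical:
    print('use the locator/formatter ticker API')
else:
    print('use the legacy _ticker() call')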
aheadley/python-crunchyroll
crunchyroll/apis/scraper.py
ScraperApi.get_media_formats
def get_media_formats(self, media_id):
    """CR doesn't seem to provide the video_format and video_quality
    params through any of the APIs so we have to scrape the video page
    """
    url = (SCRAPER.API_URL + 'media-' + media_id).format(
        protocol=SCRAPER.PROTOCOL_INSECURE)
    format_pattern = re.compile(SCRAPER.VIDEO.FORMAT_PATTERN)
    formats = {}
    for format, param in iteritems(SCRAPER.VIDEO.FORMAT_PARAMS):
        resp = self._connector.get(url, params={param: '1'})
        if not resp.ok:
            continue
        try:
            match = format_pattern.search(resp.content)
        except TypeError:
            match = format_pattern.search(resp.text)
        if match:
            formats[format] = (int(match.group(1)), int(match.group(2)))
    return formats
python
CR doesn't seem to provide the video_format and video_quality params through any of the APIs so we have to scrape the video page
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/scraper.py#L34-L53
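The try/except TypeError in get_media_formats exists because resp.content is bytes on Python 3, and searching bytes with a pattern compiled from a str raises TypeError; the code then retries against the decoded resp.text. A minimal demonstration (the pattern below is a made-up placeholder, not the real FORMAT_PATTERN):

import re

pattern = re.compile(r'(\d+)p(\d)')      # hypothetical str pattern

try:
    pattern.search(b'480p1')             # bytes haystack, str pattern
except TypeError as exc:
    print(exc)                           # cannot use a string pattern on a bytes-like object

print(pattern.search('480p1').groups())  # ('480', '1')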
vberlier/nbtlib
nbtlib/literal/parser.py
parse_nbt
def parse_nbt(literal):
    """Parse a literal nbt string and return the resulting tag."""
    parser = Parser(tokenize(literal))
    tag = parser.parse()
    cursor = parser.token_span[1]
    leftover = literal[cursor:]
    if leftover.strip():
        parser.token_span = cursor, cursor + len(leftover)
        raise parser.error(f'Expected end of string but got {leftover!r}')
    return tag
python
Parse a literal nbt string and return the resulting tag.
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/parser.py#L85-L96
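A short usage sketch for parse_nbt, assuming it is re-exported from the top-level nbtlib package (it is defined in nbtlib/literal/parser.py):

from nbtlib import parse_nbt

tag = parse_nbt('{name: "Example", count: 3b}')
print(type(tag).__name__)   # Compound
print(tag['count'])         # 3 (a Byte tag)

try:
    parse_nbt('{} trailing')
except Exception as exc:
    print(exc)              # Expected end of string but got ' trailing'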
vberlier/nbtlib
nbtlib/literal/parser.py
tokenize
def tokenize(string):
    """Match and yield all the tokens of the input string."""
    for match in TOKENS_REGEX.finditer(string):
        yield Token(match.lastgroup, match.group().strip(), match.span())
python
Match and yield all the tokens of the input string.
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/parser.py#L104-L107
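match.lastgroup only works because every alternative in TOKENS_REGEX is a named group: whichever group matched names the token type. A toy tokenizer built the same way (the pattern is illustrative, not nbtlib's real one):

import re
from collections import namedtuple

Token = namedtuple('Token', ['type', 'value', 'span'])
TOY_REGEX = re.compile(r'(?P<NUMBER>\d+)|(?P<COMMA>,)|(?P<STRING>[a-z]+)')

def toy_tokenize(string):
    # match.lastgroup is the name of the alternative that matched
    for match in TOY_REGEX.finditer(string):
        yield Token(match.lastgroup, match.group().strip(), match.span())

print(list(toy_tokenize('12, foo')))
# [Token(type='NUMBER', value='12', span=(0, 2)),
#  Token(type='COMMA', value=',', span=(2, 3)),
#  Token(type='STRING', value='foo', span=(4, 7))]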
vberlier/nbtlib
nbtlib/literal/parser.py
Parser.next
def next(self):
    """Move to the next token in the token stream."""
    self.current_token = next(self.token_stream, None)
    if self.current_token is None:
        self.token_span = self.token_span[1], self.token_span[1]
        raise self.error('Unexpected end of input')
    self.token_span = self.current_token.span
    return self
python
Move to the next token in the token stream.
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/parser.py#L133-L140
vberlier/nbtlib
nbtlib/literal/parser.py
Parser.parse
def parse(self):
    """Parse and return an nbt literal from the token stream."""
    token_type = self.current_token.type.lower()
    handler = getattr(self, f'parse_{token_type}', None)
    if handler is None:
        raise self.error(f'Invalid literal {self.current_token.value!r}')
    return handler()
python
Parse and return an nbt literal from the token stream.
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/parser.py#L142-L148
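Parser.parse dispatches by naming convention: a token of type FOO is handled by a method named parse_foo, looked up with getattr. A stripped-down sketch of the same pattern:

class MiniParser:
    def parse(self, token_type):
        handler = getattr(self, f'parse_{token_type.lower()}', None)
        if handler is None:
            raise ValueError(f'Invalid literal type {token_type!r}')
        return handler()

    def parse_number(self):
        return 'dispatched to parse_number'

print(MiniParser().parse('NUMBER'))   # dispatched to parse_number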
vberlier/nbtlib
nbtlib/literal/parser.py
Parser.parse_number
def parse_number(self):
    """Parse a number from the token stream."""
    value = self.current_token.value
    suffix = value[-1].lower()
    try:
        if suffix in NUMBER_SUFFIXES:
            return NUMBER_SUFFIXES[suffix](value[:-1])
        return Double(value) if '.' in value else Int(value)
    except (OutOfRange, ValueError):
        return String(value)
python
Parse a number from the token stream.
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/parser.py#L154-L164
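parse_number leans on a NUMBER_SUFFIXES mapping from a trailing letter to a tag constructor, falling back to Double/Int and finally String. A self-contained sketch with plain Python types standing in for the tag classes; the suffix table is an assumption modeled on Minecraft's NBT suffixes:

NUMBER_SUFFIXES = {'b': int, 's': int, 'l': int, 'f': float, 'd': float}

def parse_number(value):
    suffix = value[-1].lower()
    try:
        if suffix in NUMBER_SUFFIXES:
            return NUMBER_SUFFIXES[suffix](value[:-1])
        return float(value) if '.' in value else int(value)
    except ValueError:
        return value   # unparsable numbers degrade to a plain string

print(parse_number('3b'), parse_number('2.5'), parse_number('1.2.3'))
# 3 2.5 1.2.3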
vberlier/nbtlib
nbtlib/literal/parser.py
Parser.parse_string
def parse_string(self):
    """Parse a regular unquoted string from the token stream."""
    aliased_value = LITERAL_ALIASES.get(self.current_token.value.lower())
    if aliased_value is not None:
        return aliased_value
    return String(self.current_token.value)
python
Parse a regular unquoted string from the token stream.
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/parser.py#L166-L171
vberlier/nbtlib
nbtlib/literal/parser.py
Parser.collect_tokens_until
def collect_tokens_until(self, token_type):
    """Yield the item tokens in a comma-separated tag collection."""
    self.next()
    if self.current_token.type == token_type:
        return
    while True:
        yield self.current_token
        self.next()
        if self.current_token.type == token_type:
            return
        if self.current_token.type != 'COMMA':
            raise self.error(f'Expected comma but got '
                             f'{self.current_token.value!r}')
        self.next()
python
Yield the item tokens in a comma-separated tag collection.
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/parser.py#L173-L189
vberlier/nbtlib
nbtlib/literal/parser.py
Parser.parse_compound
def parse_compound(self):
    """Parse a compound from the token stream."""
    compound_tag = Compound()
    for token in self.collect_tokens_until('CLOSE_COMPOUND'):
        item_key = token.value
        if token.type not in ('NUMBER', 'STRING', 'QUOTED_STRING'):
            raise self.error(f'Expected compound key but got {item_key!r}')
        if token.type == 'QUOTED_STRING':
            item_key = self.unquote_string(item_key)
        if self.next().current_token.type != 'COLON':
            raise self.error(f'Expected colon but got '
                             f'{self.current_token.value!r}')
        self.next()
        compound_tag[item_key] = self.parse()
    return compound_tag
python
Parse a compound from the token stream.
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/parser.py#L191-L208
vberlier/nbtlib
nbtlib/literal/parser.py
Parser.array_items
def array_items(self, number_type, *, number_suffix=''):
    """Parse and yield array items from the token stream."""
    for token in self.collect_tokens_until('CLOSE_BRACKET'):
        is_number = token.type == 'NUMBER'
        value = token.value.lower()
        if not (is_number and value.endswith(number_suffix)):
            raise self.error(f'Invalid {number_type} array element '
                             f'{token.value!r}')
        yield int(value.replace(number_suffix, ''))
python
Parse and yield array items from the token stream.
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/parser.py#L210-L218
vberlier/nbtlib
nbtlib/literal/parser.py
Parser.parse_list
def parse_list(self):
    """Parse a list from the token stream."""
    try:
        return List([self.parse() for _ in
                     self.collect_tokens_until('CLOSE_BRACKET')])
    except IncompatibleItemType as exc:
        raise self.error(f'Item {str(exc.item)!r} is not a '
                         f'{exc.subtype.__name__} tag') from None
python
Parse a list from the token stream.
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/parser.py#L232-L239
vberlier/nbtlib
nbtlib/literal/parser.py
Parser.unquote_string
def unquote_string(self, string):
    """Return the unquoted value of a quoted string."""
    value = string[1:-1]
    forbidden_sequences = {ESCAPE_SUBS[STRING_QUOTES[string[0]]]}
    valid_sequences = set(ESCAPE_SEQUENCES) - forbidden_sequences
    for seq in ESCAPE_REGEX.findall(value):
        if seq not in valid_sequences:
            raise self.error(f'Invalid escape sequence "{seq}"')
    for seq, sub in ESCAPE_SEQUENCES.items():
        value = value.replace(seq, sub)
    return value
python
Return the unquoted value of a quoted string.
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/parser.py#L245-L259
tulsawebdevs/django-multi-gtfs
multigtfs/compat.py
opener_from_zipfile
def opener_from_zipfile(zipfile):
    """
    Returns a function that will open a file in a zipfile by name.

    For Python3 compatibility, the raw file will be converted to text.
    """
    def opener(filename):
        inner_file = zipfile.open(filename)
        if PY3:
            from io import TextIOWrapper
            return TextIOWrapper(inner_file)
        else:
            return inner_file
    return opener
python
Returns a function that will open a file in a zipfile by name. For Python3 compatibility, the raw file will be converted to text.
train
https://github.com/tulsawebdevs/django-multi-gtfs/blob/8c442bfb67e87566c24a7364d8fa0aacd4a0a652/multigtfs/compat.py#L73-L88
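A usage sketch with an in-memory archive; for brevity this inlines only the Python 3 branch of the opener (TextIOWrapper around the binary member):

import csv
import io
import zipfile
from io import TextIOWrapper

def opener_from_zipfile(zf):
    # Python 3 branch only: wrap the binary member so csv gets text
    def opener(filename):
        return TextIOWrapper(zf.open(filename))
    return opener

buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w') as zf:
    zf.writestr('stops.txt', 'stop_id,stop_name\n1,Main St\n')

opener = opener_from_zipfile(zipfile.ZipFile(buf))
print(list(csv.reader(opener('stops.txt'))))
# [['stop_id', 'stop_name'], ['1', 'Main St']]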
tulsawebdevs/django-multi-gtfs
multigtfs/compat.py
write_text_rows
def write_text_rows(writer, rows):
    '''Write CSV row data which may include text.'''
    for row in rows:
        try:
            writer.writerow(row)
        except UnicodeEncodeError:
            # Python 2 csv does badly with unicode outside of ASCII
            new_row = []
            for item in row:
                if isinstance(item, text_type):
                    new_row.append(item.encode('utf-8'))
                else:
                    new_row.append(item)
            writer.writerow(new_row)
python
Write CSV row data which may include text.
train
https://github.com/tulsawebdevs/django-multi-gtfs/blob/8c442bfb67e87566c24a7364d8fa0aacd4a0a652/multigtfs/compat.py#L91-L104
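On Python 3 the csv module is unicode-aware, so the UnicodeEncodeError branch never fires and write_text_rows degrades to a plain writerow loop; the encode fallback is Python 2 only. A minimal usage sketch with that Python 3 behavior inlined:

import csv
from io import StringIO

def write_text_rows(writer, rows):
    # On Python 3 writerow handles unicode directly
    for row in rows:
        writer.writerow(row)

out = StringIO()
write_text_rows(csv.writer(out, lineterminator='\n'),
                [['stop_id', 'stop_name'], ['1', 'Καλημέρα']])
print(out.getvalue())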
vberlier/nbtlib
nbtlib/literal/serializer.py
serialize_tag
def serialize_tag(tag, *, indent=None, compact=False, quote=None):
    """Serialize an nbt tag to its literal representation."""
    serializer = Serializer(indent=indent, compact=compact, quote=quote)
    return serializer.serialize(tag)
python
Serialize an nbt tag to its literal representation.
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/serializer.py#L48-L51
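A round-trip sketch, assuming parse_nbt and serialize_tag are both re-exported from the top-level nbtlib package:

from nbtlib import parse_nbt, serialize_tag

tag = parse_nbt('{name: "Example", values: [1, 2, 3]}')
print(serialize_tag(tag))             # compact single-line literal
print(serialize_tag(tag, indent=4))   # expanded multi-line literal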
vberlier/nbtlib
nbtlib/literal/serializer.py
Serializer.depth
def depth(self):
    """Increase the level of indentation by one."""
    if self.indentation is None:
        yield
    else:
        previous = self.previous_indent
        self.previous_indent = self.indent
        self.indent += self.indentation
        yield
        self.indent = self.previous_indent
        self.previous_indent = previous
python
Increase the level of indentation by one.
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/serializer.py#L71-L81
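depth reads like a generator because it is one: in the full class it is wrapped as a context manager, pushing a wider indent on entry and popping it after the yield. A self-contained sketch of the same push/pop pattern with contextlib.contextmanager:

from contextlib import contextmanager

class IndentTracker:
    def __init__(self, indentation='    '):
        self.indentation = indentation
        self.indent = ''
        self.previous_indent = ''

    @contextmanager
    def depth(self):
        # push: remember the old levels, widen the current indent
        previous = self.previous_indent
        self.previous_indent = self.indent
        self.indent += self.indentation
        yield
        # pop: restore both levels on exit
        self.indent = self.previous_indent
        self.previous_indent = previous

t = IndentTracker()
with t.depth():
    with t.depth():
        print(repr(t.indent))   # '        '
print(repr(t.indent))           # ''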
vberlier/nbtlib
nbtlib/literal/serializer.py
Serializer.should_expand
def should_expand(self, tag):
    """Return whether the specified tag should be expanded."""
    return self.indentation is not None and tag and (
        not self.previous_indent or (
            tag.serializer == 'list'
            and tag.subtype.serializer in ('array', 'list', 'compound')
        ) or (
            tag.serializer == 'compound'
        )
    )
python
Return whether the specified tag should be expanded.
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/serializer.py#L83-L92
vberlier/nbtlib
nbtlib/literal/serializer.py
Serializer.escape_string
def escape_string(self, string):
    """Return the escaped literal representation of an nbt string."""
    if self.quote:
        quote = self.quote
    else:
        found = QUOTE_REGEX.search(string)
        quote = (STRING_QUOTES[found.group()] if found
                 else next(iter(STRING_QUOTES)))
    for match, seq in ESCAPE_SUBS.items():
        if match == quote or match not in STRING_QUOTES:
            string = string.replace(match, seq)
    return f'{quote}{string}{quote}'
python
Return the escaped literal representation of an nbt string.
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/serializer.py#L101-L113
vberlier/nbtlib
nbtlib/literal/serializer.py
Serializer.stringify_compound_key
def stringify_compound_key(self, key):
    """Escape the compound key if it can't be represented unquoted."""
    if UNQUOTED_COMPOUND_KEY.match(key):
        return key
    return self.escape_string(key)
python
Escape the compound key if it can't be represented unquoted.
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/serializer.py#L115-L119
vberlier/nbtlib
nbtlib/literal/serializer.py
Serializer.serialize
def serialize(self, tag):
    """Return the literal representation of a tag."""
    handler = getattr(self, f'serialize_{tag.serializer}', None)
    if handler is None:
        raise TypeError(f'Can\'t serialize {type(tag)!r} instance')
    return handler(tag)
python
Return the literal representation of a tag.
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/serializer.py#L121-L126
vberlier/nbtlib
nbtlib/literal/serializer.py
Serializer.serialize_numeric
def serialize_numeric(self, tag):
    """Return the literal representation of a numeric tag."""
    str_func = int.__str__ if isinstance(tag, int) else float.__str__
    return str_func(tag) + tag.suffix
python
Return the literal representation of a numeric tag.
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/serializer.py#L128-L131
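Calling int.__str__ / float.__str__ directly matters because the numeric tag classes subclass int or float, and a plain str(tag) would hit any overridden __str__ instead of the raw number. A toy illustration:

class Byte(int):
    suffix = 'b'
    def __str__(self):
        return f'Byte({int(self)})'

tag = Byte(3)
print(str(tag))                        # Byte(3) -- the subclass override
print(int.__str__(tag) + tag.suffix)   # 3b -- bypasses the override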
vberlier/nbtlib
nbtlib/literal/serializer.py
Serializer.serialize_array
def serialize_array(self, tag):
    """Return the literal representation of an array tag."""
    elements = self.comma.join(f'{el}{tag.item_suffix}' for el in tag)
    return f'[{tag.array_prefix}{self.semicolon}{elements}]'
python
Return the literal representation of an array tag.
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/serializer.py#L133-L136
vberlier/nbtlib
nbtlib/literal/serializer.py
Serializer.serialize_list
def serialize_list(self, tag):
    """Return the literal representation of a list tag."""
    separator, fmt = self.comma, '[{}]'

    with self.depth():
        if self.should_expand(tag):
            separator, fmt = self.expand(separator, fmt)

        return fmt.format(separator.join(map(self.serialize, tag)))
python
Return the literal representation of a list tag.
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/serializer.py#L142-L150
vberlier/nbtlib
nbtlib/literal/serializer.py
Serializer.serialize_compound
def serialize_compound(self, tag):
    """Return the literal representation of a compound tag."""
    separator, fmt = self.comma, '{{{}}}'

    with self.depth():
        if self.should_expand(tag):
            separator, fmt = self.expand(separator, fmt)

        return fmt.format(separator.join(
            f'{self.stringify_compound_key(key)}{self.colon}{self.serialize(value)}'
            for key, value in tag.items()
        ))
python
Return the literal representation of a compound tag.
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/literal/serializer.py#L152-L163
tulsawebdevs/django-multi-gtfs
multigtfs/models/base.py
BaseQuerySet.populated_column_map
def populated_column_map(self):
    '''Return the _column_map without unused optional fields'''
    column_map = []
    cls = self.model
    for csv_name, field_pattern in cls._column_map:
        # Separate the local field name from foreign columns
        if '__' in field_pattern:
            field_name = field_pattern.split('__', 1)[0]
        else:
            field_name = field_pattern

        # Handle point fields
        point_match = re_point.match(field_name)
        if point_match:
            field = None
        else:
            field = cls._meta.get_field(field_name)

        # Only add optional columns if they are used in the records
        if field and field.blank and not field.has_default():
            kwargs = {field_name: get_blank_value(field)}
            if self.exclude(**kwargs).exists():
                column_map.append((csv_name, field_pattern))
        else:
            column_map.append((csv_name, field_pattern))
    return column_map
python
Return the _column_map without unused optional fields
train
https://github.com/tulsawebdevs/django-multi-gtfs/blob/8c442bfb67e87566c24a7364d8fa0aacd4a0a652/multigtfs/models/base.py#L37-L62
tulsawebdevs/django-multi-gtfs
multigtfs/models/base.py
BaseManager.in_feed
def in_feed(self, feed):
    '''Return the objects in the target feed'''
    kwargs = {self.model._rel_to_feed: feed}
    return self.filter(**kwargs)
python
Return the objects in the target feed
train
https://github.com/tulsawebdevs/django-multi-gtfs/blob/8c442bfb67e87566c24a7364d8fa0aacd4a0a652/multigtfs/models/base.py#L70-L73
tulsawebdevs/django-multi-gtfs
multigtfs/models/base.py
Base.import_txt
def import_txt(cls, txt_file, feed, filter_func=None):
    '''Import from the GTFS text file'''

    # Setup the conversion from GTFS to Django Format
    # Conversion functions
    def no_convert(value): return value

    def date_convert(value): return datetime.strptime(value, '%Y%m%d')

    def bool_convert(value): return (value == '1')

    def char_convert(value): return (value or '')

    def null_convert(value): return (value or None)

    def point_convert(value):
        """Convert latitude / longitude, strip leading +."""
        if value.startswith('+'):
            return value[1:]
        else:
            return (value or 0.0)

    cache = {}

    def default_convert(field):
        def get_value_or_default(value):
            if value == '' or value is None:
                return field.get_default()
            else:
                return value
        return get_value_or_default

    def instance_convert(field, feed, rel_name):
        def get_instance(value):
            if value.strip():
                related = field.related_model
                key1 = "{}:{}".format(related.__name__, rel_name)
                key2 = text_type(value)

                # Load existing objects
                if key1 not in cache:
                    pairs = related.objects.filter(
                        **{related._rel_to_feed: feed}).values_list(
                        rel_name, 'id')
                    cache[key1] = dict((text_type(x), i) for x, i in pairs)

                # Create new?
                if key2 not in cache[key1]:
                    kwargs = {
                        related._rel_to_feed: feed,
                        rel_name: value}
                    cache[key1][key2] = related.objects.create(
                        **kwargs).id
                return cache[key1][key2]
            else:
                return None
        return get_instance

    # Check unique fields
    column_names = [c for c, _ in cls._column_map]
    for unique_field in cls._unique_fields:
        assert unique_field in column_names, \
            '{} not in {}'.format(unique_field, column_names)

    # Map of field_name to converters from GTFS to Django format
    val_map = dict()
    name_map = dict()
    point_map = dict()
    for csv_name, field_pattern in cls._column_map:
        # Separate the local field name from foreign columns
        if '__' in field_pattern:
            field_base, rel_name = field_pattern.split('__', 1)
            field_name = field_base + '_id'
        else:
            field_name = field_base = field_pattern
        # Use the field name in the name mapping
        name_map[csv_name] = field_name

        # Is it a point field?
        point_match = re_point.match(field_name)
        if point_match:
            field = None
        else:
            field = cls._meta.get_field(field_base)

        # Pick a conversion function for the field
        if point_match:
            converter = point_convert
        elif isinstance(field, models.DateField):
            converter = date_convert
        elif isinstance(field, models.BooleanField):
            converter = bool_convert
        elif isinstance(field, models.CharField):
            converter = char_convert
        elif field.is_relation:
            converter = instance_convert(field, feed, rel_name)
            assert not isinstance(field, models.ManyToManyField)
        elif field.null:
            converter = null_convert
        elif field.has_default():
            converter = default_convert(field)
        else:
            converter = no_convert

        if point_match:
            index = int(point_match.group('index'))
            point_map[csv_name] = (index, converter)
        else:
            val_map[csv_name] = converter

    # Read and convert the source txt
    csv_reader = reader(txt_file, skipinitialspace=True)
    unique_line = dict()
    count = 0
    first = True
    extra_counts = defaultdict(int)
    new_objects = []
    for row in csv_reader:
        if first:
            # Read the columns
            columns = row
            if columns[0].startswith(CSV_BOM):
                columns[0] = columns[0][len(CSV_BOM):]
            first = False
            continue

        if filter_func and not filter_func(zip(columns, row)):
            continue

        if not row:
            continue

        # Read a data row
        fields = dict()
        point_coords = [None, None]
        ukey_values = {}
        if cls._rel_to_feed == 'feed':
            fields['feed'] = feed
        for column_name, value in zip(columns, row):
            if column_name not in name_map:
                val = null_convert(value)
                if val is not None:
                    fields.setdefault('extra_data', {})[column_name] = val
                    extra_counts[column_name] += 1
            elif column_name in val_map:
                fields[name_map[column_name]] = val_map[column_name](value)
            else:
                assert column_name in point_map
                pos, converter = point_map[column_name]
                point_coords[pos] = converter(value)

            # Is it part of the unique key?
            if column_name in cls._unique_fields:
                ukey_values[column_name] = value

        # Join the lat/long into a point
        if point_map:
            assert point_coords[0] and point_coords[1]
            fields['point'] = "POINT(%s)" % (' '.join(point_coords))

        # Is the item unique?
        ukey = tuple(ukey_values.get(u) for u in cls._unique_fields)
        if ukey in unique_line:
            logger.warning(
                '%s line %d is a duplicate of line %d, not imported.',
                cls._filename, csv_reader.line_num, unique_line[ukey])
            continue
        else:
            unique_line[ukey] = csv_reader.line_num

        # Create after accumulating a batch
        new_objects.append(cls(**fields))
        if len(new_objects) % batch_size == 0:  # pragma: no cover
            cls.objects.bulk_create(new_objects)
            count += len(new_objects)
            logger.info(
                "Imported %d %s",
                count, cls._meta.verbose_name_plural)
            new_objects = []

    # Create remaining objects
    if new_objects:
        cls.objects.bulk_create(new_objects)

    # Take note of extra fields
    if extra_counts:
        extra_columns = feed.meta.setdefault(
            'extra_columns', {}).setdefault(cls.__name__, [])
        for column in columns:
            if column in extra_counts and column not in extra_columns:
                extra_columns.append(column)
        feed.save()
    return len(unique_line)
python
Import from the GTFS text file
train
https://github.com/tulsawebdevs/django-multi-gtfs/blob/8c442bfb67e87566c24a7364d8fa0aacd4a0a652/multigtfs/models/base.py#L108-L300
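The import loop above batches ORM writes rather than saving one row at a time. A minimal, framework-free sketch of that accumulate-and-flush pattern; import_rows, create_batch, and the numbers are illustrative stand-ins, not multigtfs names:

def import_rows(rows, create_batch, batch_size=1000):
    """Feed rows to create_batch in chunks of batch_size, then flush the tail."""
    pending = []
    count = 0
    for row in rows:
        pending.append(row)
        if len(pending) % batch_size == 0:
            create_batch(pending)  # stands in for Model.objects.bulk_create(pending)
            count += len(pending)
            pending = []
    if pending:  # flush the final partial batch
        create_batch(pending)
        count += len(pending)
    return count

print(import_rows(range(25), lambda batch: None, batch_size=10))  # 25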
tulsawebdevs/django-multi-gtfs
multigtfs/models/base.py
Base.export_txt
def export_txt(cls, feed): '''Export records as a GTFS comma-separated file''' objects = cls.objects.in_feed(feed) # If no records, return None if not objects.exists(): return # Get the columns used in the dataset column_map = objects.populated_column_map() columns, fields = zip(*column_map) extra_columns = feed.meta.get( 'extra_columns', {}).get(cls.__name__, []) # Get sort order if hasattr(cls, '_sort_order'): sort_fields = cls._sort_order else: sort_fields = [] for field in fields: base_field = field.split('__', 1)[0] point_match = re_point.match(base_field) if point_match: continue field_type = cls._meta.get_field(base_field) assert not isinstance(field_type, ManyToManyField) sort_fields.append(field) # Create CSV writer out = StringIO() csv_writer = writer(out, lineterminator='\n') # Write header row header_row = [text_type(c) for c in columns] header_row.extend(extra_columns) write_text_rows(csv_writer, [header_row]) # Report the work to be done total = objects.count() logger.info( '%d %s to export...', total, cls._meta.verbose_name_plural) # Populate related items cache model_to_field_name = {} cache = {} for field_name in fields: if '__' in field_name: local_field_name, subfield_name = field_name.split('__', 1) field = cls._meta.get_field(local_field_name) field_type = field.related_model model_name = field_type.__name__ if model_name in model_to_field_name: # Already loaded this model under a different field name cache[field_name] = cache[model_to_field_name[model_name]] else: # Load all feed data for this model pairs = field_type.objects.in_feed( feed).values_list('id', subfield_name) cache[field_name] = dict( (i, text_type(x)) for i, x in pairs) cache[field_name][None] = u'' model_to_field_name[model_name] = field_name # Assemble the rows, writing when we hit batch size count = 0 rows = [] for item in objects.order_by(*sort_fields).iterator(): row = [] for csv_name, field_name in column_map: obj = item point_match = re_point.match(field_name) if '__' in field_name: # Return relations from cache local_field_name = field_name.split('__', 1)[0] field_id = getattr(obj, local_field_name + '_id') row.append(cache[field_name][field_id]) elif point_match: # Get the lat or long from the point name, index = point_match.groups() field = getattr(obj, name) row.append(field.coords[int(index)]) else: # Handle other field types field = getattr(obj, field_name) if obj else '' if isinstance(field, date): formatted = field.strftime(u'%Y%m%d') row.append(text_type(formatted)) elif isinstance(field, bool): row.append(1 if field else 0) elif field is None: row.append(u'') else: row.append(text_type(field)) for col in extra_columns: row.append(obj.extra_data.get(col, u'')) rows.append(row) if len(rows) % batch_size == 0: # pragma: no cover write_text_rows(csv_writer, rows) count += len(rows) logger.info( "Exported %d %s", count, cls._meta.verbose_name_plural) rows = [] # Write rows smaller than batch size write_text_rows(csv_writer, rows) return out.getvalue()
python
def export_txt(cls, feed): '''Export records as a GTFS comma-separated file''' objects = cls.objects.in_feed(feed) # If no records, return None if not objects.exists(): return # Get the columns used in the dataset column_map = objects.populated_column_map() columns, fields = zip(*column_map) extra_columns = feed.meta.get( 'extra_columns', {}).get(cls.__name__, []) # Get sort order if hasattr(cls, '_sort_order'): sort_fields = cls._sort_order else: sort_fields = [] for field in fields: base_field = field.split('__', 1)[0] point_match = re_point.match(base_field) if point_match: continue field_type = cls._meta.get_field(base_field) assert not isinstance(field_type, ManyToManyField) sort_fields.append(field) # Create CSV writer out = StringIO() csv_writer = writer(out, lineterminator='\n') # Write header row header_row = [text_type(c) for c in columns] header_row.extend(extra_columns) write_text_rows(csv_writer, [header_row]) # Report the work to be done total = objects.count() logger.info( '%d %s to export...', total, cls._meta.verbose_name_plural) # Populate related items cache model_to_field_name = {} cache = {} for field_name in fields: if '__' in field_name: local_field_name, subfield_name = field_name.split('__', 1) field = cls._meta.get_field(local_field_name) field_type = field.related_model model_name = field_type.__name__ if model_name in model_to_field_name: # Already loaded this model under a different field name cache[field_name] = cache[model_to_field_name[model_name]] else: # Load all feed data for this model pairs = field_type.objects.in_feed( feed).values_list('id', subfield_name) cache[field_name] = dict( (i, text_type(x)) for i, x in pairs) cache[field_name][None] = u'' model_to_field_name[model_name] = field_name # Assemble the rows, writing when we hit batch size count = 0 rows = [] for item in objects.order_by(*sort_fields).iterator(): row = [] for csv_name, field_name in column_map: obj = item point_match = re_point.match(field_name) if '__' in field_name: # Return relations from cache local_field_name = field_name.split('__', 1)[0] field_id = getattr(obj, local_field_name + '_id') row.append(cache[field_name][field_id]) elif point_match: # Get the lat or long from the point name, index = point_match.groups() field = getattr(obj, name) row.append(field.coords[int(index)]) else: # Handle other field types field = getattr(obj, field_name) if obj else '' if isinstance(field, date): formatted = field.strftime(u'%Y%m%d') row.append(text_type(formatted)) elif isinstance(field, bool): row.append(1 if field else 0) elif field is None: row.append(u'') else: row.append(text_type(field)) for col in extra_columns: row.append(obj.extra_data.get(col, u'')) rows.append(row) if len(rows) % batch_size == 0: # pragma: no cover write_text_rows(csv_writer, rows) count += len(rows) logger.info( "Exported %d %s", count, cls._meta.verbose_name_plural) rows = [] # Write rows smaller than batch size write_text_rows(csv_writer, rows) return out.getvalue()
[ "def", "export_txt", "(", "cls", ",", "feed", ")", ":", "objects", "=", "cls", ".", "objects", ".", "in_feed", "(", "feed", ")", "# If no records, return None", "if", "not", "objects", ".", "exists", "(", ")", ":", "return", "# Get the columns used in the dataset", "column_map", "=", "objects", ".", "populated_column_map", "(", ")", "columns", ",", "fields", "=", "zip", "(", "*", "column_map", ")", "extra_columns", "=", "feed", ".", "meta", ".", "get", "(", "'extra_columns'", ",", "{", "}", ")", ".", "get", "(", "cls", ".", "__name__", ",", "[", "]", ")", "# Get sort order", "if", "hasattr", "(", "cls", ",", "'_sort_order'", ")", ":", "sort_fields", "=", "cls", ".", "_sort_order", "else", ":", "sort_fields", "=", "[", "]", "for", "field", "in", "fields", ":", "base_field", "=", "field", ".", "split", "(", "'__'", ",", "1", ")", "[", "0", "]", "point_match", "=", "re_point", ".", "match", "(", "base_field", ")", "if", "point_match", ":", "continue", "field_type", "=", "cls", ".", "_meta", ".", "get_field", "(", "base_field", ")", "assert", "not", "isinstance", "(", "field_type", ",", "ManyToManyField", ")", "sort_fields", ".", "append", "(", "field", ")", "# Create CSV writer", "out", "=", "StringIO", "(", ")", "csv_writer", "=", "writer", "(", "out", ",", "lineterminator", "=", "'\\n'", ")", "# Write header row", "header_row", "=", "[", "text_type", "(", "c", ")", "for", "c", "in", "columns", "]", "header_row", ".", "extend", "(", "extra_columns", ")", "write_text_rows", "(", "csv_writer", ",", "[", "header_row", "]", ")", "# Report the work to be done", "total", "=", "objects", ".", "count", "(", ")", "logger", ".", "info", "(", "'%d %s to export...'", ",", "total", ",", "cls", ".", "_meta", ".", "verbose_name_plural", ")", "# Populate related items cache", "model_to_field_name", "=", "{", "}", "cache", "=", "{", "}", "for", "field_name", "in", "fields", ":", "if", "'__'", "in", "field_name", ":", "local_field_name", ",", "subfield_name", "=", "field_name", ".", "split", "(", "'__'", ",", "1", ")", "field", "=", "cls", ".", "_meta", ".", "get_field", "(", "local_field_name", ")", "field_type", "=", "field", ".", "related_model", "model_name", "=", "field_type", ".", "__name__", "if", "model_name", "in", "model_to_field_name", ":", "# Already loaded this model under a different field name", "cache", "[", "field_name", "]", "=", "cache", "[", "model_to_field_name", "[", "model_name", "]", "]", "else", ":", "# Load all feed data for this model", "pairs", "=", "field_type", ".", "objects", ".", "in_feed", "(", "feed", ")", ".", "values_list", "(", "'id'", ",", "subfield_name", ")", "cache", "[", "field_name", "]", "=", "dict", "(", "(", "i", ",", "text_type", "(", "x", ")", ")", "for", "i", ",", "x", "in", "pairs", ")", "cache", "[", "field_name", "]", "[", "None", "]", "=", "u''", "model_to_field_name", "[", "model_name", "]", "=", "field_name", "# Assemble the rows, writing when we hit batch size", "count", "=", "0", "rows", "=", "[", "]", "for", "item", "in", "objects", ".", "order_by", "(", "*", "sort_fields", ")", ".", "iterator", "(", ")", ":", "row", "=", "[", "]", "for", "csv_name", ",", "field_name", "in", "column_map", ":", "obj", "=", "item", "point_match", "=", "re_point", ".", "match", "(", "field_name", ")", "if", "'__'", "in", "field_name", ":", "# Return relations from cache", "local_field_name", "=", "field_name", ".", "split", "(", "'__'", ",", "1", ")", "[", "0", "]", "field_id", "=", "getattr", "(", "obj", ",", "local_field_name", "+", "'_id'", ")", "row", ".", "append", "(", "cache", "[", "field_name", "]", "[", "field_id", "]", ")", "elif", "point_match", ":", "# Get the lat or long from the point", "name", ",", "index", "=", "point_match", ".", "groups", "(", ")", "field", "=", "getattr", "(", "obj", ",", "name", ")", "row", ".", "append", "(", "field", ".", "coords", "[", "int", "(", "index", ")", "]", ")", "else", ":", "# Handle other field types", "field", "=", "getattr", "(", "obj", ",", "field_name", ")", "if", "obj", "else", "''", "if", "isinstance", "(", "field", ",", "date", ")", ":", "formatted", "=", "field", ".", "strftime", "(", "u'%Y%m%d'", ")", "row", ".", "append", "(", "text_type", "(", "formatted", ")", ")", "elif", "isinstance", "(", "field", ",", "bool", ")", ":", "row", ".", "append", "(", "1", "if", "field", "else", "0", ")", "elif", "field", "is", "None", ":", "row", ".", "append", "(", "u''", ")", "else", ":", "row", ".", "append", "(", "text_type", "(", "field", ")", ")", "for", "col", "in", "extra_columns", ":", "row", ".", "append", "(", "obj", ".", "extra_data", ".", "get", "(", "col", ",", "u''", ")", ")", "rows", ".", "append", "(", "row", ")", "if", "len", "(", "rows", ")", "%", "batch_size", "==", "0", ":", "# pragma: no cover", "write_text_rows", "(", "csv_writer", ",", "rows", ")", "count", "+=", "len", "(", "rows", ")", "logger", ".", "info", "(", "\"Exported %d %s\"", ",", "count", ",", "cls", ".", "_meta", ".", "verbose_name_plural", ")", "rows", "=", "[", "]", "# Write rows smaller than batch size", "write_text_rows", "(", "csv_writer", ",", "rows", ")", "return", "out", ".", "getvalue", "(", ")" ]
Export records as a GTFS comma-separated file
[ "Export", "records", "as", "a", "GTFS", "comma", "-", "separated", "file" ]
train
https://github.com/tulsawebdevs/django-multi-gtfs/blob/8c442bfb67e87566c24a7364d8fa0aacd4a0a652/multigtfs/models/base.py#L303-L410
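export_txt avoids issuing a query per exported row by bulk-loading each related model into an id-to-text dict up front. A toy sketch of that cache, with made-up route data standing in for the values_list('id', name) query:

related = [(1, 'Route A'), (2, 'Route B')]  # stands in for a values_list() query
cache = {pk: str(name) for pk, name in related}
cache[None] = ''  # null foreign keys become a blank cell

rows = [{'route_id': 2}, {'route_id': None}]
for row in rows:
    print(cache[row['route_id']])  # 'Route B', then ''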
aheadley/python-crunchyroll
crunchyroll/apis/android.py
make_android_api_method
def make_android_api_method(req_method, secure=True, version=0): """Turn an AndroidApi's method into a function that builds the request, sends it, then passes the response to the actual method. Should be used as a decorator. """ def outer_func(func): def inner_func(self, **kwargs): req_url = self._build_request_url(secure, func.__name__, version) req_func = self._build_request(req_method, req_url, params=kwargs) response = req_func() func(self, response) return response return inner_func return outer_func
python
def make_android_api_method(req_method, secure=True, version=0): """Turn an AndroidApi's method into a function that builds the request, sends it, then passes the response to the actual method. Should be used as a decorator. """ def outer_func(func): def inner_func(self, **kwargs): req_url = self._build_request_url(secure, func.__name__, version) req_func = self._build_request(req_method, req_url, params=kwargs) response = req_func() func(self, response) return response return inner_func return outer_func
[ "def", "make_android_api_method", "(", "req_method", ",", "secure", "=", "True", ",", "version", "=", "0", ")", ":", "def", "outer_func", "(", "func", ")", ":", "def", "inner_func", "(", "self", ",", "*", "*", "kwargs", ")", ":", "req_url", "=", "self", ".", "_build_request_url", "(", "secure", ",", "func", ".", "__name__", ",", "version", ")", "req_func", "=", "self", ".", "_build_request", "(", "req_method", ",", "req_url", ",", "params", "=", "kwargs", ")", "response", "=", "req_func", "(", ")", "func", "(", "self", ",", "response", ")", "return", "response", "return", "inner_func", "return", "outer_func" ]
Turn an AndroidApi's method into a function that builds the request, sends it, then passes the response to the actual method. Should be used as a decorator.
[ "Turn", "an", "AndroidApi", "s", "method", "into", "a", "function", "that", "builds", "the", "request", "sends", "it", "then", "passes", "the", "response", "to", "the", "actual", "method", ".", "Should", "be", "used", "as", "a", "decorator", "." ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/android.py#L32-L45
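A hedged usage sketch of the decorator idea above: the wrapper owns request construction and transport, and the decorated body only post-processes the response. Api, make_api_method, and the fake transport dict are hypothetical, not names from this library:

def make_api_method(func):
    # build and "send" the request, then let the method body see the response
    def inner(self, **kwargs):
        response = {'method': func.__name__, 'params': kwargs}  # fake transport
        func(self, response)
        return response
    return inner

class Api:
    @make_api_method
    def list_series(self, response):
        self.last_response = response  # the body only post-processes

api = Api()
print(api.list_series(limit=10))  # {'method': 'list_series', 'params': {'limit': 10}}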
aheadley/python-crunchyroll
crunchyroll/apis/android.py
AndroidApi._get_base_params
def _get_base_params(self): """Get the params that will be included with every request """ base_params = { 'locale': self._get_locale(), 'device_id': ANDROID.DEVICE_ID, 'device_type': ANDROID.APP_PACKAGE, 'access_token': ANDROID.ACCESS_TOKEN, 'version': ANDROID.APP_CODE, } base_params.update(dict((k, v) \ for k, v in iteritems(self._state_params) \ if v is not None)) return base_params
python
def _get_base_params(self): """Get the params that will be included with every request """ base_params = { 'locale': self._get_locale(), 'device_id': ANDROID.DEVICE_ID, 'device_type': ANDROID.APP_PACKAGE, 'access_token': ANDROID.ACCESS_TOKEN, 'version': ANDROID.APP_CODE, } base_params.update(dict((k, v) \ for k, v in iteritems(self._state_params) \ if v is not None)) return base_params
[ "def", "_get_base_params", "(", "self", ")", ":", "base_params", "=", "{", "'locale'", ":", "self", ".", "_get_locale", "(", ")", ",", "'device_id'", ":", "ANDROID", ".", "DEVICE_ID", ",", "'device_type'", ":", "ANDROID", ".", "APP_PACKAGE", ",", "'access_token'", ":", "ANDROID", ".", "ACCESS_TOKEN", ",", "'version'", ":", "ANDROID", ".", "APP_CODE", ",", "}", "base_params", ".", "update", "(", "dict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "iteritems", "(", "self", ".", "_state_params", ")", "if", "v", "is", "not", "None", ")", ")", "return", "base_params" ]
Get the params that will be included with every request
[ "Get", "the", "params", "that", "will", "be", "included", "with", "every", "request" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/android.py#L105-L118
aheadley/python-crunchyroll
crunchyroll/apis/android.py
AndroidApi._build_request_url
def _build_request_url(self, secure, api_method, version): """Build a URL for an API method request """ if secure: proto = ANDROID.PROTOCOL_SECURE else: proto = ANDROID.PROTOCOL_INSECURE req_url = ANDROID.API_URL.format( protocol=proto, api_method=api_method, version=version ) return req_url
python
def _build_request_url(self, secure, api_method, version): """Build a URL for an API method request """ if secure: proto = ANDROID.PROTOCOL_SECURE else: proto = ANDROID.PROTOCOL_INSECURE req_url = ANDROID.API_URL.format( protocol=proto, api_method=api_method, version=version ) return req_url
[ "def", "_build_request_url", "(", "self", ",", "secure", ",", "api_method", ",", "version", ")", ":", "if", "secure", ":", "proto", "=", "ANDROID", ".", "PROTOCOL_SECURE", "else", ":", "proto", "=", "ANDROID", ".", "PROTOCOL_INSECURE", "req_url", "=", "ANDROID", ".", "API_URL", ".", "format", "(", "protocol", "=", "proto", ",", "api_method", "=", "api_method", ",", "version", "=", "version", ")", "return", "req_url" ]
Build a URL for an API method request
[ "Build", "a", "URL", "for", "an", "API", "method", "request" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/android.py#L174-L186
aheadley/python-crunchyroll
crunchyroll/apis/android.py
AndroidApi.is_premium
def is_premium(self, media_type): """Get if the session is premium for a given media type @param str media_type Should be one of ANDROID.MEDIA_TYPE_* @return bool """ if self.logged_in: if media_type in self._user_data['premium']: return True return False
python
def is_premium(self, media_type): """Get if the session is premium for a given media type @param str media_type Should be one of ANDROID.MEDIA_TYPE_* @return bool """ if self.logged_in: if media_type in self._user_data['premium']: return True return False
[ "def", "is_premium", "(", "self", ",", "media_type", ")", ":", "if", "self", ".", "logged_in", ":", "if", "media_type", "in", "self", ".", "_user_data", "[", "'premium'", "]", ":", "return", "True", "return", "False" ]
Get if the session is premium for a given media type @param str media_type Should be one of ANDROID.MEDIA_TYPE_* @return bool
[ "Get", "if", "the", "session", "is", "premium", "for", "a", "given", "media", "type" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/android.py#L196-L205
aheadley/python-crunchyroll
crunchyroll/apis/android.py
AndroidApi.login
def login(self, response): """ Login using email/username and password, used to get the auth token @param str account @param str password @param int duration (optional) """ self._state_params['auth'] = response['auth'] self._user_data = response['user'] if not self.logged_in: raise ApiLoginFailure(response)
python
def login(self, response): """ Login using email/username and password, used to get the auth token @param str account @param str password @param int duration (optional) """ self._state_params['auth'] = response['auth'] self._user_data = response['user'] if not self.logged_in: raise ApiLoginFailure(response)
[ "def", "login", "(", "self", ",", "response", ")", ":", "self", ".", "_state_params", "[", "'auth'", "]", "=", "response", "[", "'auth'", "]", "self", ".", "_user_data", "=", "response", "[", "'user'", "]", "if", "not", "self", ".", "logged_in", ":", "raise", "ApiLoginFailure", "(", "response", ")" ]
Login using email/username and password, used to get the auth token @param str account @param str password @param int duration (optional)
[ "Login", "using", "email", "/", "username", "and", "password", "used", "to", "get", "the", "auth", "token" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/android.py#L242-L253
vberlier/nbtlib
nbtlib/tag.py
read_numeric
def read_numeric(fmt, buff, byteorder='big'): """Read a numeric value from a file-like object.""" try: fmt = fmt[byteorder] return fmt.unpack(buff.read(fmt.size))[0] except StructError: return 0 except KeyError as exc: raise ValueError('Invalid byte order') from exc
python
def read_numeric(fmt, buff, byteorder='big'): """Read a numeric value from a file-like object.""" try: fmt = fmt[byteorder] return fmt.unpack(buff.read(fmt.size))[0] except StructError: return 0 except KeyError as exc: raise ValueError('Invalid byte order') from exc
[ "def", "read_numeric", "(", "fmt", ",", "buff", ",", "byteorder", "=", "'big'", ")", ":", "try", ":", "fmt", "=", "fmt", "[", "byteorder", "]", "return", "fmt", ".", "unpack", "(", "buff", ".", "read", "(", "fmt", ".", "size", ")", ")", "[", "0", "]", "except", "StructError", ":", "return", "0", "except", "KeyError", "as", "exc", ":", "raise", "ValueError", "(", "'Invalid byte order'", ")", "from", "exc" ]
Read a numeric value from a file-like object.
[ "Read", "a", "numeric", "value", "from", "a", "file", "-", "like", "object", "." ]
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/tag.py#L99-L107
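A self-contained sketch of the byteorder-keyed Struct table this function indexes into; the INT table below is an assumption about its shape, not nbtlib's actual definition:

from io import BytesIO
from struct import Struct

INT = {'big': Struct('>i'), 'little': Struct('<i')}  # assumed format table shape

def read_int(buff, byteorder='big'):
    fmt = INT[byteorder]  # KeyError here means an invalid byte order
    return fmt.unpack(buff.read(fmt.size))[0]

print(read_int(BytesIO(b'\x00\x00\x00\x2a')))  # 42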
vberlier/nbtlib
nbtlib/tag.py
write_numeric
def write_numeric(fmt, value, buff, byteorder='big'): """Write a numeric value to a file-like object.""" try: buff.write(fmt[byteorder].pack(value)) except KeyError as exc: raise ValueError('Invalid byte order') from exc
python
def write_numeric(fmt, value, buff, byteorder='big'): """Write a numeric value to a file-like object.""" try: buff.write(fmt[byteorder].pack(value)) except KeyError as exc: raise ValueError('Invalid byte order') from exc
[ "def", "write_numeric", "(", "fmt", ",", "value", ",", "buff", ",", "byteorder", "=", "'big'", ")", ":", "try", ":", "buff", ".", "write", "(", "fmt", "[", "byteorder", "]", ".", "pack", "(", "value", ")", ")", "except", "KeyError", "as", "exc", ":", "raise", "ValueError", "(", "'Invalid byte order'", ")", "from", "exc" ]
Write a numeric value to a file-like object.
[ "Write", "a", "numeric", "value", "to", "a", "file", "-", "like", "object", "." ]
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/tag.py#L110-L115
vberlier/nbtlib
nbtlib/tag.py
read_string
def read_string(buff, byteorder='big'): """Read a string from a file-like object.""" length = read_numeric(USHORT, buff, byteorder) return buff.read(length).decode('utf-8')
python
def read_string(buff, byteorder='big'): """Read a string from a file-like object.""" length = read_numeric(USHORT, buff, byteorder) return buff.read(length).decode('utf-8')
[ "def", "read_string", "(", "buff", ",", "byteorder", "=", "'big'", ")", ":", "length", "=", "read_numeric", "(", "USHORT", ",", "buff", ",", "byteorder", ")", "return", "buff", ".", "read", "(", "length", ")", ".", "decode", "(", "'utf-8'", ")" ]
Read a string from a file-like object.
[ "Read", "a", "string", "from", "a", "file", "-", "like", "object", "." ]
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/tag.py#L118-L121
vberlier/nbtlib
nbtlib/tag.py
write_string
def write_string(value, buff, byteorder='big'): """Write a string to a file-like object.""" data = value.encode('utf-8') write_numeric(USHORT, len(data), buff, byteorder) buff.write(data)
python
def write_string(value, buff, byteorder='big'): """Write a string to a file-like object.""" data = value.encode('utf-8') write_numeric(USHORT, len(data), buff, byteorder) buff.write(data)
[ "def", "write_string", "(", "value", ",", "buff", ",", "byteorder", "=", "'big'", ")", ":", "data", "=", "value", ".", "encode", "(", "'utf-8'", ")", "write_numeric", "(", "USHORT", ",", "len", "(", "data", ")", ",", "buff", ",", "byteorder", ")", "buff", ".", "write", "(", "data", ")" ]
Write a string to a file-like object.
[ "Write", "a", "string", "to", "a", "file", "-", "like", "object", "." ]
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/tag.py#L124-L128
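Putting read_string and write_string together, a round-trip sketch of the length-prefixed encoding: an unsigned short length, then the UTF-8 bytes (the USHORT table shape is assumed, as above):

from io import BytesIO
from struct import Struct

USHORT = {'big': Struct('>H'), 'little': Struct('<H')}  # assumed format table

def write_string(value, buff, byteorder='big'):
    data = value.encode('utf-8')
    buff.write(USHORT[byteorder].pack(len(data)))  # length prefix first
    buff.write(data)

def read_string(buff, byteorder='big'):
    fmt = USHORT[byteorder]
    (length,) = fmt.unpack(buff.read(fmt.size))
    return buff.read(length).decode('utf-8')

buff = BytesIO()
write_string('hello', buff)
buff.seek(0)
print(read_string(buff))  # 'hello'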
vberlier/nbtlib
nbtlib/tag.py
List.infer_list_subtype
def infer_list_subtype(items): """Infer a list subtype from a collection of items.""" subtype = End for item in items: item_type = type(item) if not issubclass(item_type, Base): continue if subtype is End: subtype = item_type if not issubclass(subtype, List): return subtype elif subtype is not item_type: stype, itype = subtype, item_type generic = List while issubclass(stype, List) and issubclass(itype, List): stype, itype = stype.subtype, itype.subtype generic = List[generic] if stype is End: subtype = item_type elif itype is not End: return generic.subtype return subtype
python
def infer_list_subtype(items): """Infer a list subtype from a collection of items.""" subtype = End for item in items: item_type = type(item) if not issubclass(item_type, Base): continue if subtype is End: subtype = item_type if not issubclass(subtype, List): return subtype elif subtype is not item_type: stype, itype = subtype, item_type generic = List while issubclass(stype, List) and issubclass(itype, List): stype, itype = stype.subtype, itype.subtype generic = List[generic] if stype is End: subtype = item_type elif itype is not End: return generic.subtype return subtype
[ "def", "infer_list_subtype", "(", "items", ")", ":", "subtype", "=", "End", "for", "item", "in", "items", ":", "item_type", "=", "type", "(", "item", ")", "if", "not", "issubclass", "(", "item_type", ",", "Base", ")", ":", "continue", "if", "subtype", "is", "End", ":", "subtype", "=", "item_type", "if", "not", "issubclass", "(", "subtype", ",", "List", ")", ":", "return", "subtype", "elif", "subtype", "is", "not", "item_type", ":", "stype", ",", "itype", "=", "subtype", ",", "item_type", "generic", "=", "List", "while", "issubclass", "(", "stype", ",", "List", ")", "and", "issubclass", "(", "itype", ",", "List", ")", ":", "stype", ",", "itype", "=", "stype", ".", "subtype", ",", "itype", ".", "subtype", "generic", "=", "List", "[", "generic", "]", "if", "stype", "is", "End", ":", "subtype", "=", "item_type", "elif", "itype", "is", "not", "End", ":", "return", "generic", ".", "subtype", "return", "subtype" ]
Infer a list subtype from a collection of items.
[ "Infer", "a", "list", "subtype", "from", "a", "collection", "of", "items", "." ]
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/tag.py#L414-L438
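Assuming nbtlib is installed, the inference is observable from the subtype attribute of a constructed List:

from nbtlib.tag import List, Int, End

print(List([Int(1), Int(2)]).subtype)  # <class 'nbtlib.tag.Int'>
print(List([]).subtype is End)         # True: empty lists stay untyped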
vberlier/nbtlib
nbtlib/tag.py
List.cast_item
def cast_item(cls, item): """Cast list item to the appropriate tag type.""" if not isinstance(item, cls.subtype): incompatible = isinstance(item, Base) and not any( issubclass(cls.subtype, tag_type) and isinstance(item, tag_type) for tag_type in cls.all_tags.values() ) if incompatible: raise IncompatibleItemType(item, cls.subtype) try: return cls.subtype(item) except EndInstantiation: raise ValueError('List tags without an explicit subtype must ' 'either be empty or instantiated with ' 'elements from which a subtype can be ' 'inferred') from None except (IncompatibleItemType, CastError): raise except Exception as exc: raise CastError(item, cls.subtype) from exc return item
python
def cast_item(cls, item): """Cast list item to the appropriate tag type.""" if not isinstance(item, cls.subtype): incompatible = isinstance(item, Base) and not any( issubclass(cls.subtype, tag_type) and isinstance(item, tag_type) for tag_type in cls.all_tags.values() ) if incompatible: raise IncompatibleItemType(item, cls.subtype) try: return cls.subtype(item) except EndInstantiation: raise ValueError('List tags without an explicit subtype must ' 'either be empty or instantiated with ' 'elements from which a subtype can be ' 'inferred') from None except (IncompatibleItemType, CastError): raise except Exception as exc: raise CastError(item, cls.subtype) from exc return item
[ "def", "cast_item", "(", "cls", ",", "item", ")", ":", "if", "not", "isinstance", "(", "item", ",", "cls", ".", "subtype", ")", ":", "incompatible", "=", "isinstance", "(", "item", ",", "Base", ")", "and", "not", "any", "(", "issubclass", "(", "cls", ".", "subtype", ",", "tag_type", ")", "and", "isinstance", "(", "item", ",", "tag_type", ")", "for", "tag_type", "in", "cls", ".", "all_tags", ".", "values", "(", ")", ")", "if", "incompatible", ":", "raise", "IncompatibleItemType", "(", "item", ",", "cls", ".", "subtype", ")", "try", ":", "return", "cls", ".", "subtype", "(", "item", ")", "except", "EndInstantiation", ":", "raise", "ValueError", "(", "'List tags without an explicit subtype must '", "'either be empty or instantiated with '", "'elements from which a subtype can be '", "'inferred'", ")", "from", "None", "except", "(", "IncompatibleItemType", ",", "CastError", ")", ":", "raise", "except", "Exception", "as", "exc", ":", "raise", "CastError", "(", "item", ",", "cls", ".", "subtype", ")", "from", "exc", "return", "item" ]
Cast list item to the appropriate tag type.
[ "Cast", "list", "item", "to", "the", "appropriate", "tag", "type", "." ]
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/tag.py#L465-L486
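Assuming nbtlib is installed and that List's mutation methods route through cast_item, appending a plain int to a typed list should come back as the declared subtype:

from nbtlib.tag import List, Int

tags = List[Int]([])
tags.append(5)        # a plain int is cast to the declared subtype
print(type(tags[0]))  # <class 'nbtlib.tag.Int'>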
vberlier/nbtlib
nbtlib/tag.py
Compound.merge
def merge(self, other): """Recursively merge tags from another compound.""" for key, value in other.items(): if key in self and (isinstance(self[key], Compound) and isinstance(value, dict)): self[key].merge(value) else: self[key] = value
python
def merge(self, other): """Recursively merge tags from another compound.""" for key, value in other.items(): if key in self and (isinstance(self[key], Compound) and isinstance(value, dict)): self[key].merge(value) else: self[key] = value
[ "def", "merge", "(", "self", ",", "other", ")", ":", "for", "key", ",", "value", "in", "other", ".", "items", "(", ")", ":", "if", "key", "in", "self", "and", "(", "isinstance", "(", "self", "[", "key", "]", ",", "Compound", ")", "and", "isinstance", "(", "value", ",", "dict", ")", ")", ":", "self", "[", "key", "]", ".", "merge", "(", "value", ")", "else", ":", "self", "[", "key", "]", "=", "value" ]
Recursively merge tags from another compound.
[ "Recursively", "merge", "tags", "from", "another", "compound", "." ]
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/tag.py#L522-L529
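The same recursion works on plain dicts; a minimal sketch with made-up keys and values:

def merge(dst, src):
    for key, value in src.items():
        if key in dst and isinstance(dst[key], dict) and isinstance(value, dict):
            merge(dst[key], value)  # recurse into nested mappings
        else:
            dst[key] = value        # everything else is overwritten

a = {'display': {'Name': 'old'}, 'Damage': 0}
merge(a, {'display': {'Lore': ['x']}, 'Damage': 5})
print(a)  # {'display': {'Name': 'old', 'Lore': ['x']}, 'Damage': 5}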
aheadley/python-crunchyroll
crunchyroll/subtitles.py
SubtitleDecrypter.decrypt_subtitle
def decrypt_subtitle(self, subtitle): """Decrypt encrypted subtitle data in high level model object @param crunchyroll.models.Subtitle subtitle @return str """ return self.decrypt(self._build_encryption_key(int(subtitle.id)), subtitle['iv'][0].text.decode('base64'), subtitle['data'][0].text.decode('base64'))
python
def decrypt_subtitle(self, subtitle): """Decrypt encrypted subtitle data in high level model object @param crunchyroll.models.Subtitle subtitle @return str """ return self.decrypt(self._build_encryption_key(int(subtitle.id)), subtitle['iv'][0].text.decode('base64'), subtitle['data'][0].text.decode('base64'))
[ "def", "decrypt_subtitle", "(", "self", ",", "subtitle", ")", ":", "return", "self", ".", "decrypt", "(", "self", ".", "_build_encryption_key", "(", "int", "(", "subtitle", ".", "id", ")", ")", ",", "subtitle", "[", "'iv'", "]", "[", "0", "]", ".", "text", ".", "decode", "(", "'base64'", ")", ",", "subtitle", "[", "'data'", "]", "[", "0", "]", ".", "text", ".", "decode", "(", "'base64'", ")", ")" ]
Decrypt encrypted subtitle data in high level model object @param crunchyroll.models.Subtitle subtitle @return str
[ "Decrypt", "encrypted", "subtitle", "data", "in", "high", "level", "model", "object" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/subtitles.py#L51-L59
aheadley/python-crunchyroll
crunchyroll/subtitles.py
SubtitleDecrypter.decrypt
def decrypt(self, encryption_key, iv, encrypted_data): """Decrypt encrypted subtitle data @param str encryption_key @param str iv @param str encrypted_data @return str """ logger.info('Decrypting subtitles with length (%d bytes), key=%r', len(encrypted_data), encryption_key) return zlib.decompress(aes_decrypt(encryption_key, iv, encrypted_data))
python
def decrypt(self, encryption_key, iv, encrypted_data): """Decrypt encrypted subtitle data @param str encryption_key @param str iv @param str encrypted_data @return str """ logger.info('Decrypting subtitles with length (%d bytes), key=%r', len(encrypted_data), encryption_key) return zlib.decompress(aes_decrypt(encryption_key, iv, encrypted_data))
[ "def", "decrypt", "(", "self", ",", "encryption_key", ",", "iv", ",", "encrypted_data", ")", ":", "logger", ".", "info", "(", "'Decrypting subtitles with length (%d bytes), key=%r'", ",", "len", "(", "encrypted_data", ")", ",", "encryption_key", ")", "return", "zlib", ".", "decompress", "(", "aes_decrypt", "(", "encryption_key", ",", "iv", ",", "encrypted_data", ")", ")" ]
Decrypt encrypted subtitle data @param int subtitle_id @param str iv @param str encrypted_data @return str
[ "Decrypt", "encrypted", "subtitle", "data" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/subtitles.py#L61-L72
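A hedged round-trip sketch of this decrypt step using PyCryptodome; the library's own aes_decrypt helper is not shown in this record, so CBC mode and PKCS#7 padding are assumptions:

import zlib
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes
from Crypto.Util.Padding import pad, unpad

key, iv = get_random_bytes(32), get_random_bytes(16)
# fabricate an "encrypted subtitle" blob: compress, pad, encrypt
blob = AES.new(key, AES.MODE_CBC, iv).encrypt(pad(zlib.compress(b'[subtitle xml]'), 16))

def decrypt(key, iv, encrypted_data):
    data = AES.new(key, AES.MODE_CBC, iv).decrypt(encrypted_data)
    return zlib.decompress(unpad(data, 16))  # inflate after decrypting

print(decrypt(key, iv, blob))  # b'[subtitle xml]'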
aheadley/python-crunchyroll
crunchyroll/subtitles.py
SubtitleDecrypter._build_encryption_key
def _build_encryption_key(self, subtitle_id, key_size=ENCRYPTION_KEY_SIZE): """Generate the encryption key for a given media item Encryption key is basically just sha1(<magic value based on subtitle_id> + '"#$&).6CXzPHw=2N_+isZK') then padded with 0s to 32 chars @param int subtitle_id @param int key_size @return str """ # generate a 160-bit SHA1 hash sha1_hash = hashlib.new('sha1', self._build_hash_secret((1, 2)) + self._build_hash_magic(subtitle_id)).digest() # pad to 256-bit hash for 32 byte key sha1_hash += '\x00' * max(key_size - len(sha1_hash), 0) return sha1_hash[:key_size]
python
def _build_encryption_key(self, subtitle_id, key_size=ENCRYPTION_KEY_SIZE): """Generate the encryption key for a given media item Encryption key is basically just sha1(<magic value based on subtitle_id> + '"#$&).6CXzPHw=2N_+isZK') then padded with 0s to 32 chars @param int subtitle_id @param int key_size @return str """ # generate a 160-bit SHA1 hash sha1_hash = hashlib.new('sha1', self._build_hash_secret((1, 2)) + self._build_hash_magic(subtitle_id)).digest() # pad to 256-bit hash for 32 byte key sha1_hash += '\x00' * max(key_size - len(sha1_hash), 0) return sha1_hash[:key_size]
[ "def", "_build_encryption_key", "(", "self", ",", "subtitle_id", ",", "key_size", "=", "ENCRYPTION_KEY_SIZE", ")", ":", "# generate a 160-bit SHA1 hash", "sha1_hash", "=", "hashlib", ".", "new", "(", "'sha1'", ",", "self", ".", "_build_hash_secret", "(", "(", "1", ",", "2", ")", ")", "+", "self", ".", "_build_hash_magic", "(", "subtitle_id", ")", ")", ".", "digest", "(", ")", "# pad to 256-bit hash for 32 byte key", "sha1_hash", "+=", "'\\x00'", "*", "max", "(", "key_size", "-", "len", "(", "sha1_hash", ")", ",", "0", ")", "return", "sha1_hash", "[", ":", "key_size", "]" ]
Generate the encryption key for a given media item Encryption key is basically just sha1(<magic value based on subtitle_id> + '"#$&).6CXzPHw=2N_+isZK') then padded with 0s to 32 chars @param int subtitle_id @param int key_size @return str
[ "Generate", "the", "encryption", "key", "for", "a", "given", "media", "item" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/subtitles.py#L74-L91
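A worked sketch of the derivation: SHA-1 over secret + magic, zero-padded out to the key size. The byte inputs below are placeholders, not real Crunchyroll values:

import hashlib

def build_key(hash_secret, hash_magic, key_size=32):
    digest = hashlib.new('sha1', hash_secret + hash_magic).digest()  # 20 bytes
    digest += b'\x00' * max(key_size - len(digest), 0)               # zero-pad to 32
    return digest[:key_size]

key = build_key(b'secret', b'12345')
print(len(key), key[-4:])  # 32 b'\x00\x00\x00\x00'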
aheadley/python-crunchyroll
crunchyroll/subtitles.py
SubtitleDecrypter._build_hash_magic
def _build_hash_magic(self, subtitle_id): """Build the other half of the encryption key hash I have no idea what is going on here @param int subtitle_id @return str """ media_magic = self.HASH_MAGIC_CONST ^ subtitle_id hash_magic = media_magic ^ media_magic >> 3 ^ media_magic * 32 return str(hash_magic)
python
def _build_hash_magic(self, subtitle_id): """Build the other half of the encryption key hash I have no idea what is going on here @param int subtitle_id @return str """ media_magic = self.HASH_MAGIC_CONST ^ subtitle_id hash_magic = media_magic ^ media_magic >> 3 ^ media_magic * 32 return str(hash_magic)
[ "def", "_build_hash_magic", "(", "self", ",", "subtitle_id", ")", ":", "media_magic", "=", "self", ".", "HASH_MAGIC_CONST", "^", "subtitle_id", "hash_magic", "=", "media_magic", "^", "media_magic", ">>", "3", "^", "media_magic", "*", "32", "return", "str", "(", "hash_magic", ")" ]
Build the other half of the encryption key hash I have no idea what is going on here @param int subtitle_id @return str
[ "Build", "the", "other", "half", "of", "the", "encryption", "key", "hash" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/subtitles.py#L93-L104
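The same arithmetic with a made-up constant, to make the operator precedence explicit:

HASH_MAGIC_CONST = 0x12345  # illustrative constant, not the module's real value
subtitle_id = 42
media_magic = HASH_MAGIC_CONST ^ subtitle_id
# >> binds tighter than ^, and * tighter than both, so this is
# media_magic ^ (media_magic >> 3) ^ (media_magic * 32)
hash_magic = media_magic ^ media_magic >> 3 ^ media_magic * 32
print(str(hash_magic))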
aheadley/python-crunchyroll
crunchyroll/subtitles.py
SubtitleDecrypter._build_hash_secret
def _build_hash_secret(self, seq_seed, seq_len=HASH_SECRET_LENGTH, mod_value=HASH_SECRET_MOD_CONST): """Build a seed for the hash based on the Fibonacci sequence Take first `seq_len` + len(`seq_seed`) characters of Fibonacci sequence, starting with `seq_seed`, and applying e % `mod_value` + `HASH_SECRET_CHAR_OFFSET` to the resulting sequence, then return as a string @param tuple|list seq_seed @param int seq_len @param int mod_value @return str """ # make sure we use a list, tuples are immutable fbn_seq = list(seq_seed) for i in range(seq_len): fbn_seq.append(fbn_seq[-1] + fbn_seq[-2]) hash_secret = list(map( lambda c: chr(c % mod_value + self.HASH_SECRET_CHAR_OFFSET), fbn_seq[2:])) return ''.join(hash_secret)
python
def _build_hash_secret(self, seq_seed, seq_len=HASH_SECRET_LENGTH, mod_value=HASH_SECRET_MOD_CONST): """Build a seed for the hash based on the Fibonacci sequence Take first `seq_len` + len(`seq_seed`) characters of Fibonacci sequence, starting with `seq_seed`, and applying e % `mod_value` + `HASH_SECRET_CHAR_OFFSET` to the resulting sequence, then return as a string @param tuple|list seq_seed @param int seq_len @param int mod_value @return str """ # make sure we use a list, tuples are immutable fbn_seq = list(seq_seed) for i in range(seq_len): fbn_seq.append(fbn_seq[-1] + fbn_seq[-2]) hash_secret = list(map( lambda c: chr(c % mod_value + self.HASH_SECRET_CHAR_OFFSET), fbn_seq[2:])) return ''.join(hash_secret)
[ "def", "_build_hash_secret", "(", "self", ",", "seq_seed", ",", "seq_len", "=", "HASH_SECRET_LENGTH", ",", "mod_value", "=", "HASH_SECRET_MOD_CONST", ")", ":", "# make sure we use a list, tuples are immutable", "fbn_seq", "=", "list", "(", "seq_seed", ")", "for", "i", "in", "range", "(", "seq_len", ")", ":", "fbn_seq", ".", "append", "(", "fbn_seq", "[", "-", "1", "]", "+", "fbn_seq", "[", "-", "2", "]", ")", "hash_secret", "=", "list", "(", "map", "(", "lambda", "c", ":", "chr", "(", "c", "%", "mod_value", "+", "self", ".", "HASH_SECRET_CHAR_OFFSET", ")", ",", "fbn_seq", "[", "2", ":", "]", ")", ")", "return", "''", ".", "join", "(", "hash_secret", ")" ]
Build a seed for the hash based on the Fibonacci sequence Take first `seq_len` + len(`seq_seed`) characters of Fibonacci sequence, starting with `seq_seed`, and applying e % `mod_value` + `HASH_SECRET_CHAR_OFFSET` to the resulting sequence, then return as a string @param tuple|list seq_seed @param int seq_len @param int mod_value @return str
[ "Build", "a", "seed", "for", "the", "hash", "based", "on", "the", "Fibonacci", "sequence" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/subtitles.py#L106-L128
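A worked example with illustrative constants (the real HASH_SECRET_* values live elsewhere in the module): extend the Fibonacci-like sequence from the seed, drop the seed, then map each term to a printable character.

def build_hash_secret(seq_seed, seq_len=20, mod_value=97, char_offset=33):
    fbn_seq = list(seq_seed)
    for _ in range(seq_len):
        fbn_seq.append(fbn_seq[-1] + fbn_seq[-2])  # extend the sequence
    # skip the seed itself, then map each term to a printable character
    return ''.join(chr(c % mod_value + char_offset) for c in fbn_seq[2:])

print(build_hash_secret((1, 2)))  # 20 deterministic printable characters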
aheadley/python-crunchyroll
crunchyroll/subtitles.py
SubtitleFormatter.format
def format(self, subtitles): """Turn a string containing the subs xml document into the formatted subtitle string @param str|crunchyroll.models.StyledSubtitle sub_xml_text @return str """ logger.debug('Formatting subtitles (id=%s) with %s', subtitles.id, self.__class__.__name__) return self._format(subtitles).encode('utf-8')
python
def format(self, subtitles): """Turn a string containing the subs xml document into the formatted subtitle string @param str|crunchyroll.models.StyledSubtitle sub_xml_text @return str """ logger.debug('Formatting subtitles (id=%s) with %s', subtitles.id, self.__class__.__name__) return self._format(subtitles).encode('utf-8')
[ "def", "format", "(", "self", ",", "subtitles", ")", ":", "logger", ".", "debug", "(", "'Formatting subtitles (id=%s) with %s'", ",", "subtitles", ".", "id", ",", "self", ".", "__class__", ".", "__name__", ")", "return", "self", ".", "_format", "(", "subtitles", ")", ".", "encode", "(", "'utf-8'", ")" ]
Turn a string containing the subs xml document into the formatted subtitle string @param str|crunchyroll.models.StyledSubtitle sub_xml_text @return str
[ "Turn", "a", "string", "containing", "the", "subs", "xml", "document", "into", "the", "formatted", "subtitle", "string" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/subtitles.py#L134-L143
aheadley/python-crunchyroll
crunchyroll/apis/meta.py
require_session_started
def require_session_started(func): """Check if API sessions are started and start them if not """ @functools.wraps(func) def inner_func(self, *pargs, **kwargs): if not self.session_started: logger.info('Starting session for required meta method') self.start_session() return func(self, *pargs, **kwargs) return inner_func
python
def require_session_started(func): """Check if API sessions are started and start them if not """ @functools.wraps(func) def inner_func(self, *pargs, **kwargs): if not self.session_started: logger.info('Starting session for required meta method') self.start_session() return func(self, *pargs, **kwargs) return inner_func
[ "def", "require_session_started", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "inner_func", "(", "self", ",", "*", "pargs", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "session_started", ":", "logger", ".", "info", "(", "'Starting session for required meta method'", ")", "self", ".", "start_session", "(", ")", "return", "func", "(", "self", ",", "*", "pargs", ",", "*", "*", "kwargs", ")", "return", "inner_func" ]
Check if API sessions are started and start them if not
[ "Check", "if", "API", "sessions", "are", "started", "and", "start", "them", "if", "not" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/meta.py#L35-L44
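A generic sketch of this lazy-start pattern, with a hypothetical Session class in place of the API objects: the wrapper checks a flag and runs the expensive setup only once, on first use.

import functools

def require_started(func):
    @functools.wraps(func)
    def inner(self, *pargs, **kwargs):
        if not self.started:  # run the expensive setup once, on first use
            self.start()
        return func(self, *pargs, **kwargs)
    return inner

class Session:
    started = False
    def start(self):
        self.started = True
        print('session started')
    @require_started
    def fetch(self):
        return 'data'

s = Session()
print(s.fetch())  # prints 'session started', then 'data'
print(s.fetch())  # no second start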
aheadley/python-crunchyroll
crunchyroll/apis/meta.py
require_android_logged_in
def require_android_logged_in(func): """Check if android API is logged in and login if not, implies `require_session_started` """ @functools.wraps(func) @require_session_started def inner_func(self, *pargs, **kwargs): if not self._android_api.logged_in: logger.info('Logging into android API for required meta method') if not self.has_credentials: raise ApiLoginFailure( 'Login is required but no credentials were provided') self._android_api.login(account=self._state['username'], password=self._state['password']) return func(self, *pargs, **kwargs) return inner_func
python
def require_android_logged_in(func): """Check if android API is logged in and login if not, implies `require_session_started` """ @functools.wraps(func) @require_session_started def inner_func(self, *pargs, **kwargs): if not self._android_api.logged_in: logger.info('Logging into android API for required meta method') if not self.has_credentials: raise ApiLoginFailure( 'Login is required but no credentials were provided') self._android_api.login(account=self._state['username'], password=self._state['password']) return func(self, *pargs, **kwargs) return inner_func
[ "def", "require_android_logged_in", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "@", "require_session_started", "def", "inner_func", "(", "self", ",", "*", "pargs", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "_android_api", ".", "logged_in", ":", "logger", ".", "info", "(", "'Logging into android API for required meta method'", ")", "if", "not", "self", ".", "has_credentials", ":", "raise", "ApiLoginFailure", "(", "'Login is required but no credentials were provided'", ")", "self", ".", "_android_api", ".", "login", "(", "account", "=", "self", ".", "_state", "[", "'username'", "]", ",", "password", "=", "self", ".", "_state", "[", "'password'", "]", ")", "return", "func", "(", "self", ",", "*", "pargs", ",", "*", "*", "kwargs", ")", "return", "inner_func" ]
Check if android API is logged in and login if not, implies `require_session_started`
[ "Check", "if", "android", "API", "is", "logged", "in", "and", "login", "if", "not", "implies", "require_session_started" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/meta.py#L46-L61
aheadley/python-crunchyroll
crunchyroll/apis/meta.py
optional_manga_logged_in
def optional_manga_logged_in(func): """Check if android manga API is logged in and login if credentials were provided, implies `require_session_started` """ @functools.wraps(func) @require_session_started def inner_func(self, *pargs, **kwargs): if not self._manga_api.logged_in and self.has_credentials: logger.info('Logging into android manga API for optional meta method') self._manga_api.cr_login(account=self._state['username'], password=self._state['password']) return func(self, *pargs, **kwargs) return inner_func
python
def optional_manga_logged_in(func): """Check if android manga API is logged in and login if credentials were provided, implies `require_session_started` """ @functools.wraps(func) @require_session_started def inner_func(self, *pargs, **kwargs): if not self._manga_api.logged_in and self.has_credentials: logger.info('Logging into android manga API for optional meta method') self._manga_api.cr_login(account=self._state['username'], password=self._state['password']) return func(self, *pargs, **kwargs) return inner_func
[ "def", "optional_manga_logged_in", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "@", "require_session_started", "def", "inner_func", "(", "self", ",", "*", "pargs", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "_manga_api", ".", "logged_in", "and", "self", ".", "has_credentials", ":", "logger", ".", "info", "(", "'Logging into android manga API for optional meta method'", ")", "self", ".", "_manga_api", ".", "cr_login", "(", "account", "=", "self", ".", "_state", "[", "'username'", "]", ",", "password", "=", "self", ".", "_state", "[", "'password'", "]", ")", "return", "func", "(", "self", ",", "*", "pargs", ",", "*", "*", "kwargs", ")", "return", "inner_func" ]
Check if android manga API is logged in and login if credentials were provided, implies `require_session_started`
[ "Check", "if", "android", "manga", "API", "is", "logged", "in", "and", "login", "if", "credentials", "were", "provided", "implies", "require_session_started" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/meta.py#L77-L89
aheadley/python-crunchyroll
crunchyroll/apis/meta.py
require_ajax_logged_in
def require_ajax_logged_in(func): """Check if ajax API is logged in and login if not """ @functools.wraps(func) def inner_func(self, *pargs, **kwargs): if not self._ajax_api.logged_in: logger.info('Logging into AJAX API for required meta method') if not self.has_credentials: raise ApiLoginFailure( 'Login is required but no credentials were provided') self._ajax_api.User_Login(name=self._state['username'], password=self._state['password']) return func(self, *pargs, **kwargs) return inner_func
python
def require_ajax_logged_in(func): """Check if ajax API is logged in and login if not """ @functools.wraps(func) def inner_func(self, *pargs, **kwargs): if not self._ajax_api.logged_in: logger.info('Logging into AJAX API for required meta method') if not self.has_credentials: raise ApiLoginFailure( 'Login is required but no credentials were provided') self._ajax_api.User_Login(name=self._state['username'], password=self._state['password']) return func(self, *pargs, **kwargs) return inner_func
[ "def", "require_ajax_logged_in", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "inner_func", "(", "self", ",", "*", "pargs", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "_ajax_api", ".", "logged_in", ":", "logger", ".", "info", "(", "'Logging into AJAX API for required meta method'", ")", "if", "not", "self", ".", "has_credentials", ":", "raise", "ApiLoginFailure", "(", "'Login is required but no credentials were provided'", ")", "self", ".", "_ajax_api", ".", "User_Login", "(", "name", "=", "self", ".", "_state", "[", "'username'", "]", ",", "password", "=", "self", ".", "_state", "[", "'password'", "]", ")", "return", "func", "(", "self", ",", "*", "pargs", ",", "*", "*", "kwargs", ")", "return", "inner_func" ]
Check if ajax API is logged in and login if not
[ "Check", "if", "ajax", "API", "is", "logged", "in", "and", "login", "if", "not" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/meta.py#L91-L104
aheadley/python-crunchyroll
crunchyroll/apis/meta.py
MetaApi.start_session
def start_session(self): """Start the underlying APIs sessions Calling this is not required, it will be called automatically if a method that needs a session is called @return bool """ self._android_api.start_session() self._manga_api.cr_start_session() return self.session_started
python
def start_session(self): """Start the underlying APIs sessions Calling this is not required, it will be called automatically if a method that needs a session is called @return bool """ self._android_api.start_session() self._manga_api.cr_start_session() return self.session_started
[ "def", "start_session", "(", "self", ")", ":", "self", ".", "_android_api", ".", "start_session", "(", ")", "self", ".", "_manga_api", ".", "cr_start_session", "(", ")", "return", "self", ".", "session_started" ]
Start the underlying APIs sessions Calling this is not required, it will be called automatically if a method that needs a session is called @return bool
[ "Start", "the", "underlying", "APIs", "sessions" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/meta.py#L174-L184
aheadley/python-crunchyroll
crunchyroll/apis/meta.py
MetaApi.login
def login(self, username, password): """Login with the given username/email and password Calling this method is not required if credentials were provided in the constructor, but it could be used to switch users or something maybe @return bool """ # we could get stuck in an inconsistent state if we got an exception while # trying to login with different credentials than what is stored so # we rollback the state to prevent that state_snapshot = self._state.copy() try: self._ajax_api.User_Login(name=username, password=password) self._android_api.login(account=username, password=password) self._manga_api.cr_login(account=username, password=password) except Exception as err: # something went wrong, rollback self._state = state_snapshot raise err self._state['username'] = username self._state['password'] = password return self.logged_in
python
def login(self, username, password): """Login with the given username/email and password Calling this method is not required if credentials were provided in the constructor, but it could be used to switch users or something maybe @return bool """ # we could get stuck in an inconsistent state if we got an exception while # trying to login with different credentials than what is stored so # we rollback the state to prevent that state_snapshot = self._state.copy() try: self._ajax_api.User_Login(name=username, password=password) self._android_api.login(account=username, password=password) self._manga_api.cr_login(account=username, password=password) except Exception as err: # something went wrong, rollback self._state = state_snapshot raise err self._state['username'] = username self._state['password'] = password return self.logged_in
[ "def", "login", "(", "self", ",", "username", ",", "password", ")", ":", "# we could get stuck in an inconsistent state if we got an exception while", "# trying to login with different credentials than what is stored so", "# we rollback the state to prevent that", "state_snapshot", "=", "self", ".", "_state", ".", "copy", "(", ")", "try", ":", "self", ".", "_ajax_api", ".", "User_Login", "(", "name", "=", "username", ",", "password", "=", "password", ")", "self", ".", "_android_api", ".", "login", "(", "account", "=", "username", ",", "password", "=", "password", ")", "self", ".", "_manga_api", ".", "cr_login", "(", "account", "=", "username", ",", "password", "=", "password", ")", "except", "Exception", "as", "err", ":", "# something went wrong, rollback", "self", ".", "_state", "=", "state_snapshot", "raise", "err", "self", ".", "_state", "[", "'username'", "]", "=", "username", "self", ".", "_state", "[", "'password'", "]", "=", "password", "return", "self", ".", "logged_in" ]
Login with the given username/email and password Calling this method is not required if credentials were provided in the constructor, but it could be used to switch users or something maybe @return bool
[ "Login", "with", "the", "given", "username", "/", "email", "and", "password" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/meta.py#L187-L209
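The snapshot-and-rollback idiom from this login method in isolation, with hypothetical backends standing in for the three API logins:

class Client:
    def __init__(self):
        self._state = {'username': None, 'password': None}

    def login(self, username, password, backends):
        snapshot = self._state.copy()  # shallow copy is enough for flat state
        try:
            for backend in backends:   # each backend login may raise
                backend(username, password)
        except Exception:
            self._state = snapshot     # roll back to the pre-login state
            raise
        self._state.update(username=username, password=password)

c = Client()
c.login('user', 'pass', [lambda u, p: None])
print(c._state['username'])  # 'user'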
aheadley/python-crunchyroll
crunchyroll/apis/meta.py
MetaApi.list_anime_series
def list_anime_series(self, sort=META.SORT_ALPHA, limit=META.MAX_SERIES, offset=0): """Get a list of anime series @param str sort pick how results should be sorted, should be one of META.SORT_* @param int limit limit number of series to return, there doesn't seem to be an upper bound @param int offset list series starting from this offset, for pagination @return list<crunchyroll.models.Series> """ result = self._android_api.list_series( media_type=ANDROID.MEDIA_TYPE_ANIME, filter=sort, limit=limit, offset=offset) return result
python
def list_anime_series(self, sort=META.SORT_ALPHA, limit=META.MAX_SERIES, offset=0): """Get a list of anime series @param str sort pick how results should be sorted, should be one of META.SORT_* @param int limit limit number of series to return, there doesn't seem to be an upper bound @param int offset list series starting from this offset, for pagination @return list<crunchyroll.models.Series> """ result = self._android_api.list_series( media_type=ANDROID.MEDIA_TYPE_ANIME, filter=sort, limit=limit, offset=offset) return result
[ "def", "list_anime_series", "(", "self", ",", "sort", "=", "META", ".", "SORT_ALPHA", ",", "limit", "=", "META", ".", "MAX_SERIES", ",", "offset", "=", "0", ")", ":", "result", "=", "self", ".", "_android_api", ".", "list_series", "(", "media_type", "=", "ANDROID", ".", "MEDIA_TYPE_ANIME", ",", "filter", "=", "sort", ",", "limit", "=", "limit", ",", "offset", "=", "offset", ")", "return", "result" ]
Get a list of anime series @param str sort pick how results should be sorted, should be one of META.SORT_* @param int limit limit number of series to return, there doesn't seem to be an upper bound @param int offset list series starting from this offset, for pagination @return list<crunchyroll.models.Series>
[ "Get", "a", "list", "of", "anime", "series" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/meta.py#L213-L228
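A hypothetical pagination loop over a limit/offset listing method such as this one; iter_series and the 250-item fake backend are illustrative, not part of the library:

def iter_series(list_page, page_size=100):
    offset = 0
    while True:
        batch = list_page(limit=page_size, offset=offset)
        if not batch:  # an empty page means we are past the end
            break
        yield from batch
        offset += page_size

# stand-in for api.list_anime_series; a real call would pass sort= as well
fake = lambda limit, offset: list(range(offset, min(offset + limit, 250)))
print(sum(1 for _ in iter_series(fake)))  # 250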
aheadley/python-crunchyroll
crunchyroll/apis/meta.py
MetaApi.list_drama_series
def list_drama_series(self, sort=META.SORT_ALPHA, limit=META.MAX_SERIES, offset=0): """Get a list of drama series @param str sort pick how results should be sorted, should be one of META.SORT_* @param int limit limit number of series to return, there doesn't seem to be an upper bound @param int offset list series starting from this offset, for pagination @return list<crunchyroll.models.Series> """ result = self._android_api.list_series( media_type=ANDROID.MEDIA_TYPE_DRAMA, filter=sort, limit=limit, offset=offset) return result
python
def list_drama_series(self, sort=META.SORT_ALPHA, limit=META.MAX_SERIES, offset=0): """Get a list of drama series @param str sort pick how results should be sorted, should be one of META.SORT_* @param int limit limit number of series to return, there doesn't seem to be an upper bound @param int offset list series starting from this offset, for pagination @return list<crunchyroll.models.Series> """ result = self._android_api.list_series( media_type=ANDROID.MEDIA_TYPE_DRAMA, filter=sort, limit=limit, offset=offset) return result
[ "def", "list_drama_series", "(", "self", ",", "sort", "=", "META", ".", "SORT_ALPHA", ",", "limit", "=", "META", ".", "MAX_SERIES", ",", "offset", "=", "0", ")", ":", "result", "=", "self", ".", "_android_api", ".", "list_series", "(", "media_type", "=", "ANDROID", ".", "MEDIA_TYPE_DRAMA", ",", "filter", "=", "sort", ",", "limit", "=", "limit", ",", "offset", "=", "offset", ")", "return", "result" ]
Get a list of drama series @param str sort pick how results should be sorted, should be one of META.SORT_* @param int limit limit number of series to return, there doesn't seem to be an upper bound @param int offset list series starting from this offset, for pagination @return list<crunchyroll.models.Series>
[ "Get", "a", "list", "of", "drama", "series" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/meta.py#L232-L247
aheadley/python-crunchyroll
crunchyroll/apis/meta.py
MetaApi.list_manga_series
def list_manga_series(self, filter=None, content_type='jp_manga'): """Get a list of manga series """ result = self._manga_api.list_series(filter, content_type) return result
python
def list_manga_series(self, filter=None, content_type='jp_manga'): """Get a list of manga series """ result = self._manga_api.list_series(filter, content_type) return result
[ "def", "list_manga_series", "(", "self", ",", "filter", "=", "None", ",", "content_type", "=", "'jp_manga'", ")", ":", "result", "=", "self", ".", "_manga_api", ".", "list_series", "(", "filter", ",", "content_type", ")", "return", "result" ]
Get a list of manga series
[ "Get", "a", "list", "of", "manga", "series" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/meta.py#L251-L256
aheadley/python-crunchyroll
crunchyroll/apis/meta.py
MetaApi.search_anime_series
def search_anime_series(self, query_string): """Search anime series list by series name, case-sensitive @param str query_string string to search for, note that the search is very simplistic and only matches against the start of the series name, ex) search for "space" matches "Space Brothers" but wouldn't match "Brothers Space" @return list<crunchyroll.models.Series> """ result = self._android_api.list_series( media_type=ANDROID.MEDIA_TYPE_ANIME, filter=ANDROID.FILTER_PREFIX + query_string) return result
python
def search_anime_series(self, query_string): """Search anime series list by series name, case-sensitive @param str query_string string to search for, note that the search is very simplistic and only matches against the start of the series name, ex) search for "space" matches "Space Brothers" but wouldn't match "Brothers Space" @return list<crunchyroll.models.Series> """ result = self._android_api.list_series( media_type=ANDROID.MEDIA_TYPE_ANIME, filter=ANDROID.FILTER_PREFIX + query_string) return result
[ "def", "search_anime_series", "(", "self", ",", "query_string", ")", ":", "result", "=", "self", ".", "_android_api", ".", "list_series", "(", "media_type", "=", "ANDROID", ".", "MEDIA_TYPE_ANIME", ",", "filter", "=", "ANDROID", ".", "FILTER_PREFIX", "+", "query_string", ")", "return", "result" ]
Search anime series list by series name, case-sensitive @param str query_string string to search for, note that the search is very simplistic and only matches against the start of the series name, ex) search for "space" matches "Space Brothers" but wouldn't match "Brothers Space" @return list<crunchyroll.models.Series>
[ "Search", "anime", "series", "list", "by", "series", "name", "case", "-", "sensitive" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/meta.py#L260-L273
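A hedged example of the prefix search described in the docstring above; the import is inferred from the record and the query string is illustrative.

    from crunchyroll.apis.meta import MetaApi

    api = MetaApi()  # assumption: no-argument constructor
    # Per the docstring, only the start of the series name is matched,
    # so "Space" can match "Space Brothers" but not "Brothers Space".
    for series in api.search_anime_series("Space"):
        print(series)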
aheadley/python-crunchyroll
crunchyroll/apis/meta.py
MetaApi.search_drama_series
def search_drama_series(self, query_string): """Search drama series list by series name, case-sensitive @param str query_string string to search for, note that the search is very simplistic and only matches against the start of the series name, ex) search for "space" matches "Space Brothers" but wouldn't match "Brothers Space" @return list<crunchyroll.models.Series> """ result = self._android_api.list_series( media_type=ANDROID.MEDIA_TYPE_DRAMA, filter=ANDROID.FILTER_PREFIX + query_string) return result
python
def search_drama_series(self, query_string): """Search drama series list by series name, case-sensitive @param str query_string string to search for, note that the search is very simplistic and only matches against the start of the series name, ex) search for "space" matches "Space Brothers" but wouldn't match "Brothers Space" @return list<crunchyroll.models.Series> """ result = self._android_api.list_series( media_type=ANDROID.MEDIA_TYPE_DRAMA, filter=ANDROID.FILTER_PREFIX + query_string) return result
[ "def", "search_drama_series", "(", "self", ",", "query_string", ")", ":", "result", "=", "self", ".", "_android_api", ".", "list_series", "(", "media_type", "=", "ANDROID", ".", "MEDIA_TYPE_DRAMA", ",", "filter", "=", "ANDROID", ".", "FILTER_PREFIX", "+", "query_string", ")", "return", "result" ]
Search drama series list by series name, case-sensitive @param str query_string string to search for, note that the search is very simplistic and only matches against the start of the series name, ex) search for "space" matches "Space Brothers" but wouldn't match "Brothers Space" @return list<crunchyroll.models.Series>
[ "Search", "drama", "series", "list", "by", "series", "name", "case", "-", "sensitive" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/meta.py#L277-L290
aheadley/python-crunchyroll
crunchyroll/apis/meta.py
MetaApi.search_manga_series
def search_manga_series(self, query_string): """Search the manga series list by name, case-insensitive @param str query_string @return list<crunchyroll.models.Series> """ result = self._manga_api.list_series() return [series for series in result \ if series['locale']['enUS']['name'].lower().startswith( query_string.lower())]
python
def search_manga_series(self, query_string): """Search the manga series list by name, case-insensitive @param str query_string @return list<crunchyroll.models.Series> """ result = self._manga_api.list_series() return [series for series in result \ if series['locale']['enUS']['name'].lower().startswith( query_string.lower())]
[ "def", "search_manga_series", "(", "self", ",", "query_string", ")", ":", "result", "=", "self", ".", "_manga_api", ".", "list_series", "(", ")", "return", "[", "series", "for", "series", "in", "result", "if", "series", "[", "'locale'", "]", "[", "'enUS'", "]", "[", "'name'", "]", ".", "lower", "(", ")", ".", "startswith", "(", "query_string", ".", "lower", "(", ")", ")", "]" ]
Search the manga series list by name, case-insensitive @param str query_string @return list<crunchyroll.models.Series>
[ "Search", "the", "manga", "series", "list", "by", "name", "case", "-", "insensitive" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/meta.py#L294-L305
aheadley/python-crunchyroll
crunchyroll/apis/meta.py
MetaApi.list_media
def list_media(self, series, sort=META.SORT_DESC, limit=META.MAX_MEDIA, offset=0): """List media for a given series or collection @param crunchyroll.models.Series series the series to search for @param str sort choose the ordering of the results, only META.SORT_DESC is known to work @param int limit limit size of results @param int offset start results from this index, for pagination @return list<crunchyroll.models.Media> """ params = { 'sort': sort, 'offset': offset, 'limit': limit, } params.update(self._get_series_query_dict(series)) result = self._android_api.list_media(**params) return result
python
def list_media(self, series, sort=META.SORT_DESC, limit=META.MAX_MEDIA, offset=0): """List media for a given series or collection @param crunchyroll.models.Series series the series to search for @param str sort choose the ordering of the results, only META.SORT_DESC is known to work @param int limit limit size of results @param int offset start results from this index, for pagination @return list<crunchyroll.models.Media> """ params = { 'sort': sort, 'offset': offset, 'limit': limit, } params.update(self._get_series_query_dict(series)) result = self._android_api.list_media(**params) return result
[ "def", "list_media", "(", "self", ",", "series", ",", "sort", "=", "META", ".", "SORT_DESC", ",", "limit", "=", "META", ".", "MAX_MEDIA", ",", "offset", "=", "0", ")", ":", "params", "=", "{", "'sort'", ":", "sort", ",", "'offset'", ":", "offset", ",", "'limit'", ":", "limit", ",", "}", "params", ".", "update", "(", "self", ".", "_get_series_query_dict", "(", "series", ")", ")", "result", "=", "self", ".", "_android_api", ".", "list_media", "(", "*", "*", "params", ")", "return", "result" ]
List media for a given series or collection @param crunchyroll.models.Series series the series to search for @param str sort choose the ordering of the results, only META.SORT_DESC is known to work @param int limit limit size of results @param int offset start results from this index, for pagination @return list<crunchyroll.models.Media>
[ "List", "media", "for", "a", "given", "series", "or", "collection" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/meta.py#L309-L328
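A sketch of paginating through a series' media with the method above, relying only on the limit/offset parameters shown in the record; the series lookup and page size are illustrative.

    from crunchyroll.apis.meta import MetaApi

    api = MetaApi()  # assumption: no-argument constructor
    series = api.search_anime_series("Space")[0]
    # Page through results 20 at a time; the default sort order applies.
    offset = 0
    while True:
        page = api.list_media(series, limit=20, offset=offset)
        if not page:
            break
        offset += len(page)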
aheadley/python-crunchyroll
crunchyroll/apis/meta.py
MetaApi.search_media
def search_media(self, series, query_string): """Search for media from a series starting with query_string, case-sensitive @param crunchyroll.models.Series series the series to search in @param str query_string the search query, same restrictions as `search_anime_series` @return list<crunchyroll.models.Media> """ params = { 'sort': ANDROID.FILTER_PREFIX + query_string, } params.update(self._get_series_query_dict(series)) result = self._android_api.list_media(**params) return result
python
def search_media(self, series, query_string): """Search for media from a series starting with query_string, case-sensitive @param crunchyroll.models.Series series the series to search in @param str query_string the search query, same restrictions as `search_anime_series` @return list<crunchyroll.models.Media> """ params = { 'sort': ANDROID.FILTER_PREFIX + query_string, } params.update(self._get_series_query_dict(series)) result = self._android_api.list_media(**params) return result
[ "def", "search_media", "(", "self", ",", "series", ",", "query_string", ")", ":", "params", "=", "{", "'sort'", ":", "ANDROID", ".", "FILTER_PREFIX", "+", "query_string", ",", "}", "params", ".", "update", "(", "self", ".", "_get_series_query_dict", "(", "series", ")", ")", "result", "=", "self", ".", "_android_api", ".", "list_media", "(", "*", "*", "params", ")", "return", "result" ]
Search for media from a series starting with query_string, case-sensitive @param crunchyroll.models.Series series the series to search in @param str query_string the search query, same restrictions as `search_anime_series` @return list<crunchyroll.models.Media>
[ "Search", "for", "media", "from", "a", "series", "starting", "with", "query_string", "case", "-", "sensitive" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/meta.py#L359-L372
aheadley/python-crunchyroll
crunchyroll/apis/meta.py
MetaApi.get_media_stream
def get_media_stream(self, media_item, format, quality): """Get the stream data for a given media item @param crunchyroll.models.Media media_item @param int format @param int quality @return crunchyroll.models.MediaStream """ result = self._ajax_api.VideoPlayer_GetStandardConfig( media_id=media_item.media_id, video_format=format, video_quality=quality) return MediaStream(result)
python
def get_media_stream(self, media_item, format, quality): """Get the stream data for a given media item @param crunchyroll.models.Media media_item @param int format @param int quality @return crunchyroll.models.MediaStream """ result = self._ajax_api.VideoPlayer_GetStandardConfig( media_id=media_item.media_id, video_format=format, video_quality=quality) return MediaStream(result)
[ "def", "get_media_stream", "(", "self", ",", "media_item", ",", "format", ",", "quality", ")", ":", "result", "=", "self", ".", "_ajax_api", ".", "VideoPlayer_GetStandardConfig", "(", "media_id", "=", "media_item", ".", "media_id", ",", "video_format", "=", "format", ",", "video_quality", "=", "quality", ")", "return", "MediaStream", "(", "result", ")" ]
Get the stream data for a given media item @param crunchyroll.models.Media media_item @param int format @param int quality @return crunchyroll.models.MediaStream
[ "Get", "the", "stream", "data", "for", "a", "given", "media", "item" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/meta.py#L375-L387
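A hedged sketch tying the media listing and stream lookup together; the record only says format and quality are ints, so the zero values below are hypothetical placeholders (get_stream_formats, further down, is one way to discover valid values).

    from crunchyroll.apis.meta import MetaApi

    api = MetaApi()  # assumption: no-argument constructor
    episode = api.list_media(api.search_anime_series("Space")[0])[0]
    # format/quality are integer codes; 0 is a hypothetical placeholder.
    stream = api.get_media_stream(episode, format=0, quality=0)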
aheadley/python-crunchyroll
crunchyroll/apis/meta.py
MetaApi.unfold_subtitle_stub
def unfold_subtitle_stub(self, subtitle_stub): """Turn a SubtitleStub into a full Subtitle object @param crunchyroll.models.SubtitleStub subtitle_stub @return crunchyroll.models.Subtitle """ return Subtitle(self._ajax_api.Subtitle_GetXml( subtitle_script_id=int(subtitle_stub.id)))
python
def unfold_subtitle_stub(self, subtitle_stub): """Turn a SubtitleStub into a full Subtitle object @param crunchyroll.models.SubtitleStub subtitle_stub @return crunchyroll.models.Subtitle """ return Subtitle(self._ajax_api.Subtitle_GetXml( subtitle_script_id=int(subtitle_stub.id)))
[ "def", "unfold_subtitle_stub", "(", "self", ",", "subtitle_stub", ")", ":", "return", "Subtitle", "(", "self", ".", "_ajax_api", ".", "Subtitle_GetXml", "(", "subtitle_script_id", "=", "int", "(", "subtitle_stub", ".", "id", ")", ")", ")" ]
Turn a SubtitleStub into a full Subtitle object @param crunchyroll.models.SubtitleStub subtitle_stub @return crunchyroll.models.Subtitle
[ "Turn", "a", "SubtitleStub", "into", "a", "full", "Subtitle", "object" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/meta.py#L402-L409
aheadley/python-crunchyroll
crunchyroll/apis/meta.py
MetaApi.get_stream_formats
def get_stream_formats(self, media_item): """Get the available media formats for a given media item @param crunchyroll.models.Media @return dict """ scraper = ScraperApi(self._ajax_api._connector) formats = scraper.get_media_formats(media_item.media_id) return formats
python
def get_stream_formats(self, media_item): """Get the available media formats for a given media item @param crunchyroll.models.Media @return dict """ scraper = ScraperApi(self._ajax_api._connector) formats = scraper.get_media_formats(media_item.media_id) return formats
[ "def", "get_stream_formats", "(", "self", ",", "media_item", ")", ":", "scraper", "=", "ScraperApi", "(", "self", ".", "_ajax_api", ".", "_connector", ")", "formats", "=", "scraper", ".", "get_media_formats", "(", "media_item", ".", "media_id", ")", "return", "formats" ]
Get the available media formats for a given media item @param crunchyroll.models.Media @return dict
[ "Get", "the", "available", "media", "formats", "for", "a", "given", "media", "item" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/meta.py#L412-L420
aheadley/python-crunchyroll
crunchyroll/apis/meta.py
MetaApi.list_queue
def list_queue(self, media_types=[META.TYPE_ANIME, META.TYPE_DRAMA]): """List the series in the queue, optionally filtering by type of media @param list<str> media_types a list of media types to filter the queue with, should be of META.TYPE_* @return list<crunchyroll.models.Series> """ result = self._android_api.queue(media_types='|'.join(media_types)) return [queue_item['series'] for queue_item in result]
python
def list_queue(self, media_types=[META.TYPE_ANIME, META.TYPE_DRAMA]): """List the series in the queue, optionally filtering by type of media @param list<str> media_types a list of media types to filter the queue with, should be of META.TYPE_* @return list<crunchyroll.models.Series> """ result = self._android_api.queue(media_types='|'.join(media_types)) return [queue_item['series'] for queue_item in result]
[ "def", "list_queue", "(", "self", ",", "media_types", "=", "[", "META", ".", "TYPE_ANIME", ",", "META", ".", "TYPE_DRAMA", "]", ")", ":", "result", "=", "self", ".", "_android_api", ".", "queue", "(", "media_types", "=", "'|'", ".", "join", "(", "media_types", ")", ")", "return", "[", "queue_item", "[", "'series'", "]", "for", "queue_item", "in", "result", "]" ]
List the series in the queue, optionally filtering by type of media @param list<str> media_types a list of media types to filter the queue with, should be of META.TYPE_* @return list<crunchyroll.models.Series>
[ "List", "the", "series", "in", "the", "queue", "optionally", "filtering", "by", "type", "of", "media" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/meta.py#L424-L432
aheadley/python-crunchyroll
crunchyroll/apis/meta.py
MetaApi.add_to_queue
def add_to_queue(self, series): """Add a series to the queue @param crunchyroll.models.Series series @return bool """ result = self._android_api.add_to_queue(series_id=series.series_id) return result
python
def add_to_queue(self, series): """Add a series to the queue @param crunchyroll.models.Series series @return bool """ result = self._android_api.add_to_queue(series_id=series.series_id) return result
[ "def", "add_to_queue", "(", "self", ",", "series", ")", ":", "result", "=", "self", ".", "_android_api", ".", "add_to_queue", "(", "series_id", "=", "series", ".", "series_id", ")", "return", "result" ]
Add a series to the queue @param crunchyroll.models.Series series @return bool
[ "Add", "a", "series", "to", "the", "queue" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/meta.py#L435-L442
aheadley/python-crunchyroll
crunchyroll/apis/meta.py
MetaApi.remove_from_queue
def remove_from_queue(self, series): """Remove a series from the queue @param crunchyroll.models.Series series @return bool """ result = self._android_api.remove_from_queue(series_id=series.series_id) return result
python
def remove_from_queue(self, series): """Remove a series from the queue @param crunchyroll.models.Series series @return bool """ result = self._android_api.remove_from_queue(series_id=series.series_id) return result
[ "def", "remove_from_queue", "(", "self", ",", "series", ")", ":", "result", "=", "self", ".", "_android_api", ".", "remove_from_queue", "(", "series_id", "=", "series", ".", "series_id", ")", "return", "result" ]
Remove a series from the queue @param crunchyroll.models.Series series @return bool
[ "Remove", "a", "series", "from", "the", "queue" ]
train
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/meta.py#L445-L452
vberlier/nbtlib
nbtlib/schema.py
schema
def schema(name, dct, *, strict=False): """Create a compound tag schema. This function is a short convenience function that makes it easy to subclass the base `CompoundSchema` class. The `name` argument is the name of the class and `dct` should be a dictionary containing the actual schema. The schema should map keys to tag types or other compound schemas. If the `strict` keyword only argument is set to True, interacting with keys that are not defined in the schema will raise a `TypeError`. """ return type(name, (CompoundSchema,), {'__slots__': (), 'schema': dct, 'strict': strict})
python
def schema(name, dct, *, strict=False): """Create a compound tag schema. This function is a short convenience function that makes it easy to subclass the base `CompoundSchema` class. The `name` argument is the name of the class and `dct` should be a dictionary containing the actual schema. The schema should map keys to tag types or other compound schemas. If the `strict` keyword only argument is set to True, interacting with keys that are not defined in the schema will raise a `TypeError`. """ return type(name, (CompoundSchema,), {'__slots__': (), 'schema': dct, 'strict': strict})
[ "def", "schema", "(", "name", ",", "dct", ",", "*", ",", "strict", "=", "False", ")", ":", "return", "type", "(", "name", ",", "(", "CompoundSchema", ",", ")", ",", "{", "'__slots__'", ":", "(", ")", ",", "'schema'", ":", "dct", ",", "'strict'", ":", "strict", "}", ")" ]
Create a compound tag schema. This function is a short convenience function that makes it easy to subclass the base `CompoundSchema` class. The `name` argument is the name of the class and `dct` should be a dictionary containing the actual schema. The schema should map keys to tag types or other compound schemas. If the `strict` keyword only argument is set to True, interacting with keys that are not defined in the schema will raise a `TypeError`.
[ "Create", "a", "compound", "tag", "schema", ".", "This", "function", "is", "a", "short", "convenience", "function", "that", "makes", "it", "easy", "to", "subclass", "the", "base", "CompoundSchema", "class", ".", "The", "name", "argument", "is", "the", "name", "of", "the", "class", "and", "dct", "should", "be", "a", "dictionnary", "containing", "the", "actual", "schema", ".", "The", "schema", "should", "map", "keys", "to", "tag", "types", "or", "other", "compound", "schemas", ".", "If", "the", "strict", "keyword", "only", "argument", "is", "set", "to", "True", "interacting", "with", "keys", "that", "are", "not", "defined", "in", "the", "schema", "will", "raise", "a", "TypeError", "." ]
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/schema.py#L17-L32
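A small sketch of the schema() helper documented above; Int and String are standard nbtlib tag types, though the exact import locations are assumed here.

    from nbtlib import schema  # assumed re-export of nbtlib/schema.py
    from nbtlib.tag import Int, String  # assumed import path for the tag types

    # Keys map to tag types; assigned values are cast on the fly.
    Position = schema('Position', {'x': Int, 'y': Int, 'z': Int})
    pos = Position({'x': 1, 'y': 2, 'z': 3})  # plain ints become Int tags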
vberlier/nbtlib
nbtlib/schema.py
CompoundSchema.cast_item
def cast_item(cls, key, value): """Cast schema item to the appropriate tag type.""" schema_type = cls.schema.get(key) if schema_type is None: if cls.strict: raise TypeError(f'Invalid key {key!r}') elif not isinstance(value, schema_type): try: return schema_type(value) except CastError: raise except Exception as exc: raise CastError(value, schema_type) from exc return value
python
def cast_item(cls, key, value): """Cast schema item to the appropriate tag type.""" schema_type = cls.schema.get(key) if schema_type is None: if cls.strict: raise TypeError(f'Invalid key {key!r}') elif not isinstance(value, schema_type): try: return schema_type(value) except CastError: raise except Exception as exc: raise CastError(value, schema_type) from exc return value
[ "def", "cast_item", "(", "cls", ",", "key", ",", "value", ")", ":", "schema_type", "=", "cls", ".", "schema", ".", "get", "(", "key", ")", "if", "schema_type", "is", "None", ":", "if", "cls", ".", "strict", ":", "raise", "TypeError", "(", "f'Invalid key {key!r}'", ")", "elif", "not", "isinstance", "(", "value", ",", "schema_type", ")", ":", "try", ":", "return", "schema_type", "(", "value", ")", "except", "CastError", ":", "raise", "except", "Exception", "as", "exc", ":", "raise", "CastError", "(", "value", ",", "schema_type", ")", "from", "exc", "return", "value" ]
Cast schema item to the appropriate tag type.
[ "Cast", "schema", "item", "to", "the", "appropriate", "tag", "type", "." ]
train
https://github.com/vberlier/nbtlib/blob/9c9d58b5c4a530b0f1ffd76dda176f00406c3547/nbtlib/schema.py#L73-L86
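Continuing the sketch above, a strict schema demonstrates the TypeError branch of cast_item(); the class and key names are illustrative.

    from nbtlib import schema  # assumed re-export, as above
    from nbtlib.tag import String  # assumed import path

    Strict = schema('StrictEntry', {'name': String}, strict=True)
    entry = Strict({'name': 'ok'})  # 'ok' is cast to a String tag
    try:
        entry['unexpected'] = String('boom')  # key not defined in the schema
    except TypeError as exc:
        print(exc)  # "Invalid key 'unexpected'"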
ales-erjavec/anyqt
AnyQt/_backport/_utils.py
obsolete_rename
def obsolete_rename(oldname, newfunc): """ Simple obsolete/removed method decorator Parameters ---------- oldname : str The name of the old obsolete method newfunc : FunctionType Replacement unbound member function. """ newname = newfunc.__name__ def __obsolete(*args, **kwargs): warnings.warn( "{oldname} is obsolete and is removed in PyQt5. " "Use {newname} instead.".format(oldname=oldname, newname=newname), DeprecationWarning, stacklevel=2 ) return newfunc(*args, **kwargs) __obsolete.__name__ = oldname return __obsolete
python
def obsolete_rename(oldname, newfunc): """ Simple obsolete/removed method decorator Parameters ---------- oldname : str The name of the old obsolete method newfunc : FunctionType Replacement unbound member function. """ newname = newfunc.__name__ def __obsolete(*args, **kwargs): warnings.warn( "{oldname} is obsolete and is removed in PyQt5. " "Use {newname} instead.".format(oldname=oldname, newname=newname), DeprecationWarning, stacklevel=2 ) return newfunc(*args, **kwargs) __obsolete.__name__ = oldname return __obsolete
[ "def", "obsolete_rename", "(", "oldname", ",", "newfunc", ")", ":", "newname", "=", "newfunc", ".", "__name__", "def", "__obsolete", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "\"{oldname} is obsolete and is removed in PyQt5. \"", "\"Use {newname} instead.\"", ".", "format", "(", "oldname", "=", "oldname", ",", "newname", "=", "newname", ")", ",", "DeprecationWarning", ",", "stacklevel", "=", "2", ")", "return", "newfunc", "(", "*", "args", ",", "*", "*", "kwargs", ")", "__obsolete", ".", "__name__", "=", "oldname", "return", "__obsolete" ]
Simple obsolete/removed method decorator Parameters ---------- oldname : str The name of the old obsolete method newfunc : FunctionType Replacement unbound member function.
[ "Simple", "obsolete", "/", "removed", "method", "decorator" ]
train
https://github.com/ales-erjavec/anyqt/blob/07b73c5ccb8f73f70fc6566249c0c7228fc9b921/AnyQt/_backport/_utils.py#L4-L25
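A standalone sketch of the decorator above; obsolete_rename lives in a private AnyQt module per the record, and the class and method names below are hypothetical.

    import warnings
    from AnyQt._backport._utils import obsolete_rename  # private path from the record

    def set_size(self, w, h):
        self._size = (w, h)

    class Widget(object):
        setSize = set_size
        # Old name forwards to the new function, emitting a DeprecationWarning.
        resize = obsolete_rename('resize', set_size)

    w = Widget()
    with warnings.catch_warnings(record=True):
        warnings.simplefilter("always")
        w.resize(10, 20)  # warns, then calls set_size(w, 10, 20)
    assert w._size == (10, 20)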
zagaran/mongobackup
mongobackup/shell.py
call
def call(command, silent=False): """ Runs a bash command safely, with shell=False, catches any non-zero return codes. Raises slightly modified CalledProcessError exceptions on failures. Note: command is a string and cannot include pipes.""" try: if silent: with open(os.devnull, 'w') as FNULL: return subprocess.check_call(command_to_array(command), stdout=FNULL) else: # Using the defaults, shell=False, no i/o redirection. return check_call(command_to_array(command)) except CalledProcessError as e: # We are modifying the error itself for 2 reasons. 1) it WILL contain # login credentials when run_mongodump is run, 2) CalledProcessError is # slightly not-to-spec (the message variable is blank), which means # cronutils.ErrorHandler would report unlabeled stack traces. e.message = "%s failed with error code %s" % (e.cmd[0], e.returncode) e.cmd = e.cmd[0] + " [arguments stripped for security]" raise e
python
def call(command, silent=False): """ Runs a bash command safely, with shell=False, catches any non-zero return codes. Raises slightly modified CalledProcessError exceptions on failures. Note: command is a string and cannot include pipes.""" try: if silent: with open(os.devnull, 'w') as FNULL: return subprocess.check_call(command_to_array(command), stdout=FNULL) else: # Using the defaults, shell=False, no i/o redirection. return check_call(command_to_array(command)) except CalledProcessError as e: # We are modifying the error itself for 2 reasons. 1) it WILL contain # login credentials when run_mongodump is run, 2) CalledProcessError is # slightly not-to-spec (the message variable is blank), which means # cronutils.ErrorHandler would report unlabeled stack traces. e.message = "%s failed with error code %s" % (e.cmd[0], e.returncode) e.cmd = e.cmd[0] + " [arguments stripped for security]" raise e
[ "def", "call", "(", "command", ",", "silent", "=", "False", ")", ":", "try", ":", "if", "silent", ":", "with", "open", "(", "os", ".", "devnull", ",", "'w'", ")", "as", "FNULL", ":", "return", "subprocess", ".", "check_call", "(", "command_to_array", "(", "command", ")", ",", "stdout", "=", "FNULL", ")", "else", ":", "# Using the defaults, shell=False, no i/o redirection.", "return", "check_call", "(", "command_to_array", "(", "command", ")", ")", "except", "CalledProcessError", "as", "e", ":", "# We are modifying the error itself for 2 reasons. 1) it WILL contain", "# login credentials when run_mongodump is run, 2) CalledProcessError is", "# slightly not-to-spec (the message variable is blank), which means", "# cronutils.ErrorHandler would report unlabeled stack traces.", "e", ".", "message", "=", "\"%s failed with error code %s\"", "%", "(", "e", ".", "cmd", "[", "0", "]", ",", "e", ".", "returncode", ")", "e", ".", "cmd", "=", "e", ".", "cmd", "[", "0", "]", "+", "\" [arguments stripped for security]\"", "raise", "e" ]
Runs a bash command safely, with shell=False, catches any non-zero return codes. Raises slightly modified CalledProcessError exceptions on failures. Note: command is a string and cannot include pipes.
[ "Runs", "a", "bash", "command", "safely", "with", "shell", "=", "false", "catches", "any", "non", "-", "zero", "return", "codes", ".", "Raises", "slightly", "modified", "CalledProcessError", "exceptions", "on", "failures", ".", "Note", ":", "command", "is", "a", "string", "and", "cannot", "include", "pipes", "." ]
train
https://github.com/zagaran/mongobackup/blob/d090d0cca44ecd066974c4de80edca5f26b7eeea/mongobackup/shell.py#L35-L54
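A brief usage note for call(): because shell=False, the command string is split into an argv list and pipes or redirection are unavailable. A sketch with an illustrative command:

    from mongobackup.shell import call  # module path taken from the record

    # Runs `ls -l /tmp` without a shell; raises a sanitized
    # CalledProcessError on a non-zero exit code.
    call("ls -l /tmp", silent=True)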
zagaran/mongobackup
mongobackup/shell.py
tarbz
def tarbz(source_directory_path, output_file_full_path, silent=False): """ Tars and bzips a directory, preserving as much metadata as possible. Adds '.tbz' to the provided output file name. """ output_directory_path = output_file_full_path.rsplit("/", 1)[0] create_folders(output_directory_path) # Note: default compression for bzip is supposed to be -9, highest compression. full_tar_file_path = output_file_full_path + ".tbz" if path.exists(full_tar_file_path): raise Exception("%s already exists, aborting." % (full_tar_file_path)) # preserve permissions, create file, use files (not tape devices), preserve # access time. tar is the only program in the universe to use (dstn, src). tar_command = ("tar jpcfvC %s %s %s" % (full_tar_file_path, source_directory_path, "./")) call(tar_command, silent=silent) return full_tar_file_path
python
def tarbz(source_directory_path, output_file_full_path, silent=False): """ Tars and bzips a directory, preserving as much metadata as possible. Adds '.tbz' to the provided output file name. """ output_directory_path = output_file_full_path.rsplit("/", 1)[0] create_folders(output_directory_path) # Note: default compression for bzip is supposed to be -9, highest compression. full_tar_file_path = output_file_full_path + ".tbz" if path.exists(full_tar_file_path): raise Exception("%s already exists, aborting." % (full_tar_file_path)) # preserve permissions, create file, use files (not tape devices), preserve # access time. tar is the only program in the universe to use (dstn, src). tar_command = ("tar jpcfvC %s %s %s" % (full_tar_file_path, source_directory_path, "./")) call(tar_command, silent=silent) return full_tar_file_path
[ "def", "tarbz", "(", "source_directory_path", ",", "output_file_full_path", ",", "silent", "=", "False", ")", ":", "output_directory_path", "=", "output_file_full_path", ".", "rsplit", "(", "\"/\"", ",", "1", ")", "[", "0", "]", "create_folders", "(", "output_directory_path", ")", "# Note: default compression for bzip is supposed to be -9, highest compression.", "full_tar_file_path", "=", "output_file_full_path", "+", "\".tbz\"", "if", "path", ".", "exists", "(", "full_tar_file_path", ")", ":", "raise", "Exception", "(", "\"%s already exists, aborting.\"", "%", "(", "full_tar_file_path", ")", ")", "# preserve permissions, create file, use files (not tape devices), preserve", "# access time. tar is the only program in the universe to use (dstn, src).", "tar_command", "=", "(", "\"tar jpcfvC %s %s %s\"", "%", "(", "full_tar_file_path", ",", "source_directory_path", ",", "\"./\"", ")", ")", "call", "(", "tar_command", ",", "silent", "=", "silent", ")", "return", "full_tar_file_path" ]
Tars and bzips a directory, preserving as much metadata as possible. Adds '.tbz' to the provided output file name.
[ "Tars", "and", "bzips", "a", "directory", "preserving", "as", "much", "metadata", "as", "possible", ".", "Adds", ".", "tbz", "to", "the", "provided", "output", "file", "name", "." ]
train
https://github.com/zagaran/mongobackup/blob/d090d0cca44ecd066974c4de80edca5f26b7eeea/mongobackup/shell.py#L64-L79
zagaran/mongobackup
mongobackup/shell.py
untarbz
def untarbz(source_file_path, output_directory_path, silent=False): """ Restores your mongo database backup from a .tbz created using this library. This function will ensure that a directory is created at the file path if one does not exist already. If used in conjunction with this library's mongodump operation, the backup data will be extracted directly into the provided directory path. This command will fail if the output directory is not empty as existing files with identical names are not overwritten by tar. """ if not path.exists(source_file_path): raise Exception("the provided tar file %s does not exist." % (source_file_path)) if output_directory_path[0:2] == "./": output_directory_path = path.abspath(output_directory_path) if output_directory_path[0] != "/": raise Exception("your output directory path must start with '/' or './'; you used: %s" % (output_directory_path)) create_folders(output_directory_path) if listdir(output_directory_path): raise Exception("Your output directory isn't empty. Aborting as " + "existing files are not overwritten by tar.") untar_command = ("tar jxfvkCp %s %s --atime-preserve " % (source_file_path, output_directory_path)) call(untar_command, silent=silent)
python
def untarbz(source_file_path, output_directory_path, silent=False): """ Restores your mongo database backup from a .tbz created using this library. This function will ensure that a directory is created at the file path if one does not exist already. If used in conjunction with this library's mongodump operation, the backup data will be extracted directly into the provided directory path. This command will fail if the output directory is not empty as existing files with identical names are not overwritten by tar. """ if not path.exists(source_file_path): raise Exception("the provided tar file %s does not exist." % (source_file_path)) if output_directory_path[0:2] == "./": output_directory_path = path.abspath(output_directory_path) if output_directory_path[0] != "/": raise Exception("your output directory path must start with '/' or './'; you used: %s" % (output_directory_path)) create_folders(output_directory_path) if listdir(output_directory_path): raise Exception("Your output directory isn't empty. Aborting as " + "existing files are not overwritten by tar.") untar_command = ("tar jxfvkCp %s %s --atime-preserve " % (source_file_path, output_directory_path)) call(untar_command, silent=silent)
[ "def", "untarbz", "(", "source_file_path", ",", "output_directory_path", ",", "silent", "=", "False", ")", ":", "if", "not", "path", ".", "exists", "(", "source_file_path", ")", ":", "raise", "Exception", "(", "\"the provided tar file %s does not exist.\"", "%", "(", "source_file_path", ")", ")", "if", "output_directory_path", "[", "0", ":", "1", "]", "==", "\"./\"", ":", "output_directory_path", "=", "path", ".", "abspath", "(", "output_directory_path", ")", "if", "output_directory_path", "[", "0", "]", "!=", "\"/\"", ":", "raise", "Exception", "(", "\"your output directory path must start with '/' or './'; you used: %s\"", "%", "(", "output_directory_path", ")", ")", "create_folders", "(", "output_directory_path", ")", "if", "listdir", "(", "output_directory_path", ")", ":", "raise", "Exception", "(", "\"Your output directory isn't empty. Aborting as \"", "+", "\"exiting files are not overwritten by tar.\"", ")", "untar_command", "=", "(", "\"tar jxfvkCp %s %s --atime-preserve \"", "%", "(", "source_file_path", ",", "output_directory_path", ")", ")", "call", "(", "untar_command", ",", "silent", "=", "silent", ")" ]
Restores your mongo database backup from a .tbz created using this library. This function will ensure that a directory is created at the file path if one does not exist already. If used in conjunction with this library's mongodump operation, the backup data will be extracted directly into the provided directory path. This command will fail if the output directory is not empty as existing files with identical names are not overwritten by tar.
[ "Restores", "your", "mongo", "database", "backup", "from", "a", ".", "tbz", "created", "using", "this", "library", ".", "This", "function", "will", "ensure", "that", "a", "directory", "is", "created", "at", "the", "file", "path", "if", "one", "does", "not", "exist", "already", ".", "If", "used", "in", "conjunction", "with", "this", "library", "s", "mongodump", "operation", "the", "backup", "data", "will", "be", "extracted", "directly", "into", "the", "provided", "directory", "path", ".", "This", "command", "will", "fail", "if", "the", "output", "directory", "is", "not", "empty", "as", "existing", "files", "with", "identical", "names", "are", "not", "overwritten", "by", "tar", "." ]
train
https://github.com/zagaran/mongobackup/blob/d090d0cca44ecd066974c4de80edca5f26b7eeea/mongobackup/shell.py#L82-L108
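A round-trip sketch using tarbz() and untarbz() from the records above; all paths are hypothetical, and note that untarbz() requires an empty (or not-yet-existing) output directory whose path starts with '/' or './'.

    from mongobackup.shell import tarbz, untarbz  # module path from the records

    archive = tarbz("/var/backups/mongo_dump", "/var/backups/mongo_dump_2024")
    # tarbz() appends ".tbz", so archive == "/var/backups/mongo_dump_2024.tbz"
    untarbz(archive, "/var/restore/mongo_dump")  # target directory must be empty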
gdub/python-simpleldap
simpleldap/__init__.py
LDAPItem.value_contains
def value_contains(self, value, attribute): """ Determine if any of the items in the value list for the given attribute contain value. """ for item in self[attribute]: if value in item: return True return False
python
def value_contains(self, value, attribute): """ Determine if any of the items in the value list for the given attribute contain value. """ for item in self[attribute]: if value in item: return True return False
[ "def", "value_contains", "(", "self", ",", "value", ",", "attribute", ")", ":", "for", "item", "in", "self", "[", "attribute", "]", ":", "if", "value", "in", "item", ":", "return", "True", "return", "False" ]
Determine if any of the items in the value list for the given attribute contain value.
[ "Determine", "if", "any", "of", "the", "items", "in", "the", "value", "list", "for", "the", "given", "attribute", "contain", "value", "." ]
train
https://github.com/gdub/python-simpleldap/blob/a833f444d90ad2f3fe779c3e2cb08350052fedc8/simpleldap/__init__.py#L64-L72
gdub/python-simpleldap
simpleldap/__init__.py
Connection.clear_search_defaults
def clear_search_defaults(self, args=None): """ Clear all search defaults specified by the list of parameter names given as ``args``. If ``args`` is not given, then clear all existing search defaults. Examples:: conn.set_search_defaults(scope=ldap.SCOPE_BASE, attrs=['cn']) conn.clear_search_defaults(['scope']) conn.clear_search_defaults() """ if args is None: self._search_defaults.clear() else: for arg in args: if arg in self._search_defaults: del self._search_defaults[arg]
python
def clear_search_defaults(self, args=None): """ Clear all search defaults specified by the list of parameter names given as ``args``. If ``args`` is not given, then clear all existing search defaults. Examples:: conn.set_search_defaults(scope=ldap.SCOPE_BASE, attrs=['cn']) conn.clear_search_defaults(['scope']) conn.clear_search_defaults() """ if args is None: self._search_defaults.clear() else: for arg in args: if arg in self._search_defaults: del self._search_defaults[arg]
[ "def", "clear_search_defaults", "(", "self", ",", "args", "=", "None", ")", ":", "if", "args", "is", "None", ":", "self", ".", "_search_defaults", ".", "clear", "(", ")", "else", ":", "for", "arg", "in", "args", ":", "if", "arg", "in", "self", ".", "_search_defaults", ":", "del", "self", ".", "_search_defaults", "[", "arg", "]" ]
Clear all search defaults specified by the list of parameter names given as ``args``. If ``args`` is not given, then clear all existing search defaults. Examples:: conn.set_search_defaults(scope=ldap.SCOPE_BASE, attrs=['cn']) conn.clear_search_defaults(['scope']) conn.clear_search_defaults()
[ "Clear", "all", "search", "defaults", "specified", "by", "the", "list", "of", "parameter", "names", "given", "as", "args", ".", "If", "args", "is", "not", "given", "then", "clear", "all", "existing", "search", "defaults", "." ]
train
https://github.com/gdub/python-simpleldap/blob/a833f444d90ad2f3fe779c3e2cb08350052fedc8/simpleldap/__init__.py#L188-L205
gdub/python-simpleldap
simpleldap/__init__.py
Connection.search
def search(self, filter, base_dn=None, attrs=None, scope=None, timeout=None, limit=None): """ Search the directory. """ if base_dn is None: base_dn = self._search_defaults.get('base_dn', '') if attrs is None: attrs = self._search_defaults.get('attrs', None) if scope is None: scope = self._search_defaults.get('scope', ldap.SCOPE_SUBTREE) if timeout is None: timeout = self._search_defaults.get('timeout', -1) if limit is None: limit = self._search_defaults.get('limit', 0) results = self.connection.search_ext_s( base_dn, scope, filter, attrs, timeout=timeout, sizelimit=limit) return self.to_items(results)
python
def search(self, filter, base_dn=None, attrs=None, scope=None, timeout=None, limit=None): """ Search the directory. """ if base_dn is None: base_dn = self._search_defaults.get('base_dn', '') if attrs is None: attrs = self._search_defaults.get('attrs', None) if scope is None: scope = self._search_defaults.get('scope', ldap.SCOPE_SUBTREE) if timeout is None: timeout = self._search_defaults.get('timeout', -1) if limit is None: limit = self._search_defaults.get('limit', 0) results = self.connection.search_ext_s( base_dn, scope, filter, attrs, timeout=timeout, sizelimit=limit) return self.to_items(results)
[ "def", "search", "(", "self", ",", "filter", ",", "base_dn", "=", "None", ",", "attrs", "=", "None", ",", "scope", "=", "None", ",", "timeout", "=", "None", ",", "limit", "=", "None", ")", ":", "if", "base_dn", "is", "None", ":", "base_dn", "=", "self", ".", "_search_defaults", ".", "get", "(", "'base_dn'", ",", "''", ")", "if", "attrs", "is", "None", ":", "attrs", "=", "self", ".", "_search_defaults", ".", "get", "(", "'attrs'", ",", "None", ")", "if", "scope", "is", "None", ":", "scope", "=", "self", ".", "_search_defaults", ".", "get", "(", "'scope'", ",", "ldap", ".", "SCOPE_SUBTREE", ")", "if", "timeout", "is", "None", ":", "timeout", "=", "self", ".", "_search_defaults", ".", "get", "(", "'timeout'", ",", "-", "1", ")", "if", "limit", "is", "None", ":", "limit", "=", "self", ".", "_search_defaults", ".", "get", "(", "'limit'", ",", "0", ")", "results", "=", "self", ".", "connection", ".", "search_ext_s", "(", "base_dn", ",", "scope", ",", "filter", ",", "attrs", ",", "timeout", "=", "timeout", ",", "sizelimit", "=", "limit", ")", "return", "self", ".", "to_items", "(", "results", ")" ]
Search the directory.
[ "Search", "the", "directory", "." ]
train
https://github.com/gdub/python-simpleldap/blob/a833f444d90ad2f3fe779c3e2cb08350052fedc8/simpleldap/__init__.py#L219-L237
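A hedged sketch of the search-defaults machinery above; set_search_defaults is referenced by the clear_search_defaults docstring, while the Connection constructor arguments and top-level export are assumptions.

    import ldap  # python-ldap supplies the SCOPE_* constants used by search()
    from simpleldap import Connection  # assumed top-level export

    conn = Connection('ldap.example.com')  # constructor signature is an assumption
    conn.set_search_defaults(base_dn='dc=example,dc=com', attrs=['cn', 'mail'])
    # Explicit keyword arguments override the stored defaults per call.
    people = conn.search('(objectClass=person)', scope=ldap.SCOPE_ONELEVEL, limit=10)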
gdub/python-simpleldap
simpleldap/__init__.py
Connection.get
def get(self, *args, **kwargs): """ Get a single object. This is a convenience wrapper for the search method that checks that only one object was returned, and returns that single object instead of a list. This method takes the exact same arguments as search. """ results = self.search(*args, **kwargs) num_results = len(results) if num_results == 1: return results[0] if num_results > 1: raise MultipleObjectsFound() raise ObjectNotFound()
python
def get(self, *args, **kwargs): """ Get a single object. This is a convenience wrapper for the search method that checks that only one object was returned, and returns that single object instead of a list. This method takes the exact same arguments as search. """ results = self.search(*args, **kwargs) num_results = len(results) if num_results == 1: return results[0] if num_results > 1: raise MultipleObjectsFound() raise ObjectNotFound()
[ "def", "get", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "results", "=", "self", ".", "search", "(", "*", "args", ",", "*", "*", "kwargs", ")", "num_results", "=", "len", "(", "results", ")", "if", "num_results", "==", "1", ":", "return", "results", "[", "0", "]", "if", "num_results", ">", "1", ":", "raise", "MultipleObjectsFound", "(", ")", "raise", "ObjectNotFound", "(", ")" ]
Get a single object. This is a convenience wrapper for the search method that checks that only one object was returned, and returns that single object instead of a list. This method takes the exact same arguments as search.
[ "Get", "a", "single", "object", "." ]
train
https://github.com/gdub/python-simpleldap/blob/a833f444d90ad2f3fe779c3e2cb08350052fedc8/simpleldap/__init__.py#L239-L253
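The single-object helper above pairs naturally with the two exceptions it raises; a sketch, assuming both are importable from the package root alongside Connection.

    from simpleldap import Connection, ObjectNotFound, MultipleObjectsFound

    conn = Connection('ldap.example.com')  # constructor signature is an assumption
    try:
        user = conn.get('(uid=jdoe)', base_dn='ou=people,dc=example,dc=com')
    except ObjectNotFound:
        user = None   # zero matches
    except MultipleObjectsFound:
        raise         # more than one match indicates a data problem here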
gdub/python-simpleldap
simpleldap/__init__.py
Connection.authenticate
def authenticate(self, dn='', password=''): """ Attempt to authenticate given dn and password using a bind operation. Return True if the bind is successful, and return False if there was an exception raised that is contained in self.failed_authentication_exceptions. """ try: self.connection.simple_bind_s(dn, password) except tuple(self.failed_authentication_exceptions): return False else: return True
python
def authenticate(self, dn='', password=''): """ Attempt to authenticate given dn and password using a bind operation. Return True if the bind is successful, and return False if there was an exception raised that is contained in self.failed_authentication_exceptions. """ try: self.connection.simple_bind_s(dn, password) except tuple(self.failed_authentication_exceptions): return False else: return True
[ "def", "authenticate", "(", "self", ",", "dn", "=", "''", ",", "password", "=", "''", ")", ":", "try", ":", "self", ".", "connection", ".", "simple_bind_s", "(", "dn", ",", "password", ")", "except", "tuple", "(", "self", ".", "failed_authentication_exceptions", ")", ":", "return", "False", "else", ":", "return", "True" ]
Attempt to authenticate given dn and password using a bind operation. Return True if the bind is successful, and return False if there was an exception raised that is contained in self.failed_authentication_exceptions.
[ "Attempt", "to", "authenticate", "given", "dn", "and", "password", "using", "a", "bind", "operation", ".", "Return", "True", "if", "the", "bind", "is", "successful", "and", "return", "False", "there", "was", "an", "exception", "raised", "that", "is", "contained", "in", "self", ".", "failed_authentication_exceptions", "." ]
train
https://github.com/gdub/python-simpleldap/blob/a833f444d90ad2f3fe779c3e2cb08350052fedc8/simpleldap/__init__.py#L262-L274
gdub/python-simpleldap
simpleldap/__init__.py
Connection.compare
def compare(self, dn, attr, value): """ Compare the ``attr`` of the entry ``dn`` with given ``value``. This is a convenience wrapper for the ldap library's ``compare`` function that returns a boolean value instead of 1 or 0. """ return self.connection.compare_s(dn, attr, value) == 1
python
def compare(self, dn, attr, value): """ Compare the ``attr`` of the entry ``dn`` with given ``value``. This is a convenience wrapper for the ldap library's ``compare`` function that returns a boolean value instead of 1 or 0. """ return self.connection.compare_s(dn, attr, value) == 1
[ "def", "compare", "(", "self", ",", "dn", ",", "attr", ",", "value", ")", ":", "return", "self", ".", "connection", ".", "compare_s", "(", "dn", ",", "attr", ",", "value", ")", "==", "1" ]
Compare the ``attr`` of the entry ``dn`` with given ``value``. This is a convenience wrapper for the ldap library's ``compare`` function that returns a boolean value instead of 1 or 0.
[ "Compare", "the", "attr", "of", "the", "entry", "dn", "with", "given", "value", "." ]
train
https://github.com/gdub/python-simpleldap/blob/a833f444d90ad2f3fe779c3e2cb08350052fedc8/simpleldap/__init__.py#L276-L283
crccheck/cloudwatch-to-graphite
plumbum.py
get_property_func
def get_property_func(key): """ Get the accessor function for an instance to look for `key`. Look for it as an attribute, and if that does not work, look to see if it is a tag. """ def get_it(obj): try: return getattr(obj, key) except AttributeError: return obj.tags.get(key) return get_it
python
def get_property_func(key): """ Get the accessor function for an instance to look for `key`. Look for it as an attribute, and if that does not work, look to see if it is a tag. """ def get_it(obj): try: return getattr(obj, key) except AttributeError: return obj.tags.get(key) return get_it
[ "def", "get_property_func", "(", "key", ")", ":", "def", "get_it", "(", "obj", ")", ":", "try", ":", "return", "getattr", "(", "obj", ",", "key", ")", "except", "AttributeError", ":", "return", "obj", ".", "tags", ".", "get", "(", "key", ")", "return", "get_it" ]
Get the accessor function for an instance to look for `key`. Look for it as an attribute, and if that does not work, look to see if it is a tag.
[ "Get", "the", "accessor", "function", "for", "an", "instance", "to", "look", "for", "key", "." ]
train
https://github.com/crccheck/cloudwatch-to-graphite/blob/28a11ee56f7231cef6b6f8af142a8aab3d2eb5a6/plumbum.py#L59-L71
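A standalone sketch of the attribute-then-tag fallback implemented above, using a stand-in object instead of a real boto instance; FakeInstance is purely illustrative, and note this imports the repo's own plumbum.py, not the unrelated PyPI 'plumbum' package.

    from plumbum import get_property_func  # this repo's plumbum.py module

    class FakeInstance(object):
        def __init__(self):
            self.id = 'i-0123456789abcdef0'
            self.tags = {'Name': 'web-1'}

    inst = FakeInstance()
    assert get_property_func('id')(inst) == 'i-0123456789abcdef0'  # found as attribute
    assert get_property_func('Name')(inst) == 'web-1'              # falls back to tags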
crccheck/cloudwatch-to-graphite
plumbum.py
list_billing
def list_billing(region, filter_by_kwargs): """List available billing metrics""" conn = boto.ec2.cloudwatch.connect_to_region(region) metrics = conn.list_metrics(metric_name='EstimatedCharges') # Filtering is based on metric Dimensions. Only really valuable one is # ServiceName. if filter_by_kwargs: filter_key = filter_by_kwargs.keys()[0] filter_value = filter_by_kwargs.values()[0] if filter_value: filtered_metrics = [x for x in metrics if x.dimensions.get(filter_key) and x.dimensions.get(filter_key)[0] == filter_value] else: # ServiceName='' filtered_metrics = [x for x in metrics if not x.dimensions.get(filter_key)] else: filtered_metrics = metrics return filtered_metrics
python
def list_billing(region, filter_by_kwargs): """List available billing metrics""" conn = boto.ec2.cloudwatch.connect_to_region(region) metrics = conn.list_metrics(metric_name='EstimatedCharges') # Filtering is based on metric Dimensions. Only really valuable one is # ServiceName. if filter_by_kwargs: filter_key = filter_by_kwargs.keys()[0] filter_value = filter_by_kwargs.values()[0] if filter_value: filtered_metrics = [x for x in metrics if x.dimensions.get(filter_key) and x.dimensions.get(filter_key)[0] == filter_value] else: # ServiceName='' filtered_metrics = [x for x in metrics if not x.dimensions.get(filter_key)] else: filtered_metrics = metrics return filtered_metrics
[ "def", "list_billing", "(", "region", ",", "filter_by_kwargs", ")", ":", "conn", "=", "boto", ".", "ec2", ".", "cloudwatch", ".", "connect_to_region", "(", "region", ")", "metrics", "=", "conn", ".", "list_metrics", "(", "metric_name", "=", "'EstimatedCharges'", ")", "# Filtering is based on metric Dimensions. Only really valuable one is", "# ServiceName.", "if", "filter_by_kwargs", ":", "filter_key", "=", "filter_by_kwargs", ".", "keys", "(", ")", "[", "0", "]", "filter_value", "=", "filter_by_kwargs", ".", "values", "(", ")", "[", "0", "]", "if", "filter_value", ":", "filtered_metrics", "=", "[", "x", "for", "x", "in", "metrics", "if", "x", ".", "dimensions", ".", "get", "(", "filter_key", ")", "and", "x", ".", "dimensions", ".", "get", "(", "filter_key", ")", "[", "0", "]", "==", "filter_value", "]", "else", ":", "# ServiceName=''", "filtered_metrics", "=", "[", "x", "for", "x", "in", "metrics", "if", "not", "x", ".", "dimensions", ".", "get", "(", "filter_key", ")", "]", "else", ":", "filtered_metrics", "=", "metrics", "return", "filtered_metrics" ]
List available billing metrics
[ "List", "available", "billing", "metrics" ]
train
https://github.com/crccheck/cloudwatch-to-graphite/blob/28a11ee56f7231cef6b6f8af142a8aab3d2eb5a6/plumbum.py#L111-L127
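A sketch of the two filtering modes described in the comments above; the region and service name are illustrative, and AWS credentials are assumed to be configured for boto.

    from plumbum import list_billing  # this repo's plumbum.py module

    # Only metrics whose ServiceName dimension equals 'AmazonEC2'.
    ec2_charges = list_billing('us-east-1', {'ServiceName': 'AmazonEC2'})
    # ServiceName='' selects metrics with no ServiceName dimension (the account total).
    total_charges = list_billing('us-east-1', {'ServiceName': ''})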
crccheck/cloudwatch-to-graphite
plumbum.py
list_cloudfront
def list_cloudfront(region, filter_by_kwargs): """List CloudFront distributions.""" conn = boto.connect_cloudfront() instances = conn.get_all_distributions() return lookup(instances, filter_by=filter_by_kwargs)
python
def list_cloudfront(region, filter_by_kwargs): """List CloudFront distributions.""" conn = boto.connect_cloudfront() instances = conn.get_all_distributions() return lookup(instances, filter_by=filter_by_kwargs)
[ "def", "list_cloudfront", "(", "region", ",", "filter_by_kwargs", ")", ":", "conn", "=", "boto", ".", "connect_cloudfront", "(", ")", "instances", "=", "conn", ".", "get_all_distributions", "(", ")", "return", "lookup", "(", "instances", ",", "filter_by", "=", "filter_by_kwargs", ")" ]
List CloudFront distributions.
[ "List", "running", "ec2", "instances", "." ]
train
https://github.com/crccheck/cloudwatch-to-graphite/blob/28a11ee56f7231cef6b6f8af142a8aab3d2eb5a6/plumbum.py#L130-L134
crccheck/cloudwatch-to-graphite
plumbum.py
list_ec2
def list_ec2(region, filter_by_kwargs): """List running ec2 instances.""" conn = boto.ec2.connect_to_region(region) instances = conn.get_only_instances() return lookup(instances, filter_by=filter_by_kwargs)
python
def list_ec2(region, filter_by_kwargs): """List running ec2 instances.""" conn = boto.ec2.connect_to_region(region) instances = conn.get_only_instances() return lookup(instances, filter_by=filter_by_kwargs)
[ "def", "list_ec2", "(", "region", ",", "filter_by_kwargs", ")", ":", "conn", "=", "boto", ".", "ec2", ".", "connect_to_region", "(", "region", ")", "instances", "=", "conn", ".", "get_only_instances", "(", ")", "return", "lookup", "(", "instances", ",", "filter_by", "=", "filter_by_kwargs", ")" ]
List running ec2 instances.
[ "List", "running", "ec2", "instances", "." ]
train
https://github.com/crccheck/cloudwatch-to-graphite/blob/28a11ee56f7231cef6b6f8af142a8aab3d2eb5a6/plumbum.py#L137-L141
crccheck/cloudwatch-to-graphite
plumbum.py
list_ebs
def list_ebs(region, filter_by_kwargs): """List running ebs volumes.""" conn = boto.ec2.connect_to_region(region) instances = conn.get_all_volumes() return lookup(instances, filter_by=filter_by_kwargs)
python
def list_ebs(region, filter_by_kwargs): """List running ebs volumes.""" conn = boto.ec2.connect_to_region(region) instances = conn.get_all_volumes() return lookup(instances, filter_by=filter_by_kwargs)
[ "def", "list_ebs", "(", "region", ",", "filter_by_kwargs", ")", ":", "conn", "=", "boto", ".", "ec2", ".", "connect_to_region", "(", "region", ")", "instances", "=", "conn", ".", "get_all_volumes", "(", ")", "return", "lookup", "(", "instances", ",", "filter_by", "=", "filter_by_kwargs", ")" ]
List running ebs volumes.
[ "List", "running", "ebs", "volumes", "." ]
train
https://github.com/crccheck/cloudwatch-to-graphite/blob/28a11ee56f7231cef6b6f8af142a8aab3d2eb5a6/plumbum.py#L143-L147
crccheck/cloudwatch-to-graphite
plumbum.py
list_elb
def list_elb(region, filter_by_kwargs): """List all load balancers.""" conn = boto.ec2.elb.connect_to_region(region) instances = conn.get_all_load_balancers() return lookup(instances, filter_by=filter_by_kwargs)
python
def list_elb(region, filter_by_kwargs): """List all load balancers.""" conn = boto.ec2.elb.connect_to_region(region) instances = conn.get_all_load_balancers() return lookup(instances, filter_by=filter_by_kwargs)
[ "def", "list_elb", "(", "region", ",", "filter_by_kwargs", ")", ":", "conn", "=", "boto", ".", "ec2", ".", "elb", ".", "connect_to_region", "(", "region", ")", "instances", "=", "conn", ".", "get_all_load_balancers", "(", ")", "return", "lookup", "(", "instances", ",", "filter_by", "=", "filter_by_kwargs", ")" ]
List all load balancers.
[ "List", "all", "load", "balancers", "." ]
train
https://github.com/crccheck/cloudwatch-to-graphite/blob/28a11ee56f7231cef6b6f8af142a8aab3d2eb5a6/plumbum.py#L150-L154
crccheck/cloudwatch-to-graphite
plumbum.py
list_rds
def list_rds(region, filter_by_kwargs): """List all RDS instances.""" conn = boto.rds.connect_to_region(region) instances = conn.get_all_dbinstances() return lookup(instances, filter_by=filter_by_kwargs)
python
def list_rds(region, filter_by_kwargs): """List all RDS instances.""" conn = boto.rds.connect_to_region(region) instances = conn.get_all_dbinstances() return lookup(instances, filter_by=filter_by_kwargs)
[ "def", "list_rds", "(", "region", ",", "filter_by_kwargs", ")", ":", "conn", "=", "boto", ".", "rds", ".", "connect_to_region", "(", "region", ")", "instances", "=", "conn", ".", "get_all_dbinstances", "(", ")", "return", "lookup", "(", "instances", ",", "filter_by", "=", "filter_by_kwargs", ")" ]
List all RDS instances.
[ "List", "all", "RDS", "thingys", "." ]
train
https://github.com/crccheck/cloudwatch-to-graphite/blob/28a11ee56f7231cef6b6f8af142a8aab3d2eb5a6/plumbum.py#L157-L161
crccheck/cloudwatch-to-graphite
plumbum.py
list_elasticache
def list_elasticache(region, filter_by_kwargs):
    """List all ElastiCache Clusters."""
    conn = boto.elasticache.connect_to_region(region)
    req = conn.describe_cache_clusters()
    data = req["DescribeCacheClustersResponse"]["DescribeCacheClustersResult"]["CacheClusters"]
    if filter_by_kwargs:
        clusters = [x['CacheClusterId'] for x in data if x[filter_by_kwargs.keys()[0]] == filter_by_kwargs.values()[0]]
    else:
        clusters = [x['CacheClusterId'] for x in data]
    return clusters
python
def list_elasticache(region, filter_by_kwargs):
    """List all ElastiCache Clusters."""
    conn = boto.elasticache.connect_to_region(region)
    req = conn.describe_cache_clusters()
    data = req["DescribeCacheClustersResponse"]["DescribeCacheClustersResult"]["CacheClusters"]
    if filter_by_kwargs:
        clusters = [x['CacheClusterId'] for x in data if x[filter_by_kwargs.keys()[0]] == filter_by_kwargs.values()[0]]
    else:
        clusters = [x['CacheClusterId'] for x in data]
    return clusters
[ "def", "list_elasticache", "(", "region", ",", "filter_by_kwargs", ")", ":", "conn", "=", "boto", ".", "elasticache", ".", "connect_to_region", "(", "region", ")", "req", "=", "conn", ".", "describe_cache_clusters", "(", ")", "data", "=", "req", "[", "\"DescribeCacheClustersResponse\"", "]", "[", "\"DescribeCacheClustersResult\"", "]", "[", "\"CacheClusters\"", "]", "if", "filter_by_kwargs", ":", "clusters", "=", "[", "x", "[", "'CacheClusterId'", "]", "for", "x", "in", "data", "if", "x", "[", "filter_by_kwargs", ".", "keys", "(", ")", "[", "0", "]", "]", "==", "filter_by_kwargs", ".", "values", "(", ")", "[", "0", "]", "]", "else", ":", "clusters", "=", "[", "x", "[", "'CacheClusterId'", "]", "for", "x", "in", "data", "]", "return", "clusters" ]
List all ElastiCache Clusters.
[ "List", "all", "ElastiCache", "Clusters", "." ]
train
https://github.com/crccheck/cloudwatch-to-graphite/blob/28a11ee56f7231cef6b6f8af142a8aab3d2eb5a6/plumbum.py#L164-L173
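One caveat worth flagging: `filter_by_kwargs.keys()[0]` only works on Python 2, where dict methods returned lists; on Python 3 dict views are not subscriptable and the filter branch raises TypeError. A version-agnostic rewrite of that branch, as an editorial sketch rather than part of the recorded source:

# Take the single (key, value) filter pair without subscripting dict views.
key, expected = next(iter(filter_by_kwargs.items()))
clusters = [x['CacheClusterId'] for x in data if x[key] == expected]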
crccheck/cloudwatch-to-graphite
plumbum.py
list_autoscaling_group
def list_autoscaling_group(region, filter_by_kwargs):
    """List all Auto Scaling Groups."""
    conn = boto.ec2.autoscale.connect_to_region(region)
    groups = conn.get_all_groups()
    return lookup(groups, filter_by=filter_by_kwargs)
python
def list_autoscaling_group(region, filter_by_kwargs):
    """List all Auto Scaling Groups."""
    conn = boto.ec2.autoscale.connect_to_region(region)
    groups = conn.get_all_groups()
    return lookup(groups, filter_by=filter_by_kwargs)
[ "def", "list_autoscaling_group", "(", "region", ",", "filter_by_kwargs", ")", ":", "conn", "=", "boto", ".", "ec2", ".", "autoscale", ".", "connect_to_region", "(", "region", ")", "groups", "=", "conn", ".", "get_all_groups", "(", ")", "return", "lookup", "(", "groups", ",", "filter_by", "=", "filter_by_kwargs", ")" ]
List all Auto Scaling Groups.
[ "List", "all", "Auto", "Scaling", "Groups", "." ]
train
https://github.com/crccheck/cloudwatch-to-graphite/blob/28a11ee56f7231cef6b6f8af142a8aab3d2eb5a6/plumbum.py#L176-L180
crccheck/cloudwatch-to-graphite
plumbum.py
list_sqs
def list_sqs(region, filter_by_kwargs):
    """List all SQS Queues."""
    conn = boto.sqs.connect_to_region(region)
    queues = conn.get_all_queues()
    return lookup(queues, filter_by=filter_by_kwargs)
python
def list_sqs(region, filter_by_kwargs):
    """List all SQS Queues."""
    conn = boto.sqs.connect_to_region(region)
    queues = conn.get_all_queues()
    return lookup(queues, filter_by=filter_by_kwargs)
[ "def", "list_sqs", "(", "region", ",", "filter_by_kwargs", ")", ":", "conn", "=", "boto", ".", "sqs", ".", "connect_to_region", "(", "region", ")", "queues", "=", "conn", ".", "get_all_queues", "(", ")", "return", "lookup", "(", "queues", ",", "filter_by", "=", "filter_by_kwargs", ")" ]
List all SQS Queues.
[ "List", "all", "SQS", "Queues", "." ]
train
https://github.com/crccheck/cloudwatch-to-graphite/blob/28a11ee56f7231cef6b6f8af142a8aab3d2eb5a6/plumbum.py#L183-L187
crccheck/cloudwatch-to-graphite
plumbum.py
list_kinesis_applications
def list_kinesis_applications(region, filter_by_kwargs):
    """List all the kinesis applications along with the shards for each stream"""
    conn = boto.kinesis.connect_to_region(region)
    streams = conn.list_streams()['StreamNames']
    kinesis_streams = {}
    for stream_name in streams:
        shard_ids = []
        shards = conn.describe_stream(stream_name)['StreamDescription']['Shards']
        for shard in shards:
            shard_ids.append(shard['ShardId'])
        kinesis_streams[stream_name] = shard_ids
    return kinesis_streams
python
def list_kinesis_applications(region, filter_by_kwargs):
    """List all the kinesis applications along with the shards for each stream"""
    conn = boto.kinesis.connect_to_region(region)
    streams = conn.list_streams()['StreamNames']
    kinesis_streams = {}
    for stream_name in streams:
        shard_ids = []
        shards = conn.describe_stream(stream_name)['StreamDescription']['Shards']
        for shard in shards:
            shard_ids.append(shard['ShardId'])
        kinesis_streams[stream_name] = shard_ids
    return kinesis_streams
[ "def", "list_kinesis_applications", "(", "region", ",", "filter_by_kwargs", ")", ":", "conn", "=", "boto", ".", "kinesis", ".", "connect_to_region", "(", "region", ")", "streams", "=", "conn", ".", "list_streams", "(", ")", "[", "'StreamNames'", "]", "kinesis_streams", "=", "{", "}", "for", "stream_name", "in", "streams", ":", "shard_ids", "=", "[", "]", "shards", "=", "conn", ".", "describe_stream", "(", "stream_name", ")", "[", "'StreamDescription'", "]", "[", "'Shards'", "]", "for", "shard", "in", "shards", ":", "shard_ids", ".", "append", "(", "shard", "[", "'ShardId'", "]", ")", "kinesis_streams", "[", "stream_name", "]", "=", "shard_ids", "return", "kinesis_streams" ]
List all the kinesis applications along with the shards for each stream
[ "List", "all", "the", "kinesis", "applications", "along", "with", "the", "shards", "for", "each", "stream" ]
train
https://github.com/crccheck/cloudwatch-to-graphite/blob/28a11ee56f7231cef6b6f8af142a8aab3d2eb5a6/plumbum.py#L190-L201
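Note that `filter_by_kwargs` is accepted but never used in this function. A minimal usage sketch with an illustrative region, assuming boto 2.x Kinesis support and configured credentials:

# Hypothetical call: map each stream name to its shard ids.
streams = list_kinesis_applications('us-west-2', None)
for stream_name, shard_ids in streams.items():
    print(stream_name, len(shard_ids), 'shards')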
crccheck/cloudwatch-to-graphite
plumbum.py
list_dynamodb
def list_dynamodb(region, filter_by_kwargs):
    """List all DynamoDB tables."""
    conn = boto.dynamodb.connect_to_region(region)
    tables = conn.list_tables()
    return lookup(tables, filter_by=filter_by_kwargs)
python
def list_dynamodb(region, filter_by_kwargs):
    """List all DynamoDB tables."""
    conn = boto.dynamodb.connect_to_region(region)
    tables = conn.list_tables()
    return lookup(tables, filter_by=filter_by_kwargs)
[ "def", "list_dynamodb", "(", "region", ",", "filter_by_kwargs", ")", ":", "conn", "=", "boto", ".", "dynamodb", ".", "connect_to_region", "(", "region", ")", "tables", "=", "conn", ".", "list_tables", "(", ")", "return", "lookup", "(", "tables", ",", "filter_by", "=", "filter_by_kwargs", ")" ]
List all DynamoDB tables.
[ "List", "all", "DynamoDB", "tables", "." ]
train
https://github.com/crccheck/cloudwatch-to-graphite/blob/28a11ee56f7231cef6b6f8af142a8aab3d2eb5a6/plumbum.py#L204-L208
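The listing helpers above all share the same connect/list/lookup shape, so they compose naturally into a dispatch table. A hedged sketch; the mapping and wrapper below are illustrative, not part of the recorded source:

# Hypothetical dispatch table over the recorded helpers.
LISTERS = {
    'ec2': list_ec2,
    'ebs': list_ebs,
    'elb': list_elb,
    'rds': list_rds,
    'sqs': list_sqs,
    'dynamodb': list_dynamodb,
}

def list_resources(kind, region, filters=None):
    """Route a resource kind to the matching list_* helper."""
    return LISTERS[kind](region, filters)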
pyslackers/slack-sansio
slack/actions.py
Router.register
def register(self, callback_id: str, handler: Any, name: str = "*") -> None:
    """
    Register a new handler for a specific :class:`slack.actions.Action` `callback_id`.

    Optional routing based on the action name too. The name argument is useful
    for actions of type `interactive_message` to provide a different handler
    for each individual action.

    Args:
        callback_id: Callback_id the handler is interested in
        handler: Callback
        name: Name of the action (optional).
    """
    LOG.info("Registering %s, %s to %s", callback_id, name, handler)
    if name not in self._routes[callback_id]:
        self._routes[callback_id][name] = []
    self._routes[callback_id][name].append(handler)
python
def register(self, callback_id: str, handler: Any, name: str = "*") -> None:
    """
    Register a new handler for a specific :class:`slack.actions.Action` `callback_id`.

    Optional routing based on the action name too. The name argument is useful
    for actions of type `interactive_message` to provide a different handler
    for each individual action.

    Args:
        callback_id: Callback_id the handler is interested in
        handler: Callback
        name: Name of the action (optional).
    """
    LOG.info("Registering %s, %s to %s", callback_id, name, handler)
    if name not in self._routes[callback_id]:
        self._routes[callback_id][name] = []
    self._routes[callback_id][name].append(handler)
[ "def", "register", "(", "self", ",", "callback_id", ":", "str", ",", "handler", ":", "Any", ",", "name", ":", "str", "=", "\"*\"", ")", "->", "None", ":", "LOG", ".", "info", "(", "\"Registering %s, %s to %s\"", ",", "callback_id", ",", "name", ",", "handler", ")", "if", "name", "not", "in", "self", ".", "_routes", "[", "callback_id", "]", ":", "self", ".", "_routes", "[", "callback_id", "]", "[", "name", "]", "=", "[", "]", "self", ".", "_routes", "[", "callback_id", "]", "[", "name", "]", ".", "append", "(", "handler", ")" ]
Register a new handler for a specific :class:`slack.actions.Action` `callback_id`.

Optional routing based on the action name too. The name argument is useful
for actions of type `interactive_message` to provide a different handler
for each individual action.

Args:
    callback_id: Callback_id the handler is interested in
    handler: Callback
    name: Name of the action (optional).
[ "Register", "a", "new", "handler", "for", "a", "specific", ":", "class", ":", "slack", ".", "actions", ".", "Action", "callback_id", ".", "Optional", "routing", "based", "on", "the", "action", "name", "too", "." ]
train
https://github.com/pyslackers/slack-sansio/blob/068ddd6480c6d2f9bf14fa4db498c9fe1017f4ab/slack/actions.py#L82-L99
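A short registration sketch; the assumption that `Router()` takes no constructor arguments is not confirmed by this record, and the handler and callback_id are illustrative names:

def my_handler(action):
    print('handled', action['callback_id'])

router = Router()
# Catch-all handler for the callback_id (name defaults to "*").
router.register('my_callback', my_handler)
# Handler for one specific interactive_message action name.
router.register('my_callback', my_handler, name='approve')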
pyslackers/slack-sansio
slack/actions.py
Router.dispatch
def dispatch(self, action: Action) -> Any:
    """
    Yields handlers matching the incoming :class:`slack.actions.Action` `callback_id`.

    Args:
        action: :class:`slack.actions.Action`

    Yields:
        handler
    """
    LOG.debug("Dispatching action %s, %s", action["type"], action["callback_id"])
    if action["type"] == "interactive_message":
        yield from self._dispatch_interactive_message(action)
    elif action["type"] in ("dialog_submission", "message_action"):
        yield from self._dispatch_action(action)
    else:
        raise UnknownActionType(action)
python
def dispatch(self, action: Action) -> Any:
    """
    Yields handlers matching the incoming :class:`slack.actions.Action` `callback_id`.

    Args:
        action: :class:`slack.actions.Action`

    Yields:
        handler
    """
    LOG.debug("Dispatching action %s, %s", action["type"], action["callback_id"])
    if action["type"] == "interactive_message":
        yield from self._dispatch_interactive_message(action)
    elif action["type"] in ("dialog_submission", "message_action"):
        yield from self._dispatch_action(action)
    else:
        raise UnknownActionType(action)
[ "def", "dispatch", "(", "self", ",", "action", ":", "Action", ")", "->", "Any", ":", "LOG", ".", "debug", "(", "\"Dispatching action %s, %s\"", ",", "action", "[", "\"type\"", "]", ",", "action", "[", "\"callback_id\"", "]", ")", "if", "action", "[", "\"type\"", "]", "==", "\"interactive_message\"", ":", "yield", "from", "self", ".", "_dispatch_interactive_message", "(", "action", ")", "elif", "action", "[", "\"type\"", "]", "in", "(", "\"dialog_submission\"", ",", "\"message_action\"", ")", ":", "yield", "from", "self", ".", "_dispatch_action", "(", "action", ")", "else", ":", "raise", "UnknownActionType", "(", "action", ")" ]
Yields handlers matching the incoming :class:`slack.actions.Action` `callback_id`.

Args:
    action: :class:`slack.actions.Action`

Yields:
    handler
[ "Yields", "handlers", "matching", "the", "incoming", ":", "class", ":", "slack", ".", "actions", ".", "Action", "callback_id", "." ]
train
https://github.com/pyslackers/slack-sansio/blob/068ddd6480c6d2f9bf14fa4db498c9fe1017f4ab/slack/actions.py#L101-L118
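And the matching dispatch loop; `action` is assumed to be a parsed :class:`slack.actions.Action` payload carrying the `type` and `callback_id` fields the method reads:

# dispatch() is a generator: call every handler registered for the action.
for handler in router.dispatch(action):
    handler(action)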
ales-erjavec/anyqt
AnyQt/_api.py
comittoapi
def comittoapi(api):
    """
    Commit to the use of specified Qt api.

    Raise an error if another Qt api is already loaded in sys.modules
    """
    global USED_API
    assert USED_API is None, "committoapi called again!"
    check = ["PyQt4", "PyQt5", "PySide", "PySide2"]
    assert api in [QT_API_PYQT5, QT_API_PYQT4, QT_API_PYSIDE, QT_API_PYSIDE2]
    for name in check:
        if name.lower() != api and name in sys.modules:
            raise RuntimeError(
                "{} was already imported. Cannot commit to {}!"
                .format(name, api)
            )
    else:
        api = _intern(api)
        USED_API = api
        AnyQt.__SELECTED_API = api
        AnyQt.USED_API = api
python
def comittoapi(api):
    """
    Commit to the use of specified Qt api.

    Raise an error if another Qt api is already loaded in sys.modules
    """
    global USED_API
    assert USED_API is None, "committoapi called again!"
    check = ["PyQt4", "PyQt5", "PySide", "PySide2"]
    assert api in [QT_API_PYQT5, QT_API_PYQT4, QT_API_PYSIDE, QT_API_PYSIDE2]
    for name in check:
        if name.lower() != api and name in sys.modules:
            raise RuntimeError(
                "{} was already imported. Cannot commit to {}!"
                .format(name, api)
            )
    else:
        api = _intern(api)
        USED_API = api
        AnyQt.__SELECTED_API = api
        AnyQt.USED_API = api
[ "def", "comittoapi", "(", "api", ")", ":", "global", "USED_API", "assert", "USED_API", "is", "None", ",", "\"committoapi called again!\"", "check", "=", "[", "\"PyQt4\"", ",", "\"PyQt5\"", ",", "\"PySide\"", ",", "\"PySide2\"", "]", "assert", "api", "in", "[", "QT_API_PYQT5", ",", "QT_API_PYQT4", ",", "QT_API_PYSIDE", ",", "QT_API_PYSIDE2", "]", "for", "name", "in", "check", ":", "if", "name", ".", "lower", "(", ")", "!=", "api", "and", "name", "in", "sys", ".", "modules", ":", "raise", "RuntimeError", "(", "\"{} was already imported. Cannot commit to {}!\"", ".", "format", "(", "name", ",", "api", ")", ")", "else", ":", "api", "=", "_intern", "(", "api", ")", "USED_API", "=", "api", "AnyQt", ".", "__SELECTED_API", "=", "api", "AnyQt", ".", "USED_API", "=", "api" ]
Commit to the use of specified Qt api.

Raise an error if another Qt api is already loaded in sys.modules
[ "Commit", "to", "the", "use", "of", "specified", "Qt", "api", "." ]
train
https://github.com/ales-erjavec/anyqt/blob/07b73c5ccb8f73f70fc6566249c0c7228fc9b921/AnyQt/_api.py#L23-L44
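Usage is a single call early at import time; `QT_API_PYQT5` is one of the module-level constants the assertion above checks against:

# Sketch: commit to PyQt5 before any Qt binding is imported. Raises
# RuntimeError if PyQt4/PySide/PySide2 is already in sys.modules.
comittoapi(QT_API_PYQT5)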
cakebread/yolk
yolk/metadata.py
get_metadata
def get_metadata(dist):
    """
    Return dictionary of metadata for given dist

    @param dist: distribution
    @type dist: pkg_resources Distribution object

    @returns: dict of metadata or None
    """
    if not dist.has_metadata('PKG-INFO'):
        return

    msg = email.message_from_string(dist.get_metadata('PKG-INFO'))
    metadata = {}
    for header in [l for l in msg._headers]:
        metadata[header[0]] = header[1]

    return metadata
python
def get_metadata(dist):
    """
    Return dictionary of metadata for given dist

    @param dist: distribution
    @type dist: pkg_resources Distribution object

    @returns: dict of metadata or None
    """
    if not dist.has_metadata('PKG-INFO'):
        return

    msg = email.message_from_string(dist.get_metadata('PKG-INFO'))
    metadata = {}
    for header in [l for l in msg._headers]:
        metadata[header[0]] = header[1]

    return metadata
[ "def", "get_metadata", "(", "dist", ")", ":", "if", "not", "dist", ".", "has_metadata", "(", "'PKG-INFO'", ")", ":", "return", "msg", "=", "email", ".", "message_from_string", "(", "dist", ".", "get_metadata", "(", "'PKG-INFO'", ")", ")", "metadata", "=", "{", "}", "for", "header", "in", "[", "l", "for", "l", "in", "msg", ".", "_headers", "]", ":", "metadata", "[", "header", "[", "0", "]", "]", "=", "header", "[", "1", "]", "return", "metadata" ]
Return dictionary of metadata for given dist

@param dist: distribution
@type dist: pkg_resources Distribution object

@returns: dict of metadata or None
[ "Return", "dictionary", "of", "metadata", "for", "given", "dist" ]
train
https://github.com/cakebread/yolk/blob/ee8c9f529a542d9c5eff4fe69b9c7906c802e4d8/yolk/metadata.py#L25-L43
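A minimal usage sketch, assuming setuptools' pkg_resources is available; the distribution name is illustrative:

import pkg_resources

dist = pkg_resources.get_distribution('yolk')
metadata = get_metadata(dist)
if metadata:  # None when the dist ships no PKG-INFO
    print(metadata.get('Name'), metadata.get('Version'))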
cakebread/yolk
yolk/plugins/base.py
Plugin.add_options
def add_options(self, parser):
    """Add command-line options for this plugin.

    The base plugin class adds --with-$name by default, used to enable the
    plugin.
    """
    parser.add_option("--with-%s" % self.name,
                      action="store_true",
                      dest=self.enable_opt,
                      help="Enable plugin %s: %s" %
                      (self.__class__.__name__, self.help())
                      )
python
def add_options(self, parser):
    """Add command-line options for this plugin.

    The base plugin class adds --with-$name by default, used to enable the
    plugin.
    """
    parser.add_option("--with-%s" % self.name,
                      action="store_true",
                      dest=self.enable_opt,
                      help="Enable plugin %s: %s" %
                      (self.__class__.__name__, self.help())
                      )
[ "def", "add_options", "(", "self", ",", "parser", ")", ":", "parser", ".", "add_option", "(", "\"--with-%s\"", "%", "self", ".", "name", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "self", ".", "enable_opt", ",", "help", "=", "\"Enable plugin %s: %s\"", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "self", ".", "help", "(", ")", ")", ")" ]
Add command-line options for this plugin.

The base plugin class adds --with-$name by default, used to enable the
plugin.
[ "Add", "command", "-", "line", "options", "for", "this", "plugin", "." ]
train
https://github.com/cakebread/yolk/blob/ee8c9f529a542d9c5eff4fe69b9c7906c802e4d8/yolk/plugins/base.py#L42-L53
cakebread/yolk
yolk/plugins/base.py
Plugin.configure
def configure(self, options, conf):
    """Configure the plugin and system, based on selected options.

    The base plugin class sets the plugin to enabled if the enable option
    for the plugin (self.enable_opt) is true.
    """
    self.conf = conf
    if hasattr(options, self.enable_opt):
        self.enabled = getattr(options, self.enable_opt)
python
def configure(self, options, conf):
    """Configure the plugin and system, based on selected options.

    The base plugin class sets the plugin to enabled if the enable option
    for the plugin (self.enable_opt) is true.
    """
    self.conf = conf
    if hasattr(options, self.enable_opt):
        self.enabled = getattr(options, self.enable_opt)
[ "def", "configure", "(", "self", ",", "options", ",", "conf", ")", ":", "self", ".", "conf", "=", "conf", "if", "hasattr", "(", "options", ",", "self", ".", "enable_opt", ")", ":", "self", ".", "enabled", "=", "getattr", "(", "options", ",", "self", ".", "enable_opt", ")" ]
Configure the plugin and system, based on selected options.

The base plugin class sets the plugin to enabled if the enable option
for the plugin (self.enable_opt) is true.
[ "Configure", "the", "plugin", "and", "system", "based", "on", "selected", "options", "." ]
train
https://github.com/cakebread/yolk/blob/ee8c9f529a542d9c5eff4fe69b9c7906c802e4d8/yolk/plugins/base.py#L55-L63
cakebread/yolk
yolk/plugins/base.py
Plugin.help
def help(self):
    """Return help for this plugin. This will be output as the help
    section of the --with-$name option that enables the plugin.
    """
    if self.__class__.__doc__:
        # doc sections are often indented; compress the spaces
        return textwrap.dedent(self.__class__.__doc__)
    return "(no help available)"
python
def help(self):
    """Return help for this plugin. This will be output as the help
    section of the --with-$name option that enables the plugin.
    """
    if self.__class__.__doc__:
        # doc sections are often indented; compress the spaces
        return textwrap.dedent(self.__class__.__doc__)
    return "(no help available)"
[ "def", "help", "(", "self", ")", ":", "if", "self", ".", "__class__", ".", "__doc__", ":", "# doc sections are often indented; compress the spaces", "return", "textwrap", ".", "dedent", "(", "self", ".", "__class__", ".", "__doc__", ")", "return", "\"(no help available)\"" ]
Return help for this plugin. This will be output as the help section of the --with-$name option that enables the plugin.
[ "Return", "help", "for", "this", "plugin", ".", "This", "will", "be", "output", "as", "the", "help", "section", "of", "the", "--", "with", "-", "$name", "option", "that", "enables", "the", "plugin", "." ]
train
https://github.com/cakebread/yolk/blob/ee8c9f529a542d9c5eff4fe69b9c7906c802e4d8/yolk/plugins/base.py#L65-L72
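A hedged subclass sketch tying the three base methods above together; the plugin name is illustrative, and the assumption that the base class derives `enable_opt` from `name` is not confirmed by these records:

class HelloPlugin(Plugin):
    """Say hello for every package yolk inspects."""
    name = 'hello'  # surfaces a --with-hello option via add_options()

    def configure(self, options, conf):
        # Base implementation flips self.enabled from --with-hello.
        Plugin.configure(self, options, conf)
        if self.enabled:
            print('hello plugin enabled')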
pyslackers/slack-sansio
slack/sansio.py
raise_for_status
def raise_for_status(
    status: int, headers: MutableMapping, data: MutableMapping
) -> None:
    """
    Check request response status

    Args:
        status: Response status
        headers: Response headers
        data: Response data

    Raises:
        :class:`slack.exceptions.RateLimited`: For 429 status code
        :class:`slack.exceptions:HTTPException`:
    """
    if status != 200:
        if status == 429:
            if isinstance(data, str):
                error = data
            else:
                error = data.get("error", "ratelimited")
            try:
                retry_after = int(headers.get("Retry-After", 1))
            except ValueError:
                retry_after = 1
            raise exceptions.RateLimited(retry_after, error, status, headers, data)
        else:
            raise exceptions.HTTPException(status, headers, data)
python
def raise_for_status(
    status: int, headers: MutableMapping, data: MutableMapping
) -> None:
    """
    Check request response status

    Args:
        status: Response status
        headers: Response headers
        data: Response data

    Raises:
        :class:`slack.exceptions.RateLimited`: For 429 status code
        :class:`slack.exceptions:HTTPException`:
    """
    if status != 200:
        if status == 429:
            if isinstance(data, str):
                error = data
            else:
                error = data.get("error", "ratelimited")
            try:
                retry_after = int(headers.get("Retry-After", 1))
            except ValueError:
                retry_after = 1
            raise exceptions.RateLimited(retry_after, error, status, headers, data)
        else:
            raise exceptions.HTTPException(status, headers, data)
[ "def", "raise_for_status", "(", "status", ":", "int", ",", "headers", ":", "MutableMapping", ",", "data", ":", "MutableMapping", ")", "->", "None", ":", "if", "status", "!=", "200", ":", "if", "status", "==", "429", ":", "if", "isinstance", "(", "data", ",", "str", ")", ":", "error", "=", "data", "else", ":", "error", "=", "data", ".", "get", "(", "\"error\"", ",", "\"ratelimited\"", ")", "try", ":", "retry_after", "=", "int", "(", "headers", ".", "get", "(", "\"Retry-After\"", ",", "1", ")", ")", "except", "ValueError", ":", "retry_after", "=", "1", "raise", "exceptions", ".", "RateLimited", "(", "retry_after", ",", "error", ",", "status", ",", "headers", ",", "data", ")", "else", ":", "raise", "exceptions", ".", "HTTPException", "(", "status", ",", "headers", ",", "data", ")" ]
Check request response status

Args:
    status: Response status
    headers: Response headers
    data: Response data

Raises:
    :class:`slack.exceptions.RateLimited`: For 429 status code
    :class:`slack.exceptions:HTTPException`:
[ "Check", "request", "response", "status" ]
train
https://github.com/pyslackers/slack-sansio/blob/068ddd6480c6d2f9bf14fa4db498c9fe1017f4ab/slack/sansio.py#L28-L57
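A usage sketch around a caller's HTTP response; `status`, `headers` and `data` are assumed to come from whatever HTTP client is in use, and the `retry_after` attribute on RateLimited is inferred from the constructor arguments above rather than confirmed by this record:

import time

from slack import exceptions

try:
    raise_for_status(status, headers, data)
except exceptions.RateLimited as exc:
    time.sleep(exc.retry_after)  # attribute name inferred, see lead-in
    # ...retry the request here...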