Dataset columns and value-length statistics:

- repository_name: string, lengths 7 to 55
- func_path_in_repository: string, lengths 4 to 223
- func_name: string, lengths 1 to 134
- whole_func_string: string, lengths 75 to 104k
- language: string, 1 distinct value
- func_code_string: string, lengths 75 to 104k
- func_code_tokens: list, lengths 19 to 28.4k
- func_documentation_string: string, lengths 1 to 46.9k
- func_documentation_tokens: list, lengths 1 to 1.97k
- split_name: string, 1 distinct value
- func_code_url: string, lengths 87 to 315
jfilter/text-classification-keras
texcla/preprocessing/tokenizer.py
Tokenizer.create_token_indices
def create_token_indices(self, tokens):
    """If `apply_encoding_options` is inadequate, one can retrieve tokens from
    `self.token_counts`, filter with a desired strategy and regenerate `token_index`
    using this method. The token index is subsequently used when `encode_texts`
    or `decode_texts` methods are called.
    """
    start_index = len(self.special_token)
    indices = list(range(len(tokens) + start_index))
    # prepend because the special tokens come in the beginning
    tokens_with_special = self.special_token + list(tokens)
    self._token2idx = dict(list(zip(tokens_with_special, indices)))
    self._idx2token = dict(list(zip(indices, tokens_with_special)))
python
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/preprocessing/tokenizer.py#L70-L80
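A minimal standalone sketch of the index layout `create_token_indices` produces; the special-token list and tokens below are illustrative assumptions, not values taken from the library:

# Special tokens occupy the first indices, regular tokens follow in the given order.
special_token = ['<PAD>', '<UNK>']   # assumed special-token list
tokens = ['the', 'cat', 'sat']

start_index = len(special_token)
indices = list(range(len(tokens) + start_index))
tokens_with_special = special_token + list(tokens)

token2idx = dict(zip(tokens_with_special, indices))
idx2token = dict(zip(indices, tokens_with_special))

print(token2idx)  # {'<PAD>': 0, '<UNK>': 1, 'the': 2, 'cat': 3, 'sat': 4}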
jfilter/text-classification-keras
texcla/preprocessing/tokenizer.py
Tokenizer.apply_encoding_options
def apply_encoding_options(self, min_token_count=1, limit_top_tokens=None):
    """Applies the given settings for subsequent calls to `encode_texts` and `decode_texts`.
    This allows you to play with different settings without having to re-run tokenization
    on the entire corpus.

    Args:
        min_token_count: The minimum token count (frequency) in order to include during
            encoding. All tokens below this frequency will be encoded to `0`, which
            corresponds to the unknown token. (Default value = 1)
        limit_top_tokens: The maximum number of tokens to keep, based on their frequency.
            Only the most common `limit_top_tokens` tokens will be kept. Set to None to
            keep everything. (Default value: None)
    """
    if not self.has_vocab:
        raise ValueError("You need to build the vocabulary using `build_vocab` "
                         "before using `apply_encoding_options`")
    if min_token_count < 1:
        raise ValueError("`min_token_count` should be at least 1")

    # Remove tokens with freq < min_token_count
    token_counts = list(self._token_counts.items())
    token_counts = [x for x in token_counts if x[1] >= min_token_count]

    # Clip to max_tokens.
    if limit_top_tokens is not None:
        token_counts.sort(key=lambda x: x[1], reverse=True)
        filtered_tokens = list(zip(*token_counts))[0]
        filtered_tokens = filtered_tokens[:limit_top_tokens]
    else:
        # `zip` returns an iterator on Python 3; materialize it before indexing.
        filtered_tokens = list(zip(*token_counts))[0]

    # Generate indices based on filtered tokens.
    self.create_token_indices(filtered_tokens)
python
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/preprocessing/tokenizer.py#L82-L111
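A standalone sketch of the filtering `apply_encoding_options` performs on the token counts; the counts and thresholds are invented for illustration:

token_counts = {'the': 10, 'cat': 4, 'sat': 1, 'mat': 2}
min_token_count = 2
limit_top_tokens = 2

# Drop tokens below the minimum frequency.
counts = [x for x in token_counts.items() if x[1] >= min_token_count]

# Keep only the `limit_top_tokens` most frequent tokens.
counts.sort(key=lambda x: x[1], reverse=True)
filtered_tokens = list(zip(*counts))[0][:limit_top_tokens]

print(filtered_tokens)  # ('the', 'cat')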
jfilter/text-classification-keras
texcla/preprocessing/tokenizer.py
Tokenizer.encode_texts
def encode_texts(self, texts, unknown_token="<UNK>", verbose=1, **kwargs):
    """Encodes the given texts using the internal vocabulary with optionally applied
    encoding options. See `apply_encoding_options` to set various options.

    Args:
        texts: The list of text items to encode.
        unknown_token: The token to replace words that are out of vocabulary. If None,
            those words are omitted.
        verbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1)
        **kwargs: The kwargs for `token_generator`.

    Returns:
        The encoded texts.
    """
    if not self.has_vocab:
        raise ValueError(
            "You need to build the vocabulary using `build_vocab` before using `encode_texts`")

    if unknown_token and unknown_token not in self.special_token:
        raise ValueError(
            "Your special token (" + unknown_token +
            ") to replace unknown words is not in the list of special tokens: " +
            str(self.special_token))

    progbar = Progbar(len(texts), verbose=verbose, interval=0.25)
    encoded_texts = []
    for token_data in self.token_generator(texts, **kwargs):
        indices, token = token_data[:-1], token_data[-1]
        token_idx = self._token2idx.get(token)
        if token_idx is None and unknown_token:
            token_idx = self.special_token.index(unknown_token)
        if token_idx is not None:
            utils._append(encoded_texts, indices, token_idx)

        # Update progressbar per document level.
        progbar.update(indices[0])

    # All done. Finalize progressbar.
    progbar.update(len(texts))
    return encoded_texts
python
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/preprocessing/tokenizer.py#L113-L151
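The core lookup behind `encode_texts`, reduced to a self-contained sketch; the vocabulary and input tokens are assumptions, and the real method additionally streams tokens through `token_generator` and tracks per-document indices:

special_token = ['<PAD>', '<UNK>']
token2idx = {'<PAD>': 0, '<UNK>': 1, 'the': 2, 'cat': 3, 'sat': 4}
unknown_token = '<UNK>'

tokens = ['the', 'dog', 'sat']
encoded = []
for token in tokens:
    token_idx = token2idx.get(token)
    if token_idx is None and unknown_token:
        # Out-of-vocabulary tokens fall back to the unknown-token index.
        token_idx = special_token.index(unknown_token)
    if token_idx is not None:
        encoded.append(token_idx)

print(encoded)  # [2, 1, 4]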
jfilter/text-classification-keras
texcla/preprocessing/tokenizer.py
Tokenizer.decode_texts
def decode_texts(self, encoded_texts, unknown_token="<UNK>", inplace=True):
    """Decodes the texts using the internal vocabulary. The list structure is maintained.

    Args:
        encoded_texts: The list of texts to decode.
        unknown_token: The placeholder value for unknown token. (Default value: "<UNK>")
        inplace: True to make changes inplace. (Default value: True)

    Returns:
        The decoded texts.
    """
    if len(self._token2idx) == 0:
        raise ValueError(
            "You need to build vocabulary using `build_vocab` before using `decode_texts`")

    if not isinstance(encoded_texts, list):
        # assume it's a numpy array
        encoded_texts = encoded_texts.tolist()

    if not inplace:
        encoded_texts = deepcopy(encoded_texts)
    utils._recursive_apply(encoded_texts,
                           lambda token_id: self._idx2token.get(token_id) or unknown_token)
    return encoded_texts
python
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/preprocessing/tokenizer.py#L153-L176
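The matching inverse lookup used by `decode_texts`, again as an assumed, self-contained sketch:

idx2token = {0: '<PAD>', 1: '<UNK>', 2: 'the', 3: 'cat', 4: 'sat'}
unknown_token = '<UNK>'

encoded = [2, 1, 4, 99]  # 99 is not in the vocabulary
decoded = [idx2token.get(token_id) or unknown_token for token_id in encoded]

print(decoded)  # ['the', '<UNK>', 'sat', '<UNK>']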
jfilter/text-classification-keras
texcla/preprocessing/tokenizer.py
Tokenizer.build_vocab
def build_vocab(self, texts, verbose=1, **kwargs):
    """Builds the internal vocabulary and computes various statistics.

    Args:
        texts: The list of text items to encode.
        verbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1)
        **kwargs: The kwargs for `token_generator`.
    """
    if self.has_vocab:
        logger.warn(
            "Tokenizer already has existing vocabulary. Overriding and building new vocabulary.")

    progbar = Progbar(len(texts), verbose=verbose, interval=0.25)
    count_tracker = utils._CountTracker()

    self._token_counts.clear()
    self._num_texts = len(texts)

    for token_data in self.token_generator(texts, **kwargs):
        indices, token = token_data[:-1], token_data[-1]
        count_tracker.update(indices)
        self._token_counts[token] += 1

        # Update progressbar per document level.
        progbar.update(indices[0])

    # Generate token2idx and idx2token.
    self.create_token_indices(self._token_counts.keys())

    # All done. Finalize progressbar update and count tracker.
    count_tracker.finalize()
    self._counts = count_tracker.counts
    progbar.update(len(texts))
python
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/preprocessing/tokenizer.py#L207-L239
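A rough sketch of the counting step inside `build_vocab`, using `collections.Counter` in place of the internal `_token_counts`; the whitespace split stands in for the class's `token_generator` and is purely an assumption:

from collections import Counter

texts = ['the cat sat', 'the dog sat']
token_counts = Counter()
for text_idx, text in enumerate(texts):
    for token in text.split():
        token_counts[token] += 1

print(token_counts)  # Counter({'the': 2, 'sat': 2, 'cat': 1, 'dog': 1})
# The keys of this counter are what `create_token_indices` receives.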
jfilter/text-classification-keras
texcla/preprocessing/tokenizer.py
Tokenizer.pad_sequences
def pad_sequences(self, sequences, fixed_sentences_seq_length=None, fixed_token_seq_length=None,
                  padding='pre', truncating='post', padding_token="<PAD>"):
    """Pads each sequence to the same fixed length (length of the longest sequence or
    provided override).

    Args:
        sequences: list of list (samples, words) or list of list of list (samples, sentences, words)
        fixed_sentences_seq_length: The fixed sentence sequence length to use. If None,
            the largest sentence length is used.
        fixed_token_seq_length: The fixed token sequence length to use. If None, the largest
            word length is used.
        padding: 'pre' or 'post', pad either before or after each sequence.
        truncating: 'pre' or 'post', remove values from sequences larger than
            `fixed_sentences_seq_length` or `fixed_token_seq_length`, either in the beginning
            or in the end of the sentence or word sequence respectively.
        padding_token: The token to add for padding.

    Returns:
        Numpy array of (samples, max_sentences, max_tokens) or (samples, max_tokens)
        depending on the sequence input.

    Raises:
        ValueError: in case of invalid values for `truncating` or `padding`.
    """
    value = self.special_token.index(padding_token)
    if value < 0:
        raise ValueError('The padding token "' + padding_token +
                         '" is not in the special tokens of the tokenizer.')

    # Determine if input is (samples, max_sentences, max_tokens) or not.
    if isinstance(sequences[0][0], list):
        x = utils._pad_sent_sequences(sequences, fixed_sentences_seq_length,
                                      fixed_token_seq_length, padding, truncating, value)
    else:
        x = utils._pad_token_sequences(
            sequences, fixed_token_seq_length, padding, truncating, value)
    return np.array(x, dtype='int32')
python
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/preprocessing/tokenizer.py#L241-L271
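A minimal sketch of pre-padding and post-truncating one token sequence to a fixed length, mirroring what the `utils._pad_token_sequences` helper is expected to do; the padding value 0 is assumed to be the `<PAD>` index:

def pad_one(seq, length, padding='pre', truncating='post', value=0):
    # Truncate sequences that are too long.
    if len(seq) > length:
        seq = seq[-length:] if truncating == 'pre' else seq[:length]
    # Pad sequences that are too short.
    pad = [value] * (length - len(seq))
    return pad + seq if padding == 'pre' else seq + pad

print(pad_one([2, 3, 4], 5))           # [0, 0, 2, 3, 4]
print(pad_one([2, 3, 4, 5, 6, 7], 5))  # [2, 3, 4, 5, 6]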
jfilter/text-classification-keras
texcla/preprocessing/tokenizer.py
Tokenizer.get_stats
def get_stats(self, i):
    """Gets the standard statistics for aux_index `i`. For example, if `token_generator`
    generates `(text_idx, sentence_idx, word)`, then `get_stats(0)` will return various
    statistics about sentence lengths across texts. Similarly, `get_counts(1)` will return
    statistics of token lengths across sentences.

    This information can be used to pad or truncate inputs.
    """
    # OrderedDict to always show same order if printed.
    result = OrderedDict()
    result['min'] = np.min(self._counts[i])
    result['max'] = np.max(self._counts[i])
    result['std'] = np.std(self._counts[i])
    result['mean'] = np.mean(self._counts[i])
    return result
python
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/preprocessing/tokenizer.py#L286-L299
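For a vector of per-text sentence counts, the returned statistics look like this; the counts are invented for illustration:

import numpy as np
from collections import OrderedDict

counts = np.array([3, 5, 4, 8])  # e.g. sentences per text

result = OrderedDict()
result['min'] = np.min(counts)
result['max'] = np.max(counts)
result['std'] = np.std(counts)
result['mean'] = np.mean(counts)

print(result)  # OrderedDict([('min', 3), ('max', 8), ('std', 1.87...), ('mean', 5.0)])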
jfilter/text-classification-keras
texcla/embeddings.py
build_embedding_weights
def build_embedding_weights(word_index, embeddings_index):
    """Builds an embedding matrix for all words in vocab using embeddings_index
    """
    logger.info('Loading embeddings for all words in the corpus')
    embedding_dim = list(embeddings_index.values())[0].shape[-1]

    # setting special tokens such as UNK and PAD to 0
    # all other words are also set to 0.
    embedding_weights = np.zeros((len(word_index), embedding_dim))

    for word, i in word_index.items():
        word_vector = embeddings_index.get(word)
        if word_vector is not None:
            embedding_weights[i] = word_vector

    return embedding_weights
python
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/embeddings.py#L146-L161
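A self-contained sketch of the matrix construction with a tiny invented vocabulary and 3-dimensional vectors in place of real pre-trained embeddings:

import numpy as np

word_index = {'<PAD>': 0, '<UNK>': 1, 'cat': 2, 'dog': 3}
embeddings_index = {
    'cat': np.array([0.1, 0.2, 0.3]),
    'dog': np.array([0.4, 0.5, 0.6]),
}

embedding_dim = list(embeddings_index.values())[0].shape[-1]
embedding_weights = np.zeros((len(word_index), embedding_dim))
for word, i in word_index.items():
    word_vector = embeddings_index.get(word)
    if word_vector is not None:
        embedding_weights[i] = word_vector

print(embedding_weights.shape)  # (4, 3); rows 0 and 1 stay all-zero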
jfilter/text-classification-keras
texcla/embeddings.py
build_fasttext_wiki_embedding_obj
def build_fasttext_wiki_embedding_obj(embedding_type):
    """FastText pre-trained word vectors for 294 languages, with 300 dimensions, trained on
    Wikipedia. It's recommended to use the same tokenizer for your data that was used to
    construct the embeddings. It's implemented as 'FasttextWikiTokenizer'. More information:
    https://fasttext.cc/docs/en/pretrained-vectors.html.

    Args:
        embedding_type: A string in the format `fasttext.wiki.$LANG_CODE`,
            e.g. `fasttext.wiki.de` or `fasttext.wiki.es`

    Returns:
        Object with the URL and filename used later on for downloading the file.
    """
    lang = embedding_type.split('.')[2]
    return {
        'file': 'wiki.{}.vec'.format(lang),
        'url': 'https://dl.fbaipublicfiles.com/fasttext/vectors-wiki/wiki.{}.vec'.format(lang),
        'extract': False,
    }
python
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/embeddings.py#L164-L177
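For example, the German Wikipedia embedding type resolves to the following descriptor; the output is derived directly from the function above, and the import path follows the file path shown for this record:

from texcla.embeddings import build_fasttext_wiki_embedding_obj

obj = build_fasttext_wiki_embedding_obj('fasttext.wiki.de')
print(obj)
# {'file': 'wiki.de.vec',
#  'url': 'https://dl.fbaipublicfiles.com/fasttext/vectors-wiki/wiki.de.vec',
#  'extract': False}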
jfilter/text-classification-keras
texcla/embeddings.py
build_fasttext_cc_embedding_obj
def build_fasttext_cc_embedding_obj(embedding_type):
    """FastText pre-trained word vectors for 157 languages, with 300 dimensions, trained on
    Common Crawl and Wikipedia. Released in 2018, it succeeded the 2017 FastText Wikipedia
    embeddings. It's recommended to use the same tokenizer for your data that was used to
    construct the embeddings. This information and more can be found on their website:
    https://fasttext.cc/docs/en/crawl-vectors.html.

    Args:
        embedding_type: A string in the format `fasttext.cc.$LANG_CODE`,
            e.g. `fasttext.cc.de` or `fasttext.cc.es`

    Returns:
        Object with the URL and filename used later on for downloading the file.
    """
    lang = embedding_type.split('.')[2]
    return {
        'file': 'cc.{}.300.vec.gz'.format(lang),
        'url': 'https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.{}.300.vec.gz'.format(lang),
        'extract': False
    }
python
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/embeddings.py#L180-L193
jfilter/text-classification-keras
texcla/embeddings.py
get_embeddings_index
def get_embeddings_index(embedding_type='glove.42B.300d', embedding_dims=None, embedding_path=None, cache=True):
    """Retrieves embeddings index from embedding name or path. Will automatically download
    and cache as needed.

    Args:
        embedding_type: The embedding type to load.
        embedding_path: Path to a local embedding to use instead of the embedding type.
            Ignores `embedding_type` if specified.

    Returns:
        The embeddings indexed by word.
    """
    if embedding_path is not None:
        embedding_type = embedding_path  # identify embedding by path

    embeddings_index = _EMBEDDINGS_CACHE.get(embedding_type)
    if embeddings_index is not None:
        return embeddings_index

    if embedding_path is None:
        embedding_type_obj = get_embedding_type(embedding_type)

        # some very rough wrangling of zip files with the keras util `get_file`
        # a special problem: when multiple files are in one zip file
        extract = embedding_type_obj.get('extract', True)
        file_path = get_file(
            embedding_type_obj['file'], origin=embedding_type_obj['url'], extract=extract,
            cache_subdir='embeddings', file_hash=embedding_type_obj.get('file_hash',))

        if 'file_in_zip' in embedding_type_obj:
            zip_folder = file_path.split('.zip')[0]
            with ZipFile(file_path, 'r') as zf:
                zf.extractall(zip_folder)
            file_path = os.path.join(
                zip_folder, embedding_type_obj['file_in_zip'])
        else:
            if extract:
                if file_path.endswith('.zip'):
                    file_path = file_path.split('.zip')[0]
                # if file_path.endswith('.gz'):
                #     file_path = file_path.split('.gz')[0]
    else:
        file_path = embedding_path

    embeddings_index = _build_embeddings_index(file_path, embedding_dims)

    if cache:
        _EMBEDDINGS_CACHE[embedding_type] = embeddings_index

    return embeddings_index
python
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/embeddings.py#L211-L257
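A hedged usage sketch: the GloVe identifier is the function's own default (note that it downloads a large file on first use), and the local-path variant assumes a word2vec-style text file exists at the given location:

from texcla.embeddings import get_embeddings_index

# Download (or reuse a cached copy of) a named embedding.
index = get_embeddings_index('glove.42B.300d')

# Or load a local embedding file instead, skipping the download.
# index = get_embeddings_index(embedding_path='/path/to/vectors.vec', embedding_dims=300)

print(len(index))  # number of words that have a vector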
jfilter/text-classification-keras
texcla/preprocessing/char_tokenizer.py
CharTokenizer.token_generator
def token_generator(self, texts, **kwargs):
    """Yields tokens from texts as `(text_idx, character)`
    """
    for text_idx, text in enumerate(texts):
        if self.lower:
            text = text.lower()
        for char in text:
            yield text_idx, char
python
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/preprocessing/char_tokenizer.py#L27-L34
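A quick illustration of the `(text_idx, character)` stream this generator yields; the texts and the `lower` flag are made-up stand-ins for the tokenizer's state:

texts = ['Hi', 'ok']
lower = True

def char_stream(texts):
    for text_idx, text in enumerate(texts):
        if lower:
            text = text.lower()
        for char in text:
            yield text_idx, char

print(list(char_stream(texts)))
# [(0, 'h'), (0, 'i'), (1, 'o'), (1, 'k')]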
jfilter/text-classification-keras
texcla/preprocessing/char_tokenizer.py
SentenceCharTokenizer.token_generator
def token_generator(self, texts, **kwargs):
    """Yields tokens from texts as `(text_idx, sent_idx, character)`

    Args:
        texts: The list of texts.
        **kwargs: Supported args include:
            n_threads/num_threads: Number of threads to use. Uses num_cpus - 1 by default.
            batch_size: The number of texts to accumulate into a common working set before
                processing. (Default value: 1000)
    """
    # Perf optimization. Only process what is necessary.
    n_threads, batch_size = utils._parse_spacy_kwargs(**kwargs)
    nlp = spacy.load(self.lang)
    kwargs = {
        'batch_size': batch_size,
        'n_threads': n_threads,
        'disable': ['ner']
    }

    # Perf optimization: Lower the entire text instead of individual tokens.
    texts_gen = utils._apply_generator(
        texts, lambda x: x.lower()) if self.lower else texts

    for text_idx, doc in enumerate(nlp.pipe(texts_gen, **kwargs)):
        for sent_idx, sent in enumerate(doc.sents):
            for word in sent:
                for char in word:
                    yield text_idx, sent_idx, char
python
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/preprocessing/char_tokenizer.py#L53-L80
jfilter/text-classification-keras
texcla/utils/sampling.py
equal_distribution_folds
def equal_distribution_folds(y, folds=2):
    """Creates `folds` number of indices that have a roughly balanced multi-label distribution.

    Args:
        y: The multi-label outputs.
        folds: The number of folds to create.

    Returns:
        `folds` number of indices that have roughly equal multi-label distributions.
    """
    n, classes = y.shape

    # Compute sample distribution over classes
    dist = y.sum(axis=0).astype('float')
    dist /= dist.sum()

    index_list = []
    fold_dist = np.zeros((folds, classes), dtype='float')
    for _ in range(folds):
        index_list.append([])

    for i in range(n):
        if i < folds:
            target_fold = i
        else:
            normed_folds = fold_dist.T / fold_dist.sum(axis=1)
            how_off = normed_folds.T - dist
            target_fold = np.argmin(
                np.dot((y[i] - .5).reshape(1, -1), how_off.T))
        fold_dist[target_fold] += y[i]
        index_list[target_fold].append(i)

    logger.debug("Fold distributions:")
    logger.debug(fold_dist)
    return index_list
python
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/utils/sampling.py#L11-L44
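A small runnable example with an invented multi-label matrix; each row is a sample, each column a label, and the import path follows the file path shown for this record:

import numpy as np
from texcla.utils.sampling import equal_distribution_folds

y = np.array([
    [1, 0],
    [1, 0],
    [0, 1],
    [0, 1],
    [1, 1],
    [1, 0],
])

folds = equal_distribution_folds(y, folds=2)
print(folds)  # two index lists whose label distributions are roughly equal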
jfilter/text-classification-keras
texcla/models/sentence_model.py
SentenceModelFactory.build_model
def build_model(self, token_encoder_model, sentence_encoder_model,
                trainable_embeddings=True, output_activation='softmax'):
    """Builds a model that first encodes all words within sentences using `token_encoder_model`,
    followed by `sentence_encoder_model`.

    Args:
        token_encoder_model: An instance of `SequenceEncoderBase` for encoding tokens within
            sentences. This model will be applied across all sentences to create a sentence encoding.
        sentence_encoder_model: An instance of `SequenceEncoderBase` operating on the sentence
            encoding generated by `token_encoder_model`. This encoding is then fed into a final
            `Dense` layer for classification.
        trainable_embeddings: Whether or not to fine tune embeddings.
        output_activation: The output activation to use. (Default value: 'softmax')
            Use:
            - `softmax` for binary or multi-class.
            - `sigmoid` for multi-label classification.
            - `linear` for regression output.

    Returns:
        The model output tensor.
    """
    if not isinstance(token_encoder_model, SequenceEncoderBase):
        raise ValueError("`token_encoder_model` should be an instance of `{}`".format(
            SequenceEncoderBase))

    if not isinstance(sentence_encoder_model, SequenceEncoderBase):
        raise ValueError("`sentence_encoder_model` should be an instance of `{}`".format(
            SequenceEncoderBase))

    if not sentence_encoder_model.allows_dynamic_length() and self.max_sents is None:
        raise ValueError("Sentence encoder model '{}' requires padding. "
                         "You need to provide `max_sents`".format(sentence_encoder_model))

    if self.embeddings_index is None:
        # The +1 is for unknown token index 0.
        embedding_layer = Embedding(len(self.token_index), self.embedding_dims,
                                    input_length=self.max_tokens,
                                    mask_zero=token_encoder_model.allows_dynamic_length(),
                                    trainable=trainable_embeddings)
    else:
        embedding_layer = Embedding(len(self.token_index), self.embedding_dims,
                                    weights=[build_embedding_weights(
                                        self.token_index, self.embeddings_index)],
                                    input_length=self.max_tokens,
                                    mask_zero=token_encoder_model.allows_dynamic_length(),
                                    trainable=trainable_embeddings)

    word_input = Input(shape=(self.max_tokens,), dtype='int32')
    x = embedding_layer(word_input)
    word_encoding = token_encoder_model(x)
    token_encoder_model = Model(
        word_input, word_encoding, name='word_encoder')

    doc_input = Input(
        shape=(self.max_sents, self.max_tokens), dtype='int32')
    sent_encoding = TimeDistributed(token_encoder_model)(doc_input)
    x = sentence_encoder_model(sent_encoding)

    x = Dense(self.num_classes, activation=output_activation)(x)
    return Model(doc_input, x)
python
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/models/sentence_model.py#L45-L104
jfilter/text-classification-keras
texcla/experiment.py
process_save
def process_save(X, y, tokenizer, proc_data_path, max_len=400, train=False, ngrams=None, limit_top_tokens=None): """Process text and save as Dataset """ if train and limit_top_tokens is not None: tokenizer.apply_encoding_options(limit_top_tokens=limit_top_tokens) X_encoded = tokenizer.encode_texts(X) if ngrams is not None: X_encoded = tokenizer.add_ngrams(X_encoded, n=ngrams, train=train) X_padded = tokenizer.pad_sequences( X_encoded, fixed_token_seq_length=max_len) if train: ds = Dataset(X_padded, y, tokenizer=tokenizer) else: ds = Dataset(X_padded, y) ds.save(proc_data_path)
python
def process_save(X, y, tokenizer, proc_data_path, max_len=400, train=False, ngrams=None, limit_top_tokens=None): """Process text and save as Dataset """ if train and limit_top_tokens is not None: tokenizer.apply_encoding_options(limit_top_tokens=limit_top_tokens) X_encoded = tokenizer.encode_texts(X) if ngrams is not None: X_encoded = tokenizer.add_ngrams(X_encoded, n=ngrams, train=train) X_padded = tokenizer.pad_sequences( X_encoded, fixed_token_seq_length=max_len) if train: ds = Dataset(X_padded, y, tokenizer=tokenizer) else: ds = Dataset(X_padded, y) ds.save(proc_data_path)
[ "def", "process_save", "(", "X", ",", "y", ",", "tokenizer", ",", "proc_data_path", ",", "max_len", "=", "400", ",", "train", "=", "False", ",", "ngrams", "=", "None", ",", "limit_top_tokens", "=", "None", ")", ":", "if", "train", "and", "limit_top_tokens", "is", "not", "None", ":", "tokenizer", ".", "apply_encoding_options", "(", "limit_top_tokens", "=", "limit_top_tokens", ")", "X_encoded", "=", "tokenizer", ".", "encode_texts", "(", "X", ")", "if", "ngrams", "is", "not", "None", ":", "X_encoded", "=", "tokenizer", ".", "add_ngrams", "(", "X_encoded", ",", "n", "=", "ngrams", ",", "train", "=", "train", ")", "X_padded", "=", "tokenizer", ".", "pad_sequences", "(", "X_encoded", ",", "fixed_token_seq_length", "=", "max_len", ")", "if", "train", ":", "ds", "=", "Dataset", "(", "X_padded", ",", "y", ",", "tokenizer", "=", "tokenizer", ")", "else", ":", "ds", "=", "Dataset", "(", "X_padded", ",", "y", ")", "ds", ".", "save", "(", "proc_data_path", ")" ]
Process text and save as Dataset
[ "Process", "text", "and", "save", "as", "Dataset" ]
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/experiment.py#L106-L126
jfilter/text-classification-keras
texcla/experiment.py
setup_data
def setup_data(X, y, tokenizer, proc_data_path, **kwargs): """Setup data Args: X: text data, y: data labels, tokenizer: A Tokenizer instance proc_data_path: Path for the processed data """ # only build vocabulary once (e.g. training data) train = not tokenizer.has_vocab if train: tokenizer.build_vocab(X) process_save(X, y, tokenizer, proc_data_path, train=train, **kwargs) return tokenizer
python
def setup_data(X, y, tokenizer, proc_data_path, **kwargs): """Setup data Args: X: text data, y: data labels, tokenizer: A Tokenizer instance proc_data_path: Path for the processed data """ # only build vocabulary once (e.g. training data) train = not tokenizer.has_vocab if train: tokenizer.build_vocab(X) process_save(X, y, tokenizer, proc_data_path, train=train, **kwargs) return tokenizer
[ "def", "setup_data", "(", "X", ",", "y", ",", "tokenizer", ",", "proc_data_path", ",", "*", "*", "kwargs", ")", ":", "# only build vocabulary once (e.g. training data)", "train", "=", "not", "tokenizer", ".", "has_vocab", "if", "train", ":", "tokenizer", ".", "build_vocab", "(", "X", ")", "process_save", "(", "X", ",", "y", ",", "tokenizer", ",", "proc_data_path", ",", "train", "=", "train", ",", "*", "*", "kwargs", ")", "return", "tokenizer" ]
Setup data Args: X: text data, y: data labels, tokenizer: A Tokenizer instance proc_data_path: Path for the processed data
[ "Setup", "data" ]
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/experiment.py#L129-L145
jfilter/text-classification-keras
texcla/experiment.py
split_data
def split_data(X, y, ratio=(0.8, 0.1, 0.1)): """Splits data into a training, validation, and test set. Args: X: text data y: data labels ratio: the ratio for splitting. Default: (0.8, 0.1, 0.1) Returns: split data: X_train, X_val, X_test, y_train, y_val, y_test """ assert(sum(ratio) == 1 and len(ratio) == 3) X_train, X_rest, y_train, y_rest = train_test_split( X, y, train_size=ratio[0]) X_val, X_test, y_val, y_test = train_test_split( X_rest, y_rest, train_size=ratio[1]) return X_train, X_val, X_test, y_train, y_val, y_test
python
def split_data(X, y, ratio=(0.8, 0.1, 0.1)): """Splits data into a training, validation, and test set. Args: X: text data y: data labels ratio: the ratio for splitting. Default: (0.8, 0.1, 0.1) Returns: split data: X_train, X_val, X_test, y_train, y_val, y_test """ assert(sum(ratio) == 1 and len(ratio) == 3) X_train, X_rest, y_train, y_rest = train_test_split( X, y, train_size=ratio[0]) X_val, X_test, y_val, y_test = train_test_split( X_rest, y_rest, train_size=ratio[1]) return X_train, X_val, X_test, y_train, y_val, y_test
[ "def", "split_data", "(", "X", ",", "y", ",", "ratio", "=", "(", "0.8", ",", "0.1", ",", "0.1", ")", ")", ":", "assert", "(", "sum", "(", "ratio", ")", "==", "1", "and", "len", "(", "ratio", ")", "==", "3", ")", "X_train", ",", "X_rest", ",", "y_train", ",", "y_rest", "=", "train_test_split", "(", "X", ",", "y", ",", "train_size", "=", "ratio", "[", "0", "]", ")", "X_val", ",", "X_test", ",", "y_val", ",", "y_test", "=", "train_test_split", "(", "X_rest", ",", "y_rest", ",", "train_size", "=", "ratio", "[", "1", "]", ")", "return", "X_train", ",", "X_val", ",", "X_test", ",", "y_train", ",", "y_val", ",", "y_test" ]
Splits data into a training, validation, and test set. Args: X: text data y: data labels ratio: the ratio for splitting. Default: (0.8, 0.1, 0.1) Returns: split data: X_train, X_val, X_test, y_train, y_val, y_test
[ "Splits", "data", "into", "a", "training", "validation", "and", "test", "set", "." ]
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/experiment.py#L148-L164
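
A minimal usage sketch for `split_data` above. The import path follows from `texcla/experiment.py`; the toy corpus and labels are made up purely for illustration.

from texcla.experiment import split_data

# Toy corpus: 100 short documents with alternating binary labels.
X = ['document number {}'.format(i) for i in range(100)]
y = [i % 2 for i in range(100)]

# Default ratio is (0.8, 0.1, 0.1); six arrays come back.
X_train, X_val, X_test, y_train, y_val, y_test = split_data(X, y)
print(len(X_train), len(X_val), len(X_test))
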
jfilter/text-classification-keras
texcla/experiment.py
setup_data_split
def setup_data_split(X, y, tokenizer, proc_data_dir, **kwargs): """Setup data while splitting into a training, validation, and test set. Args: X: text data, y: data labels, tokenizer: A Tokenizer instance proc_data_dir: Directory for the split and processed data """ X_train, X_val, X_test, y_train, y_val, y_test = split_data(X, y) # only build vocabulary on training data tokenizer.build_vocab(X_train) process_save(X_train, y_train, tokenizer, path.join( proc_data_dir, 'train.bin'), train=True, **kwargs) process_save(X_val, y_val, tokenizer, path.join( proc_data_dir, 'val.bin'), **kwargs) process_save(X_test, y_test, tokenizer, path.join( proc_data_dir, 'test.bin'), **kwargs)
python
def setup_data_split(X, y, tokenizer, proc_data_dir, **kwargs): """Setup data while splitting into a training, validation, and test set. Args: X: text data, y: data labels, tokenizer: A Tokenizer instance proc_data_dir: Directory for the split and processed data """ X_train, X_val, X_test, y_train, y_val, y_test = split_data(X, y) # only build vocabulary on training data tokenizer.build_vocab(X_train) process_save(X_train, y_train, tokenizer, path.join( proc_data_dir, 'train.bin'), train=True, **kwargs) process_save(X_val, y_val, tokenizer, path.join( proc_data_dir, 'val.bin'), **kwargs) process_save(X_test, y_test, tokenizer, path.join( proc_data_dir, 'test.bin'), **kwargs)
[ "def", "setup_data_split", "(", "X", ",", "y", ",", "tokenizer", ",", "proc_data_dir", ",", "*", "*", "kwargs", ")", ":", "X_train", ",", "X_val", ",", "X_test", ",", "y_train", ",", "y_val", ",", "y_test", "=", "split_data", "(", "X", ",", "y", ")", "# only build vocabulary on training data", "tokenizer", ".", "build_vocab", "(", "X_train", ")", "process_save", "(", "X_train", ",", "y_train", ",", "tokenizer", ",", "path", ".", "join", "(", "proc_data_dir", ",", "'train.bin'", ")", ",", "train", "=", "True", ",", "*", "*", "kwargs", ")", "process_save", "(", "X_val", ",", "y_val", ",", "tokenizer", ",", "path", ".", "join", "(", "proc_data_dir", ",", "'val.bin'", ")", ",", "*", "*", "kwargs", ")", "process_save", "(", "X_test", ",", "y_test", ",", "tokenizer", ",", "path", ".", "join", "(", "proc_data_dir", ",", "'test.bin'", ")", ",", "*", "*", "kwargs", ")" ]
Setup data while splitting into a training, validation, and test set. Args: X: text data, y: data labels, tokenizer: A Tokenizer instance proc_data_dir: Directory for the split and processed data
[ "Setup", "data", "while", "splitting", "into", "a", "training", "validation", "and", "test", "set", "." ]
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/experiment.py#L167-L186
jfilter/text-classification-keras
texcla/experiment.py
load_data_split
def load_data_split(proc_data_dir): """Loads a split dataset Args: proc_data_dir: Directory with the split and processed data Returns: (Training Data, Validation Data, Test Data) """ ds_train = Dataset.load(path.join(proc_data_dir, 'train.bin')) ds_val = Dataset.load(path.join(proc_data_dir, 'val.bin')) ds_test = Dataset.load(path.join(proc_data_dir, 'test.bin')) return ds_train, ds_val, ds_test
python
def load_data_split(proc_data_dir): """Loads a split dataset Args: proc_data_dir: Directory with the split and processed data Returns: (Training Data, Validation Data, Test Data) """ ds_train = Dataset.load(path.join(proc_data_dir, 'train.bin')) ds_val = Dataset.load(path.join(proc_data_dir, 'val.bin')) ds_test = Dataset.load(path.join(proc_data_dir, 'test.bin')) return ds_train, ds_val, ds_test
[ "def", "load_data_split", "(", "proc_data_dir", ")", ":", "ds_train", "=", "Dataset", ".", "load", "(", "path", ".", "join", "(", "proc_data_dir", ",", "'train.bin'", ")", ")", "ds_val", "=", "Dataset", ".", "load", "(", "path", ".", "join", "(", "proc_data_dir", ",", "'val.bin'", ")", ")", "ds_test", "=", "Dataset", ".", "load", "(", "path", ".", "join", "(", "proc_data_dir", ",", "'test.bin'", ")", ")", "return", "ds_train", ",", "ds_val", ",", "ds_test" ]
Loads a split dataset Args: proc_data_dir: Directory with the split and processed data Returns: (Training Data, Validation Data, Test Data)
[ "Loads", "a", "split", "dataset" ]
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/experiment.py#L189-L201
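
A hedged end-to-end sketch combining `setup_data_split` and `load_data_split`. The `SpacyTokenizer` import path is inferred from `texcla/preprocessing/word_tokenizer.py`, its no-argument constructor is an assumption, spaCy plus an English model must be installed, and the `proc` directory is assumed to exist.

from texcla.experiment import setup_data_split, load_data_split
from texcla.preprocessing.word_tokenizer import SpacyTokenizer

# Made-up corpus; in practice this would come from e.g. texcla.corpus.imdb.
X = ['a tiny positive text', 'a tiny negative text'] * 50
y = [1, 0] * 50

# Splits the data, builds the vocabulary on the training portion only,
# and persists train.bin / val.bin / test.bin into the given directory.
setup_data_split(X, y, SpacyTokenizer(), proc_data_dir='proc', max_len=50)

# Later (e.g. in a separate training script) the three Datasets reload with:
ds_train, ds_val, ds_test = load_data_split('proc')
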
jfilter/text-classification-keras
texcla/models/token_model.py
TokenModelFactory.build_model
def build_model(self, token_encoder_model, trainable_embeddings=True, output_activation='softmax'): """Builds a model using the given `text_model` Args: token_encoder_model: An instance of `SequenceEncoderBase` for encoding all the tokens within a document. This encoding is then fed into a final `Dense` layer for classification. trainable_embeddings: Whether or not to fine tune embeddings. output_activation: The output activation to use. (Default value: 'softmax') Use: - `softmax` for binary or multi-class. - `sigmoid` for multi-label classification. - `linear` for regression output. Returns: The model output tensor. """ if not isinstance(token_encoder_model, SequenceEncoderBase): raise ValueError("`token_encoder_model` should be an instance of `{}`".format( SequenceEncoderBase)) if not token_encoder_model.allows_dynamic_length() and self.max_tokens is None: raise ValueError("The provided `token_encoder_model` does not allow variable length mini-batches. " "You need to provide `max_tokens`") if self.embeddings_index is None: # The +1 is for unknown token index 0. embedding_layer = Embedding(len(self.token_index), self.embedding_dims, input_length=self.max_tokens, mask_zero=token_encoder_model.allows_dynamic_length(), trainable=trainable_embeddings) else: embedding_layer = Embedding(len(self.token_index), self.embedding_dims, weights=[build_embedding_weights( self.token_index, self.embeddings_index)], input_length=self.max_tokens, mask_zero=token_encoder_model.allows_dynamic_length(), trainable=trainable_embeddings) sequence_input = Input(shape=(self.max_tokens,), dtype='int32') x = embedding_layer(sequence_input) x = token_encoder_model(x) x = Dense(self.num_classes, activation=output_activation)(x) return Model(sequence_input, x)
python
def build_model(self, token_encoder_model, trainable_embeddings=True, output_activation='softmax'): """Builds a model using the given `text_model` Args: token_encoder_model: An instance of `SequenceEncoderBase` for encoding all the tokens within a document. This encoding is then fed into a final `Dense` layer for classification. trainable_embeddings: Whether or not to fine tune embeddings. output_activation: The output activation to use. (Default value: 'softmax') Use: - `softmax` for binary or multi-class. - `sigmoid` for multi-label classification. - `linear` for regression output. Returns: The model output tensor. """ if not isinstance(token_encoder_model, SequenceEncoderBase): raise ValueError("`token_encoder_model` should be an instance of `{}`".format( SequenceEncoderBase)) if not token_encoder_model.allows_dynamic_length() and self.max_tokens is None: raise ValueError("The provided `token_encoder_model` does not allow variable length mini-batches. " "You need to provide `max_tokens`") if self.embeddings_index is None: # The +1 is for unknown token index 0. embedding_layer = Embedding(len(self.token_index), self.embedding_dims, input_length=self.max_tokens, mask_zero=token_encoder_model.allows_dynamic_length(), trainable=trainable_embeddings) else: embedding_layer = Embedding(len(self.token_index), self.embedding_dims, weights=[build_embedding_weights( self.token_index, self.embeddings_index)], input_length=self.max_tokens, mask_zero=token_encoder_model.allows_dynamic_length(), trainable=trainable_embeddings) sequence_input = Input(shape=(self.max_tokens,), dtype='int32') x = embedding_layer(sequence_input) x = token_encoder_model(x) x = Dense(self.num_classes, activation=output_activation)(x) return Model(sequence_input, x)
[ "def", "build_model", "(", "self", ",", "token_encoder_model", ",", "trainable_embeddings", "=", "True", ",", "output_activation", "=", "'softmax'", ")", ":", "if", "not", "isinstance", "(", "token_encoder_model", ",", "SequenceEncoderBase", ")", ":", "raise", "ValueError", "(", "\"`token_encoder_model` should be an instance of `{}`\"", ".", "format", "(", "SequenceEncoderBase", ")", ")", "if", "not", "token_encoder_model", ".", "allows_dynamic_length", "(", ")", "and", "self", ".", "max_tokens", "is", "None", ":", "raise", "ValueError", "(", "\"The provided `token_encoder_model` does not allow variable length mini-batches. \"", "\"You need to provide `max_tokens`\"", ")", "if", "self", ".", "embeddings_index", "is", "None", ":", "# The +1 is for unknown token index 0.", "embedding_layer", "=", "Embedding", "(", "len", "(", "self", ".", "token_index", ")", ",", "self", ".", "embedding_dims", ",", "input_length", "=", "self", ".", "max_tokens", ",", "mask_zero", "=", "token_encoder_model", ".", "allows_dynamic_length", "(", ")", ",", "trainable", "=", "trainable_embeddings", ")", "else", ":", "embedding_layer", "=", "Embedding", "(", "len", "(", "self", ".", "token_index", ")", ",", "self", ".", "embedding_dims", ",", "weights", "=", "[", "build_embedding_weights", "(", "self", ".", "token_index", ",", "self", ".", "embeddings_index", ")", "]", ",", "input_length", "=", "self", ".", "max_tokens", ",", "mask_zero", "=", "token_encoder_model", ".", "allows_dynamic_length", "(", ")", ",", "trainable", "=", "trainable_embeddings", ")", "sequence_input", "=", "Input", "(", "shape", "=", "(", "self", ".", "max_tokens", ",", ")", ",", "dtype", "=", "'int32'", ")", "x", "=", "embedding_layer", "(", "sequence_input", ")", "x", "=", "token_encoder_model", "(", "x", ")", "x", "=", "Dense", "(", "self", ".", "num_classes", ",", "activation", "=", "output_activation", ")", "(", "x", ")", "return", "Model", "(", "sequence_input", ",", "x", ")" ]
Builds a model using the given `text_model` Args: token_encoder_model: An instance of `SequenceEncoderBase` for encoding all the tokens within a document. This encoding is then fed into a final `Dense` layer for classification. trainable_embeddings: Whether or not to fine tune embeddings. output_activation: The output activation to use. (Default value: 'softmax') Use: - `softmax` for binary or multi-class. - `sigmoid` for multi-label classification. - `linear` for regression output. Returns: The model output tensor.
[ "Builds", "a", "model", "using", "the", "given", "text_model" ]
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/models/token_model.py#L39-L83
jfilter/text-classification-keras
texcla/models/layers.py
_softmax
def _softmax(x, dim): """Computes softmax along a specified dim. Keras currently lacks this feature. """ if K.backend() == 'tensorflow': import tensorflow as tf return tf.nn.softmax(x, dim) elif K.backend() == 'cntk': import cntk return cntk.softmax(x, dim) elif K.backend() == 'theano': # Theano cannot softmax along an arbitrary dim. # So, we will shuffle `dim` to -1 and un-shuffle after softmax. perm = np.arange(K.ndim(x)) perm[dim], perm[-1] = perm[-1], perm[dim] x_perm = K.permute_dimensions(x, perm) output = K.softmax(x_perm) # Permute back (the swap above is its own inverse) output = K.permute_dimensions(output, perm) return output else: raise ValueError("Backend '{}' not supported".format(K.backend()))
python
def _softmax(x, dim): """Computes softmax along a specified dim. Keras currently lacks this feature. """ if K.backend() == 'tensorflow': import tensorflow as tf return tf.nn.softmax(x, dim) elif K.backend() == 'cntk': import cntk return cntk.softmax(x, dim) elif K.backend() == 'theano': # Theano cannot softmax along an arbitrary dim. # So, we will shuffle `dim` to -1 and un-shuffle after softmax. perm = np.arange(K.ndim(x)) perm[dim], perm[-1] = perm[-1], perm[dim] x_perm = K.permute_dimensions(x, perm) output = K.softmax(x_perm) # Permute back (the swap above is its own inverse) output = K.permute_dimensions(output, perm) return output else: raise ValueError("Backend '{}' not supported".format(K.backend()))
[ "def", "_softmax", "(", "x", ",", "dim", ")", ":", "if", "K", ".", "backend", "(", ")", "==", "'tensorflow'", ":", "import", "tensorflow", "as", "tf", "return", "tf", ".", "nn", ".", "softmax", "(", "x", ",", "dim", ")", "elif", "K", ".", "backend", "(", ")", "is", "'cntk'", ":", "import", "cntk", "return", "cntk", ".", "softmax", "(", "x", ",", "dim", ")", "elif", "K", ".", "backend", "(", ")", "==", "'theano'", ":", "# Theano cannot softmax along an arbitrary dim.", "# So, we will shuffle `dim` to -1 and un-shuffle after softmax.", "perm", "=", "np", ".", "arange", "(", "K", ".", "ndim", "(", "x", ")", ")", "perm", "[", "dim", "]", ",", "perm", "[", "-", "1", "]", "=", "perm", "[", "-", "1", "]", ",", "perm", "[", "dim", "]", "x_perm", "=", "K", ".", "permute_dimensions", "(", "x", ",", "perm", ")", "output", "=", "K", ".", "softmax", "(", "x_perm", ")", "# Permute back", "perm", "[", "dim", "]", ",", "perm", "[", "-", "1", "]", "=", "perm", "[", "-", "1", "]", ",", "perm", "[", "dim", "]", "output", "=", "K", ".", "permute_dimensions", "(", "x", ",", "output", ")", "return", "output", "else", ":", "raise", "ValueError", "(", "\"Backend '{}' not supported\"", ".", "format", "(", "K", ".", "backend", "(", ")", ")", ")" ]
Computes softmax along a specified dim. Keras currently lacks this feature.
[ "Computes", "softmax", "along", "a", "specified", "dim", ".", "Keras", "currently", "lacks", "this", "feature", "." ]
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/models/layers.py#L10-L33
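
The Theano branch of `_softmax` rests on a dimension-shuffle trick: swap `dim` with the last axis, apply an ordinary last-axis softmax, then apply the same swap again to restore the layout (a transposition is its own inverse). A small self-contained NumPy illustration of that idea, independent of any Keras backend:

import numpy as np

def softmax_along(x, dim):
    # Swap `dim` with the last axis.
    perm = np.arange(x.ndim)
    perm[dim], perm[-1] = perm[-1], perm[dim]
    x_perm = np.transpose(x, perm)
    # Numerically stable softmax over the last axis.
    e = np.exp(x_perm - x_perm.max(axis=-1, keepdims=True))
    out = e / e.sum(axis=-1, keepdims=True)
    # The same permutation undoes the swap.
    return np.transpose(out, perm)

x = np.random.rand(2, 3, 4)
print(np.allclose(softmax_along(x, 1).sum(axis=1), 1.0))  # True
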
jfilter/text-classification-keras
texcla/preprocessing/word_tokenizer.py
SpacyTokenizer._apply_options
def _apply_options(self, token): """Applies various filtering and processing options on token. Returns: The processed token. None if filtered. """ # Apply word token filtering. if token.is_punct and self.remove_punct: return None if token.is_stop and self.remove_stop_words: return None if token.is_digit and self.remove_digits: return None if token.is_oov and self.exclude_oov: return None if token.pos_ in self.exclude_pos_tags: return None if token.ent_type_ in self.exclude_entities: return None # Lemmatized ones are already lowered. if self.lemmatize: return token.lemma_ if self.lower: return token.lower_ return token.orth_
python
def _apply_options(self, token): """Applies various filtering and processing options on token. Returns: The processed token. None if filtered. """ # Apply word token filtering. if token.is_punct and self.remove_punct: return None if token.is_stop and self.remove_stop_words: return None if token.is_digit and self.remove_digits: return None if token.is_oov and self.exclude_oov: return None if token.pos_ in self.exclude_pos_tags: return None if token.ent_type_ in self.exclude_entities: return None # Lemmatized ones are already lowered. if self.lemmatize: return token.lemma_ if self.lower: return token.lower_ return token.orth_
[ "def", "_apply_options", "(", "self", ",", "token", ")", ":", "# Apply work token filtering.", "if", "token", ".", "is_punct", "and", "self", ".", "remove_punct", ":", "return", "None", "if", "token", ".", "is_stop", "and", "self", ".", "remove_stop_words", ":", "return", "None", "if", "token", ".", "is_digit", "and", "self", ".", "remove_digits", ":", "return", "None", "if", "token", ".", "is_oov", "and", "self", ".", "exclude_oov", ":", "return", "None", "if", "token", ".", "pos_", "in", "self", ".", "exclude_pos_tags", ":", "return", "None", "if", "token", ".", "ent_type_", "in", "self", ".", "exclude_entities", ":", "return", "None", "# Lemmatized ones are already lowered.", "if", "self", ".", "lemmatize", ":", "return", "token", ".", "lemma_", "if", "self", ".", "lower", ":", "return", "token", ".", "lower_", "return", "token", ".", "orth_" ]
Applies various filtering and processing options on token. Returns: The processed token. None if filtered.
[ "Applies", "various", "filtering", "and", "processing", "options", "on", "token", "." ]
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/preprocessing/word_tokenizer.py#L52-L77
jfilter/text-classification-keras
texcla/preprocessing/word_tokenizer.py
SpacyTokenizer.token_generator
def token_generator(self, texts, **kwargs): """Yields tokens from texts as `(text_idx, word)` Args: texts: The list of texts. **kwargs: Supported args include: n_threads/num_threads: Number of threads to use. Uses num_cpus - 1 by default. batch_size: The number of texts to accumulate into a common working set before processing. (Default value: 1000) """ # Perf optimization. Only process what is necessary. n_threads, batch_size = utils._parse_spacy_kwargs(**kwargs) nlp = spacy.load(self.lang) disabled = ['parser'] if len(self.exclude_entities) > 0: disabled.append('ner') kwargs = { 'batch_size': batch_size, 'n_threads': n_threads, 'disable': disabled } for text_idx, doc in enumerate(nlp.pipe(texts, **kwargs)): for word in doc: processed_word = self._apply_options(word) if processed_word is not None: yield text_idx, processed_word
python
def token_generator(self, texts, **kwargs): """Yields tokens from texts as `(text_idx, word)` Args: texts: The list of texts. **kwargs: Supported args include: n_threads/num_threads: Number of threads to use. Uses num_cpus - 1 by default. batch_size: The number of texts to accumulate into a common working set before processing. (Default value: 1000) """ # Perf optimization. Only process what is necessary. n_threads, batch_size = utils._parse_spacy_kwargs(**kwargs) nlp = spacy.load(self.lang) disabled = ['parser'] if len(self.exclude_entities) > 0: disabled.append('ner') kwargs = { 'batch_size': batch_size, 'n_threads': n_threads, 'disable': disabled } for text_idx, doc in enumerate(nlp.pipe(texts, **kwargs)): for word in doc: processed_word = self._apply_options(word) if processed_word is not None: yield text_idx, processed_word
[ "def", "token_generator", "(", "self", ",", "texts", ",", "*", "*", "kwargs", ")", ":", "# Perf optimization. Only process what is necessary.", "n_threads", ",", "batch_size", "=", "utils", ".", "_parse_spacy_kwargs", "(", "*", "*", "kwargs", ")", "nlp", "=", "spacy", ".", "load", "(", "self", ".", "lang", ")", "disabled", "=", "[", "'parser'", "]", "if", "len", "(", "self", ".", "exclude_entities", ")", ">", "0", ":", "disabled", ".", "append", "(", "'ner'", ")", "kwargs", "=", "{", "'batch_size'", ":", "batch_size", ",", "'n_threads'", ":", "n_threads", ",", "'disable'", ":", "disabled", "}", "for", "text_idx", ",", "doc", "in", "enumerate", "(", "nlp", ".", "pipe", "(", "texts", ",", "*", "*", "kwargs", ")", ")", ":", "for", "word", "in", "doc", ":", "processed_word", "=", "self", ".", "_apply_options", "(", "word", ")", "if", "processed_word", "is", "not", "None", ":", "yield", "text_idx", ",", "processed_word" ]
Yields tokens from texts as `(text_idx, word)` Args: texts: The list of texts. **kwargs: Supported args include: n_threads/num_threads: Number of threads to use. Uses num_cpus - 1 by default. batch_size: The number of texts to accumulate into a common working set before processing. (Default value: 1000)
[ "Yields", "tokens", "from", "texts", "as", "(", "text_idx", "word", ")" ]
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/preprocessing/word_tokenizer.py#L79-L107
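
A hedged sketch of streaming `(text_idx, token)` pairs with `SpacyTokenizer.token_generator`. The no-argument constructor and its defaults are assumptions based on the attributes referenced in `_apply_options`, and a spaCy language model has to be available locally.

from texcla.preprocessing.word_tokenizer import SpacyTokenizer

texts = ['The first tiny document.', 'And a second one.']
tokenizer = SpacyTokenizer()  # defaults assumed (e.g. English, lower-casing on)

for text_idx, token in tokenizer.token_generator(texts, batch_size=2):
    print(text_idx, token)
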
jfilter/text-classification-keras
texcla/preprocessing/utils.py
_append
def _append(lst, indices, value): """Adds `value` to `lst` list indexed by `indices`. Will create sub lists as required. """ for i, idx in enumerate(indices): # We need to loop because sometimes indices can increment by more than 1 due to missing tokens. # Example: Sentence with no words after filtering words. while len(lst) <= idx: # Update max counts whenever a new sublist is created. # There is no need to worry about indices beyond `i` since they will end up creating new lists as well. lst.append([]) lst = lst[idx] # Add token and update token max count. lst.append(value)
python
def _append(lst, indices, value): """Adds `value` to `lst` list indexed by `indices`. Will create sub lists as required. """ for i, idx in enumerate(indices): # We need to loop because sometimes indices can increment by more than 1 due to missing tokens. # Example: Sentence with no words after filtering words. while len(lst) <= idx: # Update max counts whenever a new sublist is created. # There is no need to worry about indices beyond `i` since they will end up creating new lists as well. lst.append([]) lst = lst[idx] # Add token and update token max count. lst.append(value)
[ "def", "_append", "(", "lst", ",", "indices", ",", "value", ")", ":", "for", "i", ",", "idx", "in", "enumerate", "(", "indices", ")", ":", "# We need to loop because sometimes indices can increment by more than 1 due to missing tokens.", "# Example: Sentence with no words after filtering words.", "while", "len", "(", "lst", ")", "<=", "idx", ":", "# Update max counts whenever a new sublist is created.", "# There is no need to worry about indices beyond `i` since they will end up creating new lists as well.", "lst", ".", "append", "(", "[", "]", ")", "lst", "=", "lst", "[", "idx", "]", "# Add token and update token max count.", "lst", ".", "append", "(", "value", ")" ]
Adds `value` to `lst` list indexed by `indices`. Will create sub lists as required.
[ "Adds", "value", "to", "lst", "list", "indexed", "by", "indices", ".", "Will", "create", "sub", "lists", "as", "required", "." ]
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/preprocessing/utils.py#L82-L95
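
To make the behaviour of `_append` concrete, here is a condensed, self-contained copy of the helper from the record above, driven by a pair of (document, sentence) indices:

def _append(lst, indices, value):
    # Walk (and extend) one nesting level per index, then append the value.
    for idx in indices:
        while len(lst) <= idx:
            lst.append([])
        lst = lst[idx]
    lst.append(value)

docs = []
_append(docs, [0, 0], 'hello')  # document 0, sentence 0
_append(docs, [0, 1], 'world')  # document 0, sentence 1
_append(docs, [2, 0], 'skip')   # document 2; an empty document 1 is created on the way
print(docs)  # [[['hello'], ['world']], [], [['skip']]]
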
jfilter/text-classification-keras
texcla/preprocessing/utils.py
_parse_spacy_kwargs
def _parse_spacy_kwargs(**kwargs): """Supported args include: Args: n_threads/num_threads: Number of threads to use. Uses num_cpus - 1 by default. batch_size: The number of texts to accumulate into a common working set before processing. (Default value: 1000) """ n_threads = kwargs.get('n_threads') or kwargs.get('num_threads') batch_size = kwargs.get('batch_size') if n_threads is None or n_threads == -1: n_threads = cpu_count() - 1 if batch_size is None or batch_size == -1: batch_size = 1000 return n_threads, batch_size
python
def _parse_spacy_kwargs(**kwargs): """Supported args include: Args: n_threads/num_threads: Number of threads to use. Uses num_cpus - 1 by default. batch_size: The number of texts to accumulate into a common working set before processing. (Default value: 1000) """ n_threads = kwargs.get('n_threads') or kwargs.get('num_threads') batch_size = kwargs.get('batch_size') if n_threads is None or n_threads == -1: n_threads = cpu_count() - 1 if batch_size is None or batch_size == -1: batch_size = 1000 return n_threads, batch_size
[ "def", "_parse_spacy_kwargs", "(", "*", "*", "kwargs", ")", ":", "n_threads", "=", "kwargs", ".", "get", "(", "'n_threads'", ")", "or", "kwargs", ".", "get", "(", "'num_threads'", ")", "batch_size", "=", "kwargs", ".", "get", "(", "'batch_size'", ")", "if", "n_threads", "is", "None", "or", "n_threads", "is", "-", "1", ":", "n_threads", "=", "cpu_count", "(", ")", "-", "1", "if", "batch_size", "is", "None", "or", "batch_size", "is", "-", "1", ":", "batch_size", "=", "1000", "return", "n_threads", ",", "batch_size" ]
Supported args include: Args: n_threads/num_threads: Number of threads to use. Uses num_cpus - 1 by default. batch_size: The number of texts to accumulate into a common working set before processing. (Default value: 1000)
[ "Supported", "args", "include", ":" ]
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/preprocessing/utils.py#L113-L128
jfilter/text-classification-keras
texcla/preprocessing/utils.py
_CountTracker.update
def update(self, indices): """Updates counts based on indices. The algorithm tracks the index change at i and update global counts for all indices beyond i with local counts tracked so far. """ # Initialize various lists for the first time based on length of indices. if self._prev_indices is None: self._prev_indices = indices # +1 to track token counts in the last index. self._local_counts = np.full(len(indices) + 1, 1) self._local_counts[-1] = 0 self.counts = [[] for _ in range(len(self._local_counts))] has_reset = False for i in range(len(indices)): # index value changed. Push all local values beyond i to count and reset those local_counts. # For example, if document index changed, push counts on sentences and tokens and reset their local_counts # to indicate that we are tracking those for new document. We need to do this at all document hierarchies. if indices[i] > self._prev_indices[i]: self._local_counts[i] += 1 has_reset = True for j in range(i + 1, len(self.counts)): self.counts[j].append(self._local_counts[j]) self._local_counts[j] = 1 # If none of the aux indices changed, update token count. if not has_reset: self._local_counts[-1] += 1 self._prev_indices = indices[:]
python
def update(self, indices): """Updates counts based on indices. The algorithm tracks the index change at i and update global counts for all indices beyond i with local counts tracked so far. """ # Initialize various lists for the first time based on length of indices. if self._prev_indices is None: self._prev_indices = indices # +1 to track token counts in the last index. self._local_counts = np.full(len(indices) + 1, 1) self._local_counts[-1] = 0 self.counts = [[] for _ in range(len(self._local_counts))] has_reset = False for i in range(len(indices)): # index value changed. Push all local values beyond i to count and reset those local_counts. # For example, if document index changed, push counts on sentences and tokens and reset their local_counts # to indicate that we are tracking those for new document. We need to do this at all document hierarchies. if indices[i] > self._prev_indices[i]: self._local_counts[i] += 1 has_reset = True for j in range(i + 1, len(self.counts)): self.counts[j].append(self._local_counts[j]) self._local_counts[j] = 1 # If none of the aux indices changed, update token count. if not has_reset: self._local_counts[-1] += 1 self._prev_indices = indices[:]
[ "def", "update", "(", "self", ",", "indices", ")", ":", "# Initialize various lists for the first time based on length of indices.", "if", "self", ".", "_prev_indices", "is", "None", ":", "self", ".", "_prev_indices", "=", "indices", "# +1 to track token counts in the last index.", "self", ".", "_local_counts", "=", "np", ".", "full", "(", "len", "(", "indices", ")", "+", "1", ",", "1", ")", "self", ".", "_local_counts", "[", "-", "1", "]", "=", "0", "self", ".", "counts", "=", "[", "[", "]", "for", "_", "in", "range", "(", "len", "(", "self", ".", "_local_counts", ")", ")", "]", "has_reset", "=", "False", "for", "i", "in", "range", "(", "len", "(", "indices", ")", ")", ":", "# index value changed. Push all local values beyond i to count and reset those local_counts.", "# For example, if document index changed, push counts on sentences and tokens and reset their local_counts", "# to indicate that we are tracking those for new document. We need to do this at all document hierarchies.", "if", "indices", "[", "i", "]", ">", "self", ".", "_prev_indices", "[", "i", "]", ":", "self", ".", "_local_counts", "[", "i", "]", "+=", "1", "has_reset", "=", "True", "for", "j", "in", "range", "(", "i", "+", "1", ",", "len", "(", "self", ".", "counts", ")", ")", ":", "self", ".", "counts", "[", "j", "]", ".", "append", "(", "self", ".", "_local_counts", "[", "j", "]", ")", "self", ".", "_local_counts", "[", "j", "]", "=", "1", "# If none of the aux indices changed, update token count.", "if", "not", "has_reset", ":", "self", ".", "_local_counts", "[", "-", "1", "]", "+=", "1", "self", ".", "_prev_indices", "=", "indices", "[", ":", "]" ]
Updates counts based on indices. The algorithm tracks the index change at i and update global counts for all indices beyond i with local counts tracked so far.
[ "Updates", "counts", "based", "on", "indices", ".", "The", "algorithm", "tracks", "the", "index", "change", "at", "i", "and", "update", "global", "counts", "for", "all", "indices", "beyond", "i", "with", "local", "counts", "tracked", "so", "far", "." ]
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/preprocessing/utils.py#L34-L62
jfilter/text-classification-keras
texcla/preprocessing/utils.py
_CountTracker.finalize
def finalize(self): """This will add the very last document to counts. We also get rid of counts[0] since that represents document level which doesn't come under anything else. We also convert all count values to numpy arrays so that stats can be computed easily. """ for i in range(1, len(self._local_counts)): self.counts[i].append(self._local_counts[i]) self.counts.pop(0) for i in range(len(self.counts)): self.counts[i] = np.array(self.counts[i])
python
def finalize(self): """This will add the very last document to counts. We also get rid of counts[0] since that represents document level which doesn't come under anything else. We also convert all count values to numpy arrays so that stats can be computed easily. """ for i in range(1, len(self._local_counts)): self.counts[i].append(self._local_counts[i]) self.counts.pop(0) for i in range(len(self.counts)): self.counts[i] = np.array(self.counts[i])
[ "def", "finalize", "(", "self", ")", ":", "for", "i", "in", "range", "(", "1", ",", "len", "(", "self", ".", "_local_counts", ")", ")", ":", "self", ".", "counts", "[", "i", "]", ".", "append", "(", "self", ".", "_local_counts", "[", "i", "]", ")", "self", ".", "counts", ".", "pop", "(", "0", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "counts", ")", ")", ":", "self", ".", "counts", "[", "i", "]", "=", "np", ".", "array", "(", "self", ".", "counts", "[", "i", "]", ")" ]
This will add the very last document to counts. We also get rid of counts[0] since that represents document level which doesn't come under anything else. We also convert all count values to numpy arrays so that stats can be computed easily.
[ "This", "will", "add", "the", "very", "last", "document", "to", "counts", ".", "We", "also", "get", "rid", "of", "counts", "[", "0", "]", "since", "that", "represents", "document", "level", "which", "doesnt", "come", "under", "anything", "else", ".", "We", "also", "convert", "all", "count", "values", "to", "numpy", "arrays", "so", "that", "stats", "can", "be", "computed", "easily", "." ]
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/preprocessing/utils.py#L64-L74
jfilter/text-classification-keras
texcla/corpus.py
read_folder
def read_folder(directory): """Reads text files in a directory and returns them as an array. Args: directory: where the text files are Returns: Array of text """ res = [] for filename in os.listdir(directory): with io.open(os.path.join(directory, filename), encoding="utf-8") as f: content = f.read() res.append(content) return res
python
def read_folder(directory): """Reads text files in a directory and returns them as an array. Args: directory: where the text files are Returns: Array of text """ res = [] for filename in os.listdir(directory): with io.open(os.path.join(directory, filename), encoding="utf-8") as f: content = f.read() res.append(content) return res
[ "def", "read_folder", "(", "directory", ")", ":", "res", "=", "[", "]", "for", "filename", "in", "os", ".", "listdir", "(", "directory", ")", ":", "with", "io", ".", "open", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "filename", ")", ",", "encoding", "=", "\"utf-8\"", ")", "as", "f", ":", "content", "=", "f", ".", "read", "(", ")", "res", ".", "append", "(", "content", ")", "return", "res" ]
Reads text files in a directory and returns them as an array. Args: directory: where the text files are Returns: Array of text
[ "read", "text", "files", "in", "directory", "and", "returns", "them", "as", "array" ]
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/corpus.py#L8-L22
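
Illustrative call for `read_folder`; the directory name is hypothetical and is expected to hold UTF-8 text files.

from texcla.corpus import read_folder

texts = read_folder('data/reviews')  # hypothetical folder of plain-text files
print(len(texts))
print(texts[0][:100] if texts else 'no files found')
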
jfilter/text-classification-keras
texcla/corpus.py
read_pos_neg_data
def read_pos_neg_data(path, folder, limit): """returns array with positive and negative examples""" training_pos_path = os.path.join(path, folder, 'pos') training_neg_path = os.path.join(path, folder, 'neg') X_pos = read_folder(training_pos_path) X_neg = read_folder(training_neg_path) if limit is None: X = X_pos + X_neg else: X = X_pos[:limit] + X_neg[:limit] y = [1] * int(len(X) / 2) + [0] * int(len(X) / 2) return X, y
python
def read_pos_neg_data(path, folder, limit): """returns array with positive and negative examples""" training_pos_path = os.path.join(path, folder, 'pos') training_neg_path = os.path.join(path, folder, 'neg') X_pos = read_folder(training_pos_path) X_neg = read_folder(training_neg_path) if limit is None: X = X_pos + X_neg else: X = X_pos[:limit] + X_neg[:limit] y = [1] * int(len(X) / 2) + [0] * int(len(X) / 2) return X, y
[ "def", "read_pos_neg_data", "(", "path", ",", "folder", ",", "limit", ")", ":", "training_pos_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "folder", ",", "'pos'", ")", "training_neg_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "folder", ",", "'neg'", ")", "X_pos", "=", "read_folder", "(", "training_pos_path", ")", "X_neg", "=", "read_folder", "(", "training_neg_path", ")", "if", "limit", "is", "None", ":", "X", "=", "X_pos", "+", "X_neg", "else", ":", "X", "=", "X_pos", "[", ":", "limit", "]", "+", "X_neg", "[", ":", "limit", "]", "y", "=", "[", "1", "]", "*", "int", "(", "len", "(", "X", ")", "/", "2", ")", "+", "[", "0", "]", "*", "int", "(", "len", "(", "X", ")", "/", "2", ")", "return", "X", ",", "y" ]
returns array with positive and negative examples
[ "returns", "array", "with", "positive", "and", "negative", "examples" ]
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/corpus.py#L25-L40
jfilter/text-classification-keras
texcla/corpus.py
imdb
def imdb(limit=None, shuffle=True): """Downloads (and caches) IMDB Movie Reviews. 25k training data, 25k test data Args: limit: get only first N items for each class Returns: [X_train, X_test, y_train, y_test] """ movie_review_url = 'http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz' # download and extract, thus remove the suffix '.tar.gz' path = keras.utils.get_file( 'aclImdb.tar.gz', movie_review_url, extract=True)[:-7] X_train, y_train = read_pos_neg_data(path, 'train', limit) X_test, y_test = read_pos_neg_data(path, 'test', limit) if shuffle: X_train, y_train = sklearn.utils.shuffle(X_train, y_train) X_test, y_test = sklearn.utils.shuffle(X_test, y_test) return X_train, X_test, y_train, y_test
python
def imdb(limit=None, shuffle=True): """Downloads (and caches) IMDB Movie Reviews. 25k training data, 25k test data Args: limit: get only first N items for each class Returns: [X_train, X_test, y_train, y_test] """ movie_review_url = 'http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz' # download and extract, thus remove the suffix '.tar.gz' path = keras.utils.get_file( 'aclImdb.tar.gz', movie_review_url, extract=True)[:-7] X_train, y_train = read_pos_neg_data(path, 'train', limit) X_test, y_test = read_pos_neg_data(path, 'test', limit) if shuffle: X_train, y_train = sklearn.utils.shuffle(X_train, y_train) X_test, y_test = sklearn.utils.shuffle(X_test, y_test) return X_train, X_test, y_train, y_test
[ "def", "imdb", "(", "limit", "=", "None", ",", "shuffle", "=", "True", ")", ":", "movie_review_url", "=", "'http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'", "# download and extract, thus remove the suffix '.tar.gz'", "path", "=", "keras", ".", "utils", ".", "get_file", "(", "'aclImdb.tar.gz'", ",", "movie_review_url", ",", "extract", "=", "True", ")", "[", ":", "-", "7", "]", "X_train", ",", "y_train", "=", "read_pos_neg_data", "(", "path", ",", "'train'", ",", "limit", ")", "X_test", ",", "y_test", "=", "read_pos_neg_data", "(", "path", ",", "'test'", ",", "limit", ")", "if", "shuffle", ":", "X_train", ",", "y_train", "=", "sklearn", ".", "utils", ".", "shuffle", "(", "X_train", ",", "y_train", ")", "X_test", ",", "y_test", "=", "sklearn", ".", "utils", ".", "shuffle", "(", "X_test", ",", "y_test", ")", "return", "X_train", ",", "X_test", ",", "y_train", ",", "y_test" ]
Downloads (and caches) IMDB Movie Reviews. 25k training data, 25k test data Args: limit: get only first N items for each class Returns: [X_train, X_test, y_train, y_test]
[ "Downloads", "(", "and", "caches", ")", "IMDB", "Moview", "Reviews", ".", "25k", "training", "data", "25k", "test", "data" ]
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/corpus.py#L43-L66
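
A quick sketch of pulling the cached IMDB split through the `imdb` helper; the first call downloads roughly 80 MB, and `limit` keeps a toy run small.

from texcla.corpus import imdb

# Only the first 100 positive and 100 negative reviews per split.
X_train, X_test, y_train, y_test = imdb(limit=100)
print(len(X_train), len(X_test))  # 200 200
print(y_train[:10])
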
slightlynybbled/tk_tools
tk_tools/canvas.py
Dial.to_absolute
def to_absolute(self, x, y): """ Converts coordinates provided with reference to the center \ of the canvas (0, 0) to absolute coordinates which are used \ by the canvas object in which (0, 0) is located in the top \ left of the object. :param x: x value in pixels :param y: y value in pixels :return: the absolute (x, y) coordinates in pixels """ return x + self.size/2, y + self.size/2
python
def to_absolute(self, x, y): """ Converts coordinates provided with reference to the center \ of the canvas (0, 0) to absolute coordinates which are used \ by the canvas object in which (0, 0) is located in the top \ left of the object. :param x: x value in pixels :param y: y value in pixels :return: the absolute (x, y) coordinates in pixels """ return x + self.size/2, y + self.size/2
[ "def", "to_absolute", "(", "self", ",", "x", ",", "y", ")", ":", "return", "x", "+", "self", ".", "size", "/", "2", ",", "y", "+", "self", ".", "size", "/", "2" ]
Converts coordinates provided with reference to the center \ of the canvas (0, 0) to absolute coordinates which are used \ by the canvas object in which (0, 0) is located in the top \ left of the object. :param x: x value in pixels :param y: y value in pixels :return: the absolute (x, y) coordinates in pixels
[ "Converts", "coordinates", "provided", "with", "reference", "to", "the", "center", "\\", "of", "the", "canvas", "(", "0", "0", ")", "to", "absolute", "coordinates", "which", "are", "used", "\\", "by", "the", "canvas", "object", "in", "which", "(", "0", "0", ")", "is", "located", "in", "the", "top", "\\", "left", "of", "the", "object", "." ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/canvas.py#L43-L54
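
The conversion in `Dial.to_absolute` is simply a translation by half of the widget size; a tiny standalone check (the size value is chosen arbitrarily):

SIZE = 200  # assumed dial size in pixels

def to_absolute(x, y, size=SIZE):
    # Center-relative (0, 0) maps to the canvas midpoint (size/2, size/2).
    return x + size / 2, y + size / 2

print(to_absolute(0, 0))    # (100.0, 100.0)
print(to_absolute(-90, 0))  # (10.0, 100.0), near the left edge
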
slightlynybbled/tk_tools
tk_tools/canvas.py
RotaryScale.set_value
def set_value(self, number: (float, int)): """ Sets the value of the graphic :param number: the number (must be between 0 and \ 'max_range' or the scale will peg the limits :return: None """ self.canvas.delete('all') self.canvas.create_image(0, 0, image=self.image, anchor='nw') number = number if number <= self.max_value else self.max_value number = 0.0 if number < 0.0 else number radius = 0.9 * self.size/2.0 angle_in_radians = (2.0 * cmath.pi / 3.0) \ + number / self.max_value * (5.0 * cmath.pi / 3.0) center = cmath.rect(0, 0) outer = cmath.rect(radius, angle_in_radians) if self.needle_thickness == 0: line_width = int(5 * self.size / 200) line_width = 1 if line_width < 1 else line_width else: line_width = self.needle_thickness self.canvas.create_line( *self.to_absolute(center.real, center.imag), *self.to_absolute(outer.real, outer.imag), width=line_width, fill=self.needle_color ) self.readout['text'] = '{}{}'.format(number, self.unit)
python
def set_value(self, number: (float, int)): """ Sets the value of the graphic :param number: the number (must be between 0 and \ 'max_range' or the scale will peg the limits :return: None """ self.canvas.delete('all') self.canvas.create_image(0, 0, image=self.image, anchor='nw') number = number if number <= self.max_value else self.max_value number = 0.0 if number < 0.0 else number radius = 0.9 * self.size/2.0 angle_in_radians = (2.0 * cmath.pi / 3.0) \ + number / self.max_value * (5.0 * cmath.pi / 3.0) center = cmath.rect(0, 0) outer = cmath.rect(radius, angle_in_radians) if self.needle_thickness == 0: line_width = int(5 * self.size / 200) line_width = 1 if line_width < 1 else line_width else: line_width = self.needle_thickness self.canvas.create_line( *self.to_absolute(center.real, center.imag), *self.to_absolute(outer.real, outer.imag), width=line_width, fill=self.needle_color ) self.readout['text'] = '{}{}'.format(number, self.unit)
[ "def", "set_value", "(", "self", ",", "number", ":", "(", "float", ",", "int", ")", ")", ":", "self", ".", "canvas", ".", "delete", "(", "'all'", ")", "self", ".", "canvas", ".", "create_image", "(", "0", ",", "0", ",", "image", "=", "self", ".", "image", ",", "anchor", "=", "'nw'", ")", "number", "=", "number", "if", "number", "<=", "self", ".", "max_value", "else", "self", ".", "max_value", "number", "=", "0.0", "if", "number", "<", "0.0", "else", "number", "radius", "=", "0.9", "*", "self", ".", "size", "/", "2.0", "angle_in_radians", "=", "(", "2.0", "*", "cmath", ".", "pi", "/", "3.0", ")", "+", "number", "/", "self", ".", "max_value", "*", "(", "5.0", "*", "cmath", ".", "pi", "/", "3.0", ")", "center", "=", "cmath", ".", "rect", "(", "0", ",", "0", ")", "outer", "=", "cmath", ".", "rect", "(", "radius", ",", "angle_in_radians", ")", "if", "self", ".", "needle_thickness", "==", "0", ":", "line_width", "=", "int", "(", "5", "*", "self", ".", "size", "/", "200", ")", "line_width", "=", "1", "if", "line_width", "<", "1", "else", "line_width", "else", ":", "line_width", "=", "self", ".", "needle_thickness", "self", ".", "canvas", ".", "create_line", "(", "*", "self", ".", "to_absolute", "(", "center", ".", "real", ",", "center", ".", "imag", ")", ",", "*", "self", ".", "to_absolute", "(", "outer", ".", "real", ",", "outer", ".", "imag", ")", ",", "width", "=", "line_width", ",", "fill", "=", "self", ".", "needle_color", ")", "self", ".", "readout", "[", "'text'", "]", "=", "'{}{}'", ".", "format", "(", "number", ",", "self", ".", "unit", ")" ]
Sets the value of the graphic :param number: the number (must be between 0 and \ 'max_range' or the scale will peg the limits :return: None
[ "Sets", "the", "value", "of", "the", "graphic" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/canvas.py#L112-L145
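
The needle position in `RotaryScale.set_value` comes from mapping the pegged reading onto an angle between 2*pi/3 and 2*pi/3 + 5*pi/3 and converting it to Cartesian coordinates with `cmath.rect`; a standalone sketch of that mapping (the size and max_value constants are made up):

import cmath

SIZE, MAX_VALUE = 200, 100.0  # assumed widget size and full-scale value

def needle_tip(number):
    number = min(max(number, 0.0), MAX_VALUE)  # peg to [0, MAX_VALUE]
    radius = 0.9 * SIZE / 2.0
    angle = (2.0 * cmath.pi / 3.0) + number / MAX_VALUE * (5.0 * cmath.pi / 3.0)
    tip = cmath.rect(radius, angle)
    # Same shift as to_absolute: move the origin to the canvas center.
    return tip.real + SIZE / 2, tip.imag + SIZE / 2

print(needle_tip(0.0))        # needle at the start of the scale
print(needle_tip(MAX_VALUE))  # needle swept a further 300 degrees
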
slightlynybbled/tk_tools
tk_tools/canvas.py
RotaryScale._draw_background
def _draw_background(self, divisions=10): """ Draws the background of the dial :param divisions: the number of divisions between 'ticks' shown on the dial :return: None """ self.canvas.create_arc(2, 2, self.size-2, self.size-2, style=tk.PIESLICE, start=-60, extent=30, fill='red') self.canvas.create_arc(2, 2, self.size-2, self.size-2, style=tk.PIESLICE, start=-30, extent=60, fill='yellow') self.canvas.create_arc(2, 2, self.size-2, self.size-2, style=tk.PIESLICE, start=30, extent=210, fill='green') # find the distance between the center and the inner tick radius inner_tick_radius = int(self.size * 0.4) outer_tick_radius = int(self.size * 0.5) for tick in range(divisions): angle_in_radians = (2.0 * cmath.pi / 3.0) \ + tick/divisions * (5.0 * cmath.pi / 3.0) inner_point = cmath.rect(inner_tick_radius, angle_in_radians) outer_point = cmath.rect(outer_tick_radius, angle_in_radians) self.canvas.create_line( *self.to_absolute(inner_point.real, inner_point.imag), *self.to_absolute(outer_point.real, outer_point.imag), width=1 )
python
def _draw_background(self, divisions=10): """ Draws the background of the dial :param divisions: the number of divisions between 'ticks' shown on the dial :return: None """ self.canvas.create_arc(2, 2, self.size-2, self.size-2, style=tk.PIESLICE, start=-60, extent=30, fill='red') self.canvas.create_arc(2, 2, self.size-2, self.size-2, style=tk.PIESLICE, start=-30, extent=60, fill='yellow') self.canvas.create_arc(2, 2, self.size-2, self.size-2, style=tk.PIESLICE, start=30, extent=210, fill='green') # find the distance between the center and the inner tick radius inner_tick_radius = int(self.size * 0.4) outer_tick_radius = int(self.size * 0.5) for tick in range(divisions): angle_in_radians = (2.0 * cmath.pi / 3.0) \ + tick/divisions * (5.0 * cmath.pi / 3.0) inner_point = cmath.rect(inner_tick_radius, angle_in_radians) outer_point = cmath.rect(outer_tick_radius, angle_in_radians) self.canvas.create_line( *self.to_absolute(inner_point.real, inner_point.imag), *self.to_absolute(outer_point.real, outer_point.imag), width=1 )
[ "def", "_draw_background", "(", "self", ",", "divisions", "=", "10", ")", ":", "self", ".", "canvas", ".", "create_arc", "(", "2", ",", "2", ",", "self", ".", "size", "-", "2", ",", "self", ".", "size", "-", "2", ",", "style", "=", "tk", ".", "PIESLICE", ",", "start", "=", "-", "60", ",", "extent", "=", "30", ",", "fill", "=", "'red'", ")", "self", ".", "canvas", ".", "create_arc", "(", "2", ",", "2", ",", "self", ".", "size", "-", "2", ",", "self", ".", "size", "-", "2", ",", "style", "=", "tk", ".", "PIESLICE", ",", "start", "=", "-", "30", ",", "extent", "=", "60", ",", "fill", "=", "'yellow'", ")", "self", ".", "canvas", ".", "create_arc", "(", "2", ",", "2", ",", "self", ".", "size", "-", "2", ",", "self", ".", "size", "-", "2", ",", "style", "=", "tk", ".", "PIESLICE", ",", "start", "=", "30", ",", "extent", "=", "210", ",", "fill", "=", "'green'", ")", "# find the distance between the center and the inner tick radius", "inner_tick_radius", "=", "int", "(", "self", ".", "size", "*", "0.4", ")", "outer_tick_radius", "=", "int", "(", "self", ".", "size", "*", "0.5", ")", "for", "tick", "in", "range", "(", "divisions", ")", ":", "angle_in_radians", "=", "(", "2.0", "*", "cmath", ".", "pi", "/", "3.0", ")", "+", "tick", "/", "divisions", "*", "(", "5.0", "*", "cmath", ".", "pi", "/", "3.0", ")", "inner_point", "=", "cmath", ".", "rect", "(", "inner_tick_radius", ",", "angle_in_radians", ")", "outer_point", "=", "cmath", ".", "rect", "(", "outer_tick_radius", ",", "angle_in_radians", ")", "self", ".", "canvas", ".", "create_line", "(", "*", "self", ".", "to_absolute", "(", "inner_point", ".", "real", ",", "inner_point", ".", "imag", ")", ",", "*", "self", ".", "to_absolute", "(", "outer_point", ".", "real", ",", "outer_point", ".", "imag", ")", ",", "width", "=", "1", ")" ]
Draws the background of the dial :param divisions: the number of divisions between 'ticks' shown on the dial :return: None
[ "Draws", "the", "background", "of", "the", "dial" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/canvas.py#L147-L179
slightlynybbled/tk_tools
tk_tools/canvas.py
Graph.draw_axes
def draw_axes(self): """ Removes all existing series and re-draws the axes. :return: None """ self.canvas.delete('all') rect = 50, 50, self.w - 50, self.h - 50 self.canvas.create_rectangle(rect, outline="black") for x in self.frange(0, self.x_max - self.x_min + 1, self.x_tick): value = Decimal(self.x_min + x) if self.x_min <= value <= self.x_max: x_step = (self.px_x * x) / self.x_tick coord = 50 + x_step, self.h - 50, 50 + x_step, self.h - 45 self.canvas.create_line(coord, fill="black") coord = 50 + x_step, self.h - 40 label = round(Decimal(self.x_min + x), 1) self.canvas.create_text(coord, fill="black", text=label) for y in self.frange(0, self.y_max - self.y_min + 1, self.y_tick): value = Decimal(self.y_max - y) if self.y_min <= value <= self.y_max: y_step = (self.px_y * y) / self.y_tick coord = 45, 50 + y_step, 50, 50 + y_step self.canvas.create_line(coord, fill="black") coord = 35, 50 + y_step label = round(value, 1) self.canvas.create_text(coord, fill="black", text=label)
python
def draw_axes(self): """ Removes all existing series and re-draws the axes. :return: None """ self.canvas.delete('all') rect = 50, 50, self.w - 50, self.h - 50 self.canvas.create_rectangle(rect, outline="black") for x in self.frange(0, self.x_max - self.x_min + 1, self.x_tick): value = Decimal(self.x_min + x) if self.x_min <= value <= self.x_max: x_step = (self.px_x * x) / self.x_tick coord = 50 + x_step, self.h - 50, 50 + x_step, self.h - 45 self.canvas.create_line(coord, fill="black") coord = 50 + x_step, self.h - 40 label = round(Decimal(self.x_min + x), 1) self.canvas.create_text(coord, fill="black", text=label) for y in self.frange(0, self.y_max - self.y_min + 1, self.y_tick): value = Decimal(self.y_max - y) if self.y_min <= value <= self.y_max: y_step = (self.px_y * y) / self.y_tick coord = 45, 50 + y_step, 50, 50 + y_step self.canvas.create_line(coord, fill="black") coord = 35, 50 + y_step label = round(value, 1) self.canvas.create_text(coord, fill="black", text=label)
[ "def", "draw_axes", "(", "self", ")", ":", "self", ".", "canvas", ".", "delete", "(", "'all'", ")", "rect", "=", "50", ",", "50", ",", "self", ".", "w", "-", "50", ",", "self", ".", "h", "-", "50", "self", ".", "canvas", ".", "create_rectangle", "(", "rect", ",", "outline", "=", "\"black\"", ")", "for", "x", "in", "self", ".", "frange", "(", "0", ",", "self", ".", "x_max", "-", "self", ".", "x_min", "+", "1", ",", "self", ".", "x_tick", ")", ":", "value", "=", "Decimal", "(", "self", ".", "x_min", "+", "x", ")", "if", "self", ".", "x_min", "<=", "value", "<=", "self", ".", "x_max", ":", "x_step", "=", "(", "self", ".", "px_x", "*", "x", ")", "/", "self", ".", "x_tick", "coord", "=", "50", "+", "x_step", ",", "self", ".", "h", "-", "50", ",", "50", "+", "x_step", ",", "self", ".", "h", "-", "45", "self", ".", "canvas", ".", "create_line", "(", "coord", ",", "fill", "=", "\"black\"", ")", "coord", "=", "50", "+", "x_step", ",", "self", ".", "h", "-", "40", "label", "=", "round", "(", "Decimal", "(", "self", ".", "x_min", "+", "x", ")", ",", "1", ")", "self", ".", "canvas", ".", "create_text", "(", "coord", ",", "fill", "=", "\"black\"", ",", "text", "=", "label", ")", "for", "y", "in", "self", ".", "frange", "(", "0", ",", "self", ".", "y_max", "-", "self", ".", "y_min", "+", "1", ",", "self", ".", "y_tick", ")", ":", "value", "=", "Decimal", "(", "self", ".", "y_max", "-", "y", ")", "if", "self", ".", "y_min", "<=", "value", "<=", "self", ".", "y_max", ":", "y_step", "=", "(", "self", ".", "px_y", "*", "y", ")", "/", "self", ".", "y_tick", "coord", "=", "45", ",", "50", "+", "y_step", ",", "50", ",", "50", "+", "y_step", "self", ".", "canvas", ".", "create_line", "(", "coord", ",", "fill", "=", "\"black\"", ")", "coord", "=", "35", ",", "50", "+", "y_step", "label", "=", "round", "(", "value", ",", "1", ")", "self", ".", "canvas", ".", "create_text", "(", "coord", ",", "fill", "=", "\"black\"", ",", "text", "=", "label", ")" ]
Removes all existing series and re-draws the axes. :return: None
[ "Removes", "all", "existing", "series", "and", "re", "-", "draws", "the", "axes", "." ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/canvas.py#L391-L423
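The axis loop above mixes Decimal labels with pixel offsets; the x-axis part can be sketched without a canvas. Here x_min, x_max, x_tick and px_x are illustrative assumptions, not the widget's defaults.

from decimal import Decimal

x_min, x_max, x_tick = 0, 10, 2     # assumed graph range and tick spacing
px_x = 50                           # assumed pixels per tick

x = 0
while x < (x_max - x_min + 1):
    value = Decimal(x_min + x)
    if x_min <= value <= x_max:
        x_step = (px_x * x) / x_tick            # offset from the left margin
        label = round(Decimal(x_min + x), 1)
        print('tick at %.1f px, label %s' % (50 + x_step, label))
    x += x_tick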
slightlynybbled/tk_tools
tk_tools/canvas.py
Graph.plot_point
def plot_point(self, x, y, visible=True, color='black', size=5): """ Places a single point on the grid :param x: the x coordinate :param y: the y coordinate :param visible: True if the individual point should be visible :param color: the color of the point :param size: the point size in pixels :return: The absolute coordinates as a tuple """ xp = (self.px_x * (x - self.x_min)) / self.x_tick yp = (self.px_y * (self.y_max - y)) / self.y_tick coord = 50 + xp, 50 + yp if visible: # divide down to an appropriate size size = int(size/2) if int(size/2) > 1 else 1 x, y = coord self.canvas.create_oval( x-size, y-size, x+size, y+size, fill=color ) return coord
python
def plot_point(self, x, y, visible=True, color='black', size=5): """ Places a single point on the grid :param x: the x coordinate :param y: the y coordinate :param visible: True if the individual point should be visible :param color: the color of the point :param size: the point size in pixels :return: The absolute coordinates as a tuple """ xp = (self.px_x * (x - self.x_min)) / self.x_tick yp = (self.px_y * (self.y_max - y)) / self.y_tick coord = 50 + xp, 50 + yp if visible: # divide down to an appropriate size size = int(size/2) if int(size/2) > 1 else 1 x, y = coord self.canvas.create_oval( x-size, y-size, x+size, y+size, fill=color ) return coord
[ "def", "plot_point", "(", "self", ",", "x", ",", "y", ",", "visible", "=", "True", ",", "color", "=", "'black'", ",", "size", "=", "5", ")", ":", "xp", "=", "(", "self", ".", "px_x", "*", "(", "x", "-", "self", ".", "x_min", ")", ")", "/", "self", ".", "x_tick", "yp", "=", "(", "self", ".", "px_y", "*", "(", "self", ".", "y_max", "-", "y", ")", ")", "/", "self", ".", "y_tick", "coord", "=", "50", "+", "xp", ",", "50", "+", "yp", "if", "visible", ":", "# divide down to an appropriate size", "size", "=", "int", "(", "size", "/", "2", ")", "if", "int", "(", "size", "/", "2", ")", ">", "1", "else", "1", "x", ",", "y", "=", "coord", "self", ".", "canvas", ".", "create_oval", "(", "x", "-", "size", ",", "y", "-", "size", ",", "x", "+", "size", ",", "y", "+", "size", ",", "fill", "=", "color", ")", "return", "coord" ]
Places a single point on the grid :param x: the x coordinate :param y: the y coordinate :param visible: True if the individual point should be visible :param color: the color of the point :param size: the point size in pixels :return: The absolute coordinates as a tuple
[ "Places", "a", "single", "point", "on", "the", "grid" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/canvas.py#L425-L451
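A standalone sketch of the data-to-canvas mapping that plot_point performs; the 50-pixel margin matches the axes rectangle drawn in draw_axes, while the remaining parameters are illustrative assumptions.

def to_canvas(x, y, x_min, y_max, x_tick, y_tick, px_x, px_y, margin=50):
    # map a data point onto absolute canvas coordinates (canvas y grows downward)
    xp = (px_x * (x - x_min)) / x_tick
    yp = (px_y * (y_max - y)) / y_tick
    return margin + xp, margin + yp

# hypothetical graph: x 0..10, y 0..5, 50 px per tick
print(to_canvas(3, 2, x_min=0, y_max=5, x_tick=1, y_tick=1, px_x=50, px_y=50))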
slightlynybbled/tk_tools
tk_tools/canvas.py
Graph.plot_line
def plot_line(self, points: list, color='black', point_visibility=False): """ Plot a line of points :param points: a list of tuples, each tuple containing an (x, y) point :param color: the color of the line :param point_visibility: True if the points \ should be individually visible :return: None """ last_point = () for point in points: this_point = self.plot_point(point[0], point[1], color=color, visible=point_visibility) if last_point: self.canvas.create_line(last_point + this_point, fill=color) last_point = this_point
python
def plot_line(self, points: list, color='black', point_visibility=False): """ Plot a line of points :param points: a list of tuples, each tuple containing an (x, y) point :param color: the color of the line :param point_visibility: True if the points \ should be individually visible :return: None """ last_point = () for point in points: this_point = self.plot_point(point[0], point[1], color=color, visible=point_visibility) if last_point: self.canvas.create_line(last_point + this_point, fill=color) last_point = this_point
[ "def", "plot_line", "(", "self", ",", "points", ":", "list", ",", "color", "=", "'black'", ",", "point_visibility", "=", "False", ")", ":", "last_point", "=", "(", ")", "for", "point", "in", "points", ":", "this_point", "=", "self", ".", "plot_point", "(", "point", "[", "0", "]", ",", "point", "[", "1", "]", ",", "color", "=", "color", ",", "visible", "=", "point_visibility", ")", "if", "last_point", ":", "self", ".", "canvas", ".", "create_line", "(", "last_point", "+", "this_point", ",", "fill", "=", "color", ")", "last_point", "=", "this_point" ]
Plot a line of points :param points: a list of tuples, each tuple containing an (x, y) point :param color: the color of the line :param point_visibility: True if the points \ should be individually visible :return: None
[ "Plot", "a", "line", "of", "points" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/canvas.py#L453-L470
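plot_line extends a polyline by remembering the previous point; the same pairing, stripped of canvas calls and run on illustrative data:

points = [(0, 0), (1, 2), (2, 1), (3, 3)]    # illustrative data

last_point = ()
for point in points:
    this_point = point
    if last_point:
        print('segment %s -> %s' % (last_point, this_point))
    last_point = this_point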
slightlynybbled/tk_tools
tk_tools/canvas.py
Graph.frange
def frange(start, stop, step, digits_to_round=3): """ Works like range for doubles :param start: starting value :param stop: ending value :param step: the increment_value :param digits_to_round: the digits to which to round \ (makes floating-point numbers much easier to work with) :return: generator """ while start < stop: yield round(start, digits_to_round) start += step
python
def frange(start, stop, step, digits_to_round=3): """ Works like range for doubles :param start: starting value :param stop: ending value :param step: the increment_value :param digits_to_round: the digits to which to round \ (makes floating-point numbers much easier to work with) :return: generator """ while start < stop: yield round(start, digits_to_round) start += step
[ "def", "frange", "(", "start", ",", "stop", ",", "step", ",", "digits_to_round", "=", "3", ")", ":", "while", "start", "<", "stop", ":", "yield", "round", "(", "start", ",", "digits_to_round", ")", "start", "+=", "step" ]
Works like range for doubles :param start: starting value :param stop: ending value :param step: the increment_value :param digits_to_round: the digits to which to round \ (makes floating-point numbers much easier to work with) :return: generator
[ "Works", "like", "range", "for", "doubles" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/canvas.py#L474-L487
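frange is an ordinary generator, so it can be exercised on its own:

def frange(start, stop, step, digits_to_round=3):
    # float-friendly range, rounding to keep accumulated error out of the output
    while start < stop:
        yield round(start, digits_to_round)
        start += step

print(list(frange(0, 1, 0.25)))    # [0, 0.25, 0.5, 0.75]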
slightlynybbled/tk_tools
tk_tools/canvas.py
Led._load_new
def _load_new(self, img_data: str): """ Load a new image. :param img_data: the image data as a base64 string :return: None """ self._image = tk.PhotoImage(data=img_data) self._image = self._image.subsample(int(200 / self._size), int(200 / self._size)) self._canvas.delete('all') self._canvas.create_image(0, 0, image=self._image, anchor='nw') if self._user_click_callback is not None: self._user_click_callback(self._on)
python
def _load_new(self, img_data: str): """ Load a new image. :param img_data: the image data as a base64 string :return: None """ self._image = tk.PhotoImage(data=img_data) self._image = self._image.subsample(int(200 / self._size), int(200 / self._size)) self._canvas.delete('all') self._canvas.create_image(0, 0, image=self._image, anchor='nw') if self._user_click_callback is not None: self._user_click_callback(self._on)
[ "def", "_load_new", "(", "self", ",", "img_data", ":", "str", ")", ":", "self", ".", "_image", "=", "tk", ".", "PhotoImage", "(", "data", "=", "img_data", ")", "self", ".", "_image", "=", "self", ".", "_image", ".", "subsample", "(", "int", "(", "200", "/", "self", ".", "_size", ")", ",", "int", "(", "200", "/", "self", ".", "_size", ")", ")", "self", ".", "_canvas", ".", "delete", "(", "'all'", ")", "self", ".", "_canvas", ".", "create_image", "(", "0", ",", "0", ",", "image", "=", "self", ".", "_image", ",", "anchor", "=", "'nw'", ")", "if", "self", ".", "_user_click_callback", "is", "not", "None", ":", "self", ".", "_user_click_callback", "(", "self", ".", "_on", ")" ]
Load a new image. :param img_data: the image data as a base64 string :return: None
[ "Load", "a", "new", "image", "." ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/canvas.py#L534-L548
slightlynybbled/tk_tools
tk_tools/canvas.py
Led.to_grey
def to_grey(self, on: bool=False): """ Change the LED to grey. :param on: Unused, here for API consistency with the other states :return: None """ self._on = False self._load_new(led_grey)
python
def to_grey(self, on: bool=False): """ Change the LED to grey. :param on: Unused, here for API consistency with the other states :return: None """ self._on = False self._load_new(led_grey)
[ "def", "to_grey", "(", "self", ",", "on", ":", "bool", "=", "False", ")", ":", "self", ".", "_on", "=", "False", "self", ".", "_load_new", "(", "led_grey", ")" ]
Change the LED to grey. :param on: Unused, here for API consistency with the other states :return: None
[ "Change", "the", "LED", "to", "grey", "." ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/canvas.py#L550-L558
slightlynybbled/tk_tools
tk_tools/canvas.py
Led.to_green
def to_green(self, on: bool=False): """ Change the LED to green (on or off). :param on: True or False :return: None """ self._on = on if on: self._load_new(led_green_on) if self._toggle_on_click: self._canvas.bind('<Button-1>', lambda x: self.to_green(False)) else: self._load_new(led_green) if self._toggle_on_click: self._canvas.bind('<Button-1>', lambda x: self.to_green(True))
python
def to_green(self, on: bool=False): """ Change the LED to green (on or off). :param on: True or False :return: None """ self._on = on if on: self._load_new(led_green_on) if self._toggle_on_click: self._canvas.bind('<Button-1>', lambda x: self.to_green(False)) else: self._load_new(led_green) if self._toggle_on_click: self._canvas.bind('<Button-1>', lambda x: self.to_green(True))
[ "def", "to_green", "(", "self", ",", "on", ":", "bool", "=", "False", ")", ":", "self", ".", "_on", "=", "on", "if", "on", ":", "self", ".", "_load_new", "(", "led_green_on", ")", "if", "self", ".", "_toggle_on_click", ":", "self", ".", "_canvas", ".", "bind", "(", "'<Button-1>'", ",", "lambda", "x", ":", "self", ".", "to_green", "(", "False", ")", ")", "else", ":", "self", ".", "_load_new", "(", "led_green", ")", "if", "self", ".", "_toggle_on_click", ":", "self", ".", "_canvas", ".", "bind", "(", "'<Button-1>'", ",", "lambda", "x", ":", "self", ".", "to_green", "(", "True", ")", ")" ]
Change the LED to green (on or off). :param on: True or False :return: None
[ "Change", "the", "LED", "to", "green", "(", "on", "or", "off", ")", "." ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/canvas.py#L560-L577
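The to_* methods implement click-to-toggle by rebinding <Button-1> to the opposite state on every change. A minimal plain-tkinter sketch of that pattern (not the Led widget itself; it needs a display to run):

import tkinter as tk

root = tk.Tk()
label = tk.Label(root, text='off', width=10, bg='grey')
label.pack()

def to_on(event=None):
    label.config(text='on', bg='green')
    label.bind('<Button-1>', to_off)    # the next click switches it off

def to_off(event=None):
    label.config(text='off', bg='grey')
    label.bind('<Button-1>', to_on)     # the next click switches it on

to_off()
root.mainloop()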
slightlynybbled/tk_tools
tk_tools/canvas.py
Led.to_red
def to_red(self, on: bool=False): """ Change the LED to red (on or off) :param on: True or False :return: None """ self._on = on if on: self._load_new(led_red_on) if self._toggle_on_click: self._canvas.bind('<Button-1>', lambda x: self.to_red(False)) else: self._load_new(led_red) if self._toggle_on_click: self._canvas.bind('<Button-1>', lambda x: self.to_red(True))
python
def to_red(self, on: bool=False): """ Change the LED to red (on or off) :param on: True or False :return: None """ self._on = on if on: self._load_new(led_red_on) if self._toggle_on_click: self._canvas.bind('<Button-1>', lambda x: self.to_red(False)) else: self._load_new(led_red) if self._toggle_on_click: self._canvas.bind('<Button-1>', lambda x: self.to_red(True))
[ "def", "to_red", "(", "self", ",", "on", ":", "bool", "=", "False", ")", ":", "self", ".", "_on", "=", "on", "if", "on", ":", "self", ".", "_load_new", "(", "led_red_on", ")", "if", "self", ".", "_toggle_on_click", ":", "self", ".", "_canvas", ".", "bind", "(", "'<Button-1>'", ",", "lambda", "x", ":", "self", ".", "to_red", "(", "False", ")", ")", "else", ":", "self", ".", "_load_new", "(", "led_red", ")", "if", "self", ".", "_toggle_on_click", ":", "self", ".", "_canvas", ".", "bind", "(", "'<Button-1>'", ",", "lambda", "x", ":", "self", ".", "to_red", "(", "True", ")", ")" ]
Change the LED to red (on or off) :param on: True or False :return: None
[ "Change", "the", "LED", "to", "red", "(", "on", "or", "off", ")", ":", "param", "on", ":", "True", "or", "False", ":", "return", ":", "None" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/canvas.py#L579-L595
slightlynybbled/tk_tools
tk_tools/canvas.py
Led.to_yellow
def to_yellow(self, on: bool=False): """ Change the LED to yellow (on or off) :param on: True or False :return: None """ self._on = on if on: self._load_new(led_yellow_on) if self._toggle_on_click: self._canvas.bind('<Button-1>', lambda x: self.to_yellow(False)) else: self._load_new(led_yellow) if self._toggle_on_click: self._canvas.bind('<Button-1>', lambda x: self.to_yellow(True))
python
def to_yellow(self, on: bool=False): """ Change the LED to yellow (on or off) :param on: True or False :return: None """ self._on = on if on: self._load_new(led_yellow_on) if self._toggle_on_click: self._canvas.bind('<Button-1>', lambda x: self.to_yellow(False)) else: self._load_new(led_yellow) if self._toggle_on_click: self._canvas.bind('<Button-1>', lambda x: self.to_yellow(True))
[ "def", "to_yellow", "(", "self", ",", "on", ":", "bool", "=", "False", ")", ":", "self", ".", "_on", "=", "on", "if", "on", ":", "self", ".", "_load_new", "(", "led_yellow_on", ")", "if", "self", ".", "_toggle_on_click", ":", "self", ".", "_canvas", ".", "bind", "(", "'<Button-1>'", ",", "lambda", "x", ":", "self", ".", "to_yellow", "(", "False", ")", ")", "else", ":", "self", ".", "_load_new", "(", "led_yellow", ")", "if", "self", ".", "_toggle_on_click", ":", "self", ".", "_canvas", ".", "bind", "(", "'<Button-1>'", ",", "lambda", "x", ":", "self", ".", "to_yellow", "(", "True", ")", ")" ]
Change the LED to yellow (on or off) :param on: True or False :return: None
[ "Change", "the", "LED", "to", "yellow", "(", "on", "or", "off", ")", ":", "param", "on", ":", "True", "or", "False", ":", "return", ":", "None" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/canvas.py#L597-L615
slightlynybbled/tk_tools
tk_tools/groups.py
_Grid._redraw
def _redraw(self): """ Forgets the current layout and redraws with the most recent information :return: None """ for row in self._rows: for widget in row: widget.grid_forget() offset = 0 if not self.headers else 1 for i, row in enumerate(self._rows): for j, widget in enumerate(row): widget.grid(row=i+offset, column=j)
python
def _redraw(self): """ Forgets the current layout and redraws with the most recent information :return: None """ for row in self._rows: for widget in row: widget.grid_forget() offset = 0 if not self.headers else 1 for i, row in enumerate(self._rows): for j, widget in enumerate(row): widget.grid(row=i+offset, column=j)
[ "def", "_redraw", "(", "self", ")", ":", "for", "row", "in", "self", ".", "_rows", ":", "for", "widget", "in", "row", ":", "widget", ".", "grid_forget", "(", ")", "offset", "=", "0", "if", "not", "self", ".", "headers", "else", "1", "for", "i", ",", "row", "in", "enumerate", "(", "self", ".", "_rows", ")", ":", "for", "j", ",", "widget", "in", "enumerate", "(", "row", ")", ":", "widget", ".", "grid", "(", "row", "=", "i", "+", "offset", ",", "column", "=", "j", ")" ]
Forgets the current layout and redraws with the most recent information :return: None
[ "Forgets", "the", "current", "layout", "and", "redraws", "with", "the", "most", "recent", "information" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L56-L69
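The only subtlety in the redraw is the one-row shift when headers are present; the arithmetic by itself, on illustrative data:

headers = ['a', 'b']                    # set to [] to see the unshifted layout
rows = [['1', '2'], ['3', '4']]

offset = 0 if not headers else 1
for i, row in enumerate(rows):
    for j, value in enumerate(row):
        print('value %s -> grid row %d, column %d' % (value, i + offset, j))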
slightlynybbled/tk_tools
tk_tools/groups.py
_Grid.remove_row
def remove_row(self, row_number: int=-1): """ Removes a specified row of data :param row_number: the row to remove (defaults to the last row) :return: None """ if len(self._rows) == 0: return row = self._rows.pop(row_number) for widget in row: widget.destroy()
python
def remove_row(self, row_number: int=-1): """ Removes a specified row of data :param row_number: the row to remove (defaults to the last row) :return: None """ if len(self._rows) == 0: return row = self._rows.pop(row_number) for widget in row: widget.destroy()
[ "def", "remove_row", "(", "self", ",", "row_number", ":", "int", "=", "-", "1", ")", ":", "if", "len", "(", "self", ".", "_rows", ")", "==", "0", ":", "return", "row", "=", "self", ".", "_rows", ".", "pop", "(", "row_number", ")", "for", "widget", "in", "row", ":", "widget", ".", "destroy", "(", ")" ]
Removes a specified row of data :param row_number: the row to remove (defaults to the last row) :return: None
[ "Removes", "a", "specified", "row", "of", "data" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L71-L83
slightlynybbled/tk_tools
tk_tools/groups.py
LabelGrid.add_row
def add_row(self, data: list): """ Add a row of data to the current widget :param data: a row of data :return: None """ # validation if self.headers: if len(self.headers) != len(data): raise ValueError if len(data) != self.num_of_columns: raise ValueError offset = 0 if not self.headers else 1 row = list() for i, element in enumerate(data): label = ttk.Label(self, text=str(element), relief=tk.GROOVE, padding=self.padding) label.grid(row=len(self._rows) + offset, column=i, sticky='E,W') row.append(label) self._rows.append(row)
python
def add_row(self, data: list): """ Add a row of data to the current widget :param data: a row of data :return: None """ # validation if self.headers: if len(self.headers) != len(data): raise ValueError if len(data) != self.num_of_columns: raise ValueError offset = 0 if not self.headers else 1 row = list() for i, element in enumerate(data): label = ttk.Label(self, text=str(element), relief=tk.GROOVE, padding=self.padding) label.grid(row=len(self._rows) + offset, column=i, sticky='E,W') row.append(label) self._rows.append(row)
[ "def", "add_row", "(", "self", ",", "data", ":", "list", ")", ":", "# validation", "if", "self", ".", "headers", ":", "if", "len", "(", "self", ".", "headers", ")", "!=", "len", "(", "data", ")", ":", "raise", "ValueError", "if", "len", "(", "data", ")", "!=", "self", ".", "num_of_columns", ":", "raise", "ValueError", "offset", "=", "0", "if", "not", "self", ".", "headers", "else", "1", "row", "=", "list", "(", ")", "for", "i", ",", "element", "in", "enumerate", "(", "data", ")", ":", "label", "=", "ttk", ".", "Label", "(", "self", ",", "text", "=", "str", "(", "element", ")", ",", "relief", "=", "tk", ".", "GROOVE", ",", "padding", "=", "self", ".", "padding", ")", "label", ".", "grid", "(", "row", "=", "len", "(", "self", ".", "_rows", ")", "+", "offset", ",", "column", "=", "i", ",", "sticky", "=", "'E,W'", ")", "row", ".", "append", "(", "label", ")", "self", ".", "_rows", ".", "append", "(", "row", ")" ]
Add a row of data to the current widget :param data: a row of data :return: None
[ "Add", "a", "row", "of", "data", "to", "the", "current", "widget" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L109-L132
slightlynybbled/tk_tools
tk_tools/groups.py
EntryGrid.add_row
def add_row(self, data: list=None): """ Add a row of data to the current widget, add a <Tab> \ binding to the last element of the last row, and set \ the focus at the beginning of the next row. :param data: a row of data :return: None """ # validation if self.headers and data: if len(self.headers) != len(data): raise ValueError offset = 0 if not self.headers else 1 row = list() if data: for i, element in enumerate(data): contents = '' if element is None else str(element) entry = ttk.Entry(self) entry.insert(0, contents) entry.grid(row=len(self._rows) + offset, column=i, sticky='E,W') row.append(entry) else: for i in range(self.num_of_columns): entry = ttk.Entry(self) entry.grid(row=len(self._rows) + offset, column=i, sticky='E,W') row.append(entry) self._rows.append(row) # clear all bindings for row in self._rows: for widget in row: widget.unbind('<Tab>') def add(e): self.add_row() last_entry = self._rows[-1][-1] last_entry.bind('<Tab>', add) e = self._rows[-1][0] e.focus_set() self._redraw()
python
def add_row(self, data: list=None): """ Add a row of data to the current widget, add a <Tab> \ binding to the last element of the last row, and set \ the focus at the beginning of the next row. :param data: a row of data :return: None """ # validation if self.headers and data: if len(self.headers) != len(data): raise ValueError offset = 0 if not self.headers else 1 row = list() if data: for i, element in enumerate(data): contents = '' if element is None else str(element) entry = ttk.Entry(self) entry.insert(0, contents) entry.grid(row=len(self._rows) + offset, column=i, sticky='E,W') row.append(entry) else: for i in range(self.num_of_columns): entry = ttk.Entry(self) entry.grid(row=len(self._rows) + offset, column=i, sticky='E,W') row.append(entry) self._rows.append(row) # clear all bindings for row in self._rows: for widget in row: widget.unbind('<Tab>') def add(e): self.add_row() last_entry = self._rows[-1][-1] last_entry.bind('<Tab>', add) e = self._rows[-1][0] e.focus_set() self._redraw()
[ "def", "add_row", "(", "self", ",", "data", ":", "list", "=", "None", ")", ":", "# validation", "if", "self", ".", "headers", "and", "data", ":", "if", "len", "(", "self", ".", "headers", ")", "!=", "len", "(", "data", ")", ":", "raise", "ValueError", "offset", "=", "0", "if", "not", "self", ".", "headers", "else", "1", "row", "=", "list", "(", ")", "if", "data", ":", "for", "i", ",", "element", "in", "enumerate", "(", "data", ")", ":", "contents", "=", "''", "if", "element", "is", "None", "else", "str", "(", "element", ")", "entry", "=", "ttk", ".", "Entry", "(", "self", ")", "entry", ".", "insert", "(", "0", ",", "contents", ")", "entry", ".", "grid", "(", "row", "=", "len", "(", "self", ".", "_rows", ")", "+", "offset", ",", "column", "=", "i", ",", "sticky", "=", "'E,W'", ")", "row", ".", "append", "(", "entry", ")", "else", ":", "for", "i", "in", "range", "(", "self", ".", "num_of_columns", ")", ":", "entry", "=", "ttk", ".", "Entry", "(", "self", ")", "entry", ".", "grid", "(", "row", "=", "len", "(", "self", ".", "_rows", ")", "+", "offset", ",", "column", "=", "i", ",", "sticky", "=", "'E,W'", ")", "row", ".", "append", "(", "entry", ")", "self", ".", "_rows", ".", "append", "(", "row", ")", "# clear all bindings", "for", "row", "in", "self", ".", "_rows", ":", "for", "widget", "in", "row", ":", "widget", ".", "unbind", "(", "'<Tab>'", ")", "def", "add", "(", "e", ")", ":", "self", ".", "add_row", "(", ")", "last_entry", "=", "self", ".", "_rows", "[", "-", "1", "]", "[", "-", "1", "]", "last_entry", ".", "bind", "(", "'<Tab>'", ",", "add", ")", "e", "=", "self", ".", "_rows", "[", "-", "1", "]", "[", "0", "]", "e", ".", "focus_set", "(", ")", "self", ".", "_redraw", "(", ")" ]
Add a row of data to the current widget, add a <Tab> \ binding to the last element of the last row, and set \ the focus at the beginning of the next row. :param data: a row of data :return: None
[ "Add", "a", "row", "of", "data", "to", "the", "current", "widget", "add", "a", "<Tab", ">", "\\", "binding", "to", "the", "last", "element", "of", "the", "last", "row", "and", "set", "\\", "the", "focus", "at", "the", "beginning", "of", "the", "next", "row", "." ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L148-L198
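The notable part of EntryGrid.add_row is the <Tab> chaining: only the newest entry keeps a binding that appends another row. A stripped-down plain-tkinter sketch of that idea (hypothetical, not the EntryGrid code; needs a display):

import tkinter as tk
from tkinter import ttk

root = tk.Tk()
entries = []

def add_row(event=None):
    # drop <Tab> everywhere, then bind it only on the entry just created
    for entry in entries:
        entry.unbind('<Tab>')
    entry = ttk.Entry(root)
    entry.grid(row=len(entries), column=0)
    entry.bind('<Tab>', add_row)
    entry.focus_set()
    entries.append(entry)

add_row()
root.mainloop()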
slightlynybbled/tk_tools
tk_tools/groups.py
EntryGrid._read_as_dict
def _read_as_dict(self): """ Read the data contained in all entries as a list of dictionaries with the headers as the dictionary keys :return: list of dicts containing all tabular data """ data = list() for row in self._rows: row_data = OrderedDict() for i, header in enumerate(self.headers): row_data[header.cget('text')] = row[i].get() data.append(row_data) return data
python
def _read_as_dict(self): """ Read the data contained in all entries as a list of dictionaries with the headers as the dictionary keys :return: list of dicts containing all tabular data """ data = list() for row in self._rows: row_data = OrderedDict() for i, header in enumerate(self.headers): row_data[header.cget('text')] = row[i].get() data.append(row_data) return data
[ "def", "_read_as_dict", "(", "self", ")", ":", "data", "=", "list", "(", ")", "for", "row", "in", "self", ".", "_rows", ":", "row_data", "=", "OrderedDict", "(", ")", "for", "i", ",", "header", "in", "enumerate", "(", "self", ".", "headers", ")", ":", "row_data", "[", "header", ".", "cget", "(", "'text'", ")", "]", "=", "row", "[", "i", "]", ".", "get", "(", ")", "data", ".", "append", "(", "row_data", ")", "return", "data" ]
Read the data contained in all entries as a list of dictionaries with the headers as the dictionary keys :return: list of dicts containing all tabular data
[ "Read", "the", "data", "contained", "in", "all", "entries", "as", "a", "list", "of", "dictionaries", "with", "the", "headers", "as", "the", "dictionary", "keys" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L200-L215
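Stripped of the widget calls, _read_as_dict is a header/value zip per row; with plain lists standing in for the labels and entries:

from collections import OrderedDict

headers = ['name', 'qty']                    # illustrative headers
rows = [['bolt', '4'], ['nut', '9']]         # illustrative entry contents

data = []
for row in rows:
    row_data = OrderedDict()
    for i, header in enumerate(headers):
        row_data[header] = row[i]
    data.append(row_data)

print(data)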
slightlynybbled/tk_tools
tk_tools/groups.py
EntryGrid._read_as_table
def _read_as_table(self): """ Read the data contained in all entries as a list of lists containing all of the data :return: list of lists containing all tabular data """ rows = list() for row in self._rows: rows.append([row[i].get() for i in range(self.num_of_columns)]) return rows
python
def _read_as_table(self): """ Read the data contained in all entries as a list of lists containing all of the data :return: list of lists containing all tabular data """ rows = list() for row in self._rows: rows.append([row[i].get() for i in range(self.num_of_columns)]) return rows
[ "def", "_read_as_table", "(", "self", ")", ":", "rows", "=", "list", "(", ")", "for", "row", "in", "self", ".", "_rows", ":", "rows", ".", "append", "(", "[", "row", "[", "i", "]", ".", "get", "(", ")", "for", "i", "in", "range", "(", "self", ".", "num_of_columns", ")", "]", ")", "return", "rows" ]
Read the data contained in all entries as a list of lists containing all of the data :return: list of lists containing all tabular data
[ "Read", "the", "data", "contained", "in", "all", "entries", "as", "a", "list", "of", "lists", "containing", "all", "of", "the", "data" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L217-L229
slightlynybbled/tk_tools
tk_tools/groups.py
ButtonGrid.add_row
def add_row(self, data: list): """ Add a row of buttons each with their own callbacks to the current widget. Each element in `data` will consist of a label and a command. :param data: a list of tuples of the form ('label', <callback>) :return: None """ # validation if self.headers and data: if len(self.headers) != len(data): raise ValueError offset = 0 if not self.headers else 1 row = list() for i, e in enumerate(data): if not isinstance(e, tuple): raise ValueError('all elements must be a tuple ' 'consisting of ("label", <command>)') label, command = e button = tk.Button(self, text=str(label), relief=tk.RAISED, command=command, padx=self.padding, pady=self.padding) button.grid(row=len(self._rows) + offset, column=i, sticky='ew') row.append(button) self._rows.append(row)
python
def add_row(self, data: list): """ Add a row of buttons each with their own callbacks to the current widget. Each element in `data` will consist of a label and a command. :param data: a list of tuples of the form ('label', <callback>) :return: None """ # validation if self.headers and data: if len(self.headers) != len(data): raise ValueError offset = 0 if not self.headers else 1 row = list() for i, e in enumerate(data): if not isinstance(e, tuple): raise ValueError('all elements must be a tuple ' 'consisting of ("label", <command>)') label, command = e button = tk.Button(self, text=str(label), relief=tk.RAISED, command=command, padx=self.padding, pady=self.padding) button.grid(row=len(self._rows) + offset, column=i, sticky='ew') row.append(button) self._rows.append(row)
[ "def", "add_row", "(", "self", ",", "data", ":", "list", ")", ":", "# validation", "if", "self", ".", "headers", "and", "data", ":", "if", "len", "(", "self", ".", "headers", ")", "!=", "len", "(", "data", ")", ":", "raise", "ValueError", "offset", "=", "0", "if", "not", "self", ".", "headers", "else", "1", "row", "=", "list", "(", ")", "for", "i", ",", "e", "in", "enumerate", "(", "data", ")", ":", "if", "not", "isinstance", "(", "e", ",", "tuple", ")", ":", "raise", "ValueError", "(", "'all elements must be a tuple '", "'consisting of (\"label\", <command>)'", ")", "label", ",", "command", "=", "e", "button", "=", "tk", ".", "Button", "(", "self", ",", "text", "=", "str", "(", "label", ")", ",", "relief", "=", "tk", ".", "RAISED", ",", "command", "=", "command", ",", "padx", "=", "self", ".", "padding", ",", "pady", "=", "self", ".", "padding", ")", "button", ".", "grid", "(", "row", "=", "len", "(", "self", ".", "_rows", ")", "+", "offset", ",", "column", "=", "i", ",", "sticky", "=", "'ew'", ")", "row", ".", "append", "(", "button", ")", "self", ".", "_rows", ".", "append", "(", "row", ")" ]
Add a row of buttons each with their own callbacks to the current widget. Each element in `data` will consist of a label and a command. :param data: a list of tuples of the form ('label', <callback>) :return: None
[ "Add", "a", "row", "of", "buttons", "each", "with", "their", "own", "callbacks", "to", "the", "current", "widget", ".", "Each", "element", "in", "data", "will", "consist", "of", "a", "label", "and", "a", "command", ".", ":", "param", "data", ":", "a", "list", "of", "tuples", "of", "the", "form", "(", "label", "<callback", ">", ")", ":", "return", ":", "None" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L256-L287
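Each element handed to ButtonGrid.add_row must be a ('label', callback) tuple; the validation can be checked without creating any widgets (the callback here is a stand-in):

def validate(data):
    for e in data:
        if not isinstance(e, tuple):
            raise ValueError('all elements must be a tuple '
                             'consisting of ("label", <command>)')
        label, command = e
        print('would create button %r calling %s' % (label, command.__name__))

def on_click():
    print('clicked')

validate([('ok', on_click), ('cancel', on_click)])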
slightlynybbled/tk_tools
tk_tools/groups.py
KeyValueEntry.add_row
def add_row(self, key: str, default: str=None, unit_label: str=None, enable: bool=None): """ Add a single row and re-draw as necessary :param key: the name and dict accessor :param default: the default value :param unit_label: the label that should be \ applied at the right of the entry :param enable: the 'enabled' state (defaults to True) :return: """ self.keys.append(ttk.Label(self, text=key)) self.defaults.append(default) self.unit_labels.append( ttk.Label(self, text=unit_label if unit_label else '') ) self.enables.append(enable) self.values.append(ttk.Entry(self)) row_offset = 1 if self.title is not None else 0 for i in range(len(self.keys)): self.keys[i].grid_forget() self.keys[i].grid(row=row_offset, column=0, sticky='e') self.values[i].grid(row=row_offset, column=1) if self.unit_labels[i]: self.unit_labels[i].grid(row=row_offset, column=3, sticky='w') if self.defaults[i]: self.values[i].config(state=tk.NORMAL) self.values[i].delete(0, tk.END) self.values[i].insert(0, self.defaults[i]) if self.enables[i] in [True, None]: self.values[i].config(state=tk.NORMAL) elif self.enables[i] is False: self.values[i].config(state=tk.DISABLED) row_offset += 1 # strip <Return> and <Tab> bindings, add callbacks to all entries self.values[i].unbind('<Return>') self.values[i].unbind('<Tab>') if self.callback is not None: def callback(event): self.callback() self.values[i].bind('<Return>', callback) self.values[i].bind('<Tab>', callback)
python
def add_row(self, key: str, default: str=None, unit_label: str=None, enable: bool=None): """ Add a single row and re-draw as necessary :param key: the name and dict accessor :param default: the default value :param unit_label: the label that should be \ applied at the right of the entry :param enable: the 'enabled' state (defaults to True) :return: """ self.keys.append(ttk.Label(self, text=key)) self.defaults.append(default) self.unit_labels.append( ttk.Label(self, text=unit_label if unit_label else '') ) self.enables.append(enable) self.values.append(ttk.Entry(self)) row_offset = 1 if self.title is not None else 0 for i in range(len(self.keys)): self.keys[i].grid_forget() self.keys[i].grid(row=row_offset, column=0, sticky='e') self.values[i].grid(row=row_offset, column=1) if self.unit_labels[i]: self.unit_labels[i].grid(row=row_offset, column=3, sticky='w') if self.defaults[i]: self.values[i].config(state=tk.NORMAL) self.values[i].delete(0, tk.END) self.values[i].insert(0, self.defaults[i]) if self.enables[i] in [True, None]: self.values[i].config(state=tk.NORMAL) elif self.enables[i] is False: self.values[i].config(state=tk.DISABLED) row_offset += 1 # strip <Return> and <Tab> bindings, add callbacks to all entries self.values[i].unbind('<Return>') self.values[i].unbind('<Tab>') if self.callback is not None: def callback(event): self.callback() self.values[i].bind('<Return>', callback) self.values[i].bind('<Tab>', callback)
[ "def", "add_row", "(", "self", ",", "key", ":", "str", ",", "default", ":", "str", "=", "None", ",", "unit_label", ":", "str", "=", "None", ",", "enable", ":", "bool", "=", "None", ")", ":", "self", ".", "keys", ".", "append", "(", "ttk", ".", "Label", "(", "self", ",", "text", "=", "key", ")", ")", "self", ".", "defaults", ".", "append", "(", "default", ")", "self", ".", "unit_labels", ".", "append", "(", "ttk", ".", "Label", "(", "self", ",", "text", "=", "unit_label", "if", "unit_label", "else", "''", ")", ")", "self", ".", "enables", ".", "append", "(", "enable", ")", "self", ".", "values", ".", "append", "(", "ttk", ".", "Entry", "(", "self", ")", ")", "row_offset", "=", "1", "if", "self", ".", "title", "is", "not", "None", "else", "0", "for", "i", "in", "range", "(", "len", "(", "self", ".", "keys", ")", ")", ":", "self", ".", "keys", "[", "i", "]", ".", "grid_forget", "(", ")", "self", ".", "keys", "[", "i", "]", ".", "grid", "(", "row", "=", "row_offset", ",", "column", "=", "0", ",", "sticky", "=", "'e'", ")", "self", ".", "values", "[", "i", "]", ".", "grid", "(", "row", "=", "row_offset", ",", "column", "=", "1", ")", "if", "self", ".", "unit_labels", "[", "i", "]", ":", "self", ".", "unit_labels", "[", "i", "]", ".", "grid", "(", "row", "=", "row_offset", ",", "column", "=", "3", ",", "sticky", "=", "'w'", ")", "if", "self", ".", "defaults", "[", "i", "]", ":", "self", ".", "values", "[", "i", "]", ".", "config", "(", "state", "=", "tk", ".", "NORMAL", ")", "self", ".", "values", "[", "i", "]", ".", "delete", "(", "0", ",", "tk", ".", "END", ")", "self", ".", "values", "[", "i", "]", ".", "insert", "(", "0", ",", "self", ".", "defaults", "[", "i", "]", ")", "if", "self", ".", "enables", "[", "i", "]", "in", "[", "True", ",", "None", "]", ":", "self", ".", "values", "[", "i", "]", ".", "config", "(", "state", "=", "tk", ".", "NORMAL", ")", "elif", "self", ".", "enables", "[", "i", "]", "is", "False", ":", "self", ".", "values", "[", "i", "]", ".", "config", "(", "state", "=", "tk", ".", "DISABLED", ")", "row_offset", "+=", "1", "# strip <Return> and <Tab> bindings, add callbacks to all entries", "self", ".", "values", "[", "i", "]", ".", "unbind", "(", "'<Return>'", ")", "self", ".", "values", "[", "i", "]", ".", "unbind", "(", "'<Tab>'", ")", "if", "self", ".", "callback", "is", "not", "None", ":", "def", "callback", "(", "event", ")", ":", "self", ".", "callback", "(", ")", "self", ".", "values", "[", "i", "]", ".", "bind", "(", "'<Return>'", ",", "callback", ")", "self", ".", "values", "[", "i", "]", ".", "bind", "(", "'<Tab>'", ",", "callback", ")" ]
Add a single row and re-draw as necessary :param key: the name and dict accessor :param default: the default value :param unit_label: the label that should be \ applied at the right of the entry :param enable: the 'enabled' state (defaults to True) :return:
[ "Add", "a", "single", "row", "and", "re", "-", "draw", "as", "necessary" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L346-L399
slightlynybbled/tk_tools
tk_tools/groups.py
KeyValueEntry.reset
def reset(self): """ Clears all entries. :return: None """ for i in range(len(self.values)): self.values[i].delete(0, tk.END) if self.defaults[i] is not None: self.values[i].insert(0, self.defaults[i])
python
def reset(self): """ Clears all entries. :return: None """ for i in range(len(self.values)): self.values[i].delete(0, tk.END) if self.defaults[i] is not None: self.values[i].insert(0, self.defaults[i])
[ "def", "reset", "(", "self", ")", ":", "for", "i", "in", "range", "(", "len", "(", "self", ".", "values", ")", ")", ":", "self", ".", "values", "[", "i", "]", ".", "delete", "(", "0", ",", "tk", ".", "END", ")", "if", "self", ".", "defaults", "[", "i", "]", "is", "not", "None", ":", "self", ".", "values", "[", "i", "]", ".", "insert", "(", "0", ",", "self", ".", "defaults", "[", "i", "]", ")" ]
Clears all entries. :return: None
[ "Clears", "all", "entries", "." ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L401-L411
slightlynybbled/tk_tools
tk_tools/groups.py
KeyValueEntry.change_enables
def change_enables(self, enables_list: list): """ Enable/disable inputs. :param enables_list: list containing enables for each key :return: None """ for i, entry in enumerate(self.values): if enables_list[i]: entry.config(state=tk.NORMAL) else: entry.config(state=tk.DISABLED)
python
def change_enables(self, enables_list: list): """ Enable/disable inputs. :param enables_list: list containing enables for each key :return: None """ for i, entry in enumerate(self.values): if enables_list[i]: entry.config(state=tk.NORMAL) else: entry.config(state=tk.DISABLED)
[ "def", "change_enables", "(", "self", ",", "enables_list", ":", "list", ")", ":", "for", "i", ",", "entry", "in", "enumerate", "(", "self", ".", "values", ")", ":", "if", "enables_list", "[", "i", "]", ":", "entry", ".", "config", "(", "state", "=", "tk", ".", "NORMAL", ")", "else", ":", "entry", ".", "config", "(", "state", "=", "tk", ".", "DISABLED", ")" ]
Enable/disable inputs. :param enables_list: list containing enables for each key :return: None
[ "Enable", "/", "disable", "inputs", "." ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L413-L424
slightlynybbled/tk_tools
tk_tools/groups.py
KeyValueEntry.load
def load(self, data: dict): """ Load values into the key/values via dict. :param data: dict containing the key/values that should be inserted :return: None """ for i, label in enumerate(self.keys): key = label.cget('text') if key in data.keys(): entry_was_enabled = True if \ self.values[i].cget('state') == 'normal' else False if not entry_was_enabled: self.values[i].config(state='normal') self.values[i].delete(0, tk.END) self.values[i].insert(0, str(data[key])) if not entry_was_enabled: self.values[i].config(state='disabled')
python
def load(self, data: dict): """ Load values into the key/values via dict. :param data: dict containing the key/values that should be inserted :return: None """ for i, label in enumerate(self.keys): key = label.cget('text') if key in data.keys(): entry_was_enabled = True if \ self.values[i].cget('state') == 'normal' else False if not entry_was_enabled: self.values[i].config(state='normal') self.values[i].delete(0, tk.END) self.values[i].insert(0, str(data[key])) if not entry_was_enabled: self.values[i].config(state='disabled')
[ "def", "load", "(", "self", ",", "data", ":", "dict", ")", ":", "for", "i", ",", "label", "in", "enumerate", "(", "self", ".", "keys", ")", ":", "key", "=", "label", ".", "cget", "(", "'text'", ")", "if", "key", "in", "data", ".", "keys", "(", ")", ":", "entry_was_enabled", "=", "True", "if", "self", ".", "values", "[", "i", "]", ".", "cget", "(", "'state'", ")", "==", "'normal'", "else", "False", "if", "not", "entry_was_enabled", ":", "self", ".", "values", "[", "i", "]", ".", "config", "(", "state", "=", "'normal'", ")", "self", ".", "values", "[", "i", "]", ".", "delete", "(", "0", ",", "tk", ".", "END", ")", "self", ".", "values", "[", "i", "]", ".", "insert", "(", "0", ",", "str", "(", "data", "[", "key", "]", ")", ")", "if", "not", "entry_was_enabled", ":", "self", ".", "values", "[", "i", "]", ".", "config", "(", "state", "=", "'disabled'", ")" ]
Load values into the key/values via dict. :param data: dict containing the key/values that should be inserted :return: None
[ "Load", "values", "into", "the", "key", "/", "values", "via", "dict", "." ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L426-L445
slightlynybbled/tk_tools
tk_tools/groups.py
KeyValueEntry.get
def get(self): """ Retrieve the GUI elements for program use. :return: a dictionary containing all \ of the data from the key/value entries """ data = dict() for label, entry in zip(self.keys, self.values): data[label.cget('text')] = entry.get() return data
python
def get(self): """ Retrieve the GUI elements for program use. :return: a dictionary containing all \ of the data from the key/value entries """ data = dict() for label, entry in zip(self.keys, self.values): data[label.cget('text')] = entry.get() return data
[ "def", "get", "(", "self", ")", ":", "data", "=", "dict", "(", ")", "for", "label", ",", "entry", "in", "zip", "(", "self", ".", "keys", ",", "self", ".", "values", ")", ":", "data", "[", "label", ".", "cget", "(", "'text'", ")", "]", "=", "entry", ".", "get", "(", ")", "return", "data" ]
Retrieve the GUI elements for program use. :return: a dictionary containing all \ of the data from the key/value entries
[ "Retrieve", "the", "GUI", "elements", "for", "program", "use", "." ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L447-L458
slightlynybbled/tk_tools
tk_tools/groups.py
Calendar._pressed
def _pressed(self, evt): """ Clicked somewhere in the calendar. """ x, y, widget = evt.x, evt.y, evt.widget item = widget.identify_row(y) column = widget.identify_column(x) if not column or not (item in self._items): # clicked in the weekdays row or just outside the columns return item_values = widget.item(item)['values'] if not len(item_values): # row is empty for this month return text = item_values[int(column[1]) - 1] if not text: # date is empty return bbox = widget.bbox(item, column) if not bbox: # calendar not visible yet return # update and then show selection text = '%02d' % text self._selection = (text, item, column) self._show_selection(text, bbox) if self.callback is not None: self.callback()
python
def _pressed(self, evt): """ Clicked somewhere in the calendar. """ x, y, widget = evt.x, evt.y, evt.widget item = widget.identify_row(y) column = widget.identify_column(x) if not column or not (item in self._items): # clicked in the weekdays row or just outside the columns return item_values = widget.item(item)['values'] if not len(item_values): # row is empty for this month return text = item_values[int(column[1]) - 1] if not text: # date is empty return bbox = widget.bbox(item, column) if not bbox: # calendar not visible yet return # update and then show selection text = '%02d' % text self._selection = (text, item, column) self._show_selection(text, bbox) if self.callback is not None: self.callback()
[ "def", "_pressed", "(", "self", ",", "evt", ")", ":", "x", ",", "y", ",", "widget", "=", "evt", ".", "x", ",", "evt", ".", "y", ",", "evt", ".", "widget", "item", "=", "widget", ".", "identify_row", "(", "y", ")", "column", "=", "widget", ".", "identify_column", "(", "x", ")", "if", "not", "column", "or", "not", "(", "item", "in", "self", ".", "_items", ")", ":", "# clicked in the weekdays row or just outside the columns", "return", "item_values", "=", "widget", ".", "item", "(", "item", ")", "[", "'values'", "]", "if", "not", "len", "(", "item_values", ")", ":", "# row is empty for this month", "return", "text", "=", "item_values", "[", "int", "(", "column", "[", "1", "]", ")", "-", "1", "]", "if", "not", "text", ":", "# date is empty", "return", "bbox", "=", "widget", ".", "bbox", "(", "item", ",", "column", ")", "if", "not", "bbox", ":", "# calendar not visible yet", "return", "# update and then show selection", "text", "=", "'%02d'", "%", "text", "self", ".", "_selection", "=", "(", "text", ",", "item", ",", "column", ")", "self", ".", "_show_selection", "(", "text", ",", "bbox", ")", "if", "self", ".", "callback", "is", "not", "None", ":", "self", ".", "callback", "(", ")" ]
Clicked somewhere in the calendar.
[ "Clicked", "somewhere", "in", "the", "calendar", "." ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L636-L666
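The handler leans on ttk.Treeview hit-testing (identify_row, identify_column, item, bbox). A minimal sketch that prints whichever cell is clicked, using a generic Treeview rather than the Calendar widget (needs a display):

import tkinter as tk
from tkinter import ttk

root = tk.Tk()
tree = ttk.Treeview(root, columns=('a', 'b'), show='headings')
tree.insert('', 'end', values=(1, 2))
tree.insert('', 'end', values=(3, 4))
tree.pack()

def pressed(event):
    item = event.widget.identify_row(event.y)
    column = event.widget.identify_column(event.x)
    if not item or not column:
        return                              # clicked outside the data cells
    values = event.widget.item(item)['values']
    print('clicked column %s of row %s: %s' % (column, item, values))

tree.bind('<ButtonPress-1>', pressed)
root.mainloop()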
slightlynybbled/tk_tools
tk_tools/groups.py
_SlotFrame.add
def add(self, string: (str, list)): """ Clear the contents of the entry field and insert the contents of string. :param string: an str containing the text to display :return: """ if len(self._entries) == 1: self._entries[0].delete(0, 'end') self._entries[0].insert(0, string) else: if len(string) != len(self._entries): raise ValueError('the "string" list must be ' 'equal to the number of entries') for i, e in enumerate(self._entries): self._entries[i].delete(0, 'end') self._entries[i].insert(0, string[i])
python
def add(self, string: (str, list)): """ Clear the contents of the entry field and insert the contents of string. :param string: an str containing the text to display :return: """ if len(self._entries) == 1: self._entries[0].delete(0, 'end') self._entries[0].insert(0, string) else: if len(string) != len(self._entries): raise ValueError('the "string" list must be ' 'equal to the number of entries') for i, e in enumerate(self._entries): self._entries[i].delete(0, 'end') self._entries[i].insert(0, string[i])
[ "def", "add", "(", "self", ",", "string", ":", "(", "str", ",", "list", ")", ")", ":", "if", "len", "(", "self", ".", "_entries", ")", "==", "1", ":", "self", ".", "_entries", "[", "0", "]", ".", "delete", "(", "0", ",", "'end'", ")", "self", ".", "_entries", "[", "0", "]", ".", "insert", "(", "0", ",", "string", ")", "else", ":", "if", "len", "(", "string", ")", "!=", "len", "(", "self", ".", "_entries", ")", ":", "raise", "ValueError", "(", "'the \"string\" list must be '", "'equal to the number of entries'", ")", "for", "i", ",", "e", "in", "enumerate", "(", "self", ".", "_entries", ")", ":", "self", ".", "_entries", "[", "i", "]", ".", "delete", "(", "0", ",", "'end'", ")", "self", ".", "_entries", "[", "i", "]", ".", "insert", "(", "0", ",", "string", "[", "i", "]", ")" ]
Clear the contents of the entry field and insert the contents of string. :param string: an str containing the text to display :return:
[ "Clear", "the", "contents", "of", "the", "entry", "field", "and", "insert", "the", "contents", "of", "string", "." ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L736-L754
slightlynybbled/tk_tools
tk_tools/groups.py
_SlotFrame.remove
def remove(self): """ Deletes itself. :return: None """ for e in self._entries: e.grid_forget() e.destroy() self._remove_btn.grid_forget() self._remove_btn.destroy() self.deleted = True if self._remove_callback: self._remove_callback()
python
def remove(self): """ Deletes itself. :return: None """ for e in self._entries: e.grid_forget() e.destroy() self._remove_btn.grid_forget() self._remove_btn.destroy() self.deleted = True if self._remove_callback: self._remove_callback()
[ "def", "remove", "(", "self", ")", ":", "for", "e", "in", "self", ".", "_entries", ":", "e", ".", "grid_forget", "(", ")", "e", ".", "destroy", "(", ")", "self", ".", "_remove_btn", ".", "grid_forget", "(", ")", "self", ".", "_remove_btn", ".", "destroy", "(", ")", "self", ".", "deleted", "=", "True", "if", "self", ".", "_remove_callback", ":", "self", ".", "_remove_callback", "(", ")" ]
Deletes itself. :return: None
[ "Deletes", "itself", ".", ":", "return", ":", "None" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L756-L771
slightlynybbled/tk_tools
tk_tools/groups.py
_SlotFrame.get
def get(self): """ Returns the value for the slot. :return: the entry value """ values = [e.get() for e in self._entries] if len(self._entries) == 1: return values[0] else: return values
python
def get(self): """ Returns the value for the slot. :return: the entry value """ values = [e.get() for e in self._entries] if len(self._entries) == 1: return values[0] else: return values
[ "def", "get", "(", "self", ")", ":", "values", "=", "[", "e", ".", "get", "(", ")", "for", "e", "in", "self", ".", "_entries", "]", "if", "len", "(", "self", ".", "_entries", ")", "==", "1", ":", "return", "values", "[", "0", "]", "else", ":", "return", "values" ]
Returns the value for the slot. :return: the entry value
[ "Returns", "the", "value", "for", "the", "slot", ".", ":", "return", ":", "the", "entry", "value" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L773-L782
slightlynybbled/tk_tools
tk_tools/groups.py
MultiSlotFrame._redraw
def _redraw(self): """ Clears the current layout and re-draws all elements in self._slots :return: """ if self._blank_label: self._blank_label.grid_forget() self._blank_label.destroy() self._blank_label = None for slot in self._slots: slot.grid_forget() self._slots = [slot for slot in self._slots if not slot.deleted] max_per_col = 8 for i, slot in enumerate(self._slots): slot.grid(row=i % max_per_col, column=int(i / max_per_col), sticky='ew')
python
def _redraw(self): """ Clears the current layout and re-draws all elements in self._slots :return: """ if self._blank_label: self._blank_label.grid_forget() self._blank_label.destroy() self._blank_label = None for slot in self._slots: slot.grid_forget() self._slots = [slot for slot in self._slots if not slot.deleted] max_per_col = 8 for i, slot in enumerate(self._slots): slot.grid(row=i % max_per_col, column=int(i / max_per_col), sticky='ew')
[ "def", "_redraw", "(", "self", ")", ":", "if", "self", ".", "_blank_label", ":", "self", ".", "_blank_label", ".", "grid_forget", "(", ")", "self", ".", "_blank_label", ".", "destroy", "(", ")", "self", ".", "_blank_label", "=", "None", "for", "slot", "in", "self", ".", "_slots", ":", "slot", ".", "grid_forget", "(", ")", "self", ".", "_slots", "=", "[", "slot", "for", "slot", "in", "self", ".", "_slots", "if", "not", "slot", ".", "deleted", "]", "max_per_col", "=", "8", "for", "i", ",", "slot", "in", "enumerate", "(", "self", ".", "_slots", ")", ":", "slot", ".", "grid", "(", "row", "=", "i", "%", "max_per_col", ",", "column", "=", "int", "(", "i", "/", "max_per_col", ")", ",", "sticky", "=", "'ew'", ")" ]
Clears the current layout and re-draws all elements in self._slots :return:
[ "Clears", "the", "current", "layout", "and", "re", "-", "draws", "all", "elements", "in", "self", ".", "_slots", ":", "return", ":" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L820-L838
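Slots wrap into a fresh column every eight rows; the index arithmetic on its own:

max_per_col = 8
for i in range(20):                          # 20 hypothetical slots
    row = i % max_per_col
    column = i // max_per_col                # same as int(i / max_per_col) here
    print('slot %2d -> row %d, column %d' % (i, row, column))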
slightlynybbled/tk_tools
tk_tools/groups.py
MultiSlotFrame.add
def add(self, string: (str, list)): """ Add a new slot to the multi-frame containing the string. :param string: a string to insert :return: None """ slot = _SlotFrame(self, remove_callback=self._redraw, entries=self._slot_columns) slot.add(string) self._slots.append(slot) self._redraw()
python
def add(self, string: (str, list)): """ Add a new slot to the multi-frame containing the string. :param string: a string to insert :return: None """ slot = _SlotFrame(self, remove_callback=self._redraw, entries=self._slot_columns) slot.add(string) self._slots.append(slot) self._redraw()
[ "def", "add", "(", "self", ",", "string", ":", "(", "str", ",", "list", ")", ")", ":", "slot", "=", "_SlotFrame", "(", "self", ",", "remove_callback", "=", "self", ".", "_redraw", ",", "entries", "=", "self", ".", "_slot_columns", ")", "slot", ".", "add", "(", "string", ")", "self", ".", "_slots", ".", "append", "(", "slot", ")", "self", ".", "_redraw", "(", ")" ]
Add a new slot to the multi-frame containing the string. :param string: a string to insert :return: None
[ "Add", "a", "new", "slot", "to", "the", "multi", "-", "frame", "containing", "the", "string", ".", ":", "param", "string", ":", "a", "string", "to", "insert", ":", "return", ":", "None" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L840-L853
slightlynybbled/tk_tools
tk_tools/groups.py
MultiSlotFrame.clear
def clear(self):
        """
        Clear out the multi-frame

        :return:
        """
        for slot in self._slots:
            slot.grid_forget()
            slot.destroy()
        self._slots = []
python
def clear(self):
        """
        Clear out the multi-frame

        :return:
        """
        for slot in self._slots:
            slot.grid_forget()
            slot.destroy()
        self._slots = []
[ "def", "clear", "(", "self", ")", ":", "for", "slot", "in", "self", ".", "_slots", ":", "slot", ".", "grid_forget", "(", ")", "slot", ".", "destroy", "(", ")", "self", ".", "_slots", "=", "[", "]" ]
Clear out the multi-frame :return:
[ "Clear", "out", "the", "multi", "-", "frame", ":", "return", ":" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L855-L864
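A minimal usage sketch tying the three MultiSlotFrame records above together. The constructor arguments and the grid() layout call are assumptions based on ordinary tkinter usage, not taken from the records themselves.

import tkinter as tk
import tk_tools

root = tk.Tk()

# Assumed constructor: a parent widget; further options may exist.
frame = tk_tools.MultiSlotFrame(root)
frame.grid()

# Each add() creates a slot and triggers _redraw(), which lays the
# slots out in columns of at most 8 rows each.
for item in ['alpha', 'bravo', 'charlie']:
    frame.add(item)

# clear() forgets and destroys every slot widget.
frame.clear()

root.mainloop()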
slightlynybbled/tk_tools
tk_tools/groups.py
SevenSegment.clear
def clear(self):
        """
        Clear the segment.

        :return: None
        """
        for _, frame in self._segments.items():
            frame.configure(background=self._bg_color)
python
def clear(self):
        """
        Clear the segment.

        :return: None
        """
        for _, frame in self._segments.items():
            frame.configure(background=self._bg_color)
[ "def", "clear", "(", "self", ")", ":", "for", "_", ",", "frame", "in", "self", ".", "_segments", ".", "items", "(", ")", ":", "frame", ".", "configure", "(", "background", "=", "self", ".", "_bg_color", ")" ]
Clear the segment. :return: None
[ "Clear", "the", "segment", ".", ":", "return", ":", "None" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L944-L950
slightlynybbled/tk_tools
tk_tools/groups.py
SevenSegment.set_value
def set_value(self, value: str):
        """
        Sets the value of the 7-segment display

        :param value: the desired value
        :return: None
        """
        self.clear()

        if '.' in value:
            self._segments['period'].configure(background=self._color)

        if value in ['0', '0.']:
            self._segments['a'].configure(background=self._color)
            self._segments['b'].configure(background=self._color)
            self._segments['c'].configure(background=self._color)
            self._segments['d'].configure(background=self._color)
            self._segments['e'].configure(background=self._color)
            self._segments['f'].configure(background=self._color)
        elif value in ['1', '1.']:
            self._segments['b'].configure(background=self._color)
            self._segments['c'].configure(background=self._color)
        elif value in ['2', '2.']:
            self._segments['a'].configure(background=self._color)
            self._segments['b'].configure(background=self._color)
            self._segments['g'].configure(background=self._color)
            self._segments['e'].configure(background=self._color)
            self._segments['d'].configure(background=self._color)
        elif value in ['3', '3.']:
            self._segments['a'].configure(background=self._color)
            self._segments['b'].configure(background=self._color)
            self._segments['g'].configure(background=self._color)
            self._segments['c'].configure(background=self._color)
            self._segments['d'].configure(background=self._color)
        elif value in ['4', '4.']:
            self._segments['f'].configure(background=self._color)
            self._segments['g'].configure(background=self._color)
            self._segments['b'].configure(background=self._color)
            self._segments['c'].configure(background=self._color)
        elif value in ['5', '5.']:
            self._segments['a'].configure(background=self._color)
            self._segments['f'].configure(background=self._color)
            self._segments['g'].configure(background=self._color)
            self._segments['c'].configure(background=self._color)
            self._segments['d'].configure(background=self._color)
        elif value in ['6', '6.']:
            self._segments['f'].configure(background=self._color)
            self._segments['g'].configure(background=self._color)
            self._segments['c'].configure(background=self._color)
            self._segments['d'].configure(background=self._color)
            self._segments['e'].configure(background=self._color)
        elif value in ['7', '7.']:
            self._segments['a'].configure(background=self._color)
            self._segments['b'].configure(background=self._color)
            self._segments['c'].configure(background=self._color)
        elif value in ['8', '8.']:
            self._segments['a'].configure(background=self._color)
            self._segments['b'].configure(background=self._color)
            self._segments['c'].configure(background=self._color)
            self._segments['d'].configure(background=self._color)
            self._segments['e'].configure(background=self._color)
            self._segments['f'].configure(background=self._color)
            self._segments['g'].configure(background=self._color)
        elif value in ['9', '9.']:
            self._segments['a'].configure(background=self._color)
            self._segments['b'].configure(background=self._color)
            self._segments['c'].configure(background=self._color)
            self._segments['f'].configure(background=self._color)
            self._segments['g'].configure(background=self._color)
        elif value in ['-']:
            self._segments['g'].configure(background=self._color)
        else:
            raise ValueError('unsupported character: {}'.format(value))
python
def set_value(self, value: str):
        """
        Sets the value of the 7-segment display

        :param value: the desired value
        :return: None
        """
        self.clear()

        if '.' in value:
            self._segments['period'].configure(background=self._color)

        if value in ['0', '0.']:
            self._segments['a'].configure(background=self._color)
            self._segments['b'].configure(background=self._color)
            self._segments['c'].configure(background=self._color)
            self._segments['d'].configure(background=self._color)
            self._segments['e'].configure(background=self._color)
            self._segments['f'].configure(background=self._color)
        elif value in ['1', '1.']:
            self._segments['b'].configure(background=self._color)
            self._segments['c'].configure(background=self._color)
        elif value in ['2', '2.']:
            self._segments['a'].configure(background=self._color)
            self._segments['b'].configure(background=self._color)
            self._segments['g'].configure(background=self._color)
            self._segments['e'].configure(background=self._color)
            self._segments['d'].configure(background=self._color)
        elif value in ['3', '3.']:
            self._segments['a'].configure(background=self._color)
            self._segments['b'].configure(background=self._color)
            self._segments['g'].configure(background=self._color)
            self._segments['c'].configure(background=self._color)
            self._segments['d'].configure(background=self._color)
        elif value in ['4', '4.']:
            self._segments['f'].configure(background=self._color)
            self._segments['g'].configure(background=self._color)
            self._segments['b'].configure(background=self._color)
            self._segments['c'].configure(background=self._color)
        elif value in ['5', '5.']:
            self._segments['a'].configure(background=self._color)
            self._segments['f'].configure(background=self._color)
            self._segments['g'].configure(background=self._color)
            self._segments['c'].configure(background=self._color)
            self._segments['d'].configure(background=self._color)
        elif value in ['6', '6.']:
            self._segments['f'].configure(background=self._color)
            self._segments['g'].configure(background=self._color)
            self._segments['c'].configure(background=self._color)
            self._segments['d'].configure(background=self._color)
            self._segments['e'].configure(background=self._color)
        elif value in ['7', '7.']:
            self._segments['a'].configure(background=self._color)
            self._segments['b'].configure(background=self._color)
            self._segments['c'].configure(background=self._color)
        elif value in ['8', '8.']:
            self._segments['a'].configure(background=self._color)
            self._segments['b'].configure(background=self._color)
            self._segments['c'].configure(background=self._color)
            self._segments['d'].configure(background=self._color)
            self._segments['e'].configure(background=self._color)
            self._segments['f'].configure(background=self._color)
            self._segments['g'].configure(background=self._color)
        elif value in ['9', '9.']:
            self._segments['a'].configure(background=self._color)
            self._segments['b'].configure(background=self._color)
            self._segments['c'].configure(background=self._color)
            self._segments['f'].configure(background=self._color)
            self._segments['g'].configure(background=self._color)
        elif value in ['-']:
            self._segments['g'].configure(background=self._color)
        else:
            raise ValueError('unsupported character: {}'.format(value))
[ "def", "set_value", "(", "self", ",", "value", ":", "str", ")", ":", "self", ".", "clear", "(", ")", "if", "'.'", "in", "value", ":", "self", ".", "_segments", "[", "'period'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "if", "value", "in", "[", "'0'", ",", "'0.'", "]", ":", "self", ".", "_segments", "[", "'a'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'b'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'c'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'d'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'e'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'f'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "elif", "value", "in", "[", "'1'", ",", "'1.'", "]", ":", "self", ".", "_segments", "[", "'b'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'c'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "elif", "value", "in", "[", "'2'", ",", "'2.'", "]", ":", "self", ".", "_segments", "[", "'a'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'b'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'g'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'e'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'d'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "elif", "value", "in", "[", "'3'", ",", "'3.'", "]", ":", "self", ".", "_segments", "[", "'a'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'b'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'g'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'c'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'d'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "elif", "value", "in", "[", "'4'", ",", "'4.'", "]", ":", "self", ".", "_segments", "[", "'f'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'g'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'b'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'c'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "elif", "value", "in", "[", "'5'", ",", "'5.'", "]", ":", "self", ".", "_segments", "[", "'a'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'f'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'g'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'c'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", 
"_segments", "[", "'d'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "elif", "value", "in", "[", "'6'", ",", "'6.'", "]", ":", "self", ".", "_segments", "[", "'f'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'g'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'c'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'d'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'e'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "elif", "value", "in", "[", "'7'", ",", "'7.'", "]", ":", "self", ".", "_segments", "[", "'a'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'b'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'c'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "elif", "value", "in", "[", "'8'", ",", "'8.'", "]", ":", "self", ".", "_segments", "[", "'a'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'b'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'c'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'d'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'e'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'f'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'g'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "elif", "value", "in", "[", "'9'", ",", "'9.'", "]", ":", "self", ".", "_segments", "[", "'a'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'b'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'c'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'f'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "self", ".", "_segments", "[", "'g'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "elif", "value", "in", "[", "'-'", "]", ":", "self", ".", "_segments", "[", "'g'", "]", ".", "configure", "(", "background", "=", "self", ".", "_color", ")", "else", ":", "raise", "ValueError", "(", "'unsupported character: {}'", ".", "format", "(", "value", ")", ")" ]
Sets the value of the 7-segment display :param value: the desired value :return: None
[ "Sets", "the", "value", "of", "the", "7", "-", "segment", "display", ":", "param", "value", ":", "the", "desired", "value", ":", "return", ":", "None" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L952-L1025
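The if/elif chain in SevenSegment.set_value encodes a fixed digit-to-segment mapping. A short sketch that captures the same mapping as data; the dictionary and helper below are illustrations derived from the branches above, not part of the widget's API.

SEGMENTS_BY_CHAR = {
    '0': 'abcdef',
    '1': 'bc',
    '2': 'abged',
    '3': 'abgcd',
    '4': 'fgbc',
    '5': 'afgcd',
    '6': 'fgcde',
    '7': 'abc',
    '8': 'abcdefg',
    '9': 'abcfg',
    '-': 'g',
}

def lit_segments(value: str):
    """Return the segment names that set_value() would light for `value`."""
    segments = set(SEGMENTS_BY_CHAR[value.rstrip('.')])
    if '.' in value:
        segments.add('period')
    return segments

print(lit_segments('4.'))  # {'f', 'g', 'b', 'c', 'period'}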
slightlynybbled/tk_tools
tk_tools/groups.py
SevenSegmentDigits._group
def _group(self, value: str):
        """
        Takes a string and groups it appropriately with any
        period or other appropriate punctuation so that it is
        displayed correctly.

        :param value: a string containing an integer or float
        :return: None
        """
        reversed_v = value[::-1]

        parts = []
        has_period = False
        for c in reversed_v:
            if has_period:
                parts.append(c + '.')
                has_period = False
            elif c == '.':
                has_period = True
            else:
                parts.append(c)

        parts = parts[:len(self._digits)]

        return parts
python
def _group(self, value: str):
        """
        Takes a string and groups it appropriately with any
        period or other appropriate punctuation so that it is
        displayed correctly.

        :param value: a string containing an integer or float
        :return: None
        """
        reversed_v = value[::-1]

        parts = []
        has_period = False
        for c in reversed_v:
            if has_period:
                parts.append(c + '.')
                has_period = False
            elif c == '.':
                has_period = True
            else:
                parts.append(c)

        parts = parts[:len(self._digits)]

        return parts
[ "def", "_group", "(", "self", ",", "value", ":", "str", ")", ":", "reversed_v", "=", "value", "[", ":", ":", "-", "1", "]", "parts", "=", "[", "]", "has_period", "=", "False", "for", "c", "in", "reversed_v", ":", "if", "has_period", ":", "parts", ".", "append", "(", "c", "+", "'.'", ")", "has_period", "=", "False", "elif", "c", "==", "'.'", ":", "has_period", "=", "True", "else", ":", "parts", ".", "append", "(", "c", ")", "parts", "=", "parts", "[", ":", "len", "(", "self", ".", "_digits", ")", "]", "return", "parts" ]
Takes a string and groups it appropriately with any period or other appropriate punctuation so that it is displayed correctly. :param value: a string containing an integer or float :return: None
[ "Takes", "a", "string", "and", "groups", "it", "appropriately", "with", "any", "period", "or", "other", "appropriate", "punctuation", "so", "that", "it", "is", "displayed", "correctly", ".", ":", "param", "value", ":", "a", "string", "containing", "an", "integer", "or", "float", ":", "return", ":", "None" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L1066-L1090
slightlynybbled/tk_tools
tk_tools/groups.py
SevenSegmentDigits.set_value
def set_value(self, value: str):
        """
        Sets the displayed digits based on the value string.

        :param value: a string containing an integer or float value
        :return: None
        """
        [digit.clear() for digit in self._digits]

        grouped = self._group(value)  # return the parts, reversed
        digits = self._digits[::-1]  # reverse the digits

        # fill from right to left
        has_period = False
        for i, digit_value in enumerate(grouped):
            try:
                if has_period:
                    digits[i].set_value(digit_value + '.')
                    has_period = False
                elif grouped[i] == '.':
                    has_period = True
                else:
                    digits[i].set_value(digit_value)
            except IndexError:
                raise ValueError('the value "{}" contains too '
                                 'many digits'.format(value))
python
def set_value(self, value: str):
        """
        Sets the displayed digits based on the value string.

        :param value: a string containing an integer or float value
        :return: None
        """
        [digit.clear() for digit in self._digits]

        grouped = self._group(value)  # return the parts, reversed
        digits = self._digits[::-1]  # reverse the digits

        # fill from right to left
        has_period = False
        for i, digit_value in enumerate(grouped):
            try:
                if has_period:
                    digits[i].set_value(digit_value + '.')
                    has_period = False
                elif grouped[i] == '.':
                    has_period = True
                else:
                    digits[i].set_value(digit_value)
            except IndexError:
                raise ValueError('the value "{}" contains too '
                                 'many digits'.format(value))
[ "def", "set_value", "(", "self", ",", "value", ":", "str", ")", ":", "[", "digit", ".", "clear", "(", ")", "for", "digit", "in", "self", ".", "_digits", "]", "grouped", "=", "self", ".", "_group", "(", "value", ")", "# return the parts, reversed", "digits", "=", "self", ".", "_digits", "[", ":", ":", "-", "1", "]", "# reverse the digits", "# fill from right to left", "has_period", "=", "False", "for", "i", ",", "digit_value", "in", "enumerate", "(", "grouped", ")", ":", "try", ":", "if", "has_period", ":", "digits", "[", "i", "]", ".", "set_value", "(", "digit_value", "+", "'.'", ")", "has_period", "=", "False", "elif", "grouped", "[", "i", "]", "==", "'.'", ":", "has_period", "=", "True", "else", ":", "digits", "[", "i", "]", ".", "set_value", "(", "digit_value", ")", "except", "IndexError", ":", "raise", "ValueError", "(", "'the value \"{}\" contains too '", "'many digits'", ".", "format", "(", "value", ")", ")" ]
Sets the displayed digits based on the value string. :param value: a string containing an integer or float value :return: None
[ "Sets", "the", "displayed", "digits", "based", "on", "the", "value", "string", ".", ":", "param", "value", ":", "a", "string", "containing", "an", "integer", "or", "float", "value", ":", "return", ":", "None" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/groups.py#L1092-L1118
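To see how _group() and SevenSegmentDigits.set_value() cooperate, here is a standalone sketch of the same right-to-left grouping. The digit count is a made-up parameter here; the widget takes it from len(self._digits).

def group_digits(value: str, num_digits: int):
    """Mirror of _group(): attach each period to the digit before it, right to left."""
    parts = []
    has_period = False
    for c in value[::-1]:
        if has_period:
            parts.append(c + '.')
            has_period = False
        elif c == '.':
            has_period = True
        else:
            parts.append(c)
    return parts[:num_digits]

# '3.14' -> ['4', '1', '3.'] : the rightmost digit comes first and the
# period rides along with the digit that precedes it.
print(group_digits('3.14', 4))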
slightlynybbled/tk_tools
tk_tools/widgets.py
SmartWidget.add_callback
def add_callback(self, callback: callable):
        """
        Add a callback on change

        :param callback: callable function
        :return: None
        """
        def internal_callback(*args):
            try:
                callback()
            except TypeError:
                callback(self.get())

        self._var.trace('w', internal_callback)
python
def add_callback(self, callback: callable):
        """
        Add a callback on change

        :param callback: callable function
        :return: None
        """
        def internal_callback(*args):
            try:
                callback()
            except TypeError:
                callback(self.get())

        self._var.trace('w', internal_callback)
[ "def", "add_callback", "(", "self", ",", "callback", ":", "callable", ")", ":", "def", "internal_callback", "(", "*", "args", ")", ":", "try", ":", "callback", "(", ")", "except", "TypeError", ":", "callback", "(", "self", ".", "get", "(", ")", ")", "self", ".", "_var", ".", "trace", "(", "'w'", ",", "internal_callback", ")" ]
Add a callback on change :param callback: callable function :return: None
[ "Add", "a", "callback", "on", "change" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/widgets.py#L18-L31
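The add_callback() record relies on a small dispatch trick: the wrapper first tries the callback with no arguments and falls back to passing the current widget value when that raises TypeError. A plain-Python sketch of the same idea, with no Tk variable involved (the names below are illustrative only):

def make_internal_callback(callback, get_value):
    def internal_callback(*args):
        try:
            callback()
        except TypeError:
            # callback expects the new value, so fetch and pass it
            callback(get_value())
    return internal_callback

# Both zero-argument and one-argument callbacks work:
make_internal_callback(lambda: print('changed'), lambda: 42)()
make_internal_callback(lambda v: print('changed to', v), lambda: 42)()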
slightlynybbled/tk_tools
tk_tools/widgets.py
BinaryLabel.set
def set(self, value: int):
        """
        Set the current value

        :param value:
        :return: None
        """
        max_value = int(''.join(['1' for _ in range(self._bit_width)]), 2)

        if value > max_value:
            raise ValueError('the value {} is larger than '
                             'the maximum value {}'.format(value, max_value))

        self._value = value
        self._text_update()
python
def set(self, value: int):
        """
        Set the current value

        :param value:
        :return: None
        """
        max_value = int(''.join(['1' for _ in range(self._bit_width)]), 2)

        if value > max_value:
            raise ValueError('the value {} is larger than '
                             'the maximum value {}'.format(value, max_value))

        self._value = value
        self._text_update()
[ "def", "set", "(", "self", ",", "value", ":", "int", ")", ":", "max_value", "=", "int", "(", "''", ".", "join", "(", "[", "'1'", "for", "_", "in", "range", "(", "self", ".", "_bit_width", ")", "]", ")", ",", "2", ")", "if", "value", ">", "max_value", ":", "raise", "ValueError", "(", "'the value {} is larger than '", "'the maximum value {}'", ".", "format", "(", "value", ",", "max_value", ")", ")", "self", ".", "_value", "=", "value", "self", ".", "_text_update", "(", ")" ]
Set the current value :param value: :return: None
[ "Set", "the", "current", "value" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/widgets.py#L217-L231
slightlynybbled/tk_tools
tk_tools/widgets.py
BinaryLabel.get_bit
def get_bit(self, position: int):
        """
        Returns the bit value at position

        :param position: integer between 0 and <width>, inclusive
        :return: the value at position as a integer
        """
        if position > (self._bit_width - 1):
            raise ValueError('position greater than the bit width')

        if self._value & (1 << position):
            return 1
        else:
            return 0
python
def get_bit(self, position: int):
        """
        Returns the bit value at position

        :param position: integer between 0 and <width>, inclusive
        :return: the value at position as a integer
        """
        if position > (self._bit_width - 1):
            raise ValueError('position greater than the bit width')

        if self._value & (1 << position):
            return 1
        else:
            return 0
[ "def", "get_bit", "(", "self", ",", "position", ":", "int", ")", ":", "if", "position", ">", "(", "self", ".", "_bit_width", "-", "1", ")", ":", "raise", "ValueError", "(", "'position greater than the bit width'", ")", "if", "self", ".", "_value", "&", "(", "1", "<<", "position", ")", ":", "return", "1", "else", ":", "return", "0" ]
Returns the bit value at position :param position: integer between 0 and <width>, inclusive :return: the value at position as a integer
[ "Returns", "the", "bit", "value", "at", "position" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/widgets.py#L238-L252
slightlynybbled/tk_tools
tk_tools/widgets.py
BinaryLabel.toggle_bit
def toggle_bit(self, position: int):
        """
        Toggles the value at position

        :param position: integer between 0 and 7, inclusive
        :return: None
        """
        if position > (self._bit_width - 1):
            raise ValueError('position greater than the bit width')

        self._value ^= (1 << position)
        self._text_update()
python
def toggle_bit(self, position: int):
        """
        Toggles the value at position

        :param position: integer between 0 and 7, inclusive
        :return: None
        """
        if position > (self._bit_width - 1):
            raise ValueError('position greater than the bit width')

        self._value ^= (1 << position)
        self._text_update()
[ "def", "toggle_bit", "(", "self", ",", "position", ":", "int", ")", ":", "if", "position", ">", "(", "self", ".", "_bit_width", "-", "1", ")", ":", "raise", "ValueError", "(", "'position greater than the bit width'", ")", "self", ".", "_value", "^=", "(", "1", "<<", "position", ")", "self", ".", "_text_update", "(", ")" ]
Toggles the value at position :param position: integer between 0 and 7, inclusive :return: None
[ "Toggles", "the", "value", "at", "position" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/widgets.py#L254-L265
slightlynybbled/tk_tools
tk_tools/widgets.py
BinaryLabel.set_bit
def set_bit(self, position: int):
        """
        Sets the value at position

        :param position: integer between 0 and 7, inclusive
        :return: None
        """
        if position > (self._bit_width - 1):
            raise ValueError('position greater than the bit width')

        self._value |= (1 << position)
        self._text_update()
python
def set_bit(self, position: int):
        """
        Sets the value at position

        :param position: integer between 0 and 7, inclusive
        :return: None
        """
        if position > (self._bit_width - 1):
            raise ValueError('position greater than the bit width')

        self._value |= (1 << position)
        self._text_update()
[ "def", "set_bit", "(", "self", ",", "position", ":", "int", ")", ":", "if", "position", ">", "(", "self", ".", "_bit_width", "-", "1", ")", ":", "raise", "ValueError", "(", "'position greater than the bit width'", ")", "self", ".", "_value", "|=", "(", "1", "<<", "position", ")", "self", ".", "_text_update", "(", ")" ]
Sets the value at position :param position: integer between 0 and 7, inclusive :return: None
[ "Sets", "the", "value", "at", "position" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/widgets.py#L267-L278
slightlynybbled/tk_tools
tk_tools/widgets.py
BinaryLabel.clear_bit
def clear_bit(self, position: int):
        """
        Clears the value at position

        :param position: integer between 0 and 7, inclusive
        :return: None
        """
        if position > (self._bit_width - 1):
            raise ValueError('position greater than the bit width')

        self._value &= ~(1 << position)
        self._text_update()
python
def clear_bit(self, position: int):
        """
        Clears the value at position

        :param position: integer between 0 and 7, inclusive
        :return: None
        """
        if position > (self._bit_width - 1):
            raise ValueError('position greater than the bit width')

        self._value &= ~(1 << position)
        self._text_update()
[ "def", "clear_bit", "(", "self", ",", "position", ":", "int", ")", ":", "if", "position", ">", "(", "self", ".", "_bit_width", "-", "1", ")", ":", "raise", "ValueError", "(", "'position greater than the bit width'", ")", "self", ".", "_value", "&=", "~", "(", "1", "<<", "position", ")", "self", ".", "_text_update", "(", ")" ]
Clears the value at position :param position: integer between 0 and 7, inclusive :return: None
[ "Clears", "the", "value", "at", "position" ]
train
https://github.com/slightlynybbled/tk_tools/blob/7c1792cad42890251a34f0617ce9b4b3e7abcf50/tk_tools/widgets.py#L280-L291
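The four BinaryLabel records above (get_bit, toggle_bit, set_bit, clear_bit) wrap ordinary bitwise operations on an integer. The same logic, stripped of the Tk label update, as a quick reference; the helper name and default width are illustrative only.

def demo_bit_ops(value: int, position: int, bit_width: int = 8):
    """Show the result of each BinaryLabel-style bit operation on `value`."""
    if position > (bit_width - 1):
        raise ValueError('position greater than the bit width')
    return {
        'get': 1 if value & (1 << position) else 0,
        'set': value | (1 << position),
        'clear': value & ~(1 << position),
        'toggle': value ^ (1 << position),
    }

# e.g. for 0b1010 and bit 0:
print(demo_bit_ops(0b1010, 0))  # {'get': 0, 'set': 11, 'clear': 10, 'toggle': 11}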
willhardy/django-seo
rollyourown/seo/utils.py
LazyList._populate
def _populate(self):
        """ Populate this list by calling populate(), but only once. """
        if not self._populated:
            logging.debug("Populating lazy list %d (%s)" % (id(self), self.__class__.__name__))
            try:
                self.populate()
                self._populated = True
            except Exception, e:
                logging.debug("Currently unable to populate lazy list: %s" % e)
python
def _populate(self):
        """ Populate this list by calling populate(), but only once. """
        if not self._populated:
            logging.debug("Populating lazy list %d (%s)" % (id(self), self.__class__.__name__))
            try:
                self.populate()
                self._populated = True
            except Exception, e:
                logging.debug("Currently unable to populate lazy list: %s" % e)
[ "def", "_populate", "(", "self", ")", ":", "if", "not", "self", ".", "_populated", ":", "logging", ".", "debug", "(", "\"Populating lazy list %d (%s)\"", "%", "(", "id", "(", "self", ")", ",", "self", ".", "__class__", ".", "__name__", ")", ")", "try", ":", "self", ".", "populate", "(", ")", "self", ".", "_populated", "=", "True", "except", "Exception", ",", "e", ":", "logging", ".", "debug", "(", "\"Currently unable to populate lazy list: %s\"", "%", "e", ")" ]
Populate this list by calling populate(), but only once.
[ "Populate", "this", "list", "by", "calling", "populate", "()", "but", "only", "once", "." ]
train
https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/utils.py#L43-L51
willhardy/django-seo
rollyourown/seo/admin.py
_register_admin
def _register_admin(admin_site, model, admin_class):
    """ Register model in the admin, ignoring any previously registered models.
        Alternatively it could be used in the future to replace a previously
        registered model.
    """
    try:
        admin_site.register(model, admin_class)
    except admin.sites.AlreadyRegistered:
        pass
python
def _register_admin(admin_site, model, admin_class):
    """ Register model in the admin, ignoring any previously registered models.
        Alternatively it could be used in the future to replace a previously
        registered model.
    """
    try:
        admin_site.register(model, admin_class)
    except admin.sites.AlreadyRegistered:
        pass
[ "def", "_register_admin", "(", "admin_site", ",", "model", ",", "admin_class", ")", ":", "try", ":", "admin_site", ".", "register", "(", "model", ",", "admin_class", ")", "except", "admin", ".", "sites", ".", "AlreadyRegistered", ":", "pass" ]
Register model in the admin, ignoring any previously registered models. Alternatively it could be used in the future to replace a previously registered model.
[ "Register", "model", "in", "the", "admin", "ignoring", "any", "previously", "registered", "models", ".", "Alternatively", "it", "could", "be", "used", "in", "the", "future", "to", "replace", "a", "previously", "registered", "model", "." ]
train
https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/admin.py#L81-L89
willhardy/django-seo
rollyourown/seo/admin.py
core_choice_fields
def core_choice_fields(metadata_class):
    """ If the 'optional' core fields (_site and _language) are required,
        list them here.
    """
    fields = []
    if metadata_class._meta.use_sites:
        fields.append('_site')
    if metadata_class._meta.use_i18n:
        fields.append('_language')
    return fields
python
def core_choice_fields(metadata_class):
    """ If the 'optional' core fields (_site and _language) are required,
        list them here.
    """
    fields = []
    if metadata_class._meta.use_sites:
        fields.append('_site')
    if metadata_class._meta.use_i18n:
        fields.append('_language')
    return fields
[ "def", "core_choice_fields", "(", "metadata_class", ")", ":", "fields", "=", "[", "]", "if", "metadata_class", ".", "_meta", ".", "use_sites", ":", "fields", ".", "append", "(", "'_site'", ")", "if", "metadata_class", ".", "_meta", ".", "use_i18n", ":", "fields", ".", "append", "(", "'_language'", ")", "return", "fields" ]
If the 'optional' core fields (_site and _language) are required, list them here.
[ "If", "the", "optional", "core", "fields", "(", "_site", "and", "_language", ")", "are", "required", "list", "them", "here", "." ]
train
https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/admin.py#L187-L196
willhardy/django-seo
rollyourown/seo/admin.py
_monkey_inline
def _monkey_inline(model, admin_class_instance, metadata_class, inline_class, admin_site):
    """ Monkey patch the inline onto the given admin_class instance. """
    if model in metadata_class._meta.seo_models:
        # *Not* adding to the class attribute "inlines", as this will affect
        # all instances from this class. Explicitly adding to instance attribute.
        admin_class_instance.__dict__['inlines'] = admin_class_instance.inlines + [inline_class]

        # Because we've missed the registration, we need to perform actions
        # that were done then (on admin class instantiation)
        inline_instance = inline_class(admin_class_instance.model, admin_site)
        admin_class_instance.inline_instances.append(inline_instance)
python
def _monkey_inline(model, admin_class_instance, metadata_class, inline_class, admin_site):
    """ Monkey patch the inline onto the given admin_class instance. """
    if model in metadata_class._meta.seo_models:
        # *Not* adding to the class attribute "inlines", as this will affect
        # all instances from this class. Explicitly adding to instance attribute.
        admin_class_instance.__dict__['inlines'] = admin_class_instance.inlines + [inline_class]

        # Because we've missed the registration, we need to perform actions
        # that were done then (on admin class instantiation)
        inline_instance = inline_class(admin_class_instance.model, admin_site)
        admin_class_instance.inline_instances.append(inline_instance)
[ "def", "_monkey_inline", "(", "model", ",", "admin_class_instance", ",", "metadata_class", ",", "inline_class", ",", "admin_site", ")", ":", "if", "model", "in", "metadata_class", ".", "_meta", ".", "seo_models", ":", "# *Not* adding to the class attribute \"inlines\", as this will affect", "# all instances from this class. Explicitly adding to instance attribute.", "admin_class_instance", ".", "__dict__", "[", "'inlines'", "]", "=", "admin_class_instance", ".", "inlines", "+", "[", "inline_class", "]", "# Because we've missed the registration, we need to perform actions", "# that were done then (on admin class instantiation)", "inline_instance", "=", "inline_class", "(", "admin_class_instance", ".", "model", ",", "admin_site", ")", "admin_class_instance", ".", "inline_instances", ".", "append", "(", "inline_instance", ")" ]
Monkey patch the inline onto the given admin_class instance.
[ "Monkey", "patch", "the", "inline", "onto", "the", "given", "admin_class", "instance", "." ]
train
https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/admin.py#L199-L209
willhardy/django-seo
rollyourown/seo/admin.py
_with_inline
def _with_inline(func, admin_site, metadata_class, inline_class):
    """ Decorator for register function that adds an appropriate inline."""

    def register(model_or_iterable, admin_class=None, **options):
        # Call the (bound) function we were given.
        # We have to assume it will be bound to admin_site
        func(model_or_iterable, admin_class, **options)
        _monkey_inline(model_or_iterable, admin_site._registry[model_or_iterable], metadata_class, inline_class, admin_site)

    return register
python
def _with_inline(func, admin_site, metadata_class, inline_class):
    """ Decorator for register function that adds an appropriate inline."""

    def register(model_or_iterable, admin_class=None, **options):
        # Call the (bound) function we were given.
        # We have to assume it will be bound to admin_site
        func(model_or_iterable, admin_class, **options)
        _monkey_inline(model_or_iterable, admin_site._registry[model_or_iterable], metadata_class, inline_class, admin_site)

    return register
[ "def", "_with_inline", "(", "func", ",", "admin_site", ",", "metadata_class", ",", "inline_class", ")", ":", "def", "register", "(", "model_or_iterable", ",", "admin_class", "=", "None", ",", "*", "*", "options", ")", ":", "# Call the (bound) function we were given.", "# We have to assume it will be bound to admin_site", "func", "(", "model_or_iterable", ",", "admin_class", ",", "*", "*", "options", ")", "_monkey_inline", "(", "model_or_iterable", ",", "admin_site", ".", "_registry", "[", "model_or_iterable", "]", ",", "metadata_class", ",", "inline_class", ",", "admin_site", ")", "return", "register" ]
Decorator for register function that adds an appropriate inline.
[ "Decorator", "for", "register", "function", "that", "adds", "an", "appropriate", "inline", "." ]
train
https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/admin.py#L211-L220
willhardy/django-seo
rollyourown/seo/admin.py
auto_register_inlines
def auto_register_inlines(admin_site, metadata_class):
    """ This is a questionable function that automatically adds our metadata
        inline to all relevant models in the site.
    """
    inline_class = get_inline(metadata_class)

    for model, admin_class_instance in admin_site._registry.items():
        _monkey_inline(model, admin_class_instance, metadata_class, inline_class, admin_site)

    # Monkey patch the register method to automatically add an inline for this site.
    # _with_inline() is a decorator that wraps the register function with the same injection code
    # used above (_monkey_inline).
    admin_site.register = _with_inline(admin_site.register, admin_site, metadata_class, inline_class)
python
def auto_register_inlines(admin_site, metadata_class):
    """ This is a questionable function that automatically adds our metadata
        inline to all relevant models in the site.
    """
    inline_class = get_inline(metadata_class)

    for model, admin_class_instance in admin_site._registry.items():
        _monkey_inline(model, admin_class_instance, metadata_class, inline_class, admin_site)

    # Monkey patch the register method to automatically add an inline for this site.
    # _with_inline() is a decorator that wraps the register function with the same injection code
    # used above (_monkey_inline).
    admin_site.register = _with_inline(admin_site.register, admin_site, metadata_class, inline_class)
[ "def", "auto_register_inlines", "(", "admin_site", ",", "metadata_class", ")", ":", "inline_class", "=", "get_inline", "(", "metadata_class", ")", "for", "model", ",", "admin_class_instance", "in", "admin_site", ".", "_registry", ".", "items", "(", ")", ":", "_monkey_inline", "(", "model", ",", "admin_class_instance", ",", "metadata_class", ",", "inline_class", ",", "admin_site", ")", "# Monkey patch the register method to automatically add an inline for this site.", "# _with_inline() is a decorator that wraps the register function with the same injection code", "# used above (_monkey_inline).", "admin_site", ".", "register", "=", "_with_inline", "(", "admin_site", ".", "register", ",", "admin_site", ",", "metadata_class", ",", "inline_class", ")" ]
This is a questionable function that automatically adds our metadata inline to all relevant models in the site.
[ "This", "is", "a", "questionable", "function", "that", "automatically", "adds", "our", "metadata", "inline", "to", "all", "relevant", "models", "in", "the", "site", "." ]
train
https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/admin.py#L222-L234
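auto_register_inlines() and _with_inline() together patch an admin site so that every future register() call also receives the metadata inline. A simplified, framework-free sketch of that wrapping pattern; the registry dict and FakeAdmin class below are stand-ins, not Django's real admin machinery.

def with_inline(register, registry, inline_class):
    def wrapped(model, admin_class=None, **options):
        register(model, admin_class, **options)
        # After the normal registration, patch the instance that was created.
        registry[model].inlines = registry[model].inlines + [inline_class]
    return wrapped

class FakeAdmin:
    def __init__(self):
        self.inlines = []

registry = {}

def register(model, admin_class=None, **options):
    registry[model] = FakeAdmin()

register = with_inline(register, registry, inline_class='MetadataInline')
register('Page')
print(registry['Page'].inlines)  # ['MetadataInline']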
willhardy/django-seo
rollyourown/seo/base.py
get_linked_metadata
def get_linked_metadata(obj, name=None, context=None, site=None, language=None):
    """ Gets metadata linked from the given object. """
    # XXX Check that 'modelinstance' and 'model' metadata are installed in backends
    # I believe that get_model() would return None if not
    Metadata = _get_metadata_model(name)
    InstanceMetadata = Metadata._meta.get_model('modelinstance')
    ModelMetadata = Metadata._meta.get_model('model')
    content_type = ContentType.objects.get_for_model(obj)
    instances = []
    if InstanceMetadata is not None:
        try:
            instance_md = InstanceMetadata.objects.get(_content_type=content_type, _object_id=obj.pk)
        except InstanceMetadata.DoesNotExist:
            instance_md = InstanceMetadata(_content_object=obj)
        instances.append(instance_md)
    if ModelMetadata is not None:
        try:
            model_md = ModelMetadata.objects.get(_content_type=content_type)
        except ModelMetadata.DoesNotExist:
            model_md = ModelMetadata(_content_type=content_type)
        instances.append(model_md)
    return FormattedMetadata(Metadata, instances, '', site, language)
python
def get_linked_metadata(obj, name=None, context=None, site=None, language=None):
    """ Gets metadata linked from the given object. """
    # XXX Check that 'modelinstance' and 'model' metadata are installed in backends
    # I believe that get_model() would return None if not
    Metadata = _get_metadata_model(name)
    InstanceMetadata = Metadata._meta.get_model('modelinstance')
    ModelMetadata = Metadata._meta.get_model('model')
    content_type = ContentType.objects.get_for_model(obj)
    instances = []
    if InstanceMetadata is not None:
        try:
            instance_md = InstanceMetadata.objects.get(_content_type=content_type, _object_id=obj.pk)
        except InstanceMetadata.DoesNotExist:
            instance_md = InstanceMetadata(_content_object=obj)
        instances.append(instance_md)
    if ModelMetadata is not None:
        try:
            model_md = ModelMetadata.objects.get(_content_type=content_type)
        except ModelMetadata.DoesNotExist:
            model_md = ModelMetadata(_content_type=content_type)
        instances.append(model_md)
    return FormattedMetadata(Metadata, instances, '', site, language)
[ "def", "get_linked_metadata", "(", "obj", ",", "name", "=", "None", ",", "context", "=", "None", ",", "site", "=", "None", ",", "language", "=", "None", ")", ":", "# XXX Check that 'modelinstance' and 'model' metadata are installed in backends", "# I believe that get_model() would return None if not", "Metadata", "=", "_get_metadata_model", "(", "name", ")", "InstanceMetadata", "=", "Metadata", ".", "_meta", ".", "get_model", "(", "'modelinstance'", ")", "ModelMetadata", "=", "Metadata", ".", "_meta", ".", "get_model", "(", "'model'", ")", "content_type", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "obj", ")", "instances", "=", "[", "]", "if", "InstanceMetadata", "is", "not", "None", ":", "try", ":", "instance_md", "=", "InstanceMetadata", ".", "objects", ".", "get", "(", "_content_type", "=", "content_type", ",", "_object_id", "=", "obj", ".", "pk", ")", "except", "InstanceMetadata", ".", "DoesNotExist", ":", "instance_md", "=", "InstanceMetadata", "(", "_content_object", "=", "obj", ")", "instances", ".", "append", "(", "instance_md", ")", "if", "ModelMetadata", "is", "not", "None", ":", "try", ":", "model_md", "=", "ModelMetadata", ".", "objects", ".", "get", "(", "_content_type", "=", "content_type", ")", "except", "ModelMetadata", ".", "DoesNotExist", ":", "model_md", "=", "ModelMetadata", "(", "_content_type", "=", "content_type", ")", "instances", ".", "append", "(", "model_md", ")", "return", "FormattedMetadata", "(", "Metadata", ",", "instances", ",", "''", ",", "site", ",", "language", ")" ]
Gets metadata linked from the given object.
[ "Gets", "metadata", "linked", "from", "the", "given", "object", "." ]
train
https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/base.py#L262-L283
willhardy/django-seo
rollyourown/seo/base.py
populate_metadata
def populate_metadata(model, MetadataClass):
    """ For a given model and metadata class, ensure there is metadata for every instance. """
    content_type = ContentType.objects.get_for_model(model)
    for instance in model.objects.all():
        create_metadata_instance(MetadataClass, instance)
python
def populate_metadata(model, MetadataClass):
    """ For a given model and metadata class, ensure there is metadata for every instance. """
    content_type = ContentType.objects.get_for_model(model)
    for instance in model.objects.all():
        create_metadata_instance(MetadataClass, instance)
[ "def", "populate_metadata", "(", "model", ",", "MetadataClass", ")", ":", "content_type", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "model", ")", "for", "instance", "in", "model", ".", "objects", ".", "all", "(", ")", ":", "create_metadata_instance", "(", "MetadataClass", ",", "instance", ")" ]
For a given model and metadata class, ensure there is metadata for every instance.
[ "For", "a", "given", "model", "and", "metadata", "class", "ensure", "there", "is", "metadata", "for", "every", "instance", "." ]
train
https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/base.py#L326-L331
willhardy/django-seo
rollyourown/seo/base.py
FormattedMetadata.__instances
def __instances(self):
        """ Cache instances, allowing generators to be used and reused.
            This fills a cache as the generator gets emptied, eventually
            reading exclusively from the cache.
        """
        for instance in self.__instances_cache:
            yield instance
        for instance in self.__instances_original:
            self.__instances_cache.append(instance)
            yield instance
python
def __instances(self):
        """ Cache instances, allowing generators to be used and reused.
            This fills a cache as the generator gets emptied, eventually
            reading exclusively from the cache.
        """
        for instance in self.__instances_cache:
            yield instance
        for instance in self.__instances_original:
            self.__instances_cache.append(instance)
            yield instance
[ "def", "__instances", "(", "self", ")", ":", "for", "instance", "in", "self", ".", "__instances_cache", ":", "yield", "instance", "for", "instance", "in", "self", ".", "__instances_original", ":", "self", ".", "__instances_cache", ".", "append", "(", "instance", ")", "yield", "instance" ]
Cache instances, allowing generators to be used and reused. This fills a cache as the generator gets emptied, eventually reading exclusively from the cache.
[ "Cache", "instances", "allowing", "generators", "to", "be", "used", "and", "reused", ".", "This", "fills", "a", "cache", "as", "the", "generator", "gets", "emptied", "eventually", "reading", "exclusively", "from", "the", "cache", "." ]
train
https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/base.py#L50-L59
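The __instances() record shows a generator that can be iterated more than once by spilling results into a cache as they are produced. A standalone sketch of the same pattern, with a counter to show that the underlying source is consumed only once; all names here are illustrative.

class CachedIterable:
    def __init__(self, source):
        self._original = iter(source)
        self._cache = []

    def __iter__(self):
        for item in self._cache:
            yield item
        for item in self._original:
            self._cache.append(item)
            yield item

calls = []

def slow_source():
    for i in range(3):
        calls.append(i)  # stands in for an expensive lookup, e.g. a query
        yield i

items = CachedIterable(slow_source())
list(items)
list(items)
print(calls)  # [0, 1, 2] -- the source was only walked once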
willhardy/django-seo
rollyourown/seo/base.py
FormattedMetadata._resolve_value
def _resolve_value(self, name):
        """ Returns an appropriate value for the given name.
            This simply asks each of the instances for a value.
        """
        for instance in self.__instances():
            value = instance._resolve_value(name)
            if value:
                return value

        # Otherwise, return an appropriate default value (populate_from)
        # TODO: This is duplicated in meta_models. Move this to a common home.
        if name in self.__metadata._meta.elements:
            populate_from = self.__metadata._meta.elements[name].populate_from
            if callable(populate_from):
                return populate_from(None)
            elif isinstance(populate_from, Literal):
                return populate_from.value
            elif populate_from is not NotSet:
                return self._resolve_value(populate_from)
python
def _resolve_value(self, name):
        """ Returns an appropriate value for the given name.
            This simply asks each of the instances for a value.
        """
        for instance in self.__instances():
            value = instance._resolve_value(name)
            if value:
                return value

        # Otherwise, return an appropriate default value (populate_from)
        # TODO: This is duplicated in meta_models. Move this to a common home.
        if name in self.__metadata._meta.elements:
            populate_from = self.__metadata._meta.elements[name].populate_from
            if callable(populate_from):
                return populate_from(None)
            elif isinstance(populate_from, Literal):
                return populate_from.value
            elif populate_from is not NotSet:
                return self._resolve_value(populate_from)
[ "def", "_resolve_value", "(", "self", ",", "name", ")", ":", "for", "instance", "in", "self", ".", "__instances", "(", ")", ":", "value", "=", "instance", ".", "_resolve_value", "(", "name", ")", "if", "value", ":", "return", "value", "# Otherwise, return an appropriate default value (populate_from)", "# TODO: This is duplicated in meta_models. Move this to a common home.", "if", "name", "in", "self", ".", "__metadata", ".", "_meta", ".", "elements", ":", "populate_from", "=", "self", ".", "__metadata", ".", "_meta", ".", "elements", "[", "name", "]", ".", "populate_from", "if", "callable", "(", "populate_from", ")", ":", "return", "populate_from", "(", "None", ")", "elif", "isinstance", "(", "populate_from", ",", "Literal", ")", ":", "return", "populate_from", ".", "value", "elif", "populate_from", "is", "not", "NotSet", ":", "return", "self", ".", "_resolve_value", "(", "populate_from", ")" ]
Returns an appropriate value for the given name. This simply asks each of the instances for a value.
[ "Returns", "an", "appropriate", "value", "for", "the", "given", "name", ".", "This", "simply", "asks", "each", "of", "the", "instances", "for", "a", "value", "." ]
train
https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/base.py#L61-L79
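_resolve_value() falls back to the element's populate_from setting when no instance supplies a value. A schematic of that three-way fallback; the Literal and NotSet definitions below are stand-ins for the real marker objects used by the metadata options.

class Literal:
    def __init__(self, value):
        self.value = value

NotSet = object()

def resolve_default(populate_from, resolve):
    """Mirror the populate_from branch: callable, literal, or another element name."""
    if callable(populate_from):
        return populate_from(None)
    elif isinstance(populate_from, Literal):
        return populate_from.value
    elif populate_from is not NotSet:
        # treat the setting as the name of another element to resolve
        return resolve(populate_from)
    return None

print(resolve_default(Literal('Default title'), resolve=lambda name: None))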
willhardy/django-seo
rollyourown/seo/base.py
MetadataBase._get_formatted_data
def _get_formatted_data(cls, path, context=None, site=None, language=None):
        """ Return an object to conveniently access the appropriate values. """
        return FormattedMetadata(cls(), cls._get_instances(path, context, site, language), path, site, language)
python
def _get_formatted_data(cls, path, context=None, site=None, language=None):
        """ Return an object to conveniently access the appropriate values. """
        return FormattedMetadata(cls(), cls._get_instances(path, context, site, language), path, site, language)
[ "def", "_get_formatted_data", "(", "cls", ",", "path", ",", "context", "=", "None", ",", "site", "=", "None", ",", "language", "=", "None", ")", ":", "return", "FormattedMetadata", "(", "cls", "(", ")", ",", "cls", ".", "_get_instances", "(", "path", ",", "context", ",", "site", ",", "language", ")", ",", "path", ",", "site", ",", "language", ")" ]
Return an object to conveniently access the appropriate values.
[ "Return", "an", "object", "to", "conveniently", "access", "the", "appropriate", "values", "." ]
train
https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/base.py#L217-L219
willhardy/django-seo
rollyourown/seo/base.py
MetadataBase._get_instances
def _get_instances(cls, path, context=None, site=None, language=None):
        """ A sequence of instances to discover metadata.
            Each instance from each backend is looked up when possible/necessary.
            This is a generator to eliminate unnecessary queries.
        """
        backend_context = {'view_context': context }

        for model in cls._meta.models.values():
            for instance in model.objects.get_instances(path, site, language, backend_context) or []:
                if hasattr(instance, '_process_context'):
                    instance._process_context(backend_context)
                yield instance
python
def _get_instances(cls, path, context=None, site=None, language=None):
        """ A sequence of instances to discover metadata.
            Each instance from each backend is looked up when possible/necessary.
            This is a generator to eliminate unnecessary queries.
        """
        backend_context = {'view_context': context }

        for model in cls._meta.models.values():
            for instance in model.objects.get_instances(path, site, language, backend_context) or []:
                if hasattr(instance, '_process_context'):
                    instance._process_context(backend_context)
                yield instance
[ "def", "_get_instances", "(", "cls", ",", "path", ",", "context", "=", "None", ",", "site", "=", "None", ",", "language", "=", "None", ")", ":", "backend_context", "=", "{", "'view_context'", ":", "context", "}", "for", "model", "in", "cls", ".", "_meta", ".", "models", ".", "values", "(", ")", ":", "for", "instance", "in", "model", ".", "objects", ".", "get_instances", "(", "path", ",", "site", ",", "language", ",", "backend_context", ")", "or", "[", "]", ":", "if", "hasattr", "(", "instance", ",", "'_process_context'", ")", ":", "instance", ".", "_process_context", "(", "backend_context", ")", "yield", "instance" ]
A sequence of instances to discover metadata. Each instance from each backend is looked up when possible/necessary. This is a generator to eliminate unnecessary queries.
[ "A", "sequence", "of", "instances", "to", "discover", "metadata", ".", "Each", "instance", "from", "each", "backend", "is", "looked", "up", "when", "possible", "/", "necessary", ".", "This", "is", "a", "generator", "to", "eliminate", "unnecessary", "queries", "." ]
train
https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/base.py#L223-L234
willhardy/django-seo
rollyourown/seo/backends.py
_resolve
def _resolve(value, model_instance=None, context=None):
    """ Resolves any template references in the given value. """

    if isinstance(value, basestring) and "{" in value:
        if context is None:
            context = Context()
        if model_instance is not None:
            context[model_instance._meta.module_name] = model_instance
        value = Template(value).render(context)

    return value
python
def _resolve(value, model_instance=None, context=None):
    """ Resolves any template references in the given value. """

    if isinstance(value, basestring) and "{" in value:
        if context is None:
            context = Context()
        if model_instance is not None:
            context[model_instance._meta.module_name] = model_instance
        value = Template(value).render(context)

    return value
[ "def", "_resolve", "(", "value", ",", "model_instance", "=", "None", ",", "context", "=", "None", ")", ":", "if", "isinstance", "(", "value", ",", "basestring", ")", "and", "\"{\"", "in", "value", ":", "if", "context", "is", "None", ":", "context", "=", "Context", "(", ")", "if", "model_instance", "is", "not", "None", ":", "context", "[", "model_instance", ".", "_meta", ".", "module_name", "]", "=", "model_instance", "value", "=", "Template", "(", "value", ")", ".", "render", "(", "context", ")", "return", "value" ]
Resolves any template references in the given value.
[ "Resolves", "any", "template", "references", "in", "the", "given", "value", "." ]
train
https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/backends.py#L331-L341
willhardy/django-seo
rollyourown/seo/backends.py
ModelBackend.validate
def validate(options):
        """ Validates the application of this backend to a given metadata """
        try:
            if options.backends.index('modelinstance') > options.backends.index('model'):
                raise Exception("Metadata backend 'modelinstance' must come before 'model' backend")
        except ValueError:
            raise Exception("Metadata backend 'modelinstance' must be installed in order to use 'model' backend")
python
def validate(options):
        """ Validates the application of this backend to a given metadata """
        try:
            if options.backends.index('modelinstance') > options.backends.index('model'):
                raise Exception("Metadata backend 'modelinstance' must come before 'model' backend")
        except ValueError:
            raise Exception("Metadata backend 'modelinstance' must be installed in order to use 'model' backend")
[ "def", "validate", "(", "options", ")", ":", "try", ":", "if", "options", ".", "backends", ".", "index", "(", "'modelinstance'", ")", ">", "options", ".", "backends", ".", "index", "(", "'model'", ")", ":", "raise", "Exception", "(", "\"Metadata backend 'modelinstance' must come before 'model' backend\"", ")", "except", "ValueError", ":", "raise", "Exception", "(", "\"Metadata backend 'modelinstance' must be installed in order to use 'model' backend\"", ")" ]
Validates the application of this backend to a given metadata
[ "Validates", "the", "application", "of", "this", "backend", "to", "a", "given", "metadata" ]
train
https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/backends.py#L320-L327
willhardy/django-seo
rollyourown/seo/options.py
Options._register_elements
def _register_elements(self, elements):
        """ Takes elements from the metadata class and creates a base model for all backend models .
        """
        self.elements = elements

        for key, obj in elements.items():
            obj.contribute_to_class(self.metadata, key)

        # Create the common Django fields
        fields = {}
        for key, obj in elements.items():
            if obj.editable:
                field = obj.get_field()
                if not field.help_text:
                    if key in self.bulk_help_text:
                        field.help_text = self.bulk_help_text[key]
                fields[key] = field

        # 0. Abstract base model with common fields
        base_meta = type('Meta', (), self.original_meta)

        class BaseMeta(base_meta):
            abstract = True
            app_label = 'seo'

        fields['Meta'] = BaseMeta
        # Do we need this?
        fields['__module__'] = __name__  #attrs['__module__']

        self.MetadataBaseModel = type('%sBase' % self.name, (models.Model,), fields)
python
def _register_elements(self, elements):
        """ Takes elements from the metadata class and creates a base model for all backend models .
        """
        self.elements = elements

        for key, obj in elements.items():
            obj.contribute_to_class(self.metadata, key)

        # Create the common Django fields
        fields = {}
        for key, obj in elements.items():
            if obj.editable:
                field = obj.get_field()
                if not field.help_text:
                    if key in self.bulk_help_text:
                        field.help_text = self.bulk_help_text[key]
                fields[key] = field

        # 0. Abstract base model with common fields
        base_meta = type('Meta', (), self.original_meta)

        class BaseMeta(base_meta):
            abstract = True
            app_label = 'seo'

        fields['Meta'] = BaseMeta
        # Do we need this?
        fields['__module__'] = __name__  #attrs['__module__']

        self.MetadataBaseModel = type('%sBase' % self.name, (models.Model,), fields)
[ "def", "_register_elements", "(", "self", ",", "elements", ")", ":", "self", ".", "elements", "=", "elements", "for", "key", ",", "obj", "in", "elements", ".", "items", "(", ")", ":", "obj", ".", "contribute_to_class", "(", "self", ".", "metadata", ",", "key", ")", "# Create the common Django fields", "fields", "=", "{", "}", "for", "key", ",", "obj", "in", "elements", ".", "items", "(", ")", ":", "if", "obj", ".", "editable", ":", "field", "=", "obj", ".", "get_field", "(", ")", "if", "not", "field", ".", "help_text", ":", "if", "key", "in", "self", ".", "bulk_help_text", ":", "field", ".", "help_text", "=", "self", ".", "bulk_help_text", "[", "key", "]", "fields", "[", "key", "]", "=", "field", "# 0. Abstract base model with common fields", "base_meta", "=", "type", "(", "'Meta'", ",", "(", ")", ",", "self", ".", "original_meta", ")", "class", "BaseMeta", "(", "base_meta", ")", ":", "abstract", "=", "True", "app_label", "=", "'seo'", "fields", "[", "'Meta'", "]", "=", "BaseMeta", "# Do we need this?", "fields", "[", "'__module__'", "]", "=", "__name__", "#attrs['__module__']", "self", ".", "MetadataBaseModel", "=", "type", "(", "'%sBase'", "%", "self", ".", "name", ",", "(", "models", ".", "Model", ",", ")", ",", "fields", ")" ]
Takes elements from the metadata class and creates a base model for all backend models.
[ "Takes", "elements", "from", "the", "metadata", "class", "and", "creates", "a", "base", "model", "for", "all", "backend", "models", "." ]
train
https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/options.py#L38-L64
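A hedged illustration of the dynamic-class pattern _register_elements uses, with plain Python classes instead of Django models so it runs without a configured Django project (all names and values are placeholders):

    fields = {'title': 'placeholder for a Django field', '__module__': __name__}
    BaseMeta = type('Meta', (), {'abstract': True, 'app_label': 'seo'})
    fields['Meta'] = BaseMeta
    MetadataBase = type('MyMetadataBase', (object,), fields)

    print(MetadataBase.Meta.abstract)   # True
    print(MetadataBase.title)           # 'placeholder for a Django field'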
willhardy/django-seo
rollyourown/seo/options.py
Options._add_backend
def _add_backend(self, backend): """ Builds a subclass model for the given backend """ md_type = backend.verbose_name base = backend().get_model(self) # TODO: Rename this field new_md_attrs = {'_metadata': self.metadata, '__module__': __name__ } new_md_meta = {} new_md_meta['verbose_name'] = '%s (%s)' % (self.verbose_name, md_type) new_md_meta['verbose_name_plural'] = '%s (%s)' % (self.verbose_name_plural, md_type) new_md_meta['unique_together'] = base._meta.unique_together new_md_attrs['Meta'] = type("Meta", (), new_md_meta) new_md_attrs['_metadata_type'] = backend.name model = type("%s%s"%(self.name,"".join(md_type.split())), (base, self.MetadataBaseModel), new_md_attrs.copy()) self.models[backend.name] = model # This is a little dangerous, but because we set __module__ to __name__, the model needs tobe accessible here globals()[model.__name__] = model
python
def _add_backend(self, backend): """ Builds a subclass model for the given backend """ md_type = backend.verbose_name base = backend().get_model(self) # TODO: Rename this field new_md_attrs = {'_metadata': self.metadata, '__module__': __name__ } new_md_meta = {} new_md_meta['verbose_name'] = '%s (%s)' % (self.verbose_name, md_type) new_md_meta['verbose_name_plural'] = '%s (%s)' % (self.verbose_name_plural, md_type) new_md_meta['unique_together'] = base._meta.unique_together new_md_attrs['Meta'] = type("Meta", (), new_md_meta) new_md_attrs['_metadata_type'] = backend.name model = type("%s%s"%(self.name,"".join(md_type.split())), (base, self.MetadataBaseModel), new_md_attrs.copy()) self.models[backend.name] = model # This is a little dangerous, but because we set __module__ to __name__, the model needs tobe accessible here globals()[model.__name__] = model
[ "def", "_add_backend", "(", "self", ",", "backend", ")", ":", "md_type", "=", "backend", ".", "verbose_name", "base", "=", "backend", "(", ")", ".", "get_model", "(", "self", ")", "# TODO: Rename this field", "new_md_attrs", "=", "{", "'_metadata'", ":", "self", ".", "metadata", ",", "'__module__'", ":", "__name__", "}", "new_md_meta", "=", "{", "}", "new_md_meta", "[", "'verbose_name'", "]", "=", "'%s (%s)'", "%", "(", "self", ".", "verbose_name", ",", "md_type", ")", "new_md_meta", "[", "'verbose_name_plural'", "]", "=", "'%s (%s)'", "%", "(", "self", ".", "verbose_name_plural", ",", "md_type", ")", "new_md_meta", "[", "'unique_together'", "]", "=", "base", ".", "_meta", ".", "unique_together", "new_md_attrs", "[", "'Meta'", "]", "=", "type", "(", "\"Meta\"", ",", "(", ")", ",", "new_md_meta", ")", "new_md_attrs", "[", "'_metadata_type'", "]", "=", "backend", ".", "name", "model", "=", "type", "(", "\"%s%s\"", "%", "(", "self", ".", "name", ",", "\"\"", ".", "join", "(", "md_type", ".", "split", "(", ")", ")", ")", ",", "(", "base", ",", "self", ".", "MetadataBaseModel", ")", ",", "new_md_attrs", ".", "copy", "(", ")", ")", "self", ".", "models", "[", "backend", ".", "name", "]", "=", "model", "# This is a little dangerous, but because we set __module__ to __name__, the model needs tobe accessible here", "globals", "(", ")", "[", "model", ".", "__name__", "]", "=", "model" ]
Builds a subclass model for the given backend
[ "Builds", "a", "subclass", "model", "for", "the", "given", "backend" ]
train
https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/options.py#L66-L82
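A similar hedged sketch of the per-backend subclassing step; the name composition and the globals() registration mirror the code above, while BaseStub and the argument values are stand-ins:

    class BaseStub(object):
        pass

    def add_backend(name, verbose_name, base):
        # Compose the class name the same way as above, e.g. 'MyMetadataModelInstance',
        # then expose the generated class at module level via globals().
        attrs = {'_metadata_type': verbose_name, '__module__': __name__}
        model = type("%s%s" % (name, "".join(verbose_name.split())), (base,), attrs)
        globals()[model.__name__] = model
        return model

    generated = add_backend('MyMetadata', 'Model Instance', BaseStub)
    print(generated.__name__)   # MyMetadataModelInstance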
willhardy/django-seo
rollyourown/seo/options.py
Options._set_seo_models
def _set_seo_models(self, value): """ Gets the actual models to be used. """ seo_models = [] for model_name in value: if "." in model_name: app_label, model_name = model_name.split(".", 1) model = models.get_model(app_label, model_name) if model: seo_models.append(model) else: app = models.get_app(model_name) if app: seo_models.extend(models.get_models(app)) self.seo_models = seo_models
python
def _set_seo_models(self, value): """ Gets the actual models to be used. """ seo_models = [] for model_name in value: if "." in model_name: app_label, model_name = model_name.split(".", 1) model = models.get_model(app_label, model_name) if model: seo_models.append(model) else: app = models.get_app(model_name) if app: seo_models.extend(models.get_models(app)) self.seo_models = seo_models
[ "def", "_set_seo_models", "(", "self", ",", "value", ")", ":", "seo_models", "=", "[", "]", "for", "model_name", "in", "value", ":", "if", "\".\"", "in", "model_name", ":", "app_label", ",", "model_name", "=", "model_name", ".", "split", "(", "\".\"", ",", "1", ")", "model", "=", "models", ".", "get_model", "(", "app_label", ",", "model_name", ")", "if", "model", ":", "seo_models", ".", "append", "(", "model", ")", "else", ":", "app", "=", "models", ".", "get_app", "(", "model_name", ")", "if", "app", ":", "seo_models", ".", "extend", "(", "models", ".", "get_models", "(", "app", ")", ")", "self", ".", "seo_models", "=", "seo_models" ]
Gets the actual models to be used.
[ "Gets", "the", "actual", "models", "to", "be", "used", "." ]
train
https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/options.py#L84-L98
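The model lookups above rely on APIs from older Django versions; the name-parsing half can be sketched on its own without Django (split_model_names is an illustrative helper, not part of the library):

    def split_model_names(names):
        for model_name in names:
            if "." in model_name:
                app_label, model_name = model_name.split(".", 1)
                yield app_label, model_name   # one specific model, e.g. ('flatpages', 'FlatPage')
            else:
                yield model_name, None        # a whole app, e.g. ('blog', None)

    print(list(split_model_names(['flatpages.FlatPage', 'blog'])))
    # [('flatpages', 'FlatPage'), ('blog', None)]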
willhardy/django-seo
rollyourown/seo/fields.py
MetadataField.validate
def validate(self): """ Discover certain illegal configurations """ if not self.editable: assert self.populate_from is not NotSet, u"If field (%s) is not editable, you must set populate_from" % self.name
python
def validate(self): """ Discover certain illegal configurations """ if not self.editable: assert self.populate_from is not NotSet, u"If field (%s) is not editable, you must set populate_from" % self.name
[ "def", "validate", "(", "self", ")", ":", "if", "not", "self", ".", "editable", ":", "assert", "self", ".", "populate_from", "is", "not", "NotSet", ",", "u\"If field (%s) is not editable, you must set populate_from\"", "%", "self", ".", "name" ]
Discover certain illegal configurations
[ "Discover", "certain", "illegal", "configurations" ]
train
https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/fields.py#L72-L75
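A small self-contained illustration of the invariant asserted above; FieldStub is a hypothetical stand-in for the real field class and None stands in for the library's NotSet sentinel:

    class FieldStub(object):
        def __init__(self, name, editable, populate_from=None):
            self.name, self.editable, self.populate_from = name, editable, populate_from

        def validate(self):
            if not self.editable:
                assert self.populate_from is not None, \
                    "If field (%s) is not editable, you must set populate_from" % self.name

    FieldStub('title', editable=True).validate()                              # ok
    FieldStub('title', editable=False, populate_from='headline').validate()   # ok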
willhardy/django-seo
rollyourown/seo/management/__init__.py
populate_all_metadata
def populate_all_metadata(): """ Create metadata instances for all models in seo_models if empty. Once you have created a single metadata instance, this will not run. This is because it is a potentially slow operation that need only be done once. If you want to ensure that everything is populated, run the populate_metadata management command. """ for Metadata in registry.values(): InstanceMetadata = Metadata._meta.get_model('modelinstance') if InstanceMetadata is not None: for model in Metadata._meta.seo_models: populate_metadata(model, InstanceMetadata)
python
def populate_all_metadata(): """ Create metadata instances for all models in seo_models if empty. Once you have created a single metadata instance, this will not run. This is because it is a potentially slow operation that need only be done once. If you want to ensure that everything is populated, run the populate_metadata management command. """ for Metadata in registry.values(): InstanceMetadata = Metadata._meta.get_model('modelinstance') if InstanceMetadata is not None: for model in Metadata._meta.seo_models: populate_metadata(model, InstanceMetadata)
[ "def", "populate_all_metadata", "(", ")", ":", "for", "Metadata", "in", "registry", ".", "values", "(", ")", ":", "InstanceMetadata", "=", "Metadata", ".", "_meta", ".", "get_model", "(", "'modelinstance'", ")", "if", "InstanceMetadata", "is", "not", "None", ":", "for", "model", "in", "Metadata", ".", "_meta", ".", "seo_models", ":", "populate_metadata", "(", "model", ",", "InstanceMetadata", ")" ]
Create metadata instances for all models in seo_models if empty. Once you have created a single metadata instance, this will not run. This is because it is a potentially slow operation that need only be done once. If you want to ensure that everything is populated, run the populate_metadata management command.
[ "Create", "metadata", "instances", "for", "all", "models", "in", "seo_models", "if", "empty", ".", "Once", "you", "have", "created", "a", "single", "metadata", "instance", "this", "will", "not", "run", ".", "This", "is", "because", "it", "is", "a", "potentially", "slow", "operation", "that", "need", "only", "be", "done", "once", ".", "If", "you", "want", "to", "ensure", "that", "everything", "is", "populated", "run", "the", "populate_metadata", "management", "command", "." ]
train
https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/management/__init__.py#L30-L41
willhardy/django-seo
rollyourown/seo/systemviews.py
SystemViews.populate
def populate(self): """ Populate this list with all views that take no arguments. """ from django.conf import settings from django.core import urlresolvers self.append(("", "")) urlconf = settings.ROOT_URLCONF resolver = urlresolvers.RegexURLResolver(r'^/', urlconf) # Collect base level views for key, value in resolver.reverse_dict.items(): if isinstance(key, basestring): args = value[0][0][1] url = "/" + value[0][0][0] self.append((key, " ".join(key.split("_")))) # Collect namespaces (TODO: merge these two sections into one) for namespace, url in resolver.namespace_dict.items(): for key, value in url[1].reverse_dict.items(): if isinstance(key, basestring): args = value[0][0][1] full_key = '%s:%s' % (namespace, key) self.append((full_key, "%s: %s" % (namespace, " ".join(key.split("_"))))) self.sort()
python
def populate(self): """ Populate this list with all views that take no arguments. """ from django.conf import settings from django.core import urlresolvers self.append(("", "")) urlconf = settings.ROOT_URLCONF resolver = urlresolvers.RegexURLResolver(r'^/', urlconf) # Collect base level views for key, value in resolver.reverse_dict.items(): if isinstance(key, basestring): args = value[0][0][1] url = "/" + value[0][0][0] self.append((key, " ".join(key.split("_")))) # Collect namespaces (TODO: merge these two sections into one) for namespace, url in resolver.namespace_dict.items(): for key, value in url[1].reverse_dict.items(): if isinstance(key, basestring): args = value[0][0][1] full_key = '%s:%s' % (namespace, key) self.append((full_key, "%s: %s" % (namespace, " ".join(key.split("_"))))) self.sort()
[ "def", "populate", "(", "self", ")", ":", "from", "django", ".", "conf", "import", "settings", "from", "django", ".", "core", "import", "urlresolvers", "self", ".", "append", "(", "(", "\"\"", ",", "\"\"", ")", ")", "urlconf", "=", "settings", ".", "ROOT_URLCONF", "resolver", "=", "urlresolvers", ".", "RegexURLResolver", "(", "r'^/'", ",", "urlconf", ")", "# Collect base level views", "for", "key", ",", "value", "in", "resolver", ".", "reverse_dict", ".", "items", "(", ")", ":", "if", "isinstance", "(", "key", ",", "basestring", ")", ":", "args", "=", "value", "[", "0", "]", "[", "0", "]", "[", "1", "]", "url", "=", "\"/\"", "+", "value", "[", "0", "]", "[", "0", "]", "[", "0", "]", "self", ".", "append", "(", "(", "key", ",", "\" \"", ".", "join", "(", "key", ".", "split", "(", "\"_\"", ")", ")", ")", ")", "# Collect namespaces (TODO: merge these two sections into one)", "for", "namespace", ",", "url", "in", "resolver", ".", "namespace_dict", ".", "items", "(", ")", ":", "for", "key", ",", "value", "in", "url", "[", "1", "]", ".", "reverse_dict", ".", "items", "(", ")", ":", "if", "isinstance", "(", "key", ",", "basestring", ")", ":", "args", "=", "value", "[", "0", "]", "[", "0", "]", "[", "1", "]", "full_key", "=", "'%s:%s'", "%", "(", "namespace", ",", "key", ")", "self", ".", "append", "(", "(", "full_key", ",", "\"%s: %s\"", "%", "(", "namespace", ",", "\" \"", ".", "join", "(", "key", ".", "split", "(", "\"_\"", ")", ")", ")", ")", ")", "self", ".", "sort", "(", ")" ]
Populate this list with all views that take no arguments.
[ "Populate", "this", "list", "with", "all", "views", "that", "take", "no", "arguments", "." ]
train
https://github.com/willhardy/django-seo/blob/3089686a3c490091315860979ad15ef2527c3e3e/rollyourown/seo/systemviews.py#L43-L65
geomet/geomet
geomet/util.py
block_splitter
def block_splitter(data, block_size): """ Creates a generator by slicing ``data`` into chunks of ``block_size``. >>> data = range(10) >>> list(block_splitter(data, 2)) [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]] If ``data`` cannot be evenly divided by ``block_size``, the last block will simply be the remainder of the data. Example: >>> data = range(10) >>> list(block_splitter(data, 3)) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] If the ``block_size`` is greater than the total length of ``data``, a single block will be generated: >>> data = range(3) >>> list(block_splitter(data, 4)) [[0, 1, 2]] :param data: Any iterable. If ``data`` is a generator, it will be exhausted, obviously. :param int block_site: Desired (maximum) block size. """ buf = [] for i, datum in enumerate(data): buf.append(datum) if len(buf) == block_size: yield buf buf = [] # If there's anything leftover (a partial block), # yield it as well. if buf: yield buf
python
def block_splitter(data, block_size): """ Creates a generator by slicing ``data`` into chunks of ``block_size``. >>> data = range(10) >>> list(block_splitter(data, 2)) [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]] If ``data`` cannot be evenly divided by ``block_size``, the last block will simply be the remainder of the data. Example: >>> data = range(10) >>> list(block_splitter(data, 3)) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] If the ``block_size`` is greater than the total length of ``data``, a single block will be generated: >>> data = range(3) >>> list(block_splitter(data, 4)) [[0, 1, 2]] :param data: Any iterable. If ``data`` is a generator, it will be exhausted, obviously. :param int block_site: Desired (maximum) block size. """ buf = [] for i, datum in enumerate(data): buf.append(datum) if len(buf) == block_size: yield buf buf = [] # If there's anything leftover (a partial block), # yield it as well. if buf: yield buf
[ "def", "block_splitter", "(", "data", ",", "block_size", ")", ":", "buf", "=", "[", "]", "for", "i", ",", "datum", "in", "enumerate", "(", "data", ")", ":", "buf", ".", "append", "(", "datum", ")", "if", "len", "(", "buf", ")", "==", "block_size", ":", "yield", "buf", "buf", "=", "[", "]", "# If there's anything leftover (a partial block),", "# yield it as well.", "if", "buf", ":", "yield", "buf" ]
Creates a generator by slicing ``data`` into chunks of ``block_size``. >>> data = range(10) >>> list(block_splitter(data, 2)) [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]] If ``data`` cannot be evenly divided by ``block_size``, the last block will simply be the remainder of the data. Example: >>> data = range(10) >>> list(block_splitter(data, 3)) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] If the ``block_size`` is greater than the total length of ``data``, a single block will be generated: >>> data = range(3) >>> list(block_splitter(data, 4)) [[0, 1, 2]] :param data: Any iterable. If ``data`` is a generator, it will be exhausted, obviously. :param int block_size: Desired (maximum) block size.
[ "Creates", "a", "generator", "by", "slicing", "data", "into", "chunks", "of", "block_size", "." ]
train
https://github.com/geomet/geomet/blob/b82d7118113ab723751eba3de5df98c368423c2b/geomet/util.py#L19-L57
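A brief usage sketch, assuming geomet is installed so that block_splitter can be imported from geomet.util:

    from geomet.util import block_splitter

    for block in block_splitter(range(7), 3):
        print(block)
    # [0, 1, 2]
    # [3, 4, 5]
    # [6]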
geomet/geomet
geomet/util.py
round_geom
def round_geom(geom, precision=None): """Round coordinates of a geometric object to given precision.""" if geom['type'] == 'Point': x, y = geom['coordinates'] xp, yp = [x], [y] if precision is not None: xp = [round(v, precision) for v in xp] yp = [round(v, precision) for v in yp] new_coords = tuple(zip(xp, yp))[0] if geom['type'] in ['LineString', 'MultiPoint']: xp, yp = zip(*geom['coordinates']) if precision is not None: xp = [round(v, precision) for v in xp] yp = [round(v, precision) for v in yp] new_coords = tuple(zip(xp, yp)) elif geom['type'] in ['Polygon', 'MultiLineString']: new_coords = [] for piece in geom['coordinates']: xp, yp = zip(*piece) if precision is not None: xp = [round(v, precision) for v in xp] yp = [round(v, precision) for v in yp] new_coords.append(tuple(zip(xp, yp))) elif geom['type'] == 'MultiPolygon': parts = geom['coordinates'] new_coords = [] for part in parts: inner_coords = [] for ring in part: xp, yp = zip(*ring) if precision is not None: xp = [round(v, precision) for v in xp] yp = [round(v, precision) for v in yp] inner_coords.append(tuple(zip(xp, yp))) new_coords.append(inner_coords) return {'type': geom['type'], 'coordinates': new_coords}
python
def round_geom(geom, precision=None): """Round coordinates of a geometric object to given precision.""" if geom['type'] == 'Point': x, y = geom['coordinates'] xp, yp = [x], [y] if precision is not None: xp = [round(v, precision) for v in xp] yp = [round(v, precision) for v in yp] new_coords = tuple(zip(xp, yp))[0] if geom['type'] in ['LineString', 'MultiPoint']: xp, yp = zip(*geom['coordinates']) if precision is not None: xp = [round(v, precision) for v in xp] yp = [round(v, precision) for v in yp] new_coords = tuple(zip(xp, yp)) elif geom['type'] in ['Polygon', 'MultiLineString']: new_coords = [] for piece in geom['coordinates']: xp, yp = zip(*piece) if precision is not None: xp = [round(v, precision) for v in xp] yp = [round(v, precision) for v in yp] new_coords.append(tuple(zip(xp, yp))) elif geom['type'] == 'MultiPolygon': parts = geom['coordinates'] new_coords = [] for part in parts: inner_coords = [] for ring in part: xp, yp = zip(*ring) if precision is not None: xp = [round(v, precision) for v in xp] yp = [round(v, precision) for v in yp] inner_coords.append(tuple(zip(xp, yp))) new_coords.append(inner_coords) return {'type': geom['type'], 'coordinates': new_coords}
[ "def", "round_geom", "(", "geom", ",", "precision", "=", "None", ")", ":", "if", "geom", "[", "'type'", "]", "==", "'Point'", ":", "x", ",", "y", "=", "geom", "[", "'coordinates'", "]", "xp", ",", "yp", "=", "[", "x", "]", ",", "[", "y", "]", "if", "precision", "is", "not", "None", ":", "xp", "=", "[", "round", "(", "v", ",", "precision", ")", "for", "v", "in", "xp", "]", "yp", "=", "[", "round", "(", "v", ",", "precision", ")", "for", "v", "in", "yp", "]", "new_coords", "=", "tuple", "(", "zip", "(", "xp", ",", "yp", ")", ")", "[", "0", "]", "if", "geom", "[", "'type'", "]", "in", "[", "'LineString'", ",", "'MultiPoint'", "]", ":", "xp", ",", "yp", "=", "zip", "(", "*", "geom", "[", "'coordinates'", "]", ")", "if", "precision", "is", "not", "None", ":", "xp", "=", "[", "round", "(", "v", ",", "precision", ")", "for", "v", "in", "xp", "]", "yp", "=", "[", "round", "(", "v", ",", "precision", ")", "for", "v", "in", "yp", "]", "new_coords", "=", "tuple", "(", "zip", "(", "xp", ",", "yp", ")", ")", "elif", "geom", "[", "'type'", "]", "in", "[", "'Polygon'", ",", "'MultiLineString'", "]", ":", "new_coords", "=", "[", "]", "for", "piece", "in", "geom", "[", "'coordinates'", "]", ":", "xp", ",", "yp", "=", "zip", "(", "*", "piece", ")", "if", "precision", "is", "not", "None", ":", "xp", "=", "[", "round", "(", "v", ",", "precision", ")", "for", "v", "in", "xp", "]", "yp", "=", "[", "round", "(", "v", ",", "precision", ")", "for", "v", "in", "yp", "]", "new_coords", ".", "append", "(", "tuple", "(", "zip", "(", "xp", ",", "yp", ")", ")", ")", "elif", "geom", "[", "'type'", "]", "==", "'MultiPolygon'", ":", "parts", "=", "geom", "[", "'coordinates'", "]", "new_coords", "=", "[", "]", "for", "part", "in", "parts", ":", "inner_coords", "=", "[", "]", "for", "ring", "in", "part", ":", "xp", ",", "yp", "=", "zip", "(", "*", "ring", ")", "if", "precision", "is", "not", "None", ":", "xp", "=", "[", "round", "(", "v", ",", "precision", ")", "for", "v", "in", "xp", "]", "yp", "=", "[", "round", "(", "v", ",", "precision", ")", "for", "v", "in", "yp", "]", "inner_coords", ".", "append", "(", "tuple", "(", "zip", "(", "xp", ",", "yp", ")", ")", ")", "new_coords", ".", "append", "(", "inner_coords", ")", "return", "{", "'type'", ":", "geom", "[", "'type'", "]", ",", "'coordinates'", ":", "new_coords", "}" ]
Round coordinates of a geometric object to given precision.
[ "Round", "coordinates", "of", "a", "geometric", "object", "to", "given", "precision", "." ]
train
https://github.com/geomet/geomet/blob/b82d7118113ab723751eba3de5df98c368423c2b/geomet/util.py#L77-L112
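A brief usage sketch for round_geom, again assuming geomet is installed:

    from geomet.util import round_geom

    geom = {'type': 'Point', 'coordinates': (1.23456789, 2.98765432)}
    print(round_geom(geom, precision=3))
    # {'type': 'Point', 'coordinates': (1.235, 2.988)}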
geomet/geomet
geomet/util.py
flatten_multi_dim
def flatten_multi_dim(sequence): """Flatten a multi-dimensional array-like to a single dimensional sequence (as a generator). """ for x in sequence: if (isinstance(x, collections.Iterable) and not isinstance(x, six.string_types)): for y in flatten_multi_dim(x): yield y else: yield x
python
def flatten_multi_dim(sequence): """Flatten a multi-dimensional array-like to a single dimensional sequence (as a generator). """ for x in sequence: if (isinstance(x, collections.Iterable) and not isinstance(x, six.string_types)): for y in flatten_multi_dim(x): yield y else: yield x
[ "def", "flatten_multi_dim", "(", "sequence", ")", ":", "for", "x", "in", "sequence", ":", "if", "(", "isinstance", "(", "x", ",", "collections", ".", "Iterable", ")", "and", "not", "isinstance", "(", "x", ",", "six", ".", "string_types", ")", ")", ":", "for", "y", "in", "flatten_multi_dim", "(", "x", ")", ":", "yield", "y", "else", ":", "yield", "x" ]
Flatten a multi-dimensional array-like to a single dimensional sequence (as a generator).
[ "Flatten", "a", "multi", "-", "dimensional", "array", "-", "like", "to", "a", "single", "dimensional", "sequence", "(", "as", "a", "generator", ")", "." ]
train
https://github.com/geomet/geomet/blob/b82d7118113ab723751eba3de5df98c368423c2b/geomet/util.py#L115-L125
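flatten_multi_dim uses collections.Iterable, which recent Python 3 releases only provide as collections.abc.Iterable; a modernized standalone sketch of the same idea (not the library's implementation) could look like this:

    from collections.abc import Iterable

    def flatten(sequence):
        # Recurse into nested iterables, but treat strings/bytes as atoms.
        for x in sequence:
            if isinstance(x, Iterable) and not isinstance(x, (str, bytes)):
                yield from flatten(x)
            else:
                yield x

    print(list(flatten([[1, 2], [3, [4, 5]], 'ab'])))   # [1, 2, 3, 4, 5, 'ab']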
geomet/geomet
geomet/tool.py
cli
def cli(input, verbose, quiet, output_format, precision, indent): """Convert text read from the first positional argument, stdin, or a file to GeoJSON and write to stdout.""" verbosity = verbose - quiet configure_logging(verbosity) logger = logging.getLogger('geomet') # Handle the case of file, stream, or string input. try: src = click.open_file(input).readlines() except IOError: src = [input] stdout = click.get_text_stream('stdout') # Read-write loop. try: for line in src: text = line.strip() logger.debug("Input: %r", text) output = translate( text, output_format=output_format, indent=indent, precision=precision ) logger.debug("Output: %r", output) stdout.write(output) stdout.write('\n') sys.exit(0) except Exception: logger.exception("Failed. Exception caught") sys.exit(1)
python
def cli(input, verbose, quiet, output_format, precision, indent): """Convert text read from the first positional argument, stdin, or a file to GeoJSON and write to stdout.""" verbosity = verbose - quiet configure_logging(verbosity) logger = logging.getLogger('geomet') # Handle the case of file, stream, or string input. try: src = click.open_file(input).readlines() except IOError: src = [input] stdout = click.get_text_stream('stdout') # Read-write loop. try: for line in src: text = line.strip() logger.debug("Input: %r", text) output = translate( text, output_format=output_format, indent=indent, precision=precision ) logger.debug("Output: %r", output) stdout.write(output) stdout.write('\n') sys.exit(0) except Exception: logger.exception("Failed. Exception caught") sys.exit(1)
[ "def", "cli", "(", "input", ",", "verbose", ",", "quiet", ",", "output_format", ",", "precision", ",", "indent", ")", ":", "verbosity", "=", "verbose", "-", "quiet", "configure_logging", "(", "verbosity", ")", "logger", "=", "logging", ".", "getLogger", "(", "'geomet'", ")", "# Handle the case of file, stream, or string input.", "try", ":", "src", "=", "click", ".", "open_file", "(", "input", ")", ".", "readlines", "(", ")", "except", "IOError", ":", "src", "=", "[", "input", "]", "stdout", "=", "click", ".", "get_text_stream", "(", "'stdout'", ")", "# Read-write loop.", "try", ":", "for", "line", "in", "src", ":", "text", "=", "line", ".", "strip", "(", ")", "logger", ".", "debug", "(", "\"Input: %r\"", ",", "text", ")", "output", "=", "translate", "(", "text", ",", "output_format", "=", "output_format", ",", "indent", "=", "indent", ",", "precision", "=", "precision", ")", "logger", ".", "debug", "(", "\"Output: %r\"", ",", "output", ")", "stdout", ".", "write", "(", "output", ")", "stdout", ".", "write", "(", "'\\n'", ")", "sys", ".", "exit", "(", "0", ")", "except", "Exception", ":", "logger", ".", "exception", "(", "\"Failed. Exception caught\"", ")", "sys", ".", "exit", "(", "1", ")" ]
Convert text read from the first positional argument, stdin, or a file to GeoJSON and write to stdout.
[ "Convert", "text", "read", "from", "the", "first", "positional", "argument", "stdin", "or", "a", "file", "to", "GeoJSON", "and", "write", "to", "stdout", "." ]
train
https://github.com/geomet/geomet/blob/b82d7118113ab723751eba3de5df98c368423c2b/geomet/tool.py#L83-L116
geomet/geomet
geomet/wkb.py
_get_geom_type
def _get_geom_type(type_bytes): """Get the GeoJSON geometry type label from a WKB type byte string. :param type_bytes: 4 byte string in big endian byte order containing a WKB type number. It may also contain a "has SRID" flag in the high byte (the first type, since this is big endian byte order), indicated as 0x20. If the SRID flag is not set, the high byte will always be null (0x00). :returns: 3-tuple ofGeoJSON geometry type label, the bytes resprenting the geometry type, and a separate "has SRID" flag. If the input `type_bytes` contains an SRID flag, it will be removed. >>> # Z Point, with SRID flag >>> _get_geom_type(b'\\x20\\x00\\x03\\xe9') == ( ... 'Point', b'\\x00\\x00\\x03\\xe9', True) True >>> # 2D MultiLineString, without SRID flag >>> _get_geom_type(b'\\x00\\x00\\x00\\x05') == ( ... 'MultiLineString', b'\\x00\\x00\\x00\\x05', False) True """ # slice off the high byte, which may contain the SRID flag high_byte = type_bytes[0] if six.PY3: high_byte = bytes([high_byte]) has_srid = high_byte == b'\x20' if has_srid: # replace the high byte with a null byte type_bytes = as_bin_str(b'\x00' + type_bytes[1:]) else: type_bytes = as_bin_str(type_bytes) # look up the geometry type geom_type = _BINARY_TO_GEOM_TYPE.get(type_bytes) return geom_type, type_bytes, has_srid
python
def _get_geom_type(type_bytes): """Get the GeoJSON geometry type label from a WKB type byte string. :param type_bytes: 4 byte string in big endian byte order containing a WKB type number. It may also contain a "has SRID" flag in the high byte (the first type, since this is big endian byte order), indicated as 0x20. If the SRID flag is not set, the high byte will always be null (0x00). :returns: 3-tuple ofGeoJSON geometry type label, the bytes resprenting the geometry type, and a separate "has SRID" flag. If the input `type_bytes` contains an SRID flag, it will be removed. >>> # Z Point, with SRID flag >>> _get_geom_type(b'\\x20\\x00\\x03\\xe9') == ( ... 'Point', b'\\x00\\x00\\x03\\xe9', True) True >>> # 2D MultiLineString, without SRID flag >>> _get_geom_type(b'\\x00\\x00\\x00\\x05') == ( ... 'MultiLineString', b'\\x00\\x00\\x00\\x05', False) True """ # slice off the high byte, which may contain the SRID flag high_byte = type_bytes[0] if six.PY3: high_byte = bytes([high_byte]) has_srid = high_byte == b'\x20' if has_srid: # replace the high byte with a null byte type_bytes = as_bin_str(b'\x00' + type_bytes[1:]) else: type_bytes = as_bin_str(type_bytes) # look up the geometry type geom_type = _BINARY_TO_GEOM_TYPE.get(type_bytes) return geom_type, type_bytes, has_srid
[ "def", "_get_geom_type", "(", "type_bytes", ")", ":", "# slice off the high byte, which may contain the SRID flag", "high_byte", "=", "type_bytes", "[", "0", "]", "if", "six", ".", "PY3", ":", "high_byte", "=", "bytes", "(", "[", "high_byte", "]", ")", "has_srid", "=", "high_byte", "==", "b'\\x20'", "if", "has_srid", ":", "# replace the high byte with a null byte", "type_bytes", "=", "as_bin_str", "(", "b'\\x00'", "+", "type_bytes", "[", "1", ":", "]", ")", "else", ":", "type_bytes", "=", "as_bin_str", "(", "type_bytes", ")", "# look up the geometry type", "geom_type", "=", "_BINARY_TO_GEOM_TYPE", ".", "get", "(", "type_bytes", ")", "return", "geom_type", ",", "type_bytes", ",", "has_srid" ]
Get the GeoJSON geometry type label from a WKB type byte string. :param type_bytes: 4 byte string in big endian byte order containing a WKB type number. It may also contain a "has SRID" flag in the high byte (the first byte, since this is big endian byte order), indicated as 0x20. If the SRID flag is not set, the high byte will always be null (0x00). :returns: 3-tuple of GeoJSON geometry type label, the bytes representing the geometry type, and a separate "has SRID" flag. If the input `type_bytes` contains an SRID flag, it will be removed. >>> # Z Point, with SRID flag >>> _get_geom_type(b'\\x20\\x00\\x03\\xe9') == ( ... 'Point', b'\\x00\\x00\\x03\\xe9', True) True >>> # 2D MultiLineString, without SRID flag >>> _get_geom_type(b'\\x00\\x00\\x00\\x05') == ( ... 'MultiLineString', b'\\x00\\x00\\x00\\x05', False) True
[ "Get", "the", "GeoJSON", "geometry", "type", "label", "from", "a", "WKB", "type", "byte", "string", "." ]
train
https://github.com/geomet/geomet/blob/b82d7118113ab723751eba3de5df98c368423c2b/geomet/wkb.py#L110-L147
geomet/geomet
geomet/wkb.py
dumps
def dumps(obj, big_endian=True): """ Dump a GeoJSON-like `dict` to a WKB string. .. note:: The dimensions of the generated WKB will be inferred from the first vertex in the GeoJSON `coordinates`. It will be assumed that all vertices are uniform. There are 4 types: - 2D (X, Y): 2-dimensional geometry - Z (X, Y, Z): 3-dimensional geometry - M (X, Y, M): 2-dimensional geometry with a "Measure" - ZM (X, Y, Z, M): 3-dimensional geometry with a "Measure" If the first vertex contains 2 values, we assume a 2D geometry. If the first vertex contains 3 values, this is slightly ambiguous and so the most common case is chosen: Z. If the first vertex contains 4 values, we assume a ZM geometry. The WKT/WKB standards provide a way of differentiating normal (2D), Z, M, and ZM geometries (http://en.wikipedia.org/wiki/Well-known_text), but the GeoJSON spec does not. Therefore, for the sake of interface simplicity, we assume that geometry that looks 3D contains XYZ components, instead of XYM. If the coordinates list has no coordinate values (this includes nested lists, for example, `[[[[],[]], []]]`, the geometry is considered to be empty. Geometries, with the exception of points, have a reasonable "empty" representation in WKB; however, without knowing the number of coordinate values per vertex, the type is ambigious, and thus we don't know if the geometry type is 2D, Z, M, or ZM. Therefore in this case we expect a `ValueError` to be raised. :param dict obj: GeoJson-like `dict` object. :param bool big_endian: Defaults to `True`. If `True`, data values in the generated WKB will be represented using big endian byte order. Else, little endian. TODO: remove this :param str dims: Indicates to WKB representation desired from converting the given GeoJSON `dict` ``obj``. The accepted values are: * '2D': 2-dimensional geometry (X, Y) * 'Z': 3-dimensional geometry (X, Y, Z) * 'M': 3-dimensional geometry (X, Y, M) * 'ZM': 4-dimensional geometry (X, Y, Z, M) :returns: A WKB binary string representing of the ``obj``. """ geom_type = obj['type'] meta = obj.get('meta', {}) exporter = _dumps_registry.get(geom_type) if exporter is None: _unsupported_geom_type(geom_type) # Check for empty geometries. GeometryCollections have a slightly different # JSON/dict structure, but that's handled. coords_or_geoms = obj.get('coordinates', obj.get('geometries')) if len(list(flatten_multi_dim(coords_or_geoms))) == 0: raise ValueError( 'Empty geometries cannot be represented in WKB. Reason: The ' 'dimensionality of the WKB would be ambiguous.' ) return exporter(obj, big_endian, meta)
python
def dumps(obj, big_endian=True): """ Dump a GeoJSON-like `dict` to a WKB string. .. note:: The dimensions of the generated WKB will be inferred from the first vertex in the GeoJSON `coordinates`. It will be assumed that all vertices are uniform. There are 4 types: - 2D (X, Y): 2-dimensional geometry - Z (X, Y, Z): 3-dimensional geometry - M (X, Y, M): 2-dimensional geometry with a "Measure" - ZM (X, Y, Z, M): 3-dimensional geometry with a "Measure" If the first vertex contains 2 values, we assume a 2D geometry. If the first vertex contains 3 values, this is slightly ambiguous and so the most common case is chosen: Z. If the first vertex contains 4 values, we assume a ZM geometry. The WKT/WKB standards provide a way of differentiating normal (2D), Z, M, and ZM geometries (http://en.wikipedia.org/wiki/Well-known_text), but the GeoJSON spec does not. Therefore, for the sake of interface simplicity, we assume that geometry that looks 3D contains XYZ components, instead of XYM. If the coordinates list has no coordinate values (this includes nested lists, for example, `[[[[],[]], []]]`, the geometry is considered to be empty. Geometries, with the exception of points, have a reasonable "empty" representation in WKB; however, without knowing the number of coordinate values per vertex, the type is ambigious, and thus we don't know if the geometry type is 2D, Z, M, or ZM. Therefore in this case we expect a `ValueError` to be raised. :param dict obj: GeoJson-like `dict` object. :param bool big_endian: Defaults to `True`. If `True`, data values in the generated WKB will be represented using big endian byte order. Else, little endian. TODO: remove this :param str dims: Indicates to WKB representation desired from converting the given GeoJSON `dict` ``obj``. The accepted values are: * '2D': 2-dimensional geometry (X, Y) * 'Z': 3-dimensional geometry (X, Y, Z) * 'M': 3-dimensional geometry (X, Y, M) * 'ZM': 4-dimensional geometry (X, Y, Z, M) :returns: A WKB binary string representing of the ``obj``. """ geom_type = obj['type'] meta = obj.get('meta', {}) exporter = _dumps_registry.get(geom_type) if exporter is None: _unsupported_geom_type(geom_type) # Check for empty geometries. GeometryCollections have a slightly different # JSON/dict structure, but that's handled. coords_or_geoms = obj.get('coordinates', obj.get('geometries')) if len(list(flatten_multi_dim(coords_or_geoms))) == 0: raise ValueError( 'Empty geometries cannot be represented in WKB. Reason: The ' 'dimensionality of the WKB would be ambiguous.' ) return exporter(obj, big_endian, meta)
[ "def", "dumps", "(", "obj", ",", "big_endian", "=", "True", ")", ":", "geom_type", "=", "obj", "[", "'type'", "]", "meta", "=", "obj", ".", "get", "(", "'meta'", ",", "{", "}", ")", "exporter", "=", "_dumps_registry", ".", "get", "(", "geom_type", ")", "if", "exporter", "is", "None", ":", "_unsupported_geom_type", "(", "geom_type", ")", "# Check for empty geometries. GeometryCollections have a slightly different", "# JSON/dict structure, but that's handled.", "coords_or_geoms", "=", "obj", ".", "get", "(", "'coordinates'", ",", "obj", ".", "get", "(", "'geometries'", ")", ")", "if", "len", "(", "list", "(", "flatten_multi_dim", "(", "coords_or_geoms", ")", ")", ")", "==", "0", ":", "raise", "ValueError", "(", "'Empty geometries cannot be represented in WKB. Reason: The '", "'dimensionality of the WKB would be ambiguous.'", ")", "return", "exporter", "(", "obj", ",", "big_endian", ",", "meta", ")" ]
Dump a GeoJSON-like `dict` to a WKB string. .. note:: The dimensions of the generated WKB will be inferred from the first vertex in the GeoJSON `coordinates`. It will be assumed that all vertices are uniform. There are 4 types: - 2D (X, Y): 2-dimensional geometry - Z (X, Y, Z): 3-dimensional geometry - M (X, Y, M): 2-dimensional geometry with a "Measure" - ZM (X, Y, Z, M): 3-dimensional geometry with a "Measure" If the first vertex contains 2 values, we assume a 2D geometry. If the first vertex contains 3 values, this is slightly ambiguous and so the most common case is chosen: Z. If the first vertex contains 4 values, we assume a ZM geometry. The WKT/WKB standards provide a way of differentiating normal (2D), Z, M, and ZM geometries (http://en.wikipedia.org/wiki/Well-known_text), but the GeoJSON spec does not. Therefore, for the sake of interface simplicity, we assume that geometry that looks 3D contains XYZ components, instead of XYM. If the coordinates list has no coordinate values (this includes nested lists, for example, `[[[[],[]], []]]`, the geometry is considered to be empty. Geometries, with the exception of points, have a reasonable "empty" representation in WKB; however, without knowing the number of coordinate values per vertex, the type is ambigious, and thus we don't know if the geometry type is 2D, Z, M, or ZM. Therefore in this case we expect a `ValueError` to be raised. :param dict obj: GeoJson-like `dict` object. :param bool big_endian: Defaults to `True`. If `True`, data values in the generated WKB will be represented using big endian byte order. Else, little endian. TODO: remove this :param str dims: Indicates to WKB representation desired from converting the given GeoJSON `dict` ``obj``. The accepted values are: * '2D': 2-dimensional geometry (X, Y) * 'Z': 3-dimensional geometry (X, Y, Z) * 'M': 3-dimensional geometry (X, Y, M) * 'ZM': 4-dimensional geometry (X, Y, Z, M) :returns: A WKB binary string representing of the ``obj``.
[ "Dump", "a", "GeoJSON", "-", "like", "dict", "to", "a", "WKB", "string", "." ]
train
https://github.com/geomet/geomet/blob/b82d7118113ab723751eba3de5df98c368423c2b/geomet/wkb.py#L177-L246
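A brief usage sketch for geomet.wkb.dumps, assuming geomet is installed; the leading bytes follow from the big-endian marker and the 2D Point type code:

    from geomet import wkb

    point = {'type': 'Point', 'coordinates': [1.0, 2.0]}
    data = wkb.dumps(point, big_endian=True)
    print(data[:5])     # b'\x00\x00\x00\x00\x01' -> big-endian marker + 2D Point type
    print(len(data))    # 21 bytes: 5 header bytes plus two 8-byte doubles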
geomet/geomet
geomet/wkb.py
loads
def loads(string): """ Construct a GeoJSON `dict` from WKB (`string`). The resulting GeoJSON `dict` will include the SRID as an integer in the `meta` object. This was an arbitrary decision made by `geomet, the discussion of which took place here: https://github.com/geomet/geomet/issues/28. In order to be consistent with other libraries [1] and (deprecated) specifications [2], also include the same information in a `crs` object. This isn't ideal, but the `crs` member is no longer part of the GeoJSON standard, according to RFC7946 [3]. However, it's still useful to include this information in GeoJSON payloads because it supports conversion to EWKT/EWKB (which are canonical formats used by PostGIS and the like). Example: {'type': 'Point', 'coordinates': [0.0, 1.0], 'meta': {'srid': 4326}, 'crs': {'type': 'name', 'properties': {'name': 'EPSG4326'}}} NOTE(larsbutler): I'm not sure if it's valid to just prefix EPSG (European Petroluem Survey Group) to an SRID like this, but we'll stick with it for now until it becomes a problem. NOTE(larsbutler): Ideally, we should use URNs instead of this notation, according to the new GeoJSON spec [4]. However, in order to be consistent with [1], we'll stick with this approach for now. References: [1] - https://github.com/bryanjos/geo/issues/76 [2] - http://geojson.org/geojson-spec.html#coordinate-reference-system-objects [3] - https://tools.ietf.org/html/rfc7946#appendix-B.1 [4] - https://tools.ietf.org/html/rfc7946#section-4 """ # noqa string = iter(string) # endianness = string[0:1] endianness = as_bin_str(take(1, string)) if endianness == BIG_ENDIAN: big_endian = True elif endianness == LITTLE_ENDIAN: big_endian = False else: raise ValueError("Invalid endian byte: '0x%s'. Expected 0x00 or 0x01" % binascii.hexlify(endianness.encode()).decode()) endian_token = '>' if big_endian else '<' # type_bytes = string[1:5] type_bytes = as_bin_str(take(4, string)) if not big_endian: # To identify the type, order the type bytes in big endian: type_bytes = type_bytes[::-1] geom_type, type_bytes, has_srid = _get_geom_type(type_bytes) srid = None if has_srid: srid_field = as_bin_str(take(4, string)) [srid] = struct.unpack('%si' % endian_token, srid_field) # data_bytes = string[5:] # FIXME: This won't work for GeometryCollections data_bytes = string importer = _loads_registry.get(geom_type) if importer is None: _unsupported_geom_type(geom_type) data_bytes = iter(data_bytes) result = importer(big_endian, type_bytes, data_bytes) if has_srid: # As mentioned in the docstring above, include both approaches to # indicating the SRID. result['meta'] = {'srid': int(srid)} result['crs'] = { 'type': 'name', 'properties': {'name': 'EPSG%s' % srid}, } return result
python
def loads(string): """ Construct a GeoJSON `dict` from WKB (`string`). The resulting GeoJSON `dict` will include the SRID as an integer in the `meta` object. This was an arbitrary decision made by `geomet, the discussion of which took place here: https://github.com/geomet/geomet/issues/28. In order to be consistent with other libraries [1] and (deprecated) specifications [2], also include the same information in a `crs` object. This isn't ideal, but the `crs` member is no longer part of the GeoJSON standard, according to RFC7946 [3]. However, it's still useful to include this information in GeoJSON payloads because it supports conversion to EWKT/EWKB (which are canonical formats used by PostGIS and the like). Example: {'type': 'Point', 'coordinates': [0.0, 1.0], 'meta': {'srid': 4326}, 'crs': {'type': 'name', 'properties': {'name': 'EPSG4326'}}} NOTE(larsbutler): I'm not sure if it's valid to just prefix EPSG (European Petroluem Survey Group) to an SRID like this, but we'll stick with it for now until it becomes a problem. NOTE(larsbutler): Ideally, we should use URNs instead of this notation, according to the new GeoJSON spec [4]. However, in order to be consistent with [1], we'll stick with this approach for now. References: [1] - https://github.com/bryanjos/geo/issues/76 [2] - http://geojson.org/geojson-spec.html#coordinate-reference-system-objects [3] - https://tools.ietf.org/html/rfc7946#appendix-B.1 [4] - https://tools.ietf.org/html/rfc7946#section-4 """ # noqa string = iter(string) # endianness = string[0:1] endianness = as_bin_str(take(1, string)) if endianness == BIG_ENDIAN: big_endian = True elif endianness == LITTLE_ENDIAN: big_endian = False else: raise ValueError("Invalid endian byte: '0x%s'. Expected 0x00 or 0x01" % binascii.hexlify(endianness.encode()).decode()) endian_token = '>' if big_endian else '<' # type_bytes = string[1:5] type_bytes = as_bin_str(take(4, string)) if not big_endian: # To identify the type, order the type bytes in big endian: type_bytes = type_bytes[::-1] geom_type, type_bytes, has_srid = _get_geom_type(type_bytes) srid = None if has_srid: srid_field = as_bin_str(take(4, string)) [srid] = struct.unpack('%si' % endian_token, srid_field) # data_bytes = string[5:] # FIXME: This won't work for GeometryCollections data_bytes = string importer = _loads_registry.get(geom_type) if importer is None: _unsupported_geom_type(geom_type) data_bytes = iter(data_bytes) result = importer(big_endian, type_bytes, data_bytes) if has_srid: # As mentioned in the docstring above, include both approaches to # indicating the SRID. result['meta'] = {'srid': int(srid)} result['crs'] = { 'type': 'name', 'properties': {'name': 'EPSG%s' % srid}, } return result
[ "def", "loads", "(", "string", ")", ":", "# noqa", "string", "=", "iter", "(", "string", ")", "# endianness = string[0:1]", "endianness", "=", "as_bin_str", "(", "take", "(", "1", ",", "string", ")", ")", "if", "endianness", "==", "BIG_ENDIAN", ":", "big_endian", "=", "True", "elif", "endianness", "==", "LITTLE_ENDIAN", ":", "big_endian", "=", "False", "else", ":", "raise", "ValueError", "(", "\"Invalid endian byte: '0x%s'. Expected 0x00 or 0x01\"", "%", "binascii", ".", "hexlify", "(", "endianness", ".", "encode", "(", ")", ")", ".", "decode", "(", ")", ")", "endian_token", "=", "'>'", "if", "big_endian", "else", "'<'", "# type_bytes = string[1:5]", "type_bytes", "=", "as_bin_str", "(", "take", "(", "4", ",", "string", ")", ")", "if", "not", "big_endian", ":", "# To identify the type, order the type bytes in big endian:", "type_bytes", "=", "type_bytes", "[", ":", ":", "-", "1", "]", "geom_type", ",", "type_bytes", ",", "has_srid", "=", "_get_geom_type", "(", "type_bytes", ")", "srid", "=", "None", "if", "has_srid", ":", "srid_field", "=", "as_bin_str", "(", "take", "(", "4", ",", "string", ")", ")", "[", "srid", "]", "=", "struct", ".", "unpack", "(", "'%si'", "%", "endian_token", ",", "srid_field", ")", "# data_bytes = string[5:] # FIXME: This won't work for GeometryCollections", "data_bytes", "=", "string", "importer", "=", "_loads_registry", ".", "get", "(", "geom_type", ")", "if", "importer", "is", "None", ":", "_unsupported_geom_type", "(", "geom_type", ")", "data_bytes", "=", "iter", "(", "data_bytes", ")", "result", "=", "importer", "(", "big_endian", ",", "type_bytes", ",", "data_bytes", ")", "if", "has_srid", ":", "# As mentioned in the docstring above, include both approaches to", "# indicating the SRID.", "result", "[", "'meta'", "]", "=", "{", "'srid'", ":", "int", "(", "srid", ")", "}", "result", "[", "'crs'", "]", "=", "{", "'type'", ":", "'name'", ",", "'properties'", ":", "{", "'name'", ":", "'EPSG%s'", "%", "srid", "}", ",", "}", "return", "result" ]
Construct a GeoJSON `dict` from WKB (`string`). The resulting GeoJSON `dict` will include the SRID as an integer in the `meta` object. This was an arbitrary decision made by `geomet, the discussion of which took place here: https://github.com/geomet/geomet/issues/28. In order to be consistent with other libraries [1] and (deprecated) specifications [2], also include the same information in a `crs` object. This isn't ideal, but the `crs` member is no longer part of the GeoJSON standard, according to RFC7946 [3]. However, it's still useful to include this information in GeoJSON payloads because it supports conversion to EWKT/EWKB (which are canonical formats used by PostGIS and the like). Example: {'type': 'Point', 'coordinates': [0.0, 1.0], 'meta': {'srid': 4326}, 'crs': {'type': 'name', 'properties': {'name': 'EPSG4326'}}} NOTE(larsbutler): I'm not sure if it's valid to just prefix EPSG (European Petroluem Survey Group) to an SRID like this, but we'll stick with it for now until it becomes a problem. NOTE(larsbutler): Ideally, we should use URNs instead of this notation, according to the new GeoJSON spec [4]. However, in order to be consistent with [1], we'll stick with this approach for now. References: [1] - https://github.com/bryanjos/geo/issues/76 [2] - http://geojson.org/geojson-spec.html#coordinate-reference-system-objects [3] - https://tools.ietf.org/html/rfc7946#appendix-B.1 [4] - https://tools.ietf.org/html/rfc7946#section-4
[ "Construct", "a", "GeoJSON", "dict", "from", "WKB", "(", "string", ")", "." ]
train
https://github.com/geomet/geomet/blob/b82d7118113ab723751eba3de5df98c368423c2b/geomet/wkb.py#L249-L331
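A round-trip sketch combining loads with dumps, assuming geomet is installed; the meta and crs members appear only because an SRID is supplied, as described in the docstring above:

    from geomet import wkb

    point = {'type': 'Point', 'coordinates': [1.0, 2.0], 'meta': {'srid': 4326}}
    round_tripped = wkb.loads(wkb.dumps(point))
    print(round_tripped['coordinates'])   # [1.0, 2.0]
    print(round_tripped['meta'])          # {'srid': 4326}
    print(round_tripped['crs'])           # {'type': 'name', 'properties': {'name': 'EPSG4326'}}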
geomet/geomet
geomet/wkb.py
_header_bytefmt_byteorder
def _header_bytefmt_byteorder(geom_type, num_dims, big_endian, meta=None): """ Utility function to get the WKB header (endian byte + type header), byte format string, and byte order string. """ dim = _INT_TO_DIM_LABEL.get(num_dims) if dim is None: pass # TODO: raise type_byte_str = _WKB[dim][geom_type] srid = meta.get('srid') if srid is not None: # Add the srid flag type_byte_str = SRID_FLAG + type_byte_str[1:] if big_endian: header = BIG_ENDIAN byte_fmt = b'>' byte_order = '>' else: header = LITTLE_ENDIAN byte_fmt = b'<' byte_order = '<' # reverse the byte ordering for little endian type_byte_str = type_byte_str[::-1] header += type_byte_str if srid is not None: srid = int(srid) if big_endian: srid_header = struct.pack('>i', srid) else: srid_header = struct.pack('<i', srid) header += srid_header byte_fmt += b'd' * num_dims return header, byte_fmt, byte_order
python
def _header_bytefmt_byteorder(geom_type, num_dims, big_endian, meta=None): """ Utility function to get the WKB header (endian byte + type header), byte format string, and byte order string. """ dim = _INT_TO_DIM_LABEL.get(num_dims) if dim is None: pass # TODO: raise type_byte_str = _WKB[dim][geom_type] srid = meta.get('srid') if srid is not None: # Add the srid flag type_byte_str = SRID_FLAG + type_byte_str[1:] if big_endian: header = BIG_ENDIAN byte_fmt = b'>' byte_order = '>' else: header = LITTLE_ENDIAN byte_fmt = b'<' byte_order = '<' # reverse the byte ordering for little endian type_byte_str = type_byte_str[::-1] header += type_byte_str if srid is not None: srid = int(srid) if big_endian: srid_header = struct.pack('>i', srid) else: srid_header = struct.pack('<i', srid) header += srid_header byte_fmt += b'd' * num_dims return header, byte_fmt, byte_order
[ "def", "_header_bytefmt_byteorder", "(", "geom_type", ",", "num_dims", ",", "big_endian", ",", "meta", "=", "None", ")", ":", "dim", "=", "_INT_TO_DIM_LABEL", ".", "get", "(", "num_dims", ")", "if", "dim", "is", "None", ":", "pass", "# TODO: raise", "type_byte_str", "=", "_WKB", "[", "dim", "]", "[", "geom_type", "]", "srid", "=", "meta", ".", "get", "(", "'srid'", ")", "if", "srid", "is", "not", "None", ":", "# Add the srid flag", "type_byte_str", "=", "SRID_FLAG", "+", "type_byte_str", "[", "1", ":", "]", "if", "big_endian", ":", "header", "=", "BIG_ENDIAN", "byte_fmt", "=", "b'>'", "byte_order", "=", "'>'", "else", ":", "header", "=", "LITTLE_ENDIAN", "byte_fmt", "=", "b'<'", "byte_order", "=", "'<'", "# reverse the byte ordering for little endian", "type_byte_str", "=", "type_byte_str", "[", ":", ":", "-", "1", "]", "header", "+=", "type_byte_str", "if", "srid", "is", "not", "None", ":", "srid", "=", "int", "(", "srid", ")", "if", "big_endian", ":", "srid_header", "=", "struct", ".", "pack", "(", "'>i'", ",", "srid", ")", "else", ":", "srid_header", "=", "struct", ".", "pack", "(", "'<i'", ",", "srid", ")", "header", "+=", "srid_header", "byte_fmt", "+=", "b'd'", "*", "num_dims", "return", "header", ",", "byte_fmt", ",", "byte_order" ]
Utility function to get the WKB header (endian byte + type header), byte format string, and byte order string.
[ "Utility", "function", "to", "get", "the", "WKB", "header", "(", "endian", "byte", "+", "type", "header", ")", "byte", "format", "string", "and", "byte", "order", "string", "." ]
train
https://github.com/geomet/geomet/blob/b82d7118113ab723751eba3de5df98c368423c2b/geomet/wkb.py#L339-L376
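A tiny illustration of the per-vertex byte-format string this helper assembles; num_dims and the coordinate values are arbitrary:

    import struct

    num_dims = 3
    byte_fmt = b'>' + b'd' * num_dims             # big-endian, one double per dimension
    packed = struct.pack(byte_fmt, 1.0, 2.0, 3.0)
    print(len(packed))                            # 24 bytes: 8 bytes per double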