Dataset columns:

  id                int32   (0 to 252k)
  repo              string  (7 to 55 chars)
  path              string  (4 to 127 chars)
  func_name         string  (1 to 88 chars)
  original_string   string  (75 to 19.8k chars)
  language          string  (1 distinct value)
  code              string  (75 to 19.8k chars)
  code_tokens       list
  docstring         string  (3 to 17.3k chars)
  docstring_tokens  list
  sha               string  (40 chars)
  url               string  (87 to 242 chars)
23,100
allenai/allennlp
allennlp/commands/elmo.py
ElmoEmbedder.embed_sentence
python
def embed_sentence(self, sentence: List[str]) -> numpy.ndarray:
    """
    Computes the ELMo embeddings for a single tokenized sentence.

    Please note that ELMo has internal state and will give different results for the same input.
    See the comment under the class definition.

    Parameters
    ----------
    sentence : ``List[str]``, required
        A tokenized sentence.

    Returns
    -------
    A tensor containing the ELMo vectors.
    """
    return self.embed_batch([sentence])[0]
[ "def", "embed_sentence", "(", "self", ",", "sentence", ":", "List", "[", "str", "]", ")", "->", "numpy", ".", "ndarray", ":", "return", "self", ".", "embed_batch", "(", "[", "sentence", "]", ")", "[", "0", "]" ]
Computes the ELMo embeddings for a single tokenized sentence.

Please note that ELMo has internal state and will give different results for the same input.
See the comment under the class definition.

Parameters
----------
sentence : ``List[str]``, required
    A tokenized sentence.

Returns
-------
A tensor containing the ELMo vectors.
[ "Computes", "the", "ELMo", "embeddings", "for", "a", "single", "tokenized", "sentence", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/commands/elmo.py#L203-L220
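A minimal usage sketch for the method above; it assumes the allennlp package (0.x series) is installed and that ElmoEmbedder() can fetch the default ELMo options and weights on first use.

# Hedged usage sketch for ElmoEmbedder.embed_sentence (assumes allennlp 0.x
# and network access for the default ELMo weight files).
from allennlp.commands.elmo import ElmoEmbedder

elmo = ElmoEmbedder()  # loads the default options and weight files
vectors = elmo.embed_sentence(["I", "ate", "an", "apple", "."])
# For the default model this is 3 layers x 5 tokens x 1024 dimensions.
print(vectors.shape)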
23,101
allenai/allennlp
allennlp/commands/elmo.py
ElmoEmbedder.embed_batch
python
def embed_batch(self, batch: List[List[str]]) -> List[numpy.ndarray]:
    """
    Computes the ELMo embeddings for a batch of tokenized sentences.

    Please note that ELMo has internal state and will give different results for the same input.
    See the comment under the class definition.

    Parameters
    ----------
    batch : ``List[List[str]]``, required
        A list of tokenized sentences.

    Returns
    -------
    A list of tensors, each representing the ELMo vectors for the input sentence at the same index.
    """
    elmo_embeddings = []

    # Batches with only an empty sentence will throw an exception inside AllenNLP, so we handle this case
    # and return an empty embedding instead.
    if batch == [[]]:
        elmo_embeddings.append(empty_embedding())
    else:
        embeddings, mask = self.batch_to_embeddings(batch)
        for i in range(len(batch)):
            length = int(mask[i, :].sum())

            # Slicing the embedding :0 throws an exception so we need to special case for empty sentences.
            if length == 0:
                elmo_embeddings.append(empty_embedding())
            else:
                elmo_embeddings.append(embeddings[i, :, :length, :].detach().cpu().numpy())

    return elmo_embeddings
[ "def", "embed_batch", "(", "self", ",", "batch", ":", "List", "[", "List", "[", "str", "]", "]", ")", "->", "List", "[", "numpy", ".", "ndarray", "]", ":", "elmo_embeddings", "=", "[", "]", "# Batches with only an empty sentence will throw an exception inside AllenNLP, so we handle this case", "# and return an empty embedding instead.", "if", "batch", "==", "[", "[", "]", "]", ":", "elmo_embeddings", ".", "append", "(", "empty_embedding", "(", ")", ")", "else", ":", "embeddings", ",", "mask", "=", "self", ".", "batch_to_embeddings", "(", "batch", ")", "for", "i", "in", "range", "(", "len", "(", "batch", ")", ")", ":", "length", "=", "int", "(", "mask", "[", "i", ",", ":", "]", ".", "sum", "(", ")", ")", "# Slicing the embedding :0 throws an exception so we need to special case for empty sentences.", "if", "length", "==", "0", ":", "elmo_embeddings", ".", "append", "(", "empty_embedding", "(", ")", ")", "else", ":", "elmo_embeddings", ".", "append", "(", "embeddings", "[", "i", ",", ":", ",", ":", "length", ",", ":", "]", ".", "detach", "(", ")", ".", "cpu", "(", ")", ".", "numpy", "(", ")", ")", "return", "elmo_embeddings" ]
Computes the ELMo embeddings for a batch of tokenized sentences.

Please note that ELMo has internal state and will give different results for the same input.
See the comment under the class definition.

Parameters
----------
batch : ``List[List[str]]``, required
    A list of tokenized sentences.

Returns
-------
A list of tensors, each representing the ELMo vectors for the input sentence at the same index.
[ "Computes", "the", "ELMo", "embeddings", "for", "a", "batch", "of", "tokenized", "sentences", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/commands/elmo.py#L222-L254
23,102
allenai/allennlp
allennlp/commands/elmo.py
ElmoEmbedder.embed_sentences
python
def embed_sentences(self,
                    sentences: Iterable[List[str]],
                    batch_size: int = DEFAULT_BATCH_SIZE) -> Iterable[numpy.ndarray]:
    """
    Computes the ELMo embeddings for an iterable of sentences.

    Please note that ELMo has internal state and will give different results for the same input.
    See the comment under the class definition.

    Parameters
    ----------
    sentences : ``Iterable[List[str]]``, required
        An iterable of tokenized sentences.
    batch_size : ``int``, required
        The number of sentences ELMo should process at once.

    Returns
    -------
    A list of tensors, each representing the ELMo vectors for the input sentence at the same index.
    """
    for batch in lazy_groups_of(iter(sentences), batch_size):
        yield from self.embed_batch(batch)
[ "def", "embed_sentences", "(", "self", ",", "sentences", ":", "Iterable", "[", "List", "[", "str", "]", "]", ",", "batch_size", ":", "int", "=", "DEFAULT_BATCH_SIZE", ")", "->", "Iterable", "[", "numpy", ".", "ndarray", "]", ":", "for", "batch", "in", "lazy_groups_of", "(", "iter", "(", "sentences", ")", ",", "batch_size", ")", ":", "yield", "from", "self", ".", "embed_batch", "(", "batch", ")" ]
Computes the ELMo embeddings for an iterable of sentences.

Please note that ELMo has internal state and will give different results for the same input.
See the comment under the class definition.

Parameters
----------
sentences : ``Iterable[List[str]]``, required
    An iterable of tokenized sentences.
batch_size : ``int``, required
    The number of sentences ELMo should process at once.

Returns
-------
A list of tensors, each representing the ELMo vectors for the input sentence at the same index.
[ "Computes", "the", "ELMo", "embeddings", "for", "a", "iterable", "of", "sentences", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/commands/elmo.py#L256-L277
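Because this method is a generator, sentences can be streamed through it in batches. A sketch under the same assumptions as the previous example:

# Hedged sketch of streaming sentences through embed_sentences (assumes
# allennlp 0.x and the default ElmoEmbedder weights).
from allennlp.commands.elmo import ElmoEmbedder

elmo = ElmoEmbedder()
sentences = [["First", "sentence", "."], ["Another", "."], ["Third", "one", "."]]
for embedding in elmo.embed_sentences(sentences, batch_size=2):
    print(embedding.shape)  # one (layers, tokens, dims) array per sentence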
23,103
allenai/allennlp
allennlp/commands/elmo.py
ElmoEmbedder.embed_file
python
def embed_file(self,
               input_file: IO,
               output_file_path: str,
               output_format: str = "all",
               batch_size: int = DEFAULT_BATCH_SIZE,
               forget_sentences: bool = False,
               use_sentence_keys: bool = False) -> None:
    """
    Computes ELMo embeddings from an input_file where each line contains a sentence
    tokenized by whitespace. The ELMo embeddings are written out in HDF5 format, where
    each sentence embedding is saved in a dataset with the line number in the original
    file as the key.

    Parameters
    ----------
    input_file : ``IO``, required
        A file with one tokenized sentence per line.
    output_file_path : ``str``, required
        A path to the output hdf5 file.
    output_format : ``str``, optional, (default = "all")
        The embeddings to output.  Must be one of "all", "top", or "average".
    batch_size : ``int``, optional, (default = 64)
        The number of sentences to process in ELMo at one time.
    forget_sentences : ``bool``, optional, (default = False).
        If use_sentence_keys is False, whether or not to include a string
        serialized JSON dictionary that associates sentences with their
        line number (its HDF5 key). The mapping is placed in the
        "sentence_to_index" HDF5 key. This is useful if you want to use the
        embeddings without keeping the original file of sentences around.
    use_sentence_keys : ``bool``, optional, (default = False).
        Whether or not to use full sentences as keys. By default, the line
        numbers of the input file are used as ids, which is more robust.
    """
    assert output_format in ["all", "top", "average"]

    # Tokenizes the sentences.
    sentences = [line.strip() for line in input_file]

    blank_lines = [i for (i, line) in enumerate(sentences) if line == ""]
    if blank_lines:
        raise ConfigurationError(f"Your input file contains empty lines at indexes "
                                 f"{blank_lines}. Please remove them.")
    split_sentences = [sentence.split() for sentence in sentences]

    # Uses the sentence index as the key.
    if use_sentence_keys:
        logger.warning("Using sentences as keys can fail if sentences "
                       "contain forward slashes or colons. Use with caution.")
        embedded_sentences = zip(sentences, self.embed_sentences(split_sentences, batch_size))
    else:
        embedded_sentences = ((str(i), x) for i, x in
                              enumerate(self.embed_sentences(split_sentences, batch_size)))

    sentence_to_index = {}
    logger.info("Processing sentences.")
    with h5py.File(output_file_path, 'w') as fout:
        for key, embeddings in Tqdm.tqdm(embedded_sentences):
            if use_sentence_keys and key in fout.keys():
                raise ConfigurationError(f"Key already exists in {output_file_path}. "
                                         f"To encode duplicate sentences, do not pass "
                                         f"the --use-sentence-keys flag.")

            if not forget_sentences and not use_sentence_keys:
                sentence = sentences[int(key)]
                sentence_to_index[sentence] = key

            if output_format == "all":
                output = embeddings
            elif output_format == "top":
                output = embeddings[-1]
            elif output_format == "average":
                output = numpy.average(embeddings, axis=0)

            fout.create_dataset(
                    str(key),
                    output.shape, dtype='float32',
                    data=output
            )
        if not forget_sentences and not use_sentence_keys:
            sentence_index_dataset = fout.create_dataset(
                    "sentence_to_index",
                    (1,),
                    dtype=h5py.special_dtype(vlen=str))
            sentence_index_dataset[0] = json.dumps(sentence_to_index)

    input_file.close()
[ "def", "embed_file", "(", "self", ",", "input_file", ":", "IO", ",", "output_file_path", ":", "str", ",", "output_format", ":", "str", "=", "\"all\"", ",", "batch_size", ":", "int", "=", "DEFAULT_BATCH_SIZE", ",", "forget_sentences", ":", "bool", "=", "False", ",", "use_sentence_keys", ":", "bool", "=", "False", ")", "->", "None", ":", "assert", "output_format", "in", "[", "\"all\"", ",", "\"top\"", ",", "\"average\"", "]", "# Tokenizes the sentences.", "sentences", "=", "[", "line", ".", "strip", "(", ")", "for", "line", "in", "input_file", "]", "blank_lines", "=", "[", "i", "for", "(", "i", ",", "line", ")", "in", "enumerate", "(", "sentences", ")", "if", "line", "==", "\"\"", "]", "if", "blank_lines", ":", "raise", "ConfigurationError", "(", "f\"Your input file contains empty lines at indexes \"", "f\"{blank_lines}. Please remove them.\"", ")", "split_sentences", "=", "[", "sentence", ".", "split", "(", ")", "for", "sentence", "in", "sentences", "]", "# Uses the sentence index as the key.", "if", "use_sentence_keys", ":", "logger", ".", "warning", "(", "\"Using sentences as keys can fail if sentences \"", "\"contain forward slashes or colons. Use with caution.\"", ")", "embedded_sentences", "=", "zip", "(", "sentences", ",", "self", ".", "embed_sentences", "(", "split_sentences", ",", "batch_size", ")", ")", "else", ":", "embedded_sentences", "=", "(", "(", "str", "(", "i", ")", ",", "x", ")", "for", "i", ",", "x", "in", "enumerate", "(", "self", ".", "embed_sentences", "(", "split_sentences", ",", "batch_size", ")", ")", ")", "sentence_to_index", "=", "{", "}", "logger", ".", "info", "(", "\"Processing sentences.\"", ")", "with", "h5py", ".", "File", "(", "output_file_path", ",", "'w'", ")", "as", "fout", ":", "for", "key", ",", "embeddings", "in", "Tqdm", ".", "tqdm", "(", "embedded_sentences", ")", ":", "if", "use_sentence_keys", "and", "key", "in", "fout", ".", "keys", "(", ")", ":", "raise", "ConfigurationError", "(", "f\"Key already exists in {output_file_path}. \"", "f\"To encode duplicate sentences, do not pass \"", "f\"the --use-sentence-keys flag.\"", ")", "if", "not", "forget_sentences", "and", "not", "use_sentence_keys", ":", "sentence", "=", "sentences", "[", "int", "(", "key", ")", "]", "sentence_to_index", "[", "sentence", "]", "=", "key", "if", "output_format", "==", "\"all\"", ":", "output", "=", "embeddings", "elif", "output_format", "==", "\"top\"", ":", "output", "=", "embeddings", "[", "-", "1", "]", "elif", "output_format", "==", "\"average\"", ":", "output", "=", "numpy", ".", "average", "(", "embeddings", ",", "axis", "=", "0", ")", "fout", ".", "create_dataset", "(", "str", "(", "key", ")", ",", "output", ".", "shape", ",", "dtype", "=", "'float32'", ",", "data", "=", "output", ")", "if", "not", "forget_sentences", "and", "not", "use_sentence_keys", ":", "sentence_index_dataset", "=", "fout", ".", "create_dataset", "(", "\"sentence_to_index\"", ",", "(", "1", ",", ")", ",", "dtype", "=", "h5py", ".", "special_dtype", "(", "vlen", "=", "str", ")", ")", "sentence_index_dataset", "[", "0", "]", "=", "json", ".", "dumps", "(", "sentence_to_index", ")", "input_file", ".", "close", "(", ")" ]
Computes ELMo embeddings from an input_file where each line contains a sentence
tokenized by whitespace. The ELMo embeddings are written out in HDF5 format, where
each sentence embedding is saved in a dataset with the line number in the original
file as the key.

Parameters
----------
input_file : ``IO``, required
    A file with one tokenized sentence per line.
output_file_path : ``str``, required
    A path to the output hdf5 file.
output_format : ``str``, optional, (default = "all")
    The embeddings to output. Must be one of "all", "top", or "average".
batch_size : ``int``, optional, (default = 64)
    The number of sentences to process in ELMo at one time.
forget_sentences : ``bool``, optional, (default = False).
    If use_sentence_keys is False, whether or not to include a string
    serialized JSON dictionary that associates sentences with their
    line number (its HDF5 key). The mapping is placed in the
    "sentence_to_index" HDF5 key. This is useful if you want to use the
    embeddings without keeping the original file of sentences around.
use_sentence_keys : ``bool``, optional, (default = False).
    Whether or not to use full sentences as keys. By default, the line
    numbers of the input file are used as ids, which is more robust.
[ "Computes", "ELMo", "embeddings", "from", "an", "input_file", "where", "each", "line", "contains", "a", "sentence", "tokenized", "by", "whitespace", ".", "The", "ELMo", "embeddings", "are", "written", "out", "in", "HDF5", "format", "where", "each", "sentence", "embedding", "is", "saved", "in", "a", "dataset", "with", "the", "line", "number", "in", "the", "original", "file", "as", "the", "key", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/commands/elmo.py#L279-L365
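A sketch of reading back the HDF5 file this method writes, assuming the default line-number keys, output_format="all", and forget_sentences left at False (the filename is hypothetical):

# Hedged sketch: reading the HDF5 output of embed_file.
import json
import h5py

with h5py.File("elmo_embeddings.hdf5", "r") as fin:
    sentence_to_index = json.loads(fin["sentence_to_index"][0])
    for sentence, key in sentence_to_index.items():
        embedding = fin[key][...]  # (layers, tokens, dims) for "all"
        print(key, sentence, embedding.shape)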
23,104
allenai/allennlp
allennlp/data/instance.py
Instance.add_field
python
def add_field(self, field_name: str, field: Field, vocab: Vocabulary = None) -> None:
    """
    Add the field to the existing fields mapping.
    If we have already indexed the Instance, then we also index `field`, so
    it is necessary to supply the vocab.
    """
    self.fields[field_name] = field
    if self.indexed:
        field.index(vocab)
[ "def", "add_field", "(", "self", ",", "field_name", ":", "str", ",", "field", ":", "Field", ",", "vocab", ":", "Vocabulary", "=", "None", ")", "->", "None", ":", "self", ".", "fields", "[", "field_name", "]", "=", "field", "if", "self", ".", "indexed", ":", "field", ".", "index", "(", "vocab", ")" ]
Add the field to the existing fields mapping. If we have already indexed the Instance, then we also index `field`, so it is necessary to supply the vocab.
[ "Add", "the", "field", "to", "the", "existing", "fields", "mapping", ".", "If", "we", "have", "already", "indexed", "the", "Instance", "then", "we", "also", "index", "field", "so", "it", "is", "necessary", "to", "supply", "the", "vocab", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/instance.py#L41-L49
23,105
allenai/allennlp
allennlp/data/instance.py
Instance.count_vocab_items
python
def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):
    """
    Increments counts in the given ``counter`` for all of the vocabulary items in all of the
    ``Fields`` in this ``Instance``.
    """
    for field in self.fields.values():
        field.count_vocab_items(counter)
[ "def", "count_vocab_items", "(", "self", ",", "counter", ":", "Dict", "[", "str", ",", "Dict", "[", "str", ",", "int", "]", "]", ")", ":", "for", "field", "in", "self", ".", "fields", ".", "values", "(", ")", ":", "field", ".", "count_vocab_items", "(", "counter", ")" ]
Increments counts in the given ``counter`` for all of the vocabulary items in all of the ``Fields`` in this ``Instance``.
[ "Increments", "counts", "in", "the", "given", "counter", "for", "all", "of", "the", "vocabulary", "items", "in", "all", "of", "the", "Fields", "in", "this", "Instance", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/instance.py#L51-L57
23,106
allenai/allennlp
allennlp/data/instance.py
Instance.index_fields
python
def index_fields(self, vocab: Vocabulary) -> None:
    """
    Indexes all fields in this ``Instance`` using the provided ``Vocabulary``.
    This `mutates` the current object, it does not return a new ``Instance``.

    A ``DataIterator`` will call this on each pass through a dataset; we use the ``indexed``
    flag to make sure that indexing only happens once.

    This means that if for some reason you modify your vocabulary after you've
    indexed your instances, you might get unexpected behavior.
    """
    if not self.indexed:
        self.indexed = True
        for field in self.fields.values():
            field.index(vocab)
[ "def", "index_fields", "(", "self", ",", "vocab", ":", "Vocabulary", ")", "->", "None", ":", "if", "not", "self", ".", "indexed", ":", "self", ".", "indexed", "=", "True", "for", "field", "in", "self", ".", "fields", ".", "values", "(", ")", ":", "field", ".", "index", "(", "vocab", ")" ]
Indexes all fields in this ``Instance`` using the provided ``Vocabulary``. This `mutates` the current object, it does not return a new ``Instance``. A ``DataIterator`` will call this on each pass through a dataset; we use the ``indexed`` flag to make sure that indexing only happens once. This means that if for some reason you modify your vocabulary after you've indexed your instances, you might get unexpected behavior.
[ "Indexes", "all", "fields", "in", "this", "Instance", "using", "the", "provided", "Vocabulary", ".", "This", "mutates", "the", "current", "object", "it", "does", "not", "return", "a", "new", "Instance", ".", "A", "DataIterator", "will", "call", "this", "on", "each", "pass", "through", "a", "dataset", ";", "we", "use", "the", "indexed", "flag", "to", "make", "sure", "that", "indexing", "only", "happens", "once", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/instance.py#L59-L72
23,107
allenai/allennlp
allennlp/data/instance.py
Instance.get_padding_lengths
python
def get_padding_lengths(self) -> Dict[str, Dict[str, int]]:
    """
    Returns a dictionary of padding lengths, keyed by field name.  Each ``Field`` returns a
    mapping from padding keys to actual lengths, and we just key that dictionary by field name.
    """
    lengths = {}
    for field_name, field in self.fields.items():
        lengths[field_name] = field.get_padding_lengths()
    return lengths
[ "def", "get_padding_lengths", "(", "self", ")", "->", "Dict", "[", "str", ",", "Dict", "[", "str", ",", "int", "]", "]", ":", "lengths", "=", "{", "}", "for", "field_name", ",", "field", "in", "self", ".", "fields", ".", "items", "(", ")", ":", "lengths", "[", "field_name", "]", "=", "field", ".", "get_padding_lengths", "(", ")", "return", "lengths" ]
Returns a dictionary of padding lengths, keyed by field name. Each ``Field`` returns a mapping from padding keys to actual lengths, and we just key that dictionary by field name.
[ "Returns", "a", "dictionary", "of", "padding", "lengths", "keyed", "by", "field", "name", ".", "Each", "Field", "returns", "a", "mapping", "from", "padding", "keys", "to", "actual", "lengths", "and", "we", "just", "key", "that", "dictionary", "by", "field", "name", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/instance.py#L74-L82
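A toy walk-through of the Instance lifecycle covered by the four methods above (count_vocab_items, index_fields, add_field, get_padding_lengths). LabelField keeps the example self-contained; allennlp 0.x import paths are assumed.

# Hedged sketch of the Instance lifecycle (assumes allennlp 0.x).
from collections import defaultdict

from allennlp.data import Instance, Vocabulary
from allennlp.data.fields import LabelField

instance = Instance({"label": LabelField("positive")})

counter = defaultdict(lambda: defaultdict(int))
instance.count_vocab_items(counter)   # counts {"labels": {"positive": 1}}

vocab = Vocabulary(counter)
instance.index_fields(vocab)          # indexes once; the `indexed` flag guards repeats

# Because the instance is already indexed, add_field indexes the new field
# immediately (the label must already be in the vocabulary).
instance.add_field("label2", LabelField("positive"), vocab)
print(instance.get_padding_lengths())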
23,108
allenai/allennlp
allennlp/common/configuration.py
_docspec_comments
python
def _docspec_comments(obj) -> Dict[str, str]:
    """
    Inspect the docstring and get the comments for each parameter.
    """
    # Sometimes our docstring is on the class, and sometimes it's on the initializer,
    # so we've got to check both.
    class_docstring = getattr(obj, '__doc__', None)
    init_docstring = getattr(obj.__init__, '__doc__', None) if hasattr(obj, '__init__') else None

    docstring = class_docstring or init_docstring or ''

    doc = NumpyDocString(docstring)
    params = doc["Parameters"]
    comments: Dict[str, str] = {}

    for line in params:
        # It looks like when there's not a space after the parameter name,
        # numpydocstring parses it incorrectly.
        name_bad = line[0]
        name = name_bad.split(":")[0]

        # Sometimes the line has 3 fields, sometimes it has 4 fields.
        comment = "\n".join(line[-1])

        comments[name] = comment

    return comments
[ "def", "_docspec_comments", "(", "obj", ")", "->", "Dict", "[", "str", ",", "str", "]", ":", "# Sometimes our docstring is on the class, and sometimes it's on the initializer,", "# so we've got to check both.", "class_docstring", "=", "getattr", "(", "obj", ",", "'__doc__'", ",", "None", ")", "init_docstring", "=", "getattr", "(", "obj", ".", "__init__", ",", "'__doc__'", ",", "None", ")", "if", "hasattr", "(", "obj", ",", "'__init__'", ")", "else", "None", "docstring", "=", "class_docstring", "or", "init_docstring", "or", "''", "doc", "=", "NumpyDocString", "(", "docstring", ")", "params", "=", "doc", "[", "\"Parameters\"", "]", "comments", ":", "Dict", "[", "str", ",", "str", "]", "=", "{", "}", "for", "line", "in", "params", ":", "# It looks like when there's not a space after the parameter name,", "# numpydocstring parses it incorrectly.", "name_bad", "=", "line", "[", "0", "]", "name", "=", "name_bad", ".", "split", "(", "\":\"", ")", "[", "0", "]", "# Sometimes the line has 3 fields, sometimes it has 4 fields.", "comment", "=", "\"\\n\"", ".", "join", "(", "line", "[", "-", "1", "]", ")", "comments", "[", "name", "]", "=", "comment", "return", "comments" ]
Inspect the docstring and get the comments for each parameter.
[ "Inspect", "the", "docstring", "and", "get", "the", "comments", "for", "each", "parameter", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/configuration.py#L195-L221
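A hedged demo of what the helper extracts from a numpydoc-style docstring; it assumes allennlp 0.x (where this private helper lives in common.configuration) and the numpydoc package, and the Example class is made up for illustration:

# Hypothetical demo of _docspec_comments on a numpydoc-style docstring.
from allennlp.common.configuration import _docspec_comments

class Example:
    """
    Parameters
    ----------
    size : ``int``
        How many widgets to make.
    name : ``str``, optional
        A human-readable label.
    """

print(_docspec_comments(Example))
# expected: {'size': 'How many widgets to make.', 'name': 'A human-readable label.'}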
23,109
allenai/allennlp
allennlp/common/configuration.py
render_config
python
def render_config(config: Config, indent: str = "") -> str:
    """
    Pretty-print a config in sort-of-JSON+comments.
    """
    # Add four spaces to the indent.
    new_indent = indent + "    "

    return "".join([
            # opening brace + newline
            "{\n",
            # "type": "...", (if present)
            f'{new_indent}"type": "{config.typ3}",\n' if config.typ3 else '',
            # render each item
            "".join(_render(item, new_indent) for item in config.items),
            # indent and close the brace
            indent,
            "}\n"
    ])
[ "def", "render_config", "(", "config", ":", "Config", ",", "indent", ":", "str", "=", "\"\"", ")", "->", "str", ":", "# Add four spaces to the indent.", "new_indent", "=", "indent", "+", "\" \"", "return", "\"\"", ".", "join", "(", "[", "# opening brace + newline", "\"{\\n\"", ",", "# \"type\": \"...\", (if present)", "f'{new_indent}\"type\": \"{config.typ3}\",\\n'", "if", "config", ".", "typ3", "else", "''", ",", "# render each item", "\"\"", ".", "join", "(", "_render", "(", "item", ",", "new_indent", ")", "for", "item", "in", "config", ".", "items", ")", ",", "# indent and close the brace", "indent", ",", "\"}\\n\"", "]", ")" ]
Pretty-print a config in sort-of-JSON+comments.
[ "Pretty", "-", "print", "a", "config", "in", "sort", "-", "of", "-", "JSON", "+", "comments", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/configuration.py#L298-L315
23,110
allenai/allennlp
allennlp/common/configuration.py
_render
python
def _render(item: ConfigItem, indent: str = "") -> str:
    """
    Render a single config item, with the provided indent
    """
    optional = item.default_value != _NO_DEFAULT

    if is_configurable(item.annotation):
        rendered_annotation = f"{item.annotation} (configurable)"
    else:
        rendered_annotation = str(item.annotation)

    rendered_item = "".join([
            # rendered_comment,
            indent,
            "// " if optional else "",
            f'"{item.name}": ',
            rendered_annotation,
            f" (default: {item.default_value} )" if optional else "",
            f" // {item.comment}" if item.comment else "",
            "\n"
    ])

    return rendered_item
[ "def", "_render", "(", "item", ":", "ConfigItem", ",", "indent", ":", "str", "=", "\"\"", ")", "->", "str", ":", "optional", "=", "item", ".", "default_value", "!=", "_NO_DEFAULT", "if", "is_configurable", "(", "item", ".", "annotation", ")", ":", "rendered_annotation", "=", "f\"{item.annotation} (configurable)\"", "else", ":", "rendered_annotation", "=", "str", "(", "item", ".", "annotation", ")", "rendered_item", "=", "\"\"", ".", "join", "(", "[", "# rendered_comment,", "indent", ",", "\"// \"", "if", "optional", "else", "\"\"", ",", "f'\"{item.name}\": '", ",", "rendered_annotation", ",", "f\" (default: {item.default_value} )\"", "if", "optional", "else", "\"\"", ",", "f\" // {item.comment}\"", "if", "item", ".", "comment", "else", "\"\"", ",", "\"\\n\"", "]", ")", "return", "rendered_item" ]
Render a single config item, with the provided indent
[ "Render", "a", "single", "config", "item", "with", "the", "provided", "indent" ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/configuration.py#L355-L377
23,111
allenai/allennlp
allennlp/common/file_utils.py
url_to_filename
python
def url_to_filename(url: str, etag: str = None) -> str:
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the url's, delimited
    by a period.
    """
    url_bytes = url.encode('utf-8')
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode('utf-8')
        etag_hash = sha256(etag_bytes)
        filename += '.' + etag_hash.hexdigest()

    return filename
[ "def", "url_to_filename", "(", "url", ":", "str", ",", "etag", ":", "str", "=", "None", ")", "->", "str", ":", "url_bytes", "=", "url", ".", "encode", "(", "'utf-8'", ")", "url_hash", "=", "sha256", "(", "url_bytes", ")", "filename", "=", "url_hash", ".", "hexdigest", "(", ")", "if", "etag", ":", "etag_bytes", "=", "etag", ".", "encode", "(", "'utf-8'", ")", "etag_hash", "=", "sha256", "(", "etag_bytes", ")", "filename", "+=", "'.'", "+", "etag_hash", ".", "hexdigest", "(", ")", "return", "filename" ]
Convert `url` into a hashed filename in a repeatable way. If `etag` is specified, append its hash to the url's, delimited by a period.
[ "Convert", "url", "into", "a", "hashed", "filename", "in", "a", "repeatable", "way", ".", "If", "etag", "is", "specified", "append", "its", "hash", "to", "the", "url", "s", "delimited", "by", "a", "period", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/file_utils.py#L39-L54
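A minimal standard-library sketch of the hashing scheme above; the URL and ETag values are made up:

# Same url + etag always map to the same cache filename.
from hashlib import sha256

url = "https://example.com/model.tar.gz"
etag = "0x8D2"  # hypothetical ETag value
filename = sha256(url.encode("utf-8")).hexdigest()
filename += "." + sha256(etag.encode("utf-8")).hexdigest()
print(filename)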
23,112
allenai/allennlp
allennlp/common/file_utils.py
split_s3_path
python
def split_s3_path(url: str) -> Tuple[str, str]:
    """Split a full s3 path into the bucket name and path."""
    parsed = urlparse(url)
    if not parsed.netloc or not parsed.path:
        raise ValueError("bad s3 path {}".format(url))
    bucket_name = parsed.netloc
    s3_path = parsed.path
    # Remove '/' at beginning of path.
    if s3_path.startswith("/"):
        s3_path = s3_path[1:]
    return bucket_name, s3_path
[ "def", "split_s3_path", "(", "url", ":", "str", ")", "->", "Tuple", "[", "str", ",", "str", "]", ":", "parsed", "=", "urlparse", "(", "url", ")", "if", "not", "parsed", ".", "netloc", "or", "not", "parsed", ".", "path", ":", "raise", "ValueError", "(", "\"bad s3 path {}\"", ".", "format", "(", "url", ")", ")", "bucket_name", "=", "parsed", ".", "netloc", "s3_path", "=", "parsed", ".", "path", "# Remove '/' at beginning of path.", "if", "s3_path", ".", "startswith", "(", "\"/\"", ")", ":", "s3_path", "=", "s3_path", "[", "1", ":", "]", "return", "bucket_name", ",", "s3_path" ]
Split a full s3 path into the bucket name and path.
[ "Split", "a", "full", "s3", "path", "into", "the", "bucket", "name", "and", "path", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/file_utils.py#L120-L130
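For context, this is what urlparse does with an s3:// URL, which is all the function above relies on (the bucket and key names are made up):

from urllib.parse import urlparse

parsed = urlparse("s3://my-bucket/models/weights.th")
print(parsed.netloc)  # 'my-bucket' -> the bucket name
print(parsed.path)    # '/models/weights.th' -> the key, once the leading '/' is stripped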
23,113
allenai/allennlp
allennlp/common/file_utils.py
s3_request
python
def s3_request(func: Callable):
    """
    Wrapper function for s3 requests in order to create more helpful error
    messages.
    """

    @wraps(func)
    def wrapper(url: str, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            if int(exc.response["Error"]["Code"]) == 404:
                raise FileNotFoundError("file {} not found".format(url))
            else:
                raise

    return wrapper
[ "def", "s3_request", "(", "func", ":", "Callable", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "url", ":", "str", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "func", "(", "url", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "ClientError", "as", "exc", ":", "if", "int", "(", "exc", ".", "response", "[", "\"Error\"", "]", "[", "\"Code\"", "]", ")", "==", "404", ":", "raise", "FileNotFoundError", "(", "\"file {} not found\"", ".", "format", "(", "url", ")", ")", "else", ":", "raise", "return", "wrapper" ]
Wrapper function for s3 requests in order to create more helpful error messages.
[ "Wrapper", "function", "for", "s3", "requests", "in", "order", "to", "create", "more", "helpful", "error", "messages", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/file_utils.py#L133-L149
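A self-contained demo of the decorator's behavior; FakeClientError stands in for botocore.exceptions.ClientError so the sketch runs without boto3 installed:

from functools import wraps

class FakeClientError(Exception):
    """Stand-in for botocore.exceptions.ClientError (assumption for this demo)."""
    def __init__(self, response):
        super().__init__(response)
        self.response = response

def s3_request(func):
    @wraps(func)
    def wrapper(url, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except FakeClientError as exc:
            # A 404 from S3 becomes a friendlier FileNotFoundError.
            if int(exc.response["Error"]["Code"]) == 404:
                raise FileNotFoundError("file {} not found".format(url))
            raise
    return wrapper

@s3_request
def fetch(url):
    raise FakeClientError({"Error": {"Code": "404"}})

try:
    fetch("s3://bucket/missing-key")
except FileNotFoundError as err:
    print(err)  # file s3://bucket/missing-key not found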
23,114
allenai/allennlp
allennlp/common/file_utils.py
s3_etag
python
def s3_etag(url: str) -> Optional[str]:
    """Check ETag on S3 object."""
    s3_resource = boto3.resource("s3")
    bucket_name, s3_path = split_s3_path(url)
    s3_object = s3_resource.Object(bucket_name, s3_path)
    return s3_object.e_tag
[ "def", "s3_etag", "(", "url", ":", "str", ")", "->", "Optional", "[", "str", "]", ":", "s3_resource", "=", "boto3", ".", "resource", "(", "\"s3\"", ")", "bucket_name", ",", "s3_path", "=", "split_s3_path", "(", "url", ")", "s3_object", "=", "s3_resource", ".", "Object", "(", "bucket_name", ",", "s3_path", ")", "return", "s3_object", ".", "e_tag" ]
Check ETag on S3 object.
[ "Check", "ETag", "on", "S3", "object", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/file_utils.py#L153-L158
23,115
allenai/allennlp
allennlp/common/file_utils.py
s3_get
python
def s3_get(url: str, temp_file: IO) -> None:
    """Pull a file directly from S3."""
    s3_resource = boto3.resource("s3")
    bucket_name, s3_path = split_s3_path(url)
    s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
[ "def", "s3_get", "(", "url", ":", "str", ",", "temp_file", ":", "IO", ")", "->", "None", ":", "s3_resource", "=", "boto3", ".", "resource", "(", "\"s3\"", ")", "bucket_name", ",", "s3_path", "=", "split_s3_path", "(", "url", ")", "s3_resource", ".", "Bucket", "(", "bucket_name", ")", ".", "download_fileobj", "(", "s3_path", ",", "temp_file", ")" ]
Pull a file directly from S3.
[ "Pull", "a", "file", "directly", "from", "S3", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/file_utils.py#L162-L166
23,116
allenai/allennlp
allennlp/common/file_utils.py
get_from_cache
python
def get_from_cache(url: str, cache_dir: str = None) -> str:
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.
    """
    if cache_dir is None:
        cache_dir = CACHE_DIRECTORY

    os.makedirs(cache_dir, exist_ok=True)

    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url)
    else:
        response = requests.head(url, allow_redirects=True)
        if response.status_code != 200:
            raise IOError("HEAD request failed for url {} with status code {}"
                          .format(url, response.status_code))
        etag = response.headers.get("ETag")

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    if not os.path.exists(cache_path):
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)

            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)

            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)

            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)

            logger.info("creating metadata file for %s", cache_path)
            meta = {'url': url, 'etag': etag}
            meta_path = cache_path + '.json'
            with open(meta_path, 'w') as meta_file:
                json.dump(meta, meta_file)

            logger.info("removing temp file %s", temp_file.name)

    return cache_path
[ "def", "get_from_cache", "(", "url", ":", "str", ",", "cache_dir", ":", "str", "=", "None", ")", "->", "str", ":", "if", "cache_dir", "is", "None", ":", "cache_dir", "=", "CACHE_DIRECTORY", "os", ".", "makedirs", "(", "cache_dir", ",", "exist_ok", "=", "True", ")", "# Get eTag to add to filename, if it exists.", "if", "url", ".", "startswith", "(", "\"s3://\"", ")", ":", "etag", "=", "s3_etag", "(", "url", ")", "else", ":", "response", "=", "requests", ".", "head", "(", "url", ",", "allow_redirects", "=", "True", ")", "if", "response", ".", "status_code", "!=", "200", ":", "raise", "IOError", "(", "\"HEAD request failed for url {} with status code {}\"", ".", "format", "(", "url", ",", "response", ".", "status_code", ")", ")", "etag", "=", "response", ".", "headers", ".", "get", "(", "\"ETag\"", ")", "filename", "=", "url_to_filename", "(", "url", ",", "etag", ")", "# get cache path to put the file", "cache_path", "=", "os", ".", "path", ".", "join", "(", "cache_dir", ",", "filename", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "cache_path", ")", ":", "# Download to temporary file, then copy to cache dir once finished.", "# Otherwise you get corrupt cache entries if the download gets interrupted.", "with", "tempfile", ".", "NamedTemporaryFile", "(", ")", "as", "temp_file", ":", "logger", ".", "info", "(", "\"%s not found in cache, downloading to %s\"", ",", "url", ",", "temp_file", ".", "name", ")", "# GET file object", "if", "url", ".", "startswith", "(", "\"s3://\"", ")", ":", "s3_get", "(", "url", ",", "temp_file", ")", "else", ":", "http_get", "(", "url", ",", "temp_file", ")", "# we are copying the file before closing it, so flush to avoid truncation", "temp_file", ".", "flush", "(", ")", "# shutil.copyfileobj() starts at the current position, so go to the start", "temp_file", ".", "seek", "(", "0", ")", "logger", ".", "info", "(", "\"copying %s to cache at %s\"", ",", "temp_file", ".", "name", ",", "cache_path", ")", "with", "open", "(", "cache_path", ",", "'wb'", ")", "as", "cache_file", ":", "shutil", ".", "copyfileobj", "(", "temp_file", ",", "cache_file", ")", "logger", ".", "info", "(", "\"creating metadata file for %s\"", ",", "cache_path", ")", "meta", "=", "{", "'url'", ":", "url", ",", "'etag'", ":", "etag", "}", "meta_path", "=", "cache_path", "+", "'.json'", "with", "open", "(", "meta_path", ",", "'w'", ")", "as", "meta_file", ":", "json", ".", "dump", "(", "meta", ",", "meta_file", ")", "logger", ".", "info", "(", "\"removing temp file %s\"", ",", "temp_file", ".", "name", ")", "return", "cache_path" ]
Given a URL, look for the corresponding dataset in the local cache. If it's not there, download it. Then return the path to the cached file.
[ "Given", "a", "URL", "look", "for", "the", "corresponding", "dataset", "in", "the", "local", "cache", ".", "If", "it", "s", "not", "there", "download", "it", ".", "Then", "return", "the", "path", "to", "the", "cached", "file", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/file_utils.py#L182-L236
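In this module the function above is normally reached through the public cached_path wrapper, which also accepts plain local paths. A usage sketch (the URL is made up):

# Hedged usage sketch; assumes allennlp 0.x is installed.
from allennlp.common.file_utils import cached_path

local_path = cached_path("https://example.com/some-dataset.txt")
print(local_path)  # a hashed filename inside the cache directory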
23,117
allenai/allennlp
allennlp/data/tokenizers/sentence_splitter.py
SentenceSplitter.batch_split_sentences
python
def batch_split_sentences(self, texts: List[str]) -> List[List[str]]:
    """
    This method lets you take advantage of spacy's batch processing.
    Default implementation is to just iterate over the texts and call ``split_sentences``.
    """
    return [self.split_sentences(text) for text in texts]
[ "def", "batch_split_sentences", "(", "self", ",", "texts", ":", "List", "[", "str", "]", ")", "->", "List", "[", "List", "[", "str", "]", "]", ":", "return", "[", "self", ".", "split_sentences", "(", "text", ")", "for", "text", "in", "texts", "]" ]
This method lets you take advantage of spacy's batch processing. Default implementation is to just iterate over the texts and call ``split_sentences``.
[ "This", "method", "lets", "you", "take", "advantage", "of", "spacy", "s", "batch", "processing", ".", "Default", "implementation", "is", "to", "just", "iterate", "over", "the", "texts", "and", "call", "split_sentences", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/tokenizers/sentence_splitter.py#L22-L27
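A usage sketch with the spacy-backed implementation of this interface from the same module; it assumes a spacy English model is available locally:

# Hedged sketch; assumes allennlp 0.x and a downloaded spacy English model.
from allennlp.data.tokenizers.sentence_splitter import SpacySentenceSplitter

splitter = SpacySentenceSplitter()
print(splitter.batch_split_sentences(["First. Second.", "Only one here."]))
# expected: [['First.', 'Second.'], ['Only one here.']]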
23,118
allenai/allennlp
allennlp/data/dataset_readers/dataset_utils/ontonotes.py
Ontonotes.dataset_iterator
python
def dataset_iterator(self, file_path: str) -> Iterator[OntonotesSentence]:
    """
    An iterator over the entire dataset, yielding all sentences processed.
    """
    for conll_file in self.dataset_path_iterator(file_path):
        yield from self.sentence_iterator(conll_file)
[ "def", "dataset_iterator", "(", "self", ",", "file_path", ":", "str", ")", "->", "Iterator", "[", "OntonotesSentence", "]", ":", "for", "conll_file", "in", "self", ".", "dataset_path_iterator", "(", "file_path", ")", ":", "yield", "from", "self", ".", "sentence_iterator", "(", "conll_file", ")" ]
An iterator over the entire dataset, yielding all sentences processed.
[ "An", "iterator", "over", "the", "entire", "dataset", "yielding", "all", "sentences", "processed", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/dataset_utils/ontonotes.py#L176-L181
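A usage sketch for the Ontonotes reader; it assumes a local copy of the preprocessed CoNLL-2012 data at the hypothetical path below:

# Hedged sketch; assumes allennlp 0.x and preprocessed CoNLL-2012 data.
from allennlp.data.dataset_readers.dataset_utils import Ontonotes

ontonotes = Ontonotes()
for sentence in ontonotes.dataset_iterator("/data/conll-2012/v4/data/train"):
    print(sentence.words)
    break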
23,119
allenai/allennlp
allennlp/data/dataset_readers/dataset_utils/ontonotes.py
Ontonotes.dataset_path_iterator
python
def dataset_path_iterator(file_path: str) -> Iterator[str]:
    """
    An iterator returning file_paths in a directory
    containing CONLL-formatted files.
    """
    logger.info("Reading CONLL sentences from dataset files at: %s", file_path)
    for root, _, files in list(os.walk(file_path)):
        for data_file in files:
            # These are a relic of the dataset pre-processing. Every
            # file will be duplicated - one file called filename.gold_skel
            # and one generated from the preprocessing called filename.gold_conll.
            if not data_file.endswith("gold_conll"):
                continue

            yield os.path.join(root, data_file)
[ "def", "dataset_path_iterator", "(", "file_path", ":", "str", ")", "->", "Iterator", "[", "str", "]", ":", "logger", ".", "info", "(", "\"Reading CONLL sentences from dataset files at: %s\"", ",", "file_path", ")", "for", "root", ",", "_", ",", "files", "in", "list", "(", "os", ".", "walk", "(", "file_path", ")", ")", ":", "for", "data_file", "in", "files", ":", "# These are a relic of the dataset pre-processing. Every", "# file will be duplicated - one file called filename.gold_skel", "# and one generated from the preprocessing called filename.gold_conll.", "if", "not", "data_file", ".", "endswith", "(", "\"gold_conll\"", ")", ":", "continue", "yield", "os", ".", "path", ".", "join", "(", "root", ",", "data_file", ")" ]
An iterator returning file_paths in a directory containing CONLL-formatted files.
[ "An", "iterator", "returning", "file_paths", "in", "a", "directory", "containing", "CONLL", "-", "formatted", "files", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/dataset_utils/ontonotes.py#L184-L198
23,120
allenai/allennlp
allennlp/data/dataset_readers/dataset_utils/ontonotes.py
Ontonotes.dataset_document_iterator
python
def dataset_document_iterator(self, file_path: str) -> Iterator[List[OntonotesSentence]]:
    """
    An iterator over CONLL formatted files which yields documents, regardless
    of the number of document annotations in a particular file. This is useful
    for conll data which has been preprocessed, such as the preprocessing which
    takes place for the 2012 CONLL Coreference Resolution task.
    """
    with codecs.open(file_path, 'r', encoding='utf8') as open_file:
        conll_rows = []
        document: List[OntonotesSentence] = []
        for line in open_file:
            line = line.strip()
            if line != '' and not line.startswith('#'):
                # Non-empty line. Collect the annotation.
                conll_rows.append(line)
            else:
                if conll_rows:
                    document.append(self._conll_rows_to_sentence(conll_rows))
                    conll_rows = []
            if line.startswith("#end document"):
                yield document
                document = []
        if document:
            # Collect any stragglers or files which might not
            # have the '#end document' format for the end of the file.
            yield document
[ "def", "dataset_document_iterator", "(", "self", ",", "file_path", ":", "str", ")", "->", "Iterator", "[", "List", "[", "OntonotesSentence", "]", "]", ":", "with", "codecs", ".", "open", "(", "file_path", ",", "'r'", ",", "encoding", "=", "'utf8'", ")", "as", "open_file", ":", "conll_rows", "=", "[", "]", "document", ":", "List", "[", "OntonotesSentence", "]", "=", "[", "]", "for", "line", "in", "open_file", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", "!=", "''", "and", "not", "line", ".", "startswith", "(", "'#'", ")", ":", "# Non-empty line. Collect the annotation.", "conll_rows", ".", "append", "(", "line", ")", "else", ":", "if", "conll_rows", ":", "document", ".", "append", "(", "self", ".", "_conll_rows_to_sentence", "(", "conll_rows", ")", ")", "conll_rows", "=", "[", "]", "if", "line", ".", "startswith", "(", "\"#end document\"", ")", ":", "yield", "document", "document", "=", "[", "]", "if", "document", ":", "# Collect any stragglers or files which might not", "# have the '#end document' format for the end of the file.", "yield", "document" ]
An iterator over CONLL formatted files which yields documents, regardless of the number of document annotations in a particular file. This is useful for conll data which has been preprocessed, such as the preprocessing which takes place for the 2012 CONLL Coreference Resolution task.
[ "An", "iterator", "over", "CONLL", "formatted", "files", "which", "yields", "documents", "regardless", "of", "the", "number", "of", "document", "annotations", "in", "a", "particular", "file", ".", "This", "is", "useful", "for", "conll", "data", "which", "has", "been", "preprocessed", "such", "as", "the", "preprocessing", "which", "takes", "place", "for", "the", "2012", "CONLL", "Coreference", "Resolution", "task", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/dataset_utils/ontonotes.py#L200-L225
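A short sketch of driving the document iterator above; the file path is a placeholder, and each yielded item is a whole document, i.e. a ``List[OntonotesSentence]``.

from allennlp.data.dataset_readers.dataset_utils.ontonotes import Ontonotes

ontonotes = Ontonotes()
# "/path/to/example.gold_conll" is a placeholder, not a real file.
for document in ontonotes.dataset_document_iterator("/path/to/example.gold_conll"):
    print(f"document with {len(document)} sentences")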
23,121
allenai/allennlp
allennlp/data/dataset_readers/dataset_utils/ontonotes.py
Ontonotes.sentence_iterator
def sentence_iterator(self, file_path: str) -> Iterator[OntonotesSentence]:
    """
    An iterator over the sentences in an individual CONLL formatted file.
    """
    for document in self.dataset_document_iterator(file_path):
        for sentence in document:
            yield sentence
python
def sentence_iterator(self, file_path: str) -> Iterator[OntonotesSentence]:
    """
    An iterator over the sentences in an individual CONLL formatted file.
    """
    for document in self.dataset_document_iterator(file_path):
        for sentence in document:
            yield sentence
[ "def", "sentence_iterator", "(", "self", ",", "file_path", ":", "str", ")", "->", "Iterator", "[", "OntonotesSentence", "]", ":", "for", "document", "in", "self", ".", "dataset_document_iterator", "(", "file_path", ")", ":", "for", "sentence", "in", "document", ":", "yield", "sentence" ]
An iterator over the sentences in an individual CONLL formatted file.
[ "An", "iterator", "over", "the", "sentences", "in", "an", "individual", "CONLL", "formatted", "file", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/dataset_utils/ontonotes.py#L227-L233
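The flattened variant above simply chains the document iterator; again the file path below is a placeholder.

from allennlp.data.dataset_readers.dataset_utils.ontonotes import Ontonotes

ontonotes = Ontonotes()
for sentence in ontonotes.sentence_iterator("/path/to/example.gold_conll"):
    print(sentence.words)  # one OntonotesSentence at a time, documents flattened away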
23,122
allenai/allennlp
allennlp/data/dataset_readers/dataset_utils/ontonotes.py
Ontonotes._process_span_annotations_for_word
def _process_span_annotations_for_word(annotations: List[str],
                                       span_labels: List[List[str]],
                                       current_span_labels: List[Optional[str]]) -> None:
    """
    Given a sequence of different label types for a single word and the current
    span label we are inside, compute the BIO tag for each label and append to a list.

    Parameters
    ----------
    annotations: ``List[str]``
        A list of labels to compute BIO tags for.
    span_labels : ``List[List[str]]``
        A list of lists, one for each annotation, to incrementally collect
        the BIO tags for a sequence.
    current_span_labels : ``List[Optional[str]]``
        The currently open span per annotation type, or ``None`` if there
        is no open span.
    """
    for annotation_index, annotation in enumerate(annotations):
        # strip all bracketing information to
        # get the actual propbank label.
        label = annotation.strip("()*")

        if "(" in annotation:
            # Entering into a span for a particular semantic role label.
            # We append the label and set the current span for this annotation.
            bio_label = "B-" + label
            span_labels[annotation_index].append(bio_label)
            current_span_labels[annotation_index] = label
        elif current_span_labels[annotation_index] is not None:
            # If there's no '(' token, but the current_span_label is not None,
            # then we are inside a span.
            bio_label = "I-" + current_span_labels[annotation_index]
            span_labels[annotation_index].append(bio_label)
        else:
            # We're outside a span.
            span_labels[annotation_index].append("O")
        # Exiting a span, so we reset the current span label for this annotation.
        if ")" in annotation:
            current_span_labels[annotation_index] = None
python
def _process_span_annotations_for_word(annotations: List[str],
                                       span_labels: List[List[str]],
                                       current_span_labels: List[Optional[str]]) -> None:
    """
    Given a sequence of different label types for a single word and the current
    span label we are inside, compute the BIO tag for each label and append to a list.

    Parameters
    ----------
    annotations: ``List[str]``
        A list of labels to compute BIO tags for.
    span_labels : ``List[List[str]]``
        A list of lists, one for each annotation, to incrementally collect
        the BIO tags for a sequence.
    current_span_labels : ``List[Optional[str]]``
        The currently open span per annotation type, or ``None`` if there
        is no open span.
    """
    for annotation_index, annotation in enumerate(annotations):
        # strip all bracketing information to
        # get the actual propbank label.
        label = annotation.strip("()*")

        if "(" in annotation:
            # Entering into a span for a particular semantic role label.
            # We append the label and set the current span for this annotation.
            bio_label = "B-" + label
            span_labels[annotation_index].append(bio_label)
            current_span_labels[annotation_index] = label
        elif current_span_labels[annotation_index] is not None:
            # If there's no '(' token, but the current_span_label is not None,
            # then we are inside a span.
            bio_label = "I-" + current_span_labels[annotation_index]
            span_labels[annotation_index].append(bio_label)
        else:
            # We're outside a span.
            span_labels[annotation_index].append("O")
        # Exiting a span, so we reset the current span label for this annotation.
        if ")" in annotation:
            current_span_labels[annotation_index] = None
[ "def", "_process_span_annotations_for_word", "(", "annotations", ":", "List", "[", "str", "]", ",", "span_labels", ":", "List", "[", "List", "[", "str", "]", "]", ",", "current_span_labels", ":", "List", "[", "Optional", "[", "str", "]", "]", ")", "->", "None", ":", "for", "annotation_index", ",", "annotation", "in", "enumerate", "(", "annotations", ")", ":", "# strip all bracketing information to", "# get the actual propbank label.", "label", "=", "annotation", ".", "strip", "(", "\"()*\"", ")", "if", "\"(\"", "in", "annotation", ":", "# Entering into a span for a particular semantic role label.", "# We append the label and set the current span for this annotation.", "bio_label", "=", "\"B-\"", "+", "label", "span_labels", "[", "annotation_index", "]", ".", "append", "(", "bio_label", ")", "current_span_labels", "[", "annotation_index", "]", "=", "label", "elif", "current_span_labels", "[", "annotation_index", "]", "is", "not", "None", ":", "# If there's no '(' token, but the current_span_label is not None,", "# then we are inside a span.", "bio_label", "=", "\"I-\"", "+", "current_span_labels", "[", "annotation_index", "]", "span_labels", "[", "annotation_index", "]", ".", "append", "(", "bio_label", ")", "else", ":", "# We're outside a span.", "span_labels", "[", "annotation_index", "]", ".", "append", "(", "\"O\"", ")", "# Exiting a span, so we reset the current span label for this annotation.", "if", "\")\"", "in", "annotation", ":", "current_span_labels", "[", "annotation_index", "]", "=", "None" ]
Given a sequence of different label types for a single word and the current
span label we are inside, compute the BIO tag for each label and append to a list.

Parameters
----------
annotations: ``List[str]``
    A list of labels to compute BIO tags for.
span_labels : ``List[List[str]]``
    A list of lists, one for each annotation, to incrementally collect
    the BIO tags for a sequence.
current_span_labels : ``List[Optional[str]]``
    The currently open span per annotation type, or ``None`` if there
    is no open span.
[ "Given", "a", "sequence", "of", "different", "label", "types", "for", "a", "single", "word", "and", "the", "current", "span", "label", "we", "are", "inside", "compute", "the", "BIO", "tag", "for", "each", "label", "and", "append", "to", "a", "list", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/dataset_utils/ontonotes.py#L411-L449
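A self-contained sketch of the BIO conversion above, feeding one made-up SRL annotation column word by word; the bracketed labels are invented for illustration, and the helper is called via the class because it takes no ``self``.

from allennlp.data.dataset_readers.dataset_utils.ontonotes import Ontonotes

span_labels = [[]]            # one inner list per annotation column
current_span_labels = [None]  # no span is open initially
# "(ARG0*" opens a span, "*" continues it, "*)" closes it.
for annotation in ["(ARG0*", "*", "*)"]:
    Ontonotes._process_span_annotations_for_word([annotation],
                                                 span_labels,
                                                 current_span_labels)
print(span_labels[0])  # ['B-ARG0', 'I-ARG0', 'I-ARG0']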
23,123
allenai/allennlp
allennlp/commands/print_results.py
print_results_from_args
def print_results_from_args(args: argparse.Namespace):
    """
    Prints results from an ``argparse.Namespace`` object.
    """
    path = args.path
    metrics_name = args.metrics_filename
    keys = args.keys

    results_dict = {}
    for root, _, files in os.walk(path):
        if metrics_name in files:
            full_name = os.path.join(root, metrics_name)
            metrics = json.load(open(full_name))
            results_dict[full_name] = metrics

    sorted_keys = sorted(list(results_dict.keys()))
    print(f"model_run, {', '.join(keys)}")
    for name in sorted_keys:
        results = results_dict[name]
        keys_to_print = [str(results.get(key, "N/A")) for key in keys]
        print(f"{name}, {', '.join(keys_to_print)}")
python
def print_results_from_args(args: argparse.Namespace):
    """
    Prints results from an ``argparse.Namespace`` object.
    """
    path = args.path
    metrics_name = args.metrics_filename
    keys = args.keys

    results_dict = {}
    for root, _, files in os.walk(path):
        if metrics_name in files:
            full_name = os.path.join(root, metrics_name)
            metrics = json.load(open(full_name))
            results_dict[full_name] = metrics

    sorted_keys = sorted(list(results_dict.keys()))
    print(f"model_run, {', '.join(keys)}")
    for name in sorted_keys:
        results = results_dict[name]
        keys_to_print = [str(results.get(key, "N/A")) for key in keys]
        print(f"{name}, {', '.join(keys_to_print)}")
[ "def", "print_results_from_args", "(", "args", ":", "argparse", ".", "Namespace", ")", ":", "path", "=", "args", ".", "path", "metrics_name", "=", "args", ".", "metrics_filename", "keys", "=", "args", ".", "keys", "results_dict", "=", "{", "}", "for", "root", ",", "_", ",", "files", "in", "os", ".", "walk", "(", "path", ")", ":", "if", "metrics_name", "in", "files", ":", "full_name", "=", "os", ".", "path", ".", "join", "(", "root", ",", "metrics_name", ")", "metrics", "=", "json", ".", "load", "(", "open", "(", "full_name", ")", ")", "results_dict", "[", "full_name", "]", "=", "metrics", "sorted_keys", "=", "sorted", "(", "list", "(", "results_dict", ".", "keys", "(", ")", ")", ")", "print", "(", "f\"model_run, {', '.join(keys)}\"", ")", "for", "name", "in", "sorted_keys", ":", "results", "=", "results_dict", "[", "name", "]", "keys_to_print", "=", "[", "str", "(", "results", ".", "get", "(", "key", ",", "\"N/A\"", ")", ")", "for", "key", "in", "keys", "]", "print", "(", "f\"{name}, {', '.join(keys_to_print)}\"", ")" ]
Prints results from an ``argparse.Namespace`` object.
[ "Prints", "results", "from", "an", "argparse", ".", "Namespace", "object", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/commands/print_results.py#L66-L88
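A sketch of driving the function above without the CLI, by building the ``argparse.Namespace`` by hand; the directory name and metric keys are hypothetical.

import argparse
from allennlp.commands.print_results import print_results_from_args

args = argparse.Namespace(path="model_runs/",
                          metrics_filename="metrics.json",
                          keys=["best_validation_accuracy", "best_validation_loss"])
# Prints a header row, then one CSV-style row per metrics.json found under
# path; missing keys are rendered as "N/A".
print_results_from_args(args)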
23,124
allenai/allennlp
allennlp/modules/input_variational_dropout.py
InputVariationalDropout.forward
def forward(self, input_tensor):  # pylint: disable=arguments-differ
    """
    Apply dropout to input tensor.

    Parameters
    ----------
    input_tensor: ``torch.FloatTensor``
        A tensor of shape ``(batch_size, num_timesteps, embedding_dim)``

    Returns
    -------
    output: ``torch.FloatTensor``
        A tensor of shape ``(batch_size, num_timesteps, embedding_dim)`` with dropout applied.
    """
    ones = input_tensor.data.new_ones(input_tensor.shape[0], input_tensor.shape[-1])
    dropout_mask = torch.nn.functional.dropout(ones, self.p, self.training, inplace=False)
    if self.inplace:
        input_tensor *= dropout_mask.unsqueeze(1)
        return None
    else:
        return dropout_mask.unsqueeze(1) * input_tensor
python
def forward(self, input_tensor):  # pylint: disable=arguments-differ
    """
    Apply dropout to input tensor.

    Parameters
    ----------
    input_tensor: ``torch.FloatTensor``
        A tensor of shape ``(batch_size, num_timesteps, embedding_dim)``

    Returns
    -------
    output: ``torch.FloatTensor``
        A tensor of shape ``(batch_size, num_timesteps, embedding_dim)`` with dropout applied.
    """
    ones = input_tensor.data.new_ones(input_tensor.shape[0], input_tensor.shape[-1])
    dropout_mask = torch.nn.functional.dropout(ones, self.p, self.training, inplace=False)
    if self.inplace:
        input_tensor *= dropout_mask.unsqueeze(1)
        return None
    else:
        return dropout_mask.unsqueeze(1) * input_tensor
[ "def", "forward", "(", "self", ",", "input_tensor", ")", ":", "# pylint: disable=arguments-differ", "ones", "=", "input_tensor", ".", "data", ".", "new_ones", "(", "input_tensor", ".", "shape", "[", "0", "]", ",", "input_tensor", ".", "shape", "[", "-", "1", "]", ")", "dropout_mask", "=", "torch", ".", "nn", ".", "functional", ".", "dropout", "(", "ones", ",", "self", ".", "p", ",", "self", ".", "training", ",", "inplace", "=", "False", ")", "if", "self", ".", "inplace", ":", "input_tensor", "*=", "dropout_mask", ".", "unsqueeze", "(", "1", ")", "return", "None", "else", ":", "return", "dropout_mask", ".", "unsqueeze", "(", "1", ")", "*", "input_tensor" ]
Apply dropout to input tensor.

Parameters
----------
input_tensor: ``torch.FloatTensor``
    A tensor of shape ``(batch_size, num_timesteps, embedding_dim)``

Returns
-------
output: ``torch.FloatTensor``
    A tensor of shape ``(batch_size, num_timesteps, embedding_dim)`` with dropout applied.
[ "Apply", "dropout", "to", "input", "tensor", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/input_variational_dropout.py#L13-L34
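A quick sketch of the variational-dropout behaviour above: because the mask is sampled once per (batch, embedding) pair and broadcast over time, every timestep shares the same dropped dimensions.

import torch
from allennlp.modules.input_variational_dropout import InputVariationalDropout

dropout = InputVariationalDropout(p=0.5)  # a freshly built module is in training mode
x = torch.ones(2, 4, 8)  # (batch_size, num_timesteps, embedding_dim)
y = dropout(x)
# The zero pattern at timestep 0 matches the zero pattern at timestep 1,
# unlike ordinary dropout, which would resample the mask per timestep.
print(torch.equal(y[:, 0, :] == 0, y[:, 1, :] == 0))  # True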
23,125
allenai/allennlp
allennlp/training/metrics/metric.py
Metric.unwrap_to_tensors
def unwrap_to_tensors(*tensors: torch.Tensor):
    """
    If you actually passed gradient-tracking Tensors to a Metric, there will be
    a huge memory leak, because it will prevent garbage collection for the computation
    graph. This method ensures that you're using tensors directly and that they are on
    the CPU.
    """
    return (x.detach().cpu() if isinstance(x, torch.Tensor) else x for x in tensors)
python
def unwrap_to_tensors(*tensors: torch.Tensor):
    """
    If you actually passed gradient-tracking Tensors to a Metric, there will be
    a huge memory leak, because it will prevent garbage collection for the computation
    graph. This method ensures that you're using tensors directly and that they are on
    the CPU.
    """
    return (x.detach().cpu() if isinstance(x, torch.Tensor) else x for x in tensors)
[ "def", "unwrap_to_tensors", "(", "*", "tensors", ":", "torch", ".", "Tensor", ")", ":", "return", "(", "x", ".", "detach", "(", ")", ".", "cpu", "(", ")", "if", "isinstance", "(", "x", ",", "torch", ".", "Tensor", ")", "else", "x", "for", "x", "in", "tensors", ")" ]
If you actually passed gradient-tracking Tensors to a Metric, there will be a huge memory leak, because it will prevent garbage collection for the computation graph. This method ensures that you're using tensors directly and that they are on the CPU.
[ "If", "you", "actually", "passed", "gradient", "-", "tracking", "Tensors", "to", "a", "Metric", "there", "will", "be", "a", "huge", "memory", "leak", "because", "it", "will", "prevent", "garbage", "collection", "for", "the", "computation", "graph", ".", "This", "method", "ensures", "that", "you", "re", "using", "tensors", "directly", "and", "that", "they", "are", "on", "the", "CPU", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/metrics/metric.py#L42-L49
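A minimal sketch of the helper above; ``unwrap_to_tensors`` is a static method on ``Metric``, so it can be called without a concrete metric instance.

import torch
from allennlp.training.metrics.metric import Metric

logits = torch.randn(4, 3, requires_grad=True)
gold = torch.tensor([0, 2, 1, 0])
# Detached CPU copies: safe to accumulate inside a metric without
# keeping the whole computation graph alive.
logits, gold = Metric.unwrap_to_tensors(logits, gold)
print(logits.requires_grad)  # False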
23,126
allenai/allennlp
allennlp/data/dataset_readers/dataset_utils/text2sql_utils.py
replace_variables
def replace_variables(sentence: List[str],
                      sentence_variables: Dict[str, str]) -> Tuple[List[str], List[str]]:
    """
    Replaces abstract variables in text with their concrete counterparts.
    """
    tokens = []
    tags = []
    for token in sentence:
        if token not in sentence_variables:
            tokens.append(token)
            tags.append("O")
        else:
            for word in sentence_variables[token].split():
                tokens.append(word)
                tags.append(token)
    return tokens, tags
python
def replace_variables(sentence: List[str],
                      sentence_variables: Dict[str, str]) -> Tuple[List[str], List[str]]:
    """
    Replaces abstract variables in text with their concrete counterparts.
    """
    tokens = []
    tags = []
    for token in sentence:
        if token not in sentence_variables:
            tokens.append(token)
            tags.append("O")
        else:
            for word in sentence_variables[token].split():
                tokens.append(word)
                tags.append(token)
    return tokens, tags
[ "def", "replace_variables", "(", "sentence", ":", "List", "[", "str", "]", ",", "sentence_variables", ":", "Dict", "[", "str", ",", "str", "]", ")", "->", "Tuple", "[", "List", "[", "str", "]", ",", "List", "[", "str", "]", "]", ":", "tokens", "=", "[", "]", "tags", "=", "[", "]", "for", "token", "in", "sentence", ":", "if", "token", "not", "in", "sentence_variables", ":", "tokens", ".", "append", "(", "token", ")", "tags", ".", "append", "(", "\"O\"", ")", "else", ":", "for", "word", "in", "sentence_variables", "[", "token", "]", ".", "split", "(", ")", ":", "tokens", ".", "append", "(", "word", ")", "tags", ".", "append", "(", "token", ")", "return", "tokens", ",", "tags" ]
Replaces abstract variables in text with their concrete counterparts.
[ "Replaces", "abstract", "variables", "in", "text", "with", "their", "concrete", "counterparts", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/dataset_utils/text2sql_utils.py#L65-L80
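A tiny worked example of the variable replacement above; the sentence and the variable map are invented.

from allennlp.data.dataset_readers.dataset_utils.text2sql_utils import replace_variables

sentence = ["flights", "from", "city_name0"]
variables = {"city_name0": "new york"}
tokens, tags = replace_variables(sentence, variables)
print(tokens)  # ['flights', 'from', 'new', 'york']
print(tags)    # ['O', 'O', 'city_name0', 'city_name0']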
23,127
allenai/allennlp
allennlp/data/dataset_readers/dataset_utils/text2sql_utils.py
clean_and_split_sql
def clean_and_split_sql(sql: str) -> List[str]:
    """
    Cleans up and unifies a SQL query. This involves unifying quoted strings
    and splitting brackets which aren't formatted consistently in the data.
    """
    sql_tokens: List[str] = []
    for token in sql.strip().split():
        token = token.replace('"', "'").replace("%", "")
        if token.endswith("(") and len(token) > 1:
            sql_tokens.extend(split_table_and_column_names(token[:-1]))
            sql_tokens.extend(split_table_and_column_names(token[-1]))
        else:
            sql_tokens.extend(split_table_and_column_names(token))
    return sql_tokens
python
def clean_and_split_sql(sql: str) -> List[str]:
    """
    Cleans up and unifies a SQL query. This involves unifying quoted strings
    and splitting brackets which aren't formatted consistently in the data.
    """
    sql_tokens: List[str] = []
    for token in sql.strip().split():
        token = token.replace('"', "'").replace("%", "")
        if token.endswith("(") and len(token) > 1:
            sql_tokens.extend(split_table_and_column_names(token[:-1]))
            sql_tokens.extend(split_table_and_column_names(token[-1]))
        else:
            sql_tokens.extend(split_table_and_column_names(token))
    return sql_tokens
[ "def", "clean_and_split_sql", "(", "sql", ":", "str", ")", "->", "List", "[", "str", "]", ":", "sql_tokens", ":", "List", "[", "str", "]", "=", "[", "]", "for", "token", "in", "sql", ".", "strip", "(", ")", ".", "split", "(", ")", ":", "token", "=", "token", ".", "replace", "(", "'\"'", ",", "\"'\"", ")", ".", "replace", "(", "\"%\"", ",", "\"\"", ")", "if", "token", ".", "endswith", "(", "\"(\"", ")", "and", "len", "(", "token", ")", ">", "1", ":", "sql_tokens", ".", "extend", "(", "split_table_and_column_names", "(", "token", "[", ":", "-", "1", "]", ")", ")", "sql_tokens", ".", "extend", "(", "split_table_and_column_names", "(", "token", "[", "-", "1", "]", ")", ")", "else", ":", "sql_tokens", ".", "extend", "(", "split_table_and_column_names", "(", "token", ")", ")", "return", "sql_tokens" ]
Cleans up and unifies a SQL query. This involves unifying quoted strings and splitting brackets which aren't formatted consistently in the data.
[ "Cleans", "up", "and", "unifies", "a", "SQL", "query", ".", "This", "involves", "unifying", "quoted", "strings", "and", "splitting", "brackets", "which", "aren", "t", "formatted", "consistently", "in", "the", "data", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/dataset_utils/text2sql_utils.py#L89-L102
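A sketch of the SQL clean-up above on an invented query; the exact token boundaries come from ``split_table_and_column_names``, so treat the commented output as approximate.

from allennlp.data.dataset_readers.dataset_utils.text2sql_utils import clean_and_split_sql

sql = 'SELECT COUNT( * ) FROM FLIGHT WHERE FLIGHT.PRICE < "100" ;'
# Double quotes are unified to single quotes, and a trailing "(" is split
# off its function name, e.g. COUNT( -> COUNT, ( .
print(clean_and_split_sql(sql))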
23,128
allenai/allennlp
allennlp/data/dataset_readers/dataset_utils/text2sql_utils.py
resolve_primary_keys_in_schema
def resolve_primary_keys_in_schema(sql_tokens: List[str],
                                   schema: Dict[str, List[TableColumn]]) -> List[str]:
    """
    Some examples in the text2sql datasets use ID as a column reference to the
    column of a table which has a primary key. This causes problems if you are trying
    to constrain a grammar to only produce the column names directly, because you don't
    know what ID refers to. So instead of dealing with that, we just replace it.
    """
    primary_keys_for_tables = {name: max(columns, key=lambda x: x.is_primary_key).name
                               for name, columns in schema.items()}
    resolved_tokens = []
    for i, token in enumerate(sql_tokens):
        if i > 2:
            table_name = sql_tokens[i - 2]
            if token == "ID" and table_name in primary_keys_for_tables.keys():
                token = primary_keys_for_tables[table_name]
        resolved_tokens.append(token)
    return resolved_tokens
python
def resolve_primary_keys_in_schema(sql_tokens: List[str],
                                   schema: Dict[str, List[TableColumn]]) -> List[str]:
    """
    Some examples in the text2sql datasets use ID as a column reference to the
    column of a table which has a primary key. This causes problems if you are trying
    to constrain a grammar to only produce the column names directly, because you don't
    know what ID refers to. So instead of dealing with that, we just replace it.
    """
    primary_keys_for_tables = {name: max(columns, key=lambda x: x.is_primary_key).name
                               for name, columns in schema.items()}
    resolved_tokens = []
    for i, token in enumerate(sql_tokens):
        if i > 2:
            table_name = sql_tokens[i - 2]
            if token == "ID" and table_name in primary_keys_for_tables.keys():
                token = primary_keys_for_tables[table_name]
        resolved_tokens.append(token)
    return resolved_tokens
[ "def", "resolve_primary_keys_in_schema", "(", "sql_tokens", ":", "List", "[", "str", "]", ",", "schema", ":", "Dict", "[", "str", ",", "List", "[", "TableColumn", "]", "]", ")", "->", "List", "[", "str", "]", ":", "primary_keys_for_tables", "=", "{", "name", ":", "max", "(", "columns", ",", "key", "=", "lambda", "x", ":", "x", ".", "is_primary_key", ")", ".", "name", "for", "name", ",", "columns", "in", "schema", ".", "items", "(", ")", "}", "resolved_tokens", "=", "[", "]", "for", "i", ",", "token", "in", "enumerate", "(", "sql_tokens", ")", ":", "if", "i", ">", "2", ":", "table_name", "=", "sql_tokens", "[", "i", "-", "2", "]", "if", "token", "==", "\"ID\"", "and", "table_name", "in", "primary_keys_for_tables", ".", "keys", "(", ")", ":", "token", "=", "primary_keys_for_tables", "[", "table_name", "]", "resolved_tokens", ".", "append", "(", "token", ")", "return", "resolved_tokens" ]
Some examples in the text2sql datasets use ID as a column reference to the column of a table which has a primary key. This causes problems if you are trying to constrain a grammar to only produce the column names directly, because you don't know what ID refers to. So instead of dealing with that, we just replace it.
[ "Some", "examples", "in", "the", "text2sql", "datasets", "use", "ID", "as", "a", "column", "reference", "to", "the", "column", "of", "a", "table", "which", "has", "a", "primary", "key", ".", "This", "causes", "problems", "if", "you", "are", "trying", "to", "constrain", "a", "grammar", "to", "only", "produce", "the", "column", "names", "directly", "because", "you", "don", "t", "know", "what", "ID", "refers", "to", ".", "So", "instead", "of", "dealing", "with", "that", "we", "just", "replace", "it", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/dataset_utils/text2sql_utils.py#L104-L121
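A worked example for the ID resolution above, assuming ``TableColumn`` is the named tuple defined in the same module with fields ``(name, column_type, is_primary_key)``; the schema is invented.

from allennlp.data.dataset_readers.dataset_utils.text2sql_utils import (
        TableColumn, resolve_primary_keys_in_schema)

# Toy schema: CITY's primary key is CITY_NAME.
schema = {"CITY": [TableColumn("CITY_NAME", "text", True),
                   TableColumn("STATE", "text", False)]}
tokens = ["SELECT", "CITY", ".", "ID", "FROM", "CITY"]
# "ID" appearing two tokens after "CITY" resolves to CITY's primary key.
print(resolve_primary_keys_in_schema(tokens, schema))
# ['SELECT', 'CITY', '.', 'CITY_NAME', 'FROM', 'CITY']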
23,129
allenai/allennlp
allennlp/modules/encoder_base.py
_EncoderBase.sort_and_run_forward
def sort_and_run_forward(self,
                         module: Callable[[PackedSequence, Optional[RnnState]],
                                          Tuple[Union[PackedSequence, torch.Tensor], RnnState]],
                         inputs: torch.Tensor,
                         mask: torch.Tensor,
                         hidden_state: Optional[RnnState] = None):
    """
    This function exists because Pytorch RNNs require that their inputs be sorted
    before being passed as input. As all of our Seq2xxxEncoders use this functionality,
    it is provided in a base class. This method can be called on any module which takes
    as input a ``PackedSequence`` and some ``hidden_state``, which can either be a
    tuple of tensors or a tensor.

    As all of our Seq2xxxEncoders have different return types, we return `sorted`
    outputs from the module, which is called directly. Additionally, we return the
    indices into the batch dimension required to restore the tensor to its correct,
    unsorted order and the number of valid batch elements (i.e. the number of elements
    in the batch which are not completely masked). This un-sorting and re-padding of
    the module outputs is left to the subclasses because their outputs have different
    types and handling them smoothly here is difficult.

    Parameters
    ----------
    module : ``Callable[[PackedSequence, Optional[RnnState]],
                        Tuple[Union[PackedSequence, torch.Tensor], RnnState]]``, required.
        A function to run on the inputs. In most cases, this is a ``torch.nn.Module``.
    inputs : ``torch.Tensor``, required.
        A tensor of shape ``(batch_size, sequence_length, embedding_size)`` representing
        the inputs to the Encoder.
    mask : ``torch.Tensor``, required.
        A tensor of shape ``(batch_size, sequence_length)``, representing masked and
        non-masked elements of the sequence for each element in the batch.
    hidden_state : ``Optional[RnnState]``, (default = None).
        A single tensor of shape (num_layers, batch_size, hidden_size) representing the
        state of an RNN, or a tuple of tensors of shapes
        (num_layers, batch_size, hidden_size) and (num_layers, batch_size, memory_size),
        representing the hidden state and memory state of an LSTM-like RNN.

    Returns
    -------
    module_output : ``Union[torch.Tensor, PackedSequence]``.
        A Tensor or PackedSequence representing the output of the Pytorch Module.
        The batch size dimension will be equal to ``num_valid``, as sequences of zero
        length are clipped off before the module is called, as Pytorch cannot handle
        zero length sequences.
    final_states : ``Optional[RnnState]``
        A Tensor representing the hidden state of the Pytorch Module. This can either
        be a single tensor of shape (num_layers, num_valid, hidden_size), for instance
        in the case of a GRU, or a tuple of tensors, such as those required for an LSTM.
    restoration_indices : ``torch.LongTensor``
        A tensor of shape ``(batch_size,)``, describing the re-indexing required to
        transform the outputs back to their original batch order.
    """
    # In some circumstances you may have sequences of zero length. ``pack_padded_sequence``
    # requires all sequence lengths to be > 0, so remove sequences of zero length before
    # calling self._module, then fill with zeros.

    # First count how many sequences are empty.
    batch_size = mask.size(0)
    num_valid = torch.sum(mask[:, 0]).int().item()

    sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
    sorted_inputs, sorted_sequence_lengths, restoration_indices, sorting_indices =\
        sort_batch_by_length(inputs, sequence_lengths)

    # Now create a PackedSequence with only the non-empty, sorted sequences.
    packed_sequence_input = pack_padded_sequence(sorted_inputs[:num_valid, :, :],
                                                 sorted_sequence_lengths[:num_valid].data.tolist(),
                                                 batch_first=True)
    # Prepare the initial states.
    if not self.stateful:
        if hidden_state is None:
            initial_states = hidden_state
        elif isinstance(hidden_state, tuple):
            initial_states = [state.index_select(1, sorting_indices)[:, :num_valid, :].contiguous()
                              for state in hidden_state]
        else:
            initial_states = hidden_state.index_select(1, sorting_indices)[:, :num_valid, :].contiguous()
    else:
        initial_states = self._get_initial_states(batch_size, num_valid, sorting_indices)

    # Actually call the module on the sorted PackedSequence.
    module_output, final_states = module(packed_sequence_input, initial_states)

    return module_output, final_states, restoration_indices
python
def sort_and_run_forward(self,
                         module: Callable[[PackedSequence, Optional[RnnState]],
                                          Tuple[Union[PackedSequence, torch.Tensor], RnnState]],
                         inputs: torch.Tensor,
                         mask: torch.Tensor,
                         hidden_state: Optional[RnnState] = None):
    """
    This function exists because Pytorch RNNs require that their inputs be sorted
    before being passed as input. As all of our Seq2xxxEncoders use this functionality,
    it is provided in a base class. This method can be called on any module which takes
    as input a ``PackedSequence`` and some ``hidden_state``, which can either be a
    tuple of tensors or a tensor.

    As all of our Seq2xxxEncoders have different return types, we return `sorted`
    outputs from the module, which is called directly. Additionally, we return the
    indices into the batch dimension required to restore the tensor to its correct,
    unsorted order and the number of valid batch elements (i.e. the number of elements
    in the batch which are not completely masked). This un-sorting and re-padding of
    the module outputs is left to the subclasses because their outputs have different
    types and handling them smoothly here is difficult.

    Parameters
    ----------
    module : ``Callable[[PackedSequence, Optional[RnnState]],
                        Tuple[Union[PackedSequence, torch.Tensor], RnnState]]``, required.
        A function to run on the inputs. In most cases, this is a ``torch.nn.Module``.
    inputs : ``torch.Tensor``, required.
        A tensor of shape ``(batch_size, sequence_length, embedding_size)`` representing
        the inputs to the Encoder.
    mask : ``torch.Tensor``, required.
        A tensor of shape ``(batch_size, sequence_length)``, representing masked and
        non-masked elements of the sequence for each element in the batch.
    hidden_state : ``Optional[RnnState]``, (default = None).
        A single tensor of shape (num_layers, batch_size, hidden_size) representing the
        state of an RNN, or a tuple of tensors of shapes
        (num_layers, batch_size, hidden_size) and (num_layers, batch_size, memory_size),
        representing the hidden state and memory state of an LSTM-like RNN.

    Returns
    -------
    module_output : ``Union[torch.Tensor, PackedSequence]``.
        A Tensor or PackedSequence representing the output of the Pytorch Module.
        The batch size dimension will be equal to ``num_valid``, as sequences of zero
        length are clipped off before the module is called, as Pytorch cannot handle
        zero length sequences.
    final_states : ``Optional[RnnState]``
        A Tensor representing the hidden state of the Pytorch Module. This can either
        be a single tensor of shape (num_layers, num_valid, hidden_size), for instance
        in the case of a GRU, or a tuple of tensors, such as those required for an LSTM.
    restoration_indices : ``torch.LongTensor``
        A tensor of shape ``(batch_size,)``, describing the re-indexing required to
        transform the outputs back to their original batch order.
    """
    # In some circumstances you may have sequences of zero length. ``pack_padded_sequence``
    # requires all sequence lengths to be > 0, so remove sequences of zero length before
    # calling self._module, then fill with zeros.

    # First count how many sequences are empty.
    batch_size = mask.size(0)
    num_valid = torch.sum(mask[:, 0]).int().item()

    sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
    sorted_inputs, sorted_sequence_lengths, restoration_indices, sorting_indices =\
        sort_batch_by_length(inputs, sequence_lengths)

    # Now create a PackedSequence with only the non-empty, sorted sequences.
    packed_sequence_input = pack_padded_sequence(sorted_inputs[:num_valid, :, :],
                                                 sorted_sequence_lengths[:num_valid].data.tolist(),
                                                 batch_first=True)
    # Prepare the initial states.
    if not self.stateful:
        if hidden_state is None:
            initial_states = hidden_state
        elif isinstance(hidden_state, tuple):
            initial_states = [state.index_select(1, sorting_indices)[:, :num_valid, :].contiguous()
                              for state in hidden_state]
        else:
            initial_states = hidden_state.index_select(1, sorting_indices)[:, :num_valid, :].contiguous()
    else:
        initial_states = self._get_initial_states(batch_size, num_valid, sorting_indices)

    # Actually call the module on the sorted PackedSequence.
    module_output, final_states = module(packed_sequence_input, initial_states)

    return module_output, final_states, restoration_indices
[ "def", "sort_and_run_forward", "(", "self", ",", "module", ":", "Callable", "[", "[", "PackedSequence", ",", "Optional", "[", "RnnState", "]", "]", ",", "Tuple", "[", "Union", "[", "PackedSequence", ",", "torch", ".", "Tensor", "]", ",", "RnnState", "]", "]", ",", "inputs", ":", "torch", ".", "Tensor", ",", "mask", ":", "torch", ".", "Tensor", ",", "hidden_state", ":", "Optional", "[", "RnnState", "]", "=", "None", ")", ":", "# In some circumstances you may have sequences of zero length. ``pack_padded_sequence``", "# requires all sequence lengths to be > 0, so remove sequences of zero length before", "# calling self._module, then fill with zeros.", "# First count how many sequences are empty.", "batch_size", "=", "mask", ".", "size", "(", "0", ")", "num_valid", "=", "torch", ".", "sum", "(", "mask", "[", ":", ",", "0", "]", ")", ".", "int", "(", ")", ".", "item", "(", ")", "sequence_lengths", "=", "get_lengths_from_binary_sequence_mask", "(", "mask", ")", "sorted_inputs", ",", "sorted_sequence_lengths", ",", "restoration_indices", ",", "sorting_indices", "=", "sort_batch_by_length", "(", "inputs", ",", "sequence_lengths", ")", "# Now create a PackedSequence with only the non-empty, sorted sequences.", "packed_sequence_input", "=", "pack_padded_sequence", "(", "sorted_inputs", "[", ":", "num_valid", ",", ":", ",", ":", "]", ",", "sorted_sequence_lengths", "[", ":", "num_valid", "]", ".", "data", ".", "tolist", "(", ")", ",", "batch_first", "=", "True", ")", "# Prepare the initial states.", "if", "not", "self", ".", "stateful", ":", "if", "hidden_state", "is", "None", ":", "initial_states", "=", "hidden_state", "elif", "isinstance", "(", "hidden_state", ",", "tuple", ")", ":", "initial_states", "=", "[", "state", ".", "index_select", "(", "1", ",", "sorting_indices", ")", "[", ":", ",", ":", "num_valid", ",", ":", "]", ".", "contiguous", "(", ")", "for", "state", "in", "hidden_state", "]", "else", ":", "initial_states", "=", "hidden_state", ".", "index_select", "(", "1", ",", "sorting_indices", ")", "[", ":", ",", ":", "num_valid", ",", ":", "]", ".", "contiguous", "(", ")", "else", ":", "initial_states", "=", "self", ".", "_get_initial_states", "(", "batch_size", ",", "num_valid", ",", "sorting_indices", ")", "# Actually call the module on the sorted PackedSequence.", "module_output", ",", "final_states", "=", "module", "(", "packed_sequence_input", ",", "initial_states", ")", "return", "module_output", ",", "final_states", ",", "restoration_indices" ]
This function exists because Pytorch RNNs require that their inputs be sorted
before being passed as input. As all of our Seq2xxxEncoders use this functionality,
it is provided in a base class. This method can be called on any module which takes
as input a ``PackedSequence`` and some ``hidden_state``, which can either be a
tuple of tensors or a tensor.

As all of our Seq2xxxEncoders have different return types, we return `sorted`
outputs from the module, which is called directly. Additionally, we return the
indices into the batch dimension required to restore the tensor to its correct,
unsorted order and the number of valid batch elements (i.e. the number of elements
in the batch which are not completely masked). This un-sorting and re-padding of
the module outputs is left to the subclasses because their outputs have different
types and handling them smoothly here is difficult.

Parameters
----------
module : ``Callable[[PackedSequence, Optional[RnnState]],
                    Tuple[Union[PackedSequence, torch.Tensor], RnnState]]``, required.
    A function to run on the inputs. In most cases, this is a ``torch.nn.Module``.
inputs : ``torch.Tensor``, required.
    A tensor of shape ``(batch_size, sequence_length, embedding_size)`` representing
    the inputs to the Encoder.
mask : ``torch.Tensor``, required.
    A tensor of shape ``(batch_size, sequence_length)``, representing masked and
    non-masked elements of the sequence for each element in the batch.
hidden_state : ``Optional[RnnState]``, (default = None).
    A single tensor of shape (num_layers, batch_size, hidden_size) representing the
    state of an RNN, or a tuple of tensors of shapes
    (num_layers, batch_size, hidden_size) and (num_layers, batch_size, memory_size),
    representing the hidden state and memory state of an LSTM-like RNN.

Returns
-------
module_output : ``Union[torch.Tensor, PackedSequence]``.
    A Tensor or PackedSequence representing the output of the Pytorch Module.
    The batch size dimension will be equal to ``num_valid``, as sequences of zero
    length are clipped off before the module is called, as Pytorch cannot handle
    zero length sequences.
final_states : ``Optional[RnnState]``
    A Tensor representing the hidden state of the Pytorch Module. This can either
    be a single tensor of shape (num_layers, num_valid, hidden_size), for instance
    in the case of a GRU, or a tuple of tensors, such as those required for an LSTM.
restoration_indices : ``torch.LongTensor``
    A tensor of shape ``(batch_size,)``, describing the re-indexing required to
    transform the outputs back to their original batch order.
[ "This", "function", "exists", "because", "Pytorch", "RNNs", "require", "that", "their", "inputs", "be", "sorted", "before", "being", "passed", "as", "input", ".", "As", "all", "of", "our", "Seq2xxxEncoders", "use", "this", "functionality", "it", "is", "provided", "in", "a", "base", "class", ".", "This", "method", "can", "be", "called", "on", "any", "module", "which", "takes", "as", "input", "a", "PackedSequence", "and", "some", "hidden_state", "which", "can", "either", "be", "a", "tuple", "of", "tensors", "or", "a", "tensor", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/encoder_base.py#L32-L118
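A minimal sketch of calling the sorting helper above directly with a plain ``torch.nn.LSTM``; in practice this is done for you by the Seq2Seq/Seq2Vec encoder wrappers, and the shapes here are invented.

import torch
from allennlp.modules.encoder_base import _EncoderBase

lstm = torch.nn.LSTM(input_size=8, hidden_size=16, batch_first=True)
encoder = _EncoderBase(stateful=False)

inputs = torch.randn(3, 5, 8)           # (batch_size, sequence_length, embedding_size)
mask = torch.tensor([[1, 1, 1, 1, 1],
                     [1, 1, 1, 0, 0],
                     [0, 0, 0, 0, 0]])  # last row is a completely padded sequence
packed_output, final_states, restoration_indices = encoder.sort_and_run_forward(
        lstm, inputs, mask)
# packed_output covers only the 2 non-empty sequences; a subclass would unpack,
# re-pad, and index_select with restoration_indices to restore batch order.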
23,130
allenai/allennlp
allennlp/modules/encoder_base.py
_EncoderBase._get_initial_states
def _get_initial_states(self,
                        batch_size: int,
                        num_valid: int,
                        sorting_indices: torch.LongTensor) -> Optional[RnnState]:
    """
    Returns an initial state for use in an RNN. Additionally, this method handles
    the batch size changing across calls by mutating the state to append initial states
    for new elements in the batch. Finally, it also handles sorting the states
    with respect to the sequence lengths of elements in the batch and removing rows
    which are completely padded. Importantly, this `mutates` the state if the
    current batch size is larger than when it was previously called.

    Parameters
    ----------
    batch_size : ``int``, required.
        The batch size can change across calls to stateful RNNs, so we need
        to know if we need to expand or shrink the states before returning them.
        Expanded states will be set to zero.
    num_valid : ``int``, required.
        The batch may contain completely padded sequences which get removed before
        the sequence is passed through the encoder. We also need to clip these off
        of the state too.
    sorting_indices : ``torch.LongTensor``, required.
        Pytorch RNNs take sequences sorted by length. When we return the states to be
        used for a given call to ``module.forward``, we need the states
        to match up to the sorted sequences, so before returning them, we sort the
        states using the same indices used to sort the sequences.

    Returns
    -------
    This method has a complex return type because it has to deal with the first time it
    is called, when it has no state, and the fact that types of RNN have heterogeneous
    states.

    If it is the first time the module has been called, it returns ``None``, regardless
    of the type of the ``Module``.

    Otherwise, for LSTMs, it returns a tuple of ``torch.Tensors`` with shape
    ``(num_layers, num_valid, state_size)`` and ``(num_layers, num_valid, memory_size)``
    respectively, or for GRUs, it returns a single ``torch.Tensor`` of shape
    ``(num_layers, num_valid, state_size)``.
    """
    # We don't know the state sizes the first time calling forward,
    # so we let the module define what its initial hidden state looks like.
    if self._states is None:
        return None

    # Otherwise, we have some previous states.
    if batch_size > self._states[0].size(1):
        # This batch is larger than all previous states.
        # If so, resize the states.
        num_states_to_concat = batch_size - self._states[0].size(1)
        resized_states = []
        # state has shape (num_layers, batch_size, hidden_size)
        for state in self._states:
            # This _must_ be inside the loop because some
            # RNNs have states with different last dimension sizes.
            zeros = state.new_zeros(state.size(0),
                                    num_states_to_concat,
                                    state.size(2))
            resized_states.append(torch.cat([state, zeros], 1))
        self._states = tuple(resized_states)
        correctly_shaped_states = self._states

    elif batch_size < self._states[0].size(1):
        # This batch is smaller than the previous one.
        correctly_shaped_states = tuple(state[:, :batch_size, :]
                                        for state in self._states)
    else:
        correctly_shaped_states = self._states

    # At this point, our states are of shape (num_layers, batch_size, hidden_size).
    # However, the encoder uses sorted sequences and additionally removes elements
    # of the batch which are fully padded. We need the states to match up to these
    # sorted and filtered sequences, so we do that in the next two blocks before
    # returning the state/s.
    if len(self._states) == 1:
        # GRUs only have a single state. This `unpacks` it from the
        # tuple and returns the tensor directly.
        correctly_shaped_state = correctly_shaped_states[0]
        sorted_state = correctly_shaped_state.index_select(1, sorting_indices)
        return sorted_state[:, :num_valid, :].contiguous()
    else:
        # LSTMs have a state tuple of (state, memory).
        sorted_states = [state.index_select(1, sorting_indices)
                         for state in correctly_shaped_states]
        return tuple(state[:, :num_valid, :].contiguous()
                     for state in sorted_states)
python
def _get_initial_states(self,
                        batch_size: int,
                        num_valid: int,
                        sorting_indices: torch.LongTensor) -> Optional[RnnState]:
    """
    Returns an initial state for use in an RNN. Additionally, this method handles
    the batch size changing across calls by mutating the state to append initial states
    for new elements in the batch. Finally, it also handles sorting the states
    with respect to the sequence lengths of elements in the batch and removing rows
    which are completely padded. Importantly, this `mutates` the state if the
    current batch size is larger than when it was previously called.

    Parameters
    ----------
    batch_size : ``int``, required.
        The batch size can change across calls to stateful RNNs, so we need
        to know if we need to expand or shrink the states before returning them.
        Expanded states will be set to zero.
    num_valid : ``int``, required.
        The batch may contain completely padded sequences which get removed before
        the sequence is passed through the encoder. We also need to clip these off
        of the state too.
    sorting_indices : ``torch.LongTensor``, required.
        Pytorch RNNs take sequences sorted by length. When we return the states to be
        used for a given call to ``module.forward``, we need the states
        to match up to the sorted sequences, so before returning them, we sort the
        states using the same indices used to sort the sequences.

    Returns
    -------
    This method has a complex return type because it has to deal with the first time it
    is called, when it has no state, and the fact that types of RNN have heterogeneous
    states.

    If it is the first time the module has been called, it returns ``None``, regardless
    of the type of the ``Module``.

    Otherwise, for LSTMs, it returns a tuple of ``torch.Tensors`` with shape
    ``(num_layers, num_valid, state_size)`` and ``(num_layers, num_valid, memory_size)``
    respectively, or for GRUs, it returns a single ``torch.Tensor`` of shape
    ``(num_layers, num_valid, state_size)``.
    """
    # We don't know the state sizes the first time calling forward,
    # so we let the module define what its initial hidden state looks like.
    if self._states is None:
        return None

    # Otherwise, we have some previous states.
    if batch_size > self._states[0].size(1):
        # This batch is larger than all previous states.
        # If so, resize the states.
        num_states_to_concat = batch_size - self._states[0].size(1)
        resized_states = []
        # state has shape (num_layers, batch_size, hidden_size)
        for state in self._states:
            # This _must_ be inside the loop because some
            # RNNs have states with different last dimension sizes.
            zeros = state.new_zeros(state.size(0),
                                    num_states_to_concat,
                                    state.size(2))
            resized_states.append(torch.cat([state, zeros], 1))
        self._states = tuple(resized_states)
        correctly_shaped_states = self._states

    elif batch_size < self._states[0].size(1):
        # This batch is smaller than the previous one.
        correctly_shaped_states = tuple(state[:, :batch_size, :]
                                        for state in self._states)
    else:
        correctly_shaped_states = self._states

    # At this point, our states are of shape (num_layers, batch_size, hidden_size).
    # However, the encoder uses sorted sequences and additionally removes elements
    # of the batch which are fully padded. We need the states to match up to these
    # sorted and filtered sequences, so we do that in the next two blocks before
    # returning the state/s.
    if len(self._states) == 1:
        # GRUs only have a single state. This `unpacks` it from the
        # tuple and returns the tensor directly.
        correctly_shaped_state = correctly_shaped_states[0]
        sorted_state = correctly_shaped_state.index_select(1, sorting_indices)
        return sorted_state[:, :num_valid, :].contiguous()
    else:
        # LSTMs have a state tuple of (state, memory).
        sorted_states = [state.index_select(1, sorting_indices)
                         for state in correctly_shaped_states]
        return tuple(state[:, :num_valid, :].contiguous()
                     for state in sorted_states)
[ "def", "_get_initial_states", "(", "self", ",", "batch_size", ":", "int", ",", "num_valid", ":", "int", ",", "sorting_indices", ":", "torch", ".", "LongTensor", ")", "->", "Optional", "[", "RnnState", "]", ":", "# We don't know the state sizes the first time calling forward,", "# so we let the module define what it's initial hidden state looks like.", "if", "self", ".", "_states", "is", "None", ":", "return", "None", "# Otherwise, we have some previous states.", "if", "batch_size", ">", "self", ".", "_states", "[", "0", "]", ".", "size", "(", "1", ")", ":", "# This batch is larger than the all previous states.", "# If so, resize the states.", "num_states_to_concat", "=", "batch_size", "-", "self", ".", "_states", "[", "0", "]", ".", "size", "(", "1", ")", "resized_states", "=", "[", "]", "# state has shape (num_layers, batch_size, hidden_size)", "for", "state", "in", "self", ".", "_states", ":", "# This _must_ be inside the loop because some", "# RNNs have states with different last dimension sizes.", "zeros", "=", "state", ".", "new_zeros", "(", "state", ".", "size", "(", "0", ")", ",", "num_states_to_concat", ",", "state", ".", "size", "(", "2", ")", ")", "resized_states", ".", "append", "(", "torch", ".", "cat", "(", "[", "state", ",", "zeros", "]", ",", "1", ")", ")", "self", ".", "_states", "=", "tuple", "(", "resized_states", ")", "correctly_shaped_states", "=", "self", ".", "_states", "elif", "batch_size", "<", "self", ".", "_states", "[", "0", "]", ".", "size", "(", "1", ")", ":", "# This batch is smaller than the previous one.", "correctly_shaped_states", "=", "tuple", "(", "state", "[", ":", ",", ":", "batch_size", ",", ":", "]", "for", "state", "in", "self", ".", "_states", ")", "else", ":", "correctly_shaped_states", "=", "self", ".", "_states", "# At this point, our states are of shape (num_layers, batch_size, hidden_size).", "# However, the encoder uses sorted sequences and additionally removes elements", "# of the batch which are fully padded. We need the states to match up to these", "# sorted and filtered sequences, so we do that in the next two blocks before", "# returning the state/s.", "if", "len", "(", "self", ".", "_states", ")", "==", "1", ":", "# GRUs only have a single state. This `unpacks` it from the", "# tuple and returns the tensor directly.", "correctly_shaped_state", "=", "correctly_shaped_states", "[", "0", "]", "sorted_state", "=", "correctly_shaped_state", ".", "index_select", "(", "1", ",", "sorting_indices", ")", "return", "sorted_state", "[", ":", ",", ":", "num_valid", ",", ":", "]", ".", "contiguous", "(", ")", "else", ":", "# LSTMs have a state tuple of (state, memory).", "sorted_states", "=", "[", "state", ".", "index_select", "(", "1", ",", "sorting_indices", ")", "for", "state", "in", "correctly_shaped_states", "]", "return", "tuple", "(", "state", "[", ":", ",", ":", "num_valid", ",", ":", "]", ".", "contiguous", "(", ")", "for", "state", "in", "sorted_states", ")" ]
Returns an initial state for use in an RNN. Additionally, this method handles
the batch size changing across calls by mutating the state to append initial states
for new elements in the batch. Finally, it also handles sorting the states
with respect to the sequence lengths of elements in the batch and removing rows
which are completely padded. Importantly, this `mutates` the state if the
current batch size is larger than when it was previously called.

Parameters
----------
batch_size : ``int``, required.
    The batch size can change across calls to stateful RNNs, so we need
    to know if we need to expand or shrink the states before returning them.
    Expanded states will be set to zero.
num_valid : ``int``, required.
    The batch may contain completely padded sequences which get removed before
    the sequence is passed through the encoder. We also need to clip these off
    of the state too.
sorting_indices : ``torch.LongTensor``, required.
    Pytorch RNNs take sequences sorted by length. When we return the states to be
    used for a given call to ``module.forward``, we need the states
    to match up to the sorted sequences, so before returning them, we sort the
    states using the same indices used to sort the sequences.

Returns
-------
This method has a complex return type because it has to deal with the first time it
is called, when it has no state, and the fact that types of RNN have heterogeneous
states.

If it is the first time the module has been called, it returns ``None``, regardless
of the type of the ``Module``.

Otherwise, for LSTMs, it returns a tuple of ``torch.Tensors`` with shape
``(num_layers, num_valid, state_size)`` and ``(num_layers, num_valid, memory_size)``
respectively, or for GRUs, it returns a single ``torch.Tensor`` of shape
``(num_layers, num_valid, state_size)``.
[ "Returns", "an", "initial", "state", "for", "use", "in", "an", "RNN", ".", "Additionally", "this", "method", "handles", "the", "batch", "size", "changing", "across", "calls", "by", "mutating", "the", "state", "to", "append", "initial", "states", "for", "new", "elements", "in", "the", "batch", ".", "Finally", "it", "also", "handles", "sorting", "the", "states", "with", "respect", "to", "the", "sequence", "lengths", "of", "elements", "in", "the", "batch", "and", "removing", "rows", "which", "are", "completely", "padded", ".", "Importantly", "this", "mutates", "the", "state", "if", "the", "current", "batch", "size", "is", "larger", "than", "when", "it", "was", "previously", "called", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/encoder_base.py#L120-L205
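A sketch poking at the private helper above directly, purely to illustrate the resizing and sorting; the stored state is faked as if a single-state RNN (e.g. a GRU) had previously run on a batch of 2.

import torch
from allennlp.modules.encoder_base import _EncoderBase

encoder = _EncoderBase(stateful=True)
print(encoder._get_initial_states(4, 4, torch.arange(4)))  # None - no state yet

encoder._states = (torch.randn(1, 2, 16),)  # (num_layers, batch_size, hidden_size)
state = encoder._get_initial_states(batch_size=4, num_valid=3,
                                    sorting_indices=torch.tensor([2, 0, 1, 3]))
# Grown to batch 4 with zeros, sorted, then clipped to the 3 valid rows.
print(state.shape)  # torch.Size([1, 3, 16])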
23,131
allenai/allennlp
allennlp/modules/encoder_base.py
_EncoderBase._update_states
def _update_states(self,
                   final_states: RnnStateStorage,
                   restoration_indices: torch.LongTensor) -> None:
    """
    After the RNN has run forward, the states need to be updated.
    This method just sets the state to the updated new state, performing
    several pieces of book-keeping along the way - namely, unsorting the
    states and ensuring that the states of completely padded sequences are
    not updated. Finally, it also detaches the state variable from the
    computational graph, such that the graph can be garbage collected after
    each batch iteration.

    Parameters
    ----------
    final_states : ``RnnStateStorage``, required.
        The hidden states returned as output from the RNN.
    restoration_indices : ``torch.LongTensor``, required.
        The indices that invert the sorting used in ``sort_and_run_forward``
        to order the states with respect to the lengths of the sequences in
        the batch.
    """
    # TODO(Mark): seems weird to sort here, but append zeros in the subclasses.
    # which way around is best?
    new_unsorted_states = [state.index_select(1, restoration_indices)
                           for state in final_states]

    if self._states is None:
        # We don't already have states, so just set the
        # ones we receive to be the current state.
        self._states = tuple(state.data for state in new_unsorted_states)
    else:
        # Now we've sorted the states back so that they correspond to the original
        # indices, we need to figure out what states we need to update, because if we
        # didn't use a state for a particular row, we want to preserve its state.
        # Thankfully, the rows which are all zero in the state correspond exactly
        # to those which aren't used, so we create masks of shape (new_batch_size,),
        # denoting which states were used in the RNN computation.
        current_state_batch_size = self._states[0].size(1)
        new_state_batch_size = final_states[0].size(1)
        # Masks for the unused states of shape (1, new_batch_size, 1)
        used_new_rows_mask = [(state[0, :, :].sum(-1)
                               != 0.0).float().view(1, new_state_batch_size, 1)
                              for state in new_unsorted_states]
        new_states = []
        if current_state_batch_size > new_state_batch_size:
            # The new state is smaller than the old one,
            # so just update the indices which we used.
            for old_state, new_state, used_mask in zip(self._states,
                                                       new_unsorted_states,
                                                       used_new_rows_mask):
                # zero out all rows in the previous state
                # which _were_ used in the current state.
                masked_old_state = old_state[:, :new_state_batch_size, :] * (1 - used_mask)
                # The old state is larger, so update the relevant parts of it.
                old_state[:, :new_state_batch_size, :] = new_state + masked_old_state
                new_states.append(old_state.detach())
        else:
            # The states are the same size, so we just have to
            # deal with the possibility that some rows weren't used.
            new_states = []
            for old_state, new_state, used_mask in zip(self._states,
                                                       new_unsorted_states,
                                                       used_new_rows_mask):
                # zero out all rows which _were_ used in the current state.
                masked_old_state = old_state * (1 - used_mask)
                # The states are the same size, so add the preserved rows back in.
                new_state += masked_old_state
                new_states.append(new_state.detach())

        # It looks like there should be another case handled here - when
        # the current_state_batch_size < new_state_batch_size. However,
        # this never happens, because the states themselves are mutated
        # by appending zeros when calling _get_initial_states, meaning that
        # the new states are either of equal size, or smaller, in the case
        # that there are some unused elements (zero-length) for the RNN computation.
        self._states = tuple(new_states)
python
def _update_states(self,
                   final_states: RnnStateStorage,
                   restoration_indices: torch.LongTensor) -> None:
    """
    After the RNN has run forward, the states need to be updated.
    This method just sets the state to the updated new state, performing
    several pieces of book-keeping along the way - namely, unsorting the
    states and ensuring that the states of completely padded sequences are
    not updated. Finally, it also detaches the state variable from the
    computational graph, such that the graph can be garbage collected after
    each batch iteration.

    Parameters
    ----------
    final_states : ``RnnStateStorage``, required.
        The hidden states returned as output from the RNN.
    restoration_indices : ``torch.LongTensor``, required.
        The indices that invert the sorting used in ``sort_and_run_forward``
        to order the states with respect to the lengths of the sequences in
        the batch.
    """
    # TODO(Mark): seems weird to sort here, but append zeros in the subclasses.
    # which way around is best?
    new_unsorted_states = [state.index_select(1, restoration_indices)
                           for state in final_states]

    if self._states is None:
        # We don't already have states, so just set the
        # ones we receive to be the current state.
        self._states = tuple(state.data for state in new_unsorted_states)
    else:
        # Now we've sorted the states back so that they correspond to the original
        # indices, we need to figure out what states we need to update, because if we
        # didn't use a state for a particular row, we want to preserve its state.
        # Thankfully, the rows which are all zero in the state correspond exactly
        # to those which aren't used, so we create masks of shape (new_batch_size,),
        # denoting which states were used in the RNN computation.
        current_state_batch_size = self._states[0].size(1)
        new_state_batch_size = final_states[0].size(1)
        # Masks for the unused states of shape (1, new_batch_size, 1)
        used_new_rows_mask = [(state[0, :, :].sum(-1)
                               != 0.0).float().view(1, new_state_batch_size, 1)
                              for state in new_unsorted_states]
        new_states = []
        if current_state_batch_size > new_state_batch_size:
            # The new state is smaller than the old one,
            # so just update the indices which we used.
            for old_state, new_state, used_mask in zip(self._states,
                                                       new_unsorted_states,
                                                       used_new_rows_mask):
                # zero out all rows in the previous state
                # which _were_ used in the current state.
                masked_old_state = old_state[:, :new_state_batch_size, :] * (1 - used_mask)
                # The old state is larger, so update the relevant parts of it.
                old_state[:, :new_state_batch_size, :] = new_state + masked_old_state
                new_states.append(old_state.detach())
        else:
            # The states are the same size, so we just have to
            # deal with the possibility that some rows weren't used.
            new_states = []
            for old_state, new_state, used_mask in zip(self._states,
                                                       new_unsorted_states,
                                                       used_new_rows_mask):
                # zero out all rows which _were_ used in the current state.
                masked_old_state = old_state * (1 - used_mask)
                # The states are the same size, so add the preserved rows back in.
                new_state += masked_old_state
                new_states.append(new_state.detach())

        # It looks like there should be another case handled here - when
        # the current_state_batch_size < new_state_batch_size. However,
        # this never happens, because the states themselves are mutated
        # by appending zeros when calling _get_initial_states, meaning that
        # the new states are either of equal size, or smaller, in the case
        # that there are some unused elements (zero-length) for the RNN computation.
        self._states = tuple(new_states)
[ "def", "_update_states", "(", "self", ",", "final_states", ":", "RnnStateStorage", ",", "restoration_indices", ":", "torch", ".", "LongTensor", ")", "->", "None", ":", "# TODO(Mark): seems weird to sort here, but append zeros in the subclasses.", "# which way around is best?", "new_unsorted_states", "=", "[", "state", ".", "index_select", "(", "1", ",", "restoration_indices", ")", "for", "state", "in", "final_states", "]", "if", "self", ".", "_states", "is", "None", ":", "# We don't already have states, so just set the", "# ones we receive to be the current state.", "self", ".", "_states", "=", "tuple", "(", "state", ".", "data", "for", "state", "in", "new_unsorted_states", ")", "else", ":", "# Now we've sorted the states back so that they correspond to the original", "# indices, we need to figure out what states we need to update, because if we", "# didn't use a state for a particular row, we want to preserve its state.", "# Thankfully, the rows which are all zero in the state correspond exactly", "# to those which aren't used, so we create masks of shape (new_batch_size,),", "# denoting which states were used in the RNN computation.", "current_state_batch_size", "=", "self", ".", "_states", "[", "0", "]", ".", "size", "(", "1", ")", "new_state_batch_size", "=", "final_states", "[", "0", "]", ".", "size", "(", "1", ")", "# Masks for the unused states of shape (1, new_batch_size, 1)", "used_new_rows_mask", "=", "[", "(", "state", "[", "0", ",", ":", ",", ":", "]", ".", "sum", "(", "-", "1", ")", "!=", "0.0", ")", ".", "float", "(", ")", ".", "view", "(", "1", ",", "new_state_batch_size", ",", "1", ")", "for", "state", "in", "new_unsorted_states", "]", "new_states", "=", "[", "]", "if", "current_state_batch_size", ">", "new_state_batch_size", ":", "# The new state is smaller than the old one,", "# so just update the indices which we used.", "for", "old_state", ",", "new_state", ",", "used_mask", "in", "zip", "(", "self", ".", "_states", ",", "new_unsorted_states", ",", "used_new_rows_mask", ")", ":", "# zero out all rows in the previous state", "# which _were_ used in the current state.", "masked_old_state", "=", "old_state", "[", ":", ",", ":", "new_state_batch_size", ",", ":", "]", "*", "(", "1", "-", "used_mask", ")", "# The old state is larger, so update the relevant parts of it.", "old_state", "[", ":", ",", ":", "new_state_batch_size", ",", ":", "]", "=", "new_state", "+", "masked_old_state", "new_states", ".", "append", "(", "old_state", ".", "detach", "(", ")", ")", "else", ":", "# The states are the same size, so we just have to", "# deal with the possibility that some rows weren't used.", "new_states", "=", "[", "]", "for", "old_state", ",", "new_state", ",", "used_mask", "in", "zip", "(", "self", ".", "_states", ",", "new_unsorted_states", ",", "used_new_rows_mask", ")", ":", "# zero out all rows which _were_ used in the current state.", "masked_old_state", "=", "old_state", "*", "(", "1", "-", "used_mask", ")", "# The old state is larger, so update the relevant parts of it.", "new_state", "+=", "masked_old_state", "new_states", ".", "append", "(", "new_state", ".", "detach", "(", ")", ")", "# It looks like there should be another case handled here - when", "# the current_state_batch_size < new_state_batch_size. 
However,", "# this never happens, because the states themeselves are mutated", "# by appending zeros when calling _get_inital_states, meaning that", "# the new states are either of equal size, or smaller, in the case", "# that there are some unused elements (zero-length) for the RNN computation.", "self", ".", "_states", "=", "tuple", "(", "new_states", ")" ]
After the RNN has run forward, the states need to be updated. This method just sets the state to the updated new state, performing several pieces of book-keeping along the way - namely, unsorting the states and ensuring that the states of completely padded sequences are not updated. Finally, it also detaches the state variable from the computational graph, such that the graph can be garbage collected after each batch iteration. Parameters ---------- final_states : ``RnnStateStorage``, required. The hidden states returned as output from the RNN. restoration_indices : ``torch.LongTensor``, required. The indices that invert the sorting used in ``sort_and_run_forward`` to order the states with respect to the lengths of the sequences in the batch.
[ "After", "the", "RNN", "has", "run", "forward", "the", "states", "need", "to", "be", "updated", ".", "This", "method", "just", "sets", "the", "state", "to", "the", "updated", "new", "state", "performing", "several", "pieces", "of", "book", "-", "keeping", "along", "the", "way", "-", "namely", "unsorting", "the", "states", "and", "ensuring", "that", "the", "states", "of", "completely", "padded", "sequences", "are", "not", "updated", ".", "Finally", "it", "also", "detaches", "the", "state", "variable", "from", "the", "computational", "graph", "such", "that", "the", "graph", "can", "be", "garbage", "collected", "after", "each", "batch", "iteration", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/encoder_base.py#L207-L282
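A minimal sketch of the row-masking trick the method above relies on, with invented shapes (1 layer, batch of 4, hidden size 3): rows the RNN returned as all zeros keep their previous state, rows it actually used take the new one.

import torch

# Invented shapes for illustration: (num_layers, batch_size, hidden_size).
old_state = torch.ones(1, 4, 3)   # previous state for all 4 rows
new_state = torch.zeros(1, 4, 3)
new_state[:, :2, :] = 2.0         # only the first 2 rows were used this batch

# A row counts as "used" iff its returned state is non-zero.
used_mask = (new_state[0].sum(-1) != 0.0).float().view(1, 4, 1)

# Used rows take the new state; unused rows keep the old one.
blended = new_state + old_state * (1 - used_mask)
print(blended[0, :, 0])  # tensor([2., 2., 1., 1.])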
23,132
allenai/allennlp
allennlp/tools/wikitables_evaluator.py
to_value
def to_value(original_string, corenlp_value=None):
    """Convert the string to a Value object.

    Args:
        original_string (basestring): Original string
        corenlp_value (basestring): Optional value returned from CoreNLP
    Returns:
        Value
    """
    if isinstance(original_string, Value):
        # Already a Value
        return original_string
    if not corenlp_value:
        corenlp_value = original_string
    # Number?
    amount = NumberValue.parse(corenlp_value)
    if amount is not None:
        return NumberValue(amount, original_string)
    # Date?
    ymd = DateValue.parse(corenlp_value)
    if ymd is not None:
        if ymd[1] == ymd[2] == -1:
            return NumberValue(ymd[0], original_string)
        else:
            return DateValue(ymd[0], ymd[1], ymd[2], original_string)
    # String.
    return StringValue(original_string)
python
def to_value(original_string, corenlp_value=None):
    """Convert the string to a Value object.

    Args:
        original_string (basestring): Original string
        corenlp_value (basestring): Optional value returned from CoreNLP
    Returns:
        Value
    """
    if isinstance(original_string, Value):
        # Already a Value
        return original_string
    if not corenlp_value:
        corenlp_value = original_string
    # Number?
    amount = NumberValue.parse(corenlp_value)
    if amount is not None:
        return NumberValue(amount, original_string)
    # Date?
    ymd = DateValue.parse(corenlp_value)
    if ymd is not None:
        if ymd[1] == ymd[2] == -1:
            return NumberValue(ymd[0], original_string)
        else:
            return DateValue(ymd[0], ymd[1], ymd[2], original_string)
    # String.
    return StringValue(original_string)
[ "def", "to_value", "(", "original_string", ",", "corenlp_value", "=", "None", ")", ":", "if", "isinstance", "(", "original_string", ",", "Value", ")", ":", "# Already a Value", "return", "original_string", "if", "not", "corenlp_value", ":", "corenlp_value", "=", "original_string", "# Number?", "amount", "=", "NumberValue", ".", "parse", "(", "corenlp_value", ")", "if", "amount", "is", "not", "None", ":", "return", "NumberValue", "(", "amount", ",", "original_string", ")", "# Date?", "ymd", "=", "DateValue", ".", "parse", "(", "corenlp_value", ")", "if", "ymd", "is", "not", "None", ":", "if", "ymd", "[", "1", "]", "==", "ymd", "[", "2", "]", "==", "-", "1", ":", "return", "NumberValue", "(", "ymd", "[", "0", "]", ",", "original_string", ")", "else", ":", "return", "DateValue", "(", "ymd", "[", "0", "]", ",", "ymd", "[", "1", "]", ",", "ymd", "[", "2", "]", ",", "original_string", ")", "# String.", "return", "StringValue", "(", "original_string", ")" ]
Convert the string to a Value object. Args: original_string (basestring): Original string corenlp_value (basestring): Optional value returned from CoreNLP Returns: Value
[ "Convert", "the", "string", "to", "Value", "object", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/tools/wikitables_evaluator.py#L252-L278
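A short usage sketch for ``to_value``, assuming an allennlp checkout matching the sha above so the module import resolves:

from allennlp.tools.wikitables_evaluator import to_value

print(type(to_value('42')).__name__)          # NumberValue
print(type(to_value('2014-07-01')).__name__)  # DateValue
print(type(to_value('2014-xx-xx')).__name__)  # NumberValue: year-only dates fall back to a number
print(type(to_value('hello')).__name__)       # StringValue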
23,133
allenai/allennlp
allennlp/tools/wikitables_evaluator.py
to_value_list
def to_value_list(original_strings, corenlp_values=None): """Convert a list of strings to a list of Values Args: original_strings (list[basestring]) corenlp_values (list[basestring or None]) Returns: list[Value] """ assert isinstance(original_strings, (list, tuple, set)) if corenlp_values is not None: assert isinstance(corenlp_values, (list, tuple, set)) assert len(original_strings) == len(corenlp_values) return list(set(to_value(x, y) for (x, y) in zip(original_strings, corenlp_values))) else: return list(set(to_value(x) for x in original_strings))
python
def to_value_list(original_strings, corenlp_values=None): """Convert a list of strings to a list of Values Args: original_strings (list[basestring]) corenlp_values (list[basestring or None]) Returns: list[Value] """ assert isinstance(original_strings, (list, tuple, set)) if corenlp_values is not None: assert isinstance(corenlp_values, (list, tuple, set)) assert len(original_strings) == len(corenlp_values) return list(set(to_value(x, y) for (x, y) in zip(original_strings, corenlp_values))) else: return list(set(to_value(x) for x in original_strings))
[ "def", "to_value_list", "(", "original_strings", ",", "corenlp_values", "=", "None", ")", ":", "assert", "isinstance", "(", "original_strings", ",", "(", "list", ",", "tuple", ",", "set", ")", ")", "if", "corenlp_values", "is", "not", "None", ":", "assert", "isinstance", "(", "corenlp_values", ",", "(", "list", ",", "tuple", ",", "set", ")", ")", "assert", "len", "(", "original_strings", ")", "==", "len", "(", "corenlp_values", ")", "return", "list", "(", "set", "(", "to_value", "(", "x", ",", "y", ")", "for", "(", "x", ",", "y", ")", "in", "zip", "(", "original_strings", ",", "corenlp_values", ")", ")", ")", "else", ":", "return", "list", "(", "set", "(", "to_value", "(", "x", ")", "for", "x", "in", "original_strings", ")", ")" ]
Convert a list of strings to a list of Values Args: original_strings (list[basestring]) corenlp_values (list[basestring or None]) Returns: list[Value]
[ "Convert", "a", "list", "of", "strings", "to", "a", "list", "of", "Values" ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/tools/wikitables_evaluator.py#L280-L296
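A usage sketch for ``to_value_list``; because the result is built through a set, equal values collapse (this assumes ``Value`` equality and hashing are defined on normalized content, which the set construction presupposes):

from allennlp.tools.wikitables_evaluator import to_value_list

values = to_value_list(['7', '7', 'seven'])
print(len(values))  # 2: the duplicate '7' collapses, 'seven' stays a distinct StringValue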
23,134
allenai/allennlp
allennlp/tools/wikitables_evaluator.py
check_denotation
def check_denotation(target_values, predicted_values): """Return True if the predicted denotation is correct. Args: target_values (list[Value]) predicted_values (list[Value]) Returns: bool """ # Check size if len(target_values) != len(predicted_values): return False # Check items for target in target_values: if not any(target.match(pred) for pred in predicted_values): return False return True
python
def check_denotation(target_values, predicted_values): """Return True if the predicted denotation is correct. Args: target_values (list[Value]) predicted_values (list[Value]) Returns: bool """ # Check size if len(target_values) != len(predicted_values): return False # Check items for target in target_values: if not any(target.match(pred) for pred in predicted_values): return False return True
[ "def", "check_denotation", "(", "target_values", ",", "predicted_values", ")", ":", "# Check size", "if", "len", "(", "target_values", ")", "!=", "len", "(", "predicted_values", ")", ":", "return", "False", "# Check items", "for", "target", "in", "target_values", ":", "if", "not", "any", "(", "target", ".", "match", "(", "pred", ")", "for", "pred", "in", "predicted_values", ")", ":", "return", "False", "return", "True" ]
Return True if the predicted denotation is correct. Args: target_values (list[Value]) predicted_values (list[Value]) Returns: bool
[ "Return", "True", "if", "the", "predicted", "denotation", "is", "correct", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/tools/wikitables_evaluator.py#L301-L317
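A sketch tying the two helpers together. The exact match semantics live in the ``Value`` subclasses, so the expected output below is an assumption based on the year-only number fallback shown in ``to_value``:

from allennlp.tools.wikitables_evaluator import to_value_list, check_denotation

target = to_value_list(['2014-xx-xx'])   # normalizes to the number 2014
predicted = to_value_list(['2014'])
print(check_denotation(target, predicted))  # True under number-to-number matching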
23,135
allenai/allennlp
allennlp/tools/wikitables_evaluator.py
NumberValue.parse
def parse(text): """Try to parse into a number. Return: the number (int or float) if successful; otherwise None. """ try: return int(text) except ValueError: try: amount = float(text) assert not isnan(amount) and not isinf(amount) return amount except (ValueError, AssertionError): return None
python
def parse(text): """Try to parse into a number. Return: the number (int or float) if successful; otherwise None. """ try: return int(text) except ValueError: try: amount = float(text) assert not isnan(amount) and not isinf(amount) return amount except (ValueError, AssertionError): return None
[ "def", "parse", "(", "text", ")", ":", "try", ":", "return", "int", "(", "text", ")", "except", "ValueError", ":", "try", ":", "amount", "=", "float", "(", "text", ")", "assert", "not", "isnan", "(", "amount", ")", "and", "not", "isinf", "(", "amount", ")", "return", "amount", "except", "(", "ValueError", ",", "AssertionError", ")", ":", "return", "None" ]
Try to parse into a number. Return: the number (int or float) if successful; otherwise None.
[ "Try", "to", "parse", "into", "a", "number", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/tools/wikitables_evaluator.py#L169-L183
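The same parse cascade restated as a standalone function (``parse_number`` is a hypothetical name used only for this sketch), with ``return`` guards instead of ``assert``:

from math import isnan, isinf

def parse_number(text):
    # ints win first, then finite floats; anything else is None.
    try:
        return int(text)
    except ValueError:
        try:
            amount = float(text)
            return amount if not isnan(amount) and not isinf(amount) else None
        except ValueError:
            return None

print(parse_number('3'), parse_number('3.5'), parse_number('nan'), parse_number('x'))
# 3 3.5 None None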
23,136
allenai/allennlp
allennlp/tools/wikitables_evaluator.py
DateValue.parse
def parse(text):
        """Try to parse into a date.

        Return:
            tuple (year, month, day) if successful; otherwise None.
        """
        try:
            ymd = text.lower().split('-')
            assert len(ymd) == 3
            year = -1 if ymd[0] in ('xx', 'xxxx') else int(ymd[0])
            month = -1 if ymd[1] == 'xx' else int(ymd[1])
            day = -1 if ymd[2] == 'xx' else int(ymd[2])
            assert not year == month == day == -1
            assert month == -1 or 1 <= month <= 12
            assert day == -1 or 1 <= day <= 31
            return (year, month, day)
        except (ValueError, AssertionError):
            return None
python
def parse(text):
        """Try to parse into a date.

        Return:
            tuple (year, month, day) if successful; otherwise None.
        """
        try:
            ymd = text.lower().split('-')
            assert len(ymd) == 3
            year = -1 if ymd[0] in ('xx', 'xxxx') else int(ymd[0])
            month = -1 if ymd[1] == 'xx' else int(ymd[1])
            day = -1 if ymd[2] == 'xx' else int(ymd[2])
            assert not year == month == day == -1
            assert month == -1 or 1 <= month <= 12
            assert day == -1 or 1 <= day <= 31
            return (year, month, day)
        except (ValueError, AssertionError):
            return None
[ "def", "parse", "(", "text", ")", ":", "try", ":", "ymd", "=", "text", ".", "lower", "(", ")", ".", "split", "(", "'-'", ")", "assert", "len", "(", "ymd", ")", "==", "3", "year", "=", "-", "1", "if", "ymd", "[", "0", "]", "in", "(", "'xx'", ",", "'xxxx'", ")", "else", "int", "(", "ymd", "[", "0", "]", ")", "month", "=", "-", "1", "if", "ymd", "[", "1", "]", "==", "'xx'", "else", "int", "(", "ymd", "[", "1", "]", ")", "day", "=", "-", "1", "if", "ymd", "[", "2", "]", "==", "'xx'", "else", "int", "(", "ymd", "[", "2", "]", ")", "assert", "not", "year", "==", "month", "==", "day", "==", "-", "1", "assert", "month", "==", "-", "1", "or", "1", "<=", "month", "<=", "12", "assert", "day", "==", "-", "1", "or", "1", "<=", "day", "<=", "31", "return", "(", "year", ",", "month", ",", "day", ")", "except", "(", "ValueError", ",", "AssertionError", ")", ":", "return", "None" ]
Try to parse into a date. Return: tuple (year, month, day) if successful; otherwise None.
[ "Try", "to", "parse", "into", "a", "date", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/tools/wikitables_evaluator.py#L230-L247
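A quick demonstration of the 'xx' placeholder handling (again assuming the evaluator module is importable):

from allennlp.tools.wikitables_evaluator import DateValue

print(DateValue.parse('2014-07-xx'))  # (2014, 7, -1): unknown fields become -1
print(DateValue.parse('xxxx-xx-xx'))  # None: a fully unknown date is rejected
print(DateValue.parse('2014-13-01'))  # None: the month range check fails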
23,137
allenai/allennlp
allennlp/modules/span_extractors/span_extractor.py
SpanExtractor.forward
def forward(self, # pylint: disable=arguments-differ sequence_tensor: torch.FloatTensor, span_indices: torch.LongTensor, sequence_mask: torch.LongTensor = None, span_indices_mask: torch.LongTensor = None): """ Given a sequence tensor, extract spans and return representations of them. Span representation can be computed in many different ways, such as concatenation of the start and end spans, attention over the vectors contained inside the span, etc. Parameters ---------- sequence_tensor : ``torch.FloatTensor``, required. A tensor of shape (batch_size, sequence_length, embedding_size) representing an embedded sequence of words. span_indices : ``torch.LongTensor``, required. A tensor of shape ``(batch_size, num_spans, 2)``, where the last dimension represents the inclusive start and end indices of the span to be extracted from the ``sequence_tensor``. sequence_mask : ``torch.LongTensor``, optional (default = ``None``). A tensor of shape (batch_size, sequence_length) representing padded elements of the sequence. span_indices_mask : ``torch.LongTensor``, optional (default = ``None``). A tensor of shape (batch_size, num_spans) representing the valid spans in the ``indices`` tensor. This mask is optional because sometimes it's easier to worry about masking after calling this function, rather than passing a mask directly. Returns ------- A tensor of shape ``(batch_size, num_spans, embedded_span_size)``, where ``embedded_span_size`` depends on the way spans are represented. """ raise NotImplementedError
python
def forward(self, # pylint: disable=arguments-differ sequence_tensor: torch.FloatTensor, span_indices: torch.LongTensor, sequence_mask: torch.LongTensor = None, span_indices_mask: torch.LongTensor = None): """ Given a sequence tensor, extract spans and return representations of them. Span representation can be computed in many different ways, such as concatenation of the start and end spans, attention over the vectors contained inside the span, etc. Parameters ---------- sequence_tensor : ``torch.FloatTensor``, required. A tensor of shape (batch_size, sequence_length, embedding_size) representing an embedded sequence of words. span_indices : ``torch.LongTensor``, required. A tensor of shape ``(batch_size, num_spans, 2)``, where the last dimension represents the inclusive start and end indices of the span to be extracted from the ``sequence_tensor``. sequence_mask : ``torch.LongTensor``, optional (default = ``None``). A tensor of shape (batch_size, sequence_length) representing padded elements of the sequence. span_indices_mask : ``torch.LongTensor``, optional (default = ``None``). A tensor of shape (batch_size, num_spans) representing the valid spans in the ``indices`` tensor. This mask is optional because sometimes it's easier to worry about masking after calling this function, rather than passing a mask directly. Returns ------- A tensor of shape ``(batch_size, num_spans, embedded_span_size)``, where ``embedded_span_size`` depends on the way spans are represented. """ raise NotImplementedError
[ "def", "forward", "(", "self", ",", "# pylint: disable=arguments-differ", "sequence_tensor", ":", "torch", ".", "FloatTensor", ",", "span_indices", ":", "torch", ".", "LongTensor", ",", "sequence_mask", ":", "torch", ".", "LongTensor", "=", "None", ",", "span_indices_mask", ":", "torch", ".", "LongTensor", "=", "None", ")", ":", "raise", "NotImplementedError" ]
Given a sequence tensor, extract spans and return representations of them. Span representation can be computed in many different ways, such as concatenation of the start and end spans, attention over the vectors contained inside the span, etc. Parameters ---------- sequence_tensor : ``torch.FloatTensor``, required. A tensor of shape (batch_size, sequence_length, embedding_size) representing an embedded sequence of words. span_indices : ``torch.LongTensor``, required. A tensor of shape ``(batch_size, num_spans, 2)``, where the last dimension represents the inclusive start and end indices of the span to be extracted from the ``sequence_tensor``. sequence_mask : ``torch.LongTensor``, optional (default = ``None``). A tensor of shape (batch_size, sequence_length) representing padded elements of the sequence. span_indices_mask : ``torch.LongTensor``, optional (default = ``None``). A tensor of shape (batch_size, num_spans) representing the valid spans in the ``indices`` tensor. This mask is optional because sometimes it's easier to worry about masking after calling this function, rather than passing a mask directly. Returns ------- A tensor of shape ``(batch_size, num_spans, embedded_span_size)``, where ``embedded_span_size`` depends on the way spans are represented.
[ "Given", "a", "sequence", "tensor", "extract", "spans", "and", "return", "representations", "of", "them", ".", "Span", "representation", "can", "be", "computed", "in", "many", "different", "ways", "such", "as", "concatenation", "of", "the", "start", "and", "end", "spans", "attention", "over", "the", "vectors", "contained", "inside", "the", "span", "etc", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/span_extractors/span_extractor.py#L19-L53
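One concrete strategy satisfying the contract above is endpoint concatenation. A minimal sketch with invented shapes follows; this is the gather-and-concat idea, not allennlp's actual ``EndpointSpanExtractor``:

import torch

sequence = torch.randn(2, 5, 3)                       # (batch, seq_len, emb)
spans = torch.tensor([[[0, 2], [1, 4]],
                      [[3, 3], [0, 1]]])              # (batch, num_spans, 2), inclusive
starts = spans[..., 0].unsqueeze(-1).expand(-1, -1, 3)
ends = spans[..., 1].unsqueeze(-1).expand(-1, -1, 3)
start_emb = sequence.gather(1, starts)                # vectors at span starts
end_emb = sequence.gather(1, ends)                    # vectors at span ends
span_repr = torch.cat([start_emb, end_emb], dim=-1)   # (batch, num_spans, 2 * emb)
print(span_repr.shape)                                # torch.Size([2, 2, 6])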
23,138
allenai/allennlp
allennlp/state_machines/trainers/decoder_trainer.py
DecoderTrainer.decode
def decode(self,
               initial_state: State,
               transition_function: TransitionFunction,
               supervision: SupervisionType) -> Dict[str, torch.Tensor]:
        """
        Takes an initial state object, a means of transitioning from state to state, and a
        supervision signal, and uses the supervision to train the transition function to pick
        "good" states.

        This function should typically return a ``loss`` key during training, which the ``Model``
        will use as its loss.

        Parameters
        ----------
        initial_state : ``State``
            This is the initial state for decoding, typically initialized after running some kind
            of encoder on some inputs.
        transition_function : ``TransitionFunction``
            This is the transition function that scores all possible actions that can be taken in a
            given state, and returns a ranked list of next states at each step of decoding.
        supervision : ``SupervisionType``
            This is the supervision that is used to train the ``transition_function`` to pick
            "good" states. You can use whatever kind of supervision you want (e.g., a single
            "gold" action sequence, a set of possible "gold" action sequences, a reward function,
            etc.). We use ``typing.Generics`` to make sure that our static type checker is happy
            with how you've matched the supervision that you provide in the model to the
            ``DecoderTrainer`` that you want to use.
        """
        raise NotImplementedError
python
def decode(self,
               initial_state: State,
               transition_function: TransitionFunction,
               supervision: SupervisionType) -> Dict[str, torch.Tensor]:
        """
        Takes an initial state object, a means of transitioning from state to state, and a
        supervision signal, and uses the supervision to train the transition function to pick
        "good" states.

        This function should typically return a ``loss`` key during training, which the ``Model``
        will use as its loss.

        Parameters
        ----------
        initial_state : ``State``
            This is the initial state for decoding, typically initialized after running some kind
            of encoder on some inputs.
        transition_function : ``TransitionFunction``
            This is the transition function that scores all possible actions that can be taken in a
            given state, and returns a ranked list of next states at each step of decoding.
        supervision : ``SupervisionType``
            This is the supervision that is used to train the ``transition_function`` to pick
            "good" states. You can use whatever kind of supervision you want (e.g., a single
            "gold" action sequence, a set of possible "gold" action sequences, a reward function,
            etc.). We use ``typing.Generics`` to make sure that our static type checker is happy
            with how you've matched the supervision that you provide in the model to the
            ``DecoderTrainer`` that you want to use.
        """
        raise NotImplementedError
[ "def", "decode", "(", "self", ",", "initial_state", ":", "State", ",", "transition_function", ":", "TransitionFunction", ",", "supervision", ":", "SupervisionType", ")", "->", "Dict", "[", "str", ",", "torch", ".", "Tensor", "]", ":", "raise", "NotImplementedError" ]
Takes an initial state object, a means of transitioning from state to state, and a supervision signal, and uses the supervision to train the transition function to pick "good" states. This function should typically return a ``loss`` key during training, which the ``Model`` will use as its loss. Parameters ---------- initial_state : ``State`` This is the initial state for decoding, typically initialized after running some kind of encoder on some inputs. transition_function : ``TransitionFunction`` This is the transition function that scores all possible actions that can be taken in a given state, and returns a ranked list of next states at each step of decoding. supervision : ``SupervisionType`` This is the supervision that is used to train the ``transition_function`` to pick "good" states. You can use whatever kind of supervision you want (e.g., a single "gold" action sequence, a set of possible "gold" action sequences, a reward function, etc.). We use ``typing.Generics`` to make sure that our static type checker is happy with how you've matched the supervision that you provide in the model to the ``DecoderTrainer`` that you want to use.
[ "Takes", "an", "initial", "state", "object", "a", "means", "of", "transitioning", "from", "state", "to", "state", "and", "a", "supervision", "signal", "and", "uses", "the", "supervision", "to", "train", "the", "transition", "function", "to", "pick", "good", "states", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/state_machines/trainers/decoder_trainer.py#L24-L52
23,139
allenai/allennlp
allennlp/training/scheduler.py
Scheduler.state_dict
def state_dict(self) -> Dict[str, Any]: """ Returns the state of the scheduler as a ``dict``. """ return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
python
def state_dict(self) -> Dict[str, Any]: """ Returns the state of the scheduler as a ``dict``. """ return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
[ "def", "state_dict", "(", "self", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "return", "{", "key", ":", "value", "for", "key", ",", "value", "in", "self", ".", "__dict__", ".", "items", "(", ")", "if", "key", "!=", "'optimizer'", "}" ]
Returns the state of the scheduler as a ``dict``.
[ "Returns", "the", "state", "of", "the", "scheduler", "as", "a", "dict", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/scheduler.py#L49-L53
23,140
allenai/allennlp
allennlp/training/scheduler.py
Scheduler.load_state_dict
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """
        Load the scheduler's state.

        Parameters
        ----------
        state_dict : ``Dict[str, Any]``
            Scheduler state. Should be an object returned from a call to ``state_dict``.
        """
        self.__dict__.update(state_dict)
python
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """
        Load the scheduler's state.

        Parameters
        ----------
        state_dict : ``Dict[str, Any]``
            Scheduler state. Should be an object returned from a call to ``state_dict``.
        """
        self.__dict__.update(state_dict)
[ "def", "load_state_dict", "(", "self", ",", "state_dict", ":", "Dict", "[", "str", ",", "Any", "]", ")", "->", "None", ":", "self", ".", "__dict__", ".", "update", "(", "state_dict", ")" ]
Load the scheduler's state. Parameters ---------- state_dict : ``Dict[str, Any]`` Scheduler state. Should be an object returned from a call to ``state_dict``.
[ "Load", "the", "schedulers", "state", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/scheduler.py#L55-L64
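A toy round-trip showing why the ``'optimizer'`` key is excluded in ``state_dict`` above: the optimizer handle is re-attached on construction, everything else is plain state. The ``Toy`` class is invented for this demo:

class Toy:
    def __init__(self):
        self.optimizer = object()  # deliberately excluded from serialization
        self.last_epoch = 3

    def state_dict(self):
        return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}

    def load_state_dict(self, state_dict):
        self.__dict__.update(state_dict)

source, target = Toy(), Toy()
source.last_epoch = 10
target.load_state_dict(source.state_dict())
print(target.last_epoch)                          # 10
print(target.optimizer is not source.optimizer)   # True: each keeps its own optimizer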
23,141
allenai/allennlp
allennlp/models/reading_comprehension/bidaf_ensemble.py
ensemble
def ensemble(subresults: List[Dict[str, torch.Tensor]]) -> torch.Tensor:
    """
    Identifies the best prediction given the results from the submodels.

    Parameters
    ----------
    subresults : List[Dict[str, torch.Tensor]]
        Results of each submodel.

    Returns
    -------
    The best span, chosen from the averaged span start and end probabilities.
    """

    # Choose the highest average confidence span.
    span_start_probs = sum(subresult['span_start_probs'] for subresult in subresults) / len(subresults)
    span_end_probs = sum(subresult['span_end_probs'] for subresult in subresults) / len(subresults)
    return get_best_span(span_start_probs.log(), span_end_probs.log())
python
def ensemble(subresults: List[Dict[str, torch.Tensor]]) -> torch.Tensor:
    """
    Identifies the best prediction given the results from the submodels.

    Parameters
    ----------
    subresults : List[Dict[str, torch.Tensor]]
        Results of each submodel.

    Returns
    -------
    The best span, chosen from the averaged span start and end probabilities.
    """

    # Choose the highest average confidence span.
    span_start_probs = sum(subresult['span_start_probs'] for subresult in subresults) / len(subresults)
    span_end_probs = sum(subresult['span_end_probs'] for subresult in subresults) / len(subresults)
    return get_best_span(span_start_probs.log(), span_end_probs.log())
[ "def", "ensemble", "(", "subresults", ":", "List", "[", "Dict", "[", "str", ",", "torch", ".", "Tensor", "]", "]", ")", "->", "torch", ".", "Tensor", ":", "# Choose the highest average confidence span.", "span_start_probs", "=", "sum", "(", "subresult", "[", "'span_start_probs'", "]", "for", "subresult", "in", "subresults", ")", "/", "len", "(", "subresults", ")", "span_end_probs", "=", "sum", "(", "subresult", "[", "'span_end_probs'", "]", "for", "subresult", "in", "subresults", ")", "/", "len", "(", "subresults", ")", "return", "get_best_span", "(", "span_start_probs", ".", "log", "(", ")", ",", "span_end_probs", ".", "log", "(", ")", ")" ]
Identifies the best prediction given the results from the submodels. Parameters ---------- subresults : List[Dict[str, torch.Tensor]] Results of each submodel. Returns ------- The best span, chosen from the averaged span start and end probabilities.
[ "Identifies", "the", "best", "prediction", "given", "the", "results", "from", "the", "submodels", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/models/reading_comprehension/bidaf_ensemble.py#L124-L142
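A toy sketch of the averaging step. ``get_best_span`` is replaced here by a plain argmax over the outer sum of log-probabilities, which is an assumption about its behaviour rather than BiDAF's exact code:

import torch

subresults = [
    {'span_start_probs': torch.tensor([[0.7, 0.2, 0.1]]),
     'span_end_probs':   torch.tensor([[0.1, 0.2, 0.7]])},
    {'span_start_probs': torch.tensor([[0.6, 0.3, 0.1]]),
     'span_end_probs':   torch.tensor([[0.2, 0.2, 0.6]])},
]
start = sum(r['span_start_probs'] for r in subresults) / len(subresults)
end = sum(r['span_end_probs'] for r in subresults) / len(subresults)

# Score every (start, end) pair by summed log-probs and take the best one.
scores = start.log().unsqueeze(2) + end.log().unsqueeze(1)  # (batch, len, len)
best = scores.view(1, -1).argmax(dim=1).item()
print(divmod(best, 3))  # (0, 2): start index 0, end index 2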
23,142
allenai/allennlp
allennlp/modules/elmo_lstm.py
ElmoLstm.load_weights
def load_weights(self, weight_file: str) -> None: """ Load the pre-trained weights from the file. """ requires_grad = self.requires_grad with h5py.File(cached_path(weight_file), 'r') as fin: for i_layer, lstms in enumerate( zip(self.forward_layers, self.backward_layers) ): for j_direction, lstm in enumerate(lstms): # lstm is an instance of LSTMCellWithProjection cell_size = lstm.cell_size dataset = fin['RNN_%s' % j_direction]['RNN']['MultiRNNCell']['Cell%s' % i_layer ]['LSTMCell'] # tensorflow packs together both W and U matrices into one matrix, # but pytorch maintains individual matrices. In addition, tensorflow # packs the gates as input, memory, forget, output but pytorch # uses input, forget, memory, output. So we need to modify the weights. tf_weights = numpy.transpose(dataset['W_0'][...]) torch_weights = tf_weights.copy() # split the W from U matrices input_size = lstm.input_size input_weights = torch_weights[:, :input_size] recurrent_weights = torch_weights[:, input_size:] tf_input_weights = tf_weights[:, :input_size] tf_recurrent_weights = tf_weights[:, input_size:] # handle the different gate order convention for torch_w, tf_w in [[input_weights, tf_input_weights], [recurrent_weights, tf_recurrent_weights]]: torch_w[(1 * cell_size):(2 * cell_size), :] = tf_w[(2 * cell_size):(3 * cell_size), :] torch_w[(2 * cell_size):(3 * cell_size), :] = tf_w[(1 * cell_size):(2 * cell_size), :] lstm.input_linearity.weight.data.copy_(torch.FloatTensor(input_weights)) lstm.state_linearity.weight.data.copy_(torch.FloatTensor(recurrent_weights)) lstm.input_linearity.weight.requires_grad = requires_grad lstm.state_linearity.weight.requires_grad = requires_grad # the bias weights tf_bias = dataset['B'][...] # tensorflow adds 1.0 to forget gate bias instead of modifying the # parameters... tf_bias[(2 * cell_size):(3 * cell_size)] += 1 torch_bias = tf_bias.copy() torch_bias[(1 * cell_size):(2 * cell_size) ] = tf_bias[(2 * cell_size):(3 * cell_size)] torch_bias[(2 * cell_size):(3 * cell_size) ] = tf_bias[(1 * cell_size):(2 * cell_size)] lstm.state_linearity.bias.data.copy_(torch.FloatTensor(torch_bias)) lstm.state_linearity.bias.requires_grad = requires_grad # the projection weights proj_weights = numpy.transpose(dataset['W_P_0'][...]) lstm.state_projection.weight.data.copy_(torch.FloatTensor(proj_weights)) lstm.state_projection.weight.requires_grad = requires_grad
python
def load_weights(self, weight_file: str) -> None: """ Load the pre-trained weights from the file. """ requires_grad = self.requires_grad with h5py.File(cached_path(weight_file), 'r') as fin: for i_layer, lstms in enumerate( zip(self.forward_layers, self.backward_layers) ): for j_direction, lstm in enumerate(lstms): # lstm is an instance of LSTMCellWithProjection cell_size = lstm.cell_size dataset = fin['RNN_%s' % j_direction]['RNN']['MultiRNNCell']['Cell%s' % i_layer ]['LSTMCell'] # tensorflow packs together both W and U matrices into one matrix, # but pytorch maintains individual matrices. In addition, tensorflow # packs the gates as input, memory, forget, output but pytorch # uses input, forget, memory, output. So we need to modify the weights. tf_weights = numpy.transpose(dataset['W_0'][...]) torch_weights = tf_weights.copy() # split the W from U matrices input_size = lstm.input_size input_weights = torch_weights[:, :input_size] recurrent_weights = torch_weights[:, input_size:] tf_input_weights = tf_weights[:, :input_size] tf_recurrent_weights = tf_weights[:, input_size:] # handle the different gate order convention for torch_w, tf_w in [[input_weights, tf_input_weights], [recurrent_weights, tf_recurrent_weights]]: torch_w[(1 * cell_size):(2 * cell_size), :] = tf_w[(2 * cell_size):(3 * cell_size), :] torch_w[(2 * cell_size):(3 * cell_size), :] = tf_w[(1 * cell_size):(2 * cell_size), :] lstm.input_linearity.weight.data.copy_(torch.FloatTensor(input_weights)) lstm.state_linearity.weight.data.copy_(torch.FloatTensor(recurrent_weights)) lstm.input_linearity.weight.requires_grad = requires_grad lstm.state_linearity.weight.requires_grad = requires_grad # the bias weights tf_bias = dataset['B'][...] # tensorflow adds 1.0 to forget gate bias instead of modifying the # parameters... tf_bias[(2 * cell_size):(3 * cell_size)] += 1 torch_bias = tf_bias.copy() torch_bias[(1 * cell_size):(2 * cell_size) ] = tf_bias[(2 * cell_size):(3 * cell_size)] torch_bias[(2 * cell_size):(3 * cell_size) ] = tf_bias[(1 * cell_size):(2 * cell_size)] lstm.state_linearity.bias.data.copy_(torch.FloatTensor(torch_bias)) lstm.state_linearity.bias.requires_grad = requires_grad # the projection weights proj_weights = numpy.transpose(dataset['W_P_0'][...]) lstm.state_projection.weight.data.copy_(torch.FloatTensor(proj_weights)) lstm.state_projection.weight.requires_grad = requires_grad
[ "def", "load_weights", "(", "self", ",", "weight_file", ":", "str", ")", "->", "None", ":", "requires_grad", "=", "self", ".", "requires_grad", "with", "h5py", ".", "File", "(", "cached_path", "(", "weight_file", ")", ",", "'r'", ")", "as", "fin", ":", "for", "i_layer", ",", "lstms", "in", "enumerate", "(", "zip", "(", "self", ".", "forward_layers", ",", "self", ".", "backward_layers", ")", ")", ":", "for", "j_direction", ",", "lstm", "in", "enumerate", "(", "lstms", ")", ":", "# lstm is an instance of LSTMCellWithProjection", "cell_size", "=", "lstm", ".", "cell_size", "dataset", "=", "fin", "[", "'RNN_%s'", "%", "j_direction", "]", "[", "'RNN'", "]", "[", "'MultiRNNCell'", "]", "[", "'Cell%s'", "%", "i_layer", "]", "[", "'LSTMCell'", "]", "# tensorflow packs together both W and U matrices into one matrix,", "# but pytorch maintains individual matrices. In addition, tensorflow", "# packs the gates as input, memory, forget, output but pytorch", "# uses input, forget, memory, output. So we need to modify the weights.", "tf_weights", "=", "numpy", ".", "transpose", "(", "dataset", "[", "'W_0'", "]", "[", "...", "]", ")", "torch_weights", "=", "tf_weights", ".", "copy", "(", ")", "# split the W from U matrices", "input_size", "=", "lstm", ".", "input_size", "input_weights", "=", "torch_weights", "[", ":", ",", ":", "input_size", "]", "recurrent_weights", "=", "torch_weights", "[", ":", ",", "input_size", ":", "]", "tf_input_weights", "=", "tf_weights", "[", ":", ",", ":", "input_size", "]", "tf_recurrent_weights", "=", "tf_weights", "[", ":", ",", "input_size", ":", "]", "# handle the different gate order convention", "for", "torch_w", ",", "tf_w", "in", "[", "[", "input_weights", ",", "tf_input_weights", "]", ",", "[", "recurrent_weights", ",", "tf_recurrent_weights", "]", "]", ":", "torch_w", "[", "(", "1", "*", "cell_size", ")", ":", "(", "2", "*", "cell_size", ")", ",", ":", "]", "=", "tf_w", "[", "(", "2", "*", "cell_size", ")", ":", "(", "3", "*", "cell_size", ")", ",", ":", "]", "torch_w", "[", "(", "2", "*", "cell_size", ")", ":", "(", "3", "*", "cell_size", ")", ",", ":", "]", "=", "tf_w", "[", "(", "1", "*", "cell_size", ")", ":", "(", "2", "*", "cell_size", ")", ",", ":", "]", "lstm", ".", "input_linearity", ".", "weight", ".", "data", ".", "copy_", "(", "torch", ".", "FloatTensor", "(", "input_weights", ")", ")", "lstm", ".", "state_linearity", ".", "weight", ".", "data", ".", "copy_", "(", "torch", ".", "FloatTensor", "(", "recurrent_weights", ")", ")", "lstm", ".", "input_linearity", ".", "weight", ".", "requires_grad", "=", "requires_grad", "lstm", ".", "state_linearity", ".", "weight", ".", "requires_grad", "=", "requires_grad", "# the bias weights", "tf_bias", "=", "dataset", "[", "'B'", "]", "[", "...", "]", "# tensorflow adds 1.0 to forget gate bias instead of modifying the", "# parameters...", "tf_bias", "[", "(", "2", "*", "cell_size", ")", ":", "(", "3", "*", "cell_size", ")", "]", "+=", "1", "torch_bias", "=", "tf_bias", ".", "copy", "(", ")", "torch_bias", "[", "(", "1", "*", "cell_size", ")", ":", "(", "2", "*", "cell_size", ")", "]", "=", "tf_bias", "[", "(", "2", "*", "cell_size", ")", ":", "(", "3", "*", "cell_size", ")", "]", "torch_bias", "[", "(", "2", "*", "cell_size", ")", ":", "(", "3", "*", "cell_size", ")", "]", "=", "tf_bias", "[", "(", "1", "*", "cell_size", ")", ":", "(", "2", "*", "cell_size", ")", "]", "lstm", ".", "state_linearity", ".", "bias", ".", "data", ".", "copy_", "(", "torch", ".", "FloatTensor", "(", "torch_bias", ")", ")", "lstm", ".", 
"state_linearity", ".", "bias", ".", "requires_grad", "=", "requires_grad", "# the projection weights", "proj_weights", "=", "numpy", ".", "transpose", "(", "dataset", "[", "'W_P_0'", "]", "[", "...", "]", ")", "lstm", ".", "state_projection", ".", "weight", ".", "data", ".", "copy_", "(", "torch", ".", "FloatTensor", "(", "proj_weights", ")", ")", "lstm", ".", "state_projection", ".", "weight", ".", "requires_grad", "=", "requires_grad" ]
Load the pre-trained weights from the file.
[ "Load", "the", "pre", "-", "trained", "weights", "from", "the", "file", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/elmo_lstm.py#L243-L301
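The gate-order swap in isolation, with a toy matrix (``cell_size`` = 2 chosen arbitrarily): TensorFlow stacks the gates as (input, memory, forget, output) while PyTorch expects (input, forget, memory, output), so the middle two blocks trade places.

import numpy

cell_size = 2
tf_w = numpy.arange(8).reshape(8, 1)  # four stacked gate blocks of cell_size rows
torch_w = tf_w.copy()
torch_w[1 * cell_size:2 * cell_size] = tf_w[2 * cell_size:3 * cell_size]
torch_w[2 * cell_size:3 * cell_size] = tf_w[1 * cell_size:2 * cell_size]
print(torch_w.ravel().tolist())  # [0, 1, 4, 5, 2, 3, 6, 7]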
23,143
allenai/allennlp
allennlp/semparse/type_declarations/type_declaration.py
ComplexType.return_type
def return_type(self) -> Type: """ Gives the final return type for this function. If the function takes a single argument, this is just ``self.second``. If the function takes multiple arguments and returns a basic type, this should be the final ``.second`` after following all complex types. That is the implementation here in the base class. If you have a higher-order function that returns a function itself, you need to override this method. """ return_type = self.second while isinstance(return_type, ComplexType): return_type = return_type.second return return_type
python
def return_type(self) -> Type: """ Gives the final return type for this function. If the function takes a single argument, this is just ``self.second``. If the function takes multiple arguments and returns a basic type, this should be the final ``.second`` after following all complex types. That is the implementation here in the base class. If you have a higher-order function that returns a function itself, you need to override this method. """ return_type = self.second while isinstance(return_type, ComplexType): return_type = return_type.second return return_type
[ "def", "return_type", "(", "self", ")", "->", "Type", ":", "return_type", "=", "self", ".", "second", "while", "isinstance", "(", "return_type", ",", "ComplexType", ")", ":", "return_type", "=", "return_type", ".", "second", "return", "return_type" ]
Gives the final return type for this function. If the function takes a single argument, this is just ``self.second``. If the function takes multiple arguments and returns a basic type, this should be the final ``.second`` after following all complex types. That is the implementation here in the base class. If you have a higher-order function that returns a function itself, you need to override this method.
[ "Gives", "the", "final", "return", "type", "for", "this", "function", ".", "If", "the", "function", "takes", "a", "single", "argument", "this", "is", "just", "self", ".", "second", ".", "If", "the", "function", "takes", "multiple", "arguments", "and", "returns", "a", "basic", "type", "this", "should", "be", "the", "final", ".", "second", "after", "following", "all", "complex", "types", ".", "That", "is", "the", "implementation", "here", "in", "the", "base", "class", ".", "If", "you", "have", "a", "higher", "-", "order", "function", "that", "returns", "a", "function", "itself", "you", "need", "to", "override", "this", "method", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/type_declarations/type_declaration.py#L29-L40
23,144
allenai/allennlp
allennlp/semparse/type_declarations/type_declaration.py
ComplexType.argument_types
def argument_types(self) -> List[Type]: """ Gives the types of all arguments to this function. For functions returning a basic type, we grab all ``.first`` types until ``.second`` is no longer a ``ComplexType``. That logic is implemented here in the base class. If you have a higher-order function that returns a function itself, you need to override this method. """ arguments = [self.first] remaining_type = self.second while isinstance(remaining_type, ComplexType): arguments.append(remaining_type.first) remaining_type = remaining_type.second return arguments
python
def argument_types(self) -> List[Type]: """ Gives the types of all arguments to this function. For functions returning a basic type, we grab all ``.first`` types until ``.second`` is no longer a ``ComplexType``. That logic is implemented here in the base class. If you have a higher-order function that returns a function itself, you need to override this method. """ arguments = [self.first] remaining_type = self.second while isinstance(remaining_type, ComplexType): arguments.append(remaining_type.first) remaining_type = remaining_type.second return arguments
[ "def", "argument_types", "(", "self", ")", "->", "List", "[", "Type", "]", ":", "arguments", "=", "[", "self", ".", "first", "]", "remaining_type", "=", "self", ".", "second", "while", "isinstance", "(", "remaining_type", ",", "ComplexType", ")", ":", "arguments", ".", "append", "(", "remaining_type", ".", "first", ")", "remaining_type", "=", "remaining_type", ".", "second", "return", "arguments" ]
Gives the types of all arguments to this function. For functions returning a basic type, we grab all ``.first`` types until ``.second`` is no longer a ``ComplexType``. That logic is implemented here in the base class. If you have a higher-order function that returns a function itself, you need to override this method.
[ "Gives", "the", "types", "of", "all", "arguments", "to", "this", "function", ".", "For", "functions", "returning", "a", "basic", "type", "we", "grab", "all", ".", "first", "types", "until", ".", "second", "is", "no", "longer", "a", "ComplexType", ".", "That", "logic", "is", "implemented", "here", "in", "the", "base", "class", ".", "If", "you", "have", "a", "higher", "-", "order", "function", "that", "returns", "a", "function", "itself", "you", "need", "to", "override", "this", "method", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/type_declarations/type_declaration.py#L42-L54
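Both traversals above (final return type and argument list) restated on a stand-in type, since the real ``ComplexType`` builds on nltk's type classes; the ``Basic``/``Cx`` classes below are invented for the demo. A curried type <a, <b, c>> yields arguments [a, b] and return type c:

from typing import NamedTuple, Union

class Basic(NamedTuple):
    name: str

class Cx(NamedTuple):
    first: Union['Basic', 'Cx']
    second: Union['Basic', 'Cx']

t = Cx(Basic('a'), Cx(Basic('b'), Basic('c')))

# Collect .first types while .second is still complex; what remains is the return type.
arguments, remaining = [t.first], t.second
while isinstance(remaining, Cx):
    arguments.append(remaining.first)
    remaining = remaining.second
print([a.name for a in arguments], remaining.name)  # ['a', 'b'] c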
23,145
allenai/allennlp
allennlp/semparse/type_declarations/type_declaration.py
ComplexType.substitute_any_type
def substitute_any_type(self, basic_types: Set[BasicType]) -> List[Type]: """ Takes a set of ``BasicTypes`` and replaces any instances of ``ANY_TYPE`` inside this complex type with each of those basic types. """ substitutions = [] for first_type in substitute_any_type(self.first, basic_types): for second_type in substitute_any_type(self.second, basic_types): substitutions.append(self.__class__(first_type, second_type)) return substitutions
python
def substitute_any_type(self, basic_types: Set[BasicType]) -> List[Type]: """ Takes a set of ``BasicTypes`` and replaces any instances of ``ANY_TYPE`` inside this complex type with each of those basic types. """ substitutions = [] for first_type in substitute_any_type(self.first, basic_types): for second_type in substitute_any_type(self.second, basic_types): substitutions.append(self.__class__(first_type, second_type)) return substitutions
[ "def", "substitute_any_type", "(", "self", ",", "basic_types", ":", "Set", "[", "BasicType", "]", ")", "->", "List", "[", "Type", "]", ":", "substitutions", "=", "[", "]", "for", "first_type", "in", "substitute_any_type", "(", "self", ".", "first", ",", "basic_types", ")", ":", "for", "second_type", "in", "substitute_any_type", "(", "self", ".", "second", ",", "basic_types", ")", ":", "substitutions", ".", "append", "(", "self", ".", "__class__", "(", "first_type", ",", "second_type", ")", ")", "return", "substitutions" ]
Takes a set of ``BasicTypes`` and replaces any instances of ``ANY_TYPE`` inside this complex type with each of those basic types.
[ "Takes", "a", "set", "of", "BasicTypes", "and", "replaces", "any", "instances", "of", "ANY_TYPE", "inside", "this", "complex", "type", "with", "each", "of", "those", "basic", "types", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/type_declarations/type_declaration.py#L56-L65
23,146
allenai/allennlp
allennlp/training/tensorboard_writer.py
TensorboardWriter.log_parameter_and_gradient_statistics
def log_parameter_and_gradient_statistics(self, # pylint: disable=invalid-name model: Model, batch_grad_norm: float) -> None: """ Send the mean and std of all parameters and gradients to tensorboard, as well as logging the average gradient norm. """ if self._should_log_parameter_statistics: # Log parameter values to Tensorboard for name, param in model.named_parameters(): self.add_train_scalar("parameter_mean/" + name, param.data.mean()) self.add_train_scalar("parameter_std/" + name, param.data.std()) if param.grad is not None: if param.grad.is_sparse: # pylint: disable=protected-access grad_data = param.grad.data._values() else: grad_data = param.grad.data # skip empty gradients if torch.prod(torch.tensor(grad_data.shape)).item() > 0: # pylint: disable=not-callable self.add_train_scalar("gradient_mean/" + name, grad_data.mean()) self.add_train_scalar("gradient_std/" + name, grad_data.std()) else: # no gradient for a parameter with sparse gradients logger.info("No gradient for %s, skipping tensorboard logging.", name) # norm of gradients if batch_grad_norm is not None: self.add_train_scalar("gradient_norm", batch_grad_norm)
python
def log_parameter_and_gradient_statistics(self, # pylint: disable=invalid-name model: Model, batch_grad_norm: float) -> None: """ Send the mean and std of all parameters and gradients to tensorboard, as well as logging the average gradient norm. """ if self._should_log_parameter_statistics: # Log parameter values to Tensorboard for name, param in model.named_parameters(): self.add_train_scalar("parameter_mean/" + name, param.data.mean()) self.add_train_scalar("parameter_std/" + name, param.data.std()) if param.grad is not None: if param.grad.is_sparse: # pylint: disable=protected-access grad_data = param.grad.data._values() else: grad_data = param.grad.data # skip empty gradients if torch.prod(torch.tensor(grad_data.shape)).item() > 0: # pylint: disable=not-callable self.add_train_scalar("gradient_mean/" + name, grad_data.mean()) self.add_train_scalar("gradient_std/" + name, grad_data.std()) else: # no gradient for a parameter with sparse gradients logger.info("No gradient for %s, skipping tensorboard logging.", name) # norm of gradients if batch_grad_norm is not None: self.add_train_scalar("gradient_norm", batch_grad_norm)
[ "def", "log_parameter_and_gradient_statistics", "(", "self", ",", "# pylint: disable=invalid-name", "model", ":", "Model", ",", "batch_grad_norm", ":", "float", ")", "->", "None", ":", "if", "self", ".", "_should_log_parameter_statistics", ":", "# Log parameter values to Tensorboard", "for", "name", ",", "param", "in", "model", ".", "named_parameters", "(", ")", ":", "self", ".", "add_train_scalar", "(", "\"parameter_mean/\"", "+", "name", ",", "param", ".", "data", ".", "mean", "(", ")", ")", "self", ".", "add_train_scalar", "(", "\"parameter_std/\"", "+", "name", ",", "param", ".", "data", ".", "std", "(", ")", ")", "if", "param", ".", "grad", "is", "not", "None", ":", "if", "param", ".", "grad", ".", "is_sparse", ":", "# pylint: disable=protected-access", "grad_data", "=", "param", ".", "grad", ".", "data", ".", "_values", "(", ")", "else", ":", "grad_data", "=", "param", ".", "grad", ".", "data", "# skip empty gradients", "if", "torch", ".", "prod", "(", "torch", ".", "tensor", "(", "grad_data", ".", "shape", ")", ")", ".", "item", "(", ")", ">", "0", ":", "# pylint: disable=not-callable", "self", ".", "add_train_scalar", "(", "\"gradient_mean/\"", "+", "name", ",", "grad_data", ".", "mean", "(", ")", ")", "self", ".", "add_train_scalar", "(", "\"gradient_std/\"", "+", "name", ",", "grad_data", ".", "std", "(", ")", ")", "else", ":", "# no gradient for a parameter with sparse gradients", "logger", ".", "info", "(", "\"No gradient for %s, skipping tensorboard logging.\"", ",", "name", ")", "# norm of gradients", "if", "batch_grad_norm", "is", "not", "None", ":", "self", ".", "add_train_scalar", "(", "\"gradient_norm\"", ",", "batch_grad_norm", ")" ]
Send the mean and std of all parameters and gradients to tensorboard, as well as logging the average gradient norm.
[ "Send", "the", "mean", "and", "std", "of", "all", "parameters", "and", "gradients", "to", "tensorboard", "as", "well", "as", "logging", "the", "average", "gradient", "norm", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/tensorboard_writer.py#L84-L112
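The same statistics printed instead of written to tensorboard; the two-layer model and the dummy loss are invented for the demo:

import torch

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.Linear(4, 2))
model(torch.randn(3, 4)).sum().backward()  # populate .grad on every parameter

for name, param in model.named_parameters():
    grad = param.grad
    if grad is not None and grad.numel() > 0:  # skip empty gradients
        print(f"{name}: param_mean={param.data.mean():.4f}, "
              f"grad_mean={grad.mean():.4f}, grad_std={grad.std():.4f}")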
23,147
allenai/allennlp
allennlp/training/tensorboard_writer.py
TensorboardWriter.log_learning_rates
def log_learning_rates(self,
                           model: Model,
                           optimizer: torch.optim.Optimizer):
        """
        Send current parameter specific learning rates to tensorboard
        """
        if self._should_log_learning_rate:
            # optimizer stores lr info keyed by parameter tensor
            # we want to log with parameter name
            names = {param: name for name, param in model.named_parameters()}
            for group in optimizer.param_groups:
                if 'lr' not in group:
                    continue
                rate = group['lr']
                for param in group['params']:
                    # check whether the parameter requires grad or not
                    effective_rate = rate * float(param.requires_grad)
                    self.add_train_scalar("learning_rate/" + names[param], effective_rate)
python
def log_learning_rates(self,
                           model: Model,
                           optimizer: torch.optim.Optimizer):
        """
        Send current parameter specific learning rates to tensorboard
        """
        if self._should_log_learning_rate:
            # optimizer stores lr info keyed by parameter tensor
            # we want to log with parameter name
            names = {param: name for name, param in model.named_parameters()}
            for group in optimizer.param_groups:
                if 'lr' not in group:
                    continue
                rate = group['lr']
                for param in group['params']:
                    # check whether the parameter requires grad or not
                    effective_rate = rate * float(param.requires_grad)
                    self.add_train_scalar("learning_rate/" + names[param], effective_rate)
[ "def", "log_learning_rates", "(", "self", ",", "model", ":", "Model", ",", "optimizer", ":", "torch", ".", "optim", ".", "Optimizer", ")", ":", "if", "self", ".", "_should_log_learning_rate", ":", "# optimizer stores lr info keyed by parameter tensor", "# we want to log with parameter name", "names", "=", "{", "param", ":", "name", "for", "name", ",", "param", "in", "model", ".", "named_parameters", "(", ")", "}", "for", "group", "in", "optimizer", ".", "param_groups", ":", "if", "'lr'", "not", "in", "group", ":", "continue", "rate", "=", "group", "[", "'lr'", "]", "for", "param", "in", "group", "[", "'params'", "]", ":", "# check whether params has requires grad or not", "effective_rate", "=", "rate", "*", "float", "(", "param", ".", "requires_grad", ")", "self", ".", "add_train_scalar", "(", "\"learning_rate/\"", "+", "names", "[", "param", "]", ",", "effective_rate", ")" ]
Send current parameter specific learning rates to tensorboard
[ "Send", "current", "parameter", "specific", "learning", "rates", "to", "tensorboard" ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/tensorboard_writer.py#L114-L131
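The tensor-to-name reverse lookup in isolation: parameters are hashable by identity, so the optimizer's param groups can be mapped back to the model's names (the one-layer model is invented for the demo):

import torch

model = torch.nn.Linear(2, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

names = {param: name for name, param in model.named_parameters()}
for group in optimizer.param_groups:
    for param in group['params']:
        print(names[param], group['lr'] * float(param.requires_grad))
# weight 0.1
# bias 0.1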
23,148
allenai/allennlp
allennlp/training/tensorboard_writer.py
TensorboardWriter.log_histograms
def log_histograms(self, model: Model, histogram_parameters: Set[str]) -> None: """ Send histograms of parameters to tensorboard. """ for name, param in model.named_parameters(): if name in histogram_parameters: self.add_train_histogram("parameter_histogram/" + name, param)
python
def log_histograms(self, model: Model, histogram_parameters: Set[str]) -> None: """ Send histograms of parameters to tensorboard. """ for name, param in model.named_parameters(): if name in histogram_parameters: self.add_train_histogram("parameter_histogram/" + name, param)
[ "def", "log_histograms", "(", "self", ",", "model", ":", "Model", ",", "histogram_parameters", ":", "Set", "[", "str", "]", ")", "->", "None", ":", "for", "name", ",", "param", "in", "model", ".", "named_parameters", "(", ")", ":", "if", "name", "in", "histogram_parameters", ":", "self", ".", "add_train_histogram", "(", "\"parameter_histogram/\"", "+", "name", ",", "param", ")" ]
Send histograms of parameters to tensorboard.
[ "Send", "histograms", "of", "parameters", "to", "tensorboard", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/tensorboard_writer.py#L133-L139
23,149
allenai/allennlp
allennlp/semparse/contexts/quarel_utils.py
align_entities
def align_entities(extracted: List[str], literals: JsonDict, stemmer: NltkPorterStemmer) -> List[str]: """ Use stemming to attempt alignment between extracted world and given world literals. If more words align to one world vs the other, it's considered aligned. """ literal_keys = list(literals.keys()) literal_values = list(literals.values()) overlaps = [get_stem_overlaps(extract, literal_values, stemmer) for extract in extracted] worlds = [] for overlap in overlaps: if overlap[0] > overlap[1]: worlds.append(literal_keys[0]) elif overlap[0] < overlap[1]: worlds.append(literal_keys[1]) else: worlds.append(None) return worlds
python
def align_entities(extracted: List[str], literals: JsonDict, stemmer: NltkPorterStemmer) -> List[str]: """ Use stemming to attempt alignment between extracted world and given world literals. If more words align to one world vs the other, it's considered aligned. """ literal_keys = list(literals.keys()) literal_values = list(literals.values()) overlaps = [get_stem_overlaps(extract, literal_values, stemmer) for extract in extracted] worlds = [] for overlap in overlaps: if overlap[0] > overlap[1]: worlds.append(literal_keys[0]) elif overlap[0] < overlap[1]: worlds.append(literal_keys[1]) else: worlds.append(None) return worlds
[ "def", "align_entities", "(", "extracted", ":", "List", "[", "str", "]", ",", "literals", ":", "JsonDict", ",", "stemmer", ":", "NltkPorterStemmer", ")", "->", "List", "[", "str", "]", ":", "literal_keys", "=", "list", "(", "literals", ".", "keys", "(", ")", ")", "literal_values", "=", "list", "(", "literals", ".", "values", "(", ")", ")", "overlaps", "=", "[", "get_stem_overlaps", "(", "extract", ",", "literal_values", ",", "stemmer", ")", "for", "extract", "in", "extracted", "]", "worlds", "=", "[", "]", "for", "overlap", "in", "overlaps", ":", "if", "overlap", "[", "0", "]", ">", "overlap", "[", "1", "]", ":", "worlds", ".", "append", "(", "literal_keys", "[", "0", "]", ")", "elif", "overlap", "[", "0", "]", "<", "overlap", "[", "1", "]", ":", "worlds", ".", "append", "(", "literal_keys", "[", "1", "]", ")", "else", ":", "worlds", ".", "append", "(", "None", ")", "return", "worlds" ]
Use stemming to attempt alignment between extracted world and given world literals. If more words align to one world vs the other, it's considered aligned.
[ "Use", "stemming", "to", "attempt", "alignment", "between", "extracted", "world", "and", "given", "world", "literals", ".", "If", "more", "words", "align", "to", "one", "world", "vs", "the", "other", "it", "s", "considered", "aligned", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/contexts/quarel_utils.py#L360-L378
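A usage sketch with made-up literals; it assumes quarel_utils's ``NltkPorterStemmer`` annotation refers to nltk's ``PorterStemmer``, so an nltk stemmer is passed directly. Using identical surface words in the extracted strings and the literals keeps the stem overlap unambiguous; a tie between the two worlds would produce ``None``.

from nltk.stem import PorterStemmer
from allennlp.semparse.contexts.quarel_utils import align_entities

stemmer = PorterStemmer()
literals = {"world1": "car rolling on gravel",
            "world2": "car rolling on ice"}
extracted = ["the gravel road", "the ice patch"]

# "gravel" overlaps only with world1's literal and "ice" only with world2's,
# so (assuming get_stem_overlaps counts shared stems as sketched) this
# should print ['world1', 'world2'].
print(align_entities(extracted, literals, stemmer))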
23,150
allenai/allennlp
allennlp/modules/bimpm_matching.py
multi_perspective_match
def multi_perspective_match(vector1: torch.Tensor,
                            vector2: torch.Tensor,
                            weight: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Calculate multi-perspective cosine matching between time-steps of vectors
    of the same length.

    Parameters
    ----------
    vector1 : ``torch.Tensor``
        A tensor of shape ``(batch, seq_len, hidden_size)``
    vector2 : ``torch.Tensor``
        A tensor of shape ``(batch, seq_len or 1, hidden_size)``
    weight : ``torch.Tensor``
        A tensor of shape ``(num_perspectives, hidden_size)``

    Returns
    -------
    A tuple of two tensors consisting of multi-perspective matching results.
    The first one is of the shape (batch, seq_len, 1), the second one is of shape
    (batch, seq_len, num_perspectives)
    """
    assert vector1.size(0) == vector2.size(0)
    assert weight.size(1) == vector1.size(2) == vector2.size(2)

    # (batch, seq_len, 1)
    similarity_single = F.cosine_similarity(vector1, vector2, 2).unsqueeze(2)

    # (1, 1, num_perspectives, hidden_size)
    weight = weight.unsqueeze(0).unsqueeze(0)

    # (batch, seq_len, num_perspectives, hidden_size)
    vector1 = weight * vector1.unsqueeze(2)
    vector2 = weight * vector2.unsqueeze(2)

    similarity_multi = F.cosine_similarity(vector1, vector2, dim=3)

    return similarity_single, similarity_multi
python
def multi_perspective_match(vector1: torch.Tensor,
                            vector2: torch.Tensor,
                            weight: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Calculate multi-perspective cosine matching between time-steps of vectors
    of the same length.

    Parameters
    ----------
    vector1 : ``torch.Tensor``
        A tensor of shape ``(batch, seq_len, hidden_size)``
    vector2 : ``torch.Tensor``
        A tensor of shape ``(batch, seq_len or 1, hidden_size)``
    weight : ``torch.Tensor``
        A tensor of shape ``(num_perspectives, hidden_size)``

    Returns
    -------
    A tuple of two tensors consisting of multi-perspective matching results.
    The first one is of the shape (batch, seq_len, 1), the second one is of shape
    (batch, seq_len, num_perspectives)
    """
    assert vector1.size(0) == vector2.size(0)
    assert weight.size(1) == vector1.size(2) == vector2.size(2)

    # (batch, seq_len, 1)
    similarity_single = F.cosine_similarity(vector1, vector2, 2).unsqueeze(2)

    # (1, 1, num_perspectives, hidden_size)
    weight = weight.unsqueeze(0).unsqueeze(0)

    # (batch, seq_len, num_perspectives, hidden_size)
    vector1 = weight * vector1.unsqueeze(2)
    vector2 = weight * vector2.unsqueeze(2)

    similarity_multi = F.cosine_similarity(vector1, vector2, dim=3)

    return similarity_single, similarity_multi
[ "def", "multi_perspective_match", "(", "vector1", ":", "torch", ".", "Tensor", ",", "vector2", ":", "torch", ".", "Tensor", ",", "weight", ":", "torch", ".", "Tensor", ")", "->", "Tuple", "[", "torch", ".", "Tensor", ",", "torch", ".", "Tensor", "]", ":", "assert", "vector1", ".", "size", "(", "0", ")", "==", "vector2", ".", "size", "(", "0", ")", "assert", "weight", ".", "size", "(", "1", ")", "==", "vector1", ".", "size", "(", "2", ")", "==", "vector1", ".", "size", "(", "2", ")", "# (batch, seq_len, 1)", "similarity_single", "=", "F", ".", "cosine_similarity", "(", "vector1", ",", "vector2", ",", "2", ")", ".", "unsqueeze", "(", "2", ")", "# (1, 1, num_perspectives, hidden_size)", "weight", "=", "weight", ".", "unsqueeze", "(", "0", ")", ".", "unsqueeze", "(", "0", ")", "# (batch, seq_len, num_perspectives, hidden_size)", "vector1", "=", "weight", "*", "vector1", ".", "unsqueeze", "(", "2", ")", "vector2", "=", "weight", "*", "vector2", ".", "unsqueeze", "(", "2", ")", "similarity_multi", "=", "F", ".", "cosine_similarity", "(", "vector1", ",", "vector2", ",", "dim", "=", "3", ")", "return", "similarity_single", ",", "similarity_multi" ]
Calculate multi-perspective cosine matching between time-steps of vectors
of the same length.

Parameters
----------
vector1 : ``torch.Tensor``
    A tensor of shape ``(batch, seq_len, hidden_size)``
vector2 : ``torch.Tensor``
    A tensor of shape ``(batch, seq_len or 1, hidden_size)``
weight : ``torch.Tensor``
    A tensor of shape ``(num_perspectives, hidden_size)``

Returns
-------
A tuple of two tensors consisting of multi-perspective matching results.
The first one is of the shape (batch, seq_len, 1), the second one is of shape
(batch, seq_len, num_perspectives)
[ "Calculate", "multi", "-", "perspective", "cosine", "matching", "between", "time", "-", "steps", "of", "vectors", "of", "the", "same", "length", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/bimpm_matching.py#L16-L53
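A quick shape check with random inputs; the import path matches this record's file (allennlp/modules/bimpm_matching.py), and the dimension names are the ones from the docstring.

import torch
from allennlp.modules.bimpm_matching import multi_perspective_match

batch, seq_len, hidden_size, num_perspectives = 2, 5, 8, 4
vector1 = torch.randn(batch, seq_len, hidden_size)
vector2 = torch.randn(batch, seq_len, hidden_size)
weight = torch.randn(num_perspectives, hidden_size)

similarity_single, similarity_multi = multi_perspective_match(vector1, vector2, weight)
print(similarity_single.shape)  # torch.Size([2, 5, 1])
print(similarity_multi.shape)   # torch.Size([2, 5, 4])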
23,151
allenai/allennlp
allennlp/modules/bimpm_matching.py
multi_perspective_match_pairwise
def multi_perspective_match_pairwise(vector1: torch.Tensor,
                                     vector2: torch.Tensor,
                                     weight: torch.Tensor,
                                     eps: float = 1e-8) -> torch.Tensor:
    """
    Calculate multi-perspective cosine matching between each time step of
    one vector and each time step of another vector.

    Parameters
    ----------
    vector1 : ``torch.Tensor``
        A tensor of shape ``(batch, seq_len1, hidden_size)``
    vector2 : ``torch.Tensor``
        A tensor of shape ``(batch, seq_len2, hidden_size)``
    weight : ``torch.Tensor``
        A tensor of shape ``(num_perspectives, hidden_size)``
    eps : ``float`` optional, (default = 1e-8)
        A small value to avoid the zero-division problem

    Returns
    -------
    A tensor of shape (batch, seq_len1, seq_len2, num_perspectives) consisting of
    multi-perspective matching results
    """
    num_perspectives = weight.size(0)

    # (1, num_perspectives, 1, hidden_size)
    weight = weight.unsqueeze(0).unsqueeze(2)

    # (batch, num_perspectives, seq_len*, hidden_size)
    vector1 = weight * vector1.unsqueeze(1).expand(-1, num_perspectives, -1, -1)
    vector2 = weight * vector2.unsqueeze(1).expand(-1, num_perspectives, -1, -1)

    # (batch, num_perspectives, seq_len*, 1)
    vector1_norm = vector1.norm(p=2, dim=3, keepdim=True)
    vector2_norm = vector2.norm(p=2, dim=3, keepdim=True)

    # (batch, num_perspectives, seq_len1, seq_len2)
    mul_result = torch.matmul(vector1, vector2.transpose(2, 3))
    norm_value = vector1_norm * vector2_norm.transpose(2, 3)

    # (batch, seq_len1, seq_len2, num_perspectives)
    return (mul_result / norm_value.clamp(min=eps)).permute(0, 2, 3, 1)
python
def multi_perspective_match_pairwise(vector1: torch.Tensor,
                                     vector2: torch.Tensor,
                                     weight: torch.Tensor,
                                     eps: float = 1e-8) -> torch.Tensor:
    """
    Calculate multi-perspective cosine matching between each time step of
    one vector and each time step of another vector.

    Parameters
    ----------
    vector1 : ``torch.Tensor``
        A tensor of shape ``(batch, seq_len1, hidden_size)``
    vector2 : ``torch.Tensor``
        A tensor of shape ``(batch, seq_len2, hidden_size)``
    weight : ``torch.Tensor``
        A tensor of shape ``(num_perspectives, hidden_size)``
    eps : ``float`` optional, (default = 1e-8)
        A small value to avoid the zero-division problem

    Returns
    -------
    A tensor of shape (batch, seq_len1, seq_len2, num_perspectives) consisting of
    multi-perspective matching results
    """
    num_perspectives = weight.size(0)

    # (1, num_perspectives, 1, hidden_size)
    weight = weight.unsqueeze(0).unsqueeze(2)

    # (batch, num_perspectives, seq_len*, hidden_size)
    vector1 = weight * vector1.unsqueeze(1).expand(-1, num_perspectives, -1, -1)
    vector2 = weight * vector2.unsqueeze(1).expand(-1, num_perspectives, -1, -1)

    # (batch, num_perspectives, seq_len*, 1)
    vector1_norm = vector1.norm(p=2, dim=3, keepdim=True)
    vector2_norm = vector2.norm(p=2, dim=3, keepdim=True)

    # (batch, num_perspectives, seq_len1, seq_len2)
    mul_result = torch.matmul(vector1, vector2.transpose(2, 3))
    norm_value = vector1_norm * vector2_norm.transpose(2, 3)

    # (batch, seq_len1, seq_len2, num_perspectives)
    return (mul_result / norm_value.clamp(min=eps)).permute(0, 2, 3, 1)
[ "def", "multi_perspective_match_pairwise", "(", "vector1", ":", "torch", ".", "Tensor", ",", "vector2", ":", "torch", ".", "Tensor", ",", "weight", ":", "torch", ".", "Tensor", ",", "eps", ":", "float", "=", "1e-8", ")", "->", "torch", ".", "Tensor", ":", "num_perspectives", "=", "weight", ".", "size", "(", "0", ")", "# (1, num_perspectives, 1, hidden_size)", "weight", "=", "weight", ".", "unsqueeze", "(", "0", ")", ".", "unsqueeze", "(", "2", ")", "# (batch, num_perspectives, seq_len*, hidden_size)", "vector1", "=", "weight", "*", "vector1", ".", "unsqueeze", "(", "1", ")", ".", "expand", "(", "-", "1", ",", "num_perspectives", ",", "-", "1", ",", "-", "1", ")", "vector2", "=", "weight", "*", "vector2", ".", "unsqueeze", "(", "1", ")", ".", "expand", "(", "-", "1", ",", "num_perspectives", ",", "-", "1", ",", "-", "1", ")", "# (batch, num_perspectives, seq_len*, 1)", "vector1_norm", "=", "vector1", ".", "norm", "(", "p", "=", "2", ",", "dim", "=", "3", ",", "keepdim", "=", "True", ")", "vector2_norm", "=", "vector2", ".", "norm", "(", "p", "=", "2", ",", "dim", "=", "3", ",", "keepdim", "=", "True", ")", "# (batch, num_perspectives, seq_len1, seq_len2)", "mul_result", "=", "torch", ".", "matmul", "(", "vector1", ",", "vector2", ".", "transpose", "(", "2", ",", "3", ")", ")", "norm_value", "=", "vector1_norm", "*", "vector2_norm", ".", "transpose", "(", "2", ",", "3", ")", "# (batch, seq_len1, seq_len2, num_perspectives)", "return", "(", "mul_result", "/", "norm_value", ".", "clamp", "(", "min", "=", "eps", ")", ")", ".", "permute", "(", "0", ",", "2", ",", "3", ",", "1", ")" ]
Calculate multi-perspective cosine matching between each time step of
one vector and each time step of another vector.

Parameters
----------
vector1 : ``torch.Tensor``
    A tensor of shape ``(batch, seq_len1, hidden_size)``
vector2 : ``torch.Tensor``
    A tensor of shape ``(batch, seq_len2, hidden_size)``
weight : ``torch.Tensor``
    A tensor of shape ``(num_perspectives, hidden_size)``
eps : ``float`` optional, (default = 1e-8)
    A small value to avoid the zero-division problem

Returns
-------
A tensor of shape (batch, seq_len1, seq_len2, num_perspectives) consisting of
multi-perspective matching results
[ "Calculate", "multi", "-", "perspective", "cosine", "matching", "between", "each", "time", "step", "of", "one", "vector", "and", "each", "time", "step", "of", "another", "vector", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/bimpm_matching.py#L56-L98
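The pairwise variant scores every (seq_len1, seq_len2) pair, so both sequence lengths show up in the output shape; random inputs again, just to demonstrate shapes.

import torch
from allennlp.modules.bimpm_matching import multi_perspective_match_pairwise

batch, seq_len1, seq_len2, hidden_size, num_perspectives = 2, 5, 7, 8, 4
vector1 = torch.randn(batch, seq_len1, hidden_size)
vector2 = torch.randn(batch, seq_len2, hidden_size)
weight = torch.randn(num_perspectives, hidden_size)

matching = multi_perspective_match_pairwise(vector1, vector2, weight)
print(matching.shape)  # torch.Size([2, 5, 7, 4])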
23,152
allenai/allennlp
allennlp/semparse/contexts/atis_tables.py
get_date_from_utterance
def get_date_from_utterance(tokenized_utterance: List[Token],
                            year: int = 1993) -> List[datetime]:
    """
    When the year is not explicitly mentioned in the utterance, the query assumes that
    it is 1993 so we do the same here. If there is no mention of the month or day then
    we do not return any dates from the utterance.
    """

    dates = []

    utterance = ' '.join([token.text for token in tokenized_utterance])
    year_result = re.findall(r'199[0-4]', utterance)
    if year_result:
        year = int(year_result[0])
    trigrams = ngrams([token.text for token in tokenized_utterance], 3)
    for month, tens, digit in trigrams:
        # This will match something like ``september twenty first``.
        day = ' '.join([tens, digit])
        if month in MONTH_NUMBERS and day in DAY_NUMBERS:
            try:
                dates.append(datetime(year, MONTH_NUMBERS[month], DAY_NUMBERS[day]))
            except ValueError:
                print('invalid month day')

    bigrams = ngrams([token.text for token in tokenized_utterance], 2)
    for month, day in bigrams:
        if month in MONTH_NUMBERS and day in DAY_NUMBERS:
            # This will match something like ``september first``.
            try:
                dates.append(datetime(year, MONTH_NUMBERS[month], DAY_NUMBERS[day]))
            except ValueError:
                print('invalid month day')

    fivegrams = ngrams([token.text for token in tokenized_utterance], 5)
    for tens, digit, _, year_match, month in fivegrams:
        # This will match something like ``twenty first of 1993 july``.
        day = ' '.join([tens, digit])
        if month in MONTH_NUMBERS and day in DAY_NUMBERS and year_match.isdigit():
            try:
                dates.append(datetime(int(year_match), MONTH_NUMBERS[month], DAY_NUMBERS[day]))
            except ValueError:
                print('invalid month day')
        if month in MONTH_NUMBERS and digit in DAY_NUMBERS and year_match.isdigit():
            try:
                dates.append(datetime(int(year_match), MONTH_NUMBERS[month], DAY_NUMBERS[digit]))
            except ValueError:
                print('invalid month day')
    return dates
python
def get_date_from_utterance(tokenized_utterance: List[Token],
                            year: int = 1993) -> List[datetime]:
    """
    When the year is not explicitly mentioned in the utterance, the query assumes that
    it is 1993 so we do the same here. If there is no mention of the month or day then
    we do not return any dates from the utterance.
    """

    dates = []

    utterance = ' '.join([token.text for token in tokenized_utterance])
    year_result = re.findall(r'199[0-4]', utterance)
    if year_result:
        year = int(year_result[0])
    trigrams = ngrams([token.text for token in tokenized_utterance], 3)
    for month, tens, digit in trigrams:
        # This will match something like ``september twenty first``.
        day = ' '.join([tens, digit])
        if month in MONTH_NUMBERS and day in DAY_NUMBERS:
            try:
                dates.append(datetime(year, MONTH_NUMBERS[month], DAY_NUMBERS[day]))
            except ValueError:
                print('invalid month day')

    bigrams = ngrams([token.text for token in tokenized_utterance], 2)
    for month, day in bigrams:
        if month in MONTH_NUMBERS and day in DAY_NUMBERS:
            # This will match something like ``september first``.
            try:
                dates.append(datetime(year, MONTH_NUMBERS[month], DAY_NUMBERS[day]))
            except ValueError:
                print('invalid month day')

    fivegrams = ngrams([token.text for token in tokenized_utterance], 5)
    for tens, digit, _, year_match, month in fivegrams:
        # This will match something like ``twenty first of 1993 july``.
        day = ' '.join([tens, digit])
        if month in MONTH_NUMBERS and day in DAY_NUMBERS and year_match.isdigit():
            try:
                dates.append(datetime(int(year_match), MONTH_NUMBERS[month], DAY_NUMBERS[day]))
            except ValueError:
                print('invalid month day')
        if month in MONTH_NUMBERS and digit in DAY_NUMBERS and year_match.isdigit():
            try:
                dates.append(datetime(int(year_match), MONTH_NUMBERS[month], DAY_NUMBERS[digit]))
            except ValueError:
                print('invalid month day')
    return dates
[ "def", "get_date_from_utterance", "(", "tokenized_utterance", ":", "List", "[", "Token", "]", ",", "year", ":", "int", "=", "1993", ")", "->", "List", "[", "datetime", "]", ":", "dates", "=", "[", "]", "utterance", "=", "' '", ".", "join", "(", "[", "token", ".", "text", "for", "token", "in", "tokenized_utterance", "]", ")", "year_result", "=", "re", ".", "findall", "(", "r'199[0-4]'", ",", "utterance", ")", "if", "year_result", ":", "year", "=", "int", "(", "year_result", "[", "0", "]", ")", "trigrams", "=", "ngrams", "(", "[", "token", ".", "text", "for", "token", "in", "tokenized_utterance", "]", ",", "3", ")", "for", "month", ",", "tens", ",", "digit", "in", "trigrams", ":", "# This will match something like ``september twenty first``.", "day", "=", "' '", ".", "join", "(", "[", "tens", ",", "digit", "]", ")", "if", "month", "in", "MONTH_NUMBERS", "and", "day", "in", "DAY_NUMBERS", ":", "try", ":", "dates", ".", "append", "(", "datetime", "(", "year", ",", "MONTH_NUMBERS", "[", "month", "]", ",", "DAY_NUMBERS", "[", "day", "]", ")", ")", "except", "ValueError", ":", "print", "(", "'invalid month day'", ")", "bigrams", "=", "ngrams", "(", "[", "token", ".", "text", "for", "token", "in", "tokenized_utterance", "]", ",", "2", ")", "for", "month", ",", "day", "in", "bigrams", ":", "if", "month", "in", "MONTH_NUMBERS", "and", "day", "in", "DAY_NUMBERS", ":", "# This will match something like ``september first``.", "try", ":", "dates", ".", "append", "(", "datetime", "(", "year", ",", "MONTH_NUMBERS", "[", "month", "]", ",", "DAY_NUMBERS", "[", "day", "]", ")", ")", "except", "ValueError", ":", "print", "(", "'invalid month day'", ")", "fivegrams", "=", "ngrams", "(", "[", "token", ".", "text", "for", "token", "in", "tokenized_utterance", "]", ",", "5", ")", "for", "tens", ",", "digit", ",", "_", ",", "year_match", ",", "month", "in", "fivegrams", ":", "# This will match something like ``twenty first of 1993 july``.", "day", "=", "' '", ".", "join", "(", "[", "tens", ",", "digit", "]", ")", "if", "month", "in", "MONTH_NUMBERS", "and", "day", "in", "DAY_NUMBERS", "and", "year_match", ".", "isdigit", "(", ")", ":", "try", ":", "dates", ".", "append", "(", "datetime", "(", "int", "(", "year_match", ")", ",", "MONTH_NUMBERS", "[", "month", "]", ",", "DAY_NUMBERS", "[", "day", "]", ")", ")", "except", "ValueError", ":", "print", "(", "'invalid month day'", ")", "if", "month", "in", "MONTH_NUMBERS", "and", "digit", "in", "DAY_NUMBERS", "and", "year_match", ".", "isdigit", "(", ")", ":", "try", ":", "dates", ".", "append", "(", "datetime", "(", "int", "(", "year_match", ")", ",", "MONTH_NUMBERS", "[", "month", "]", ",", "DAY_NUMBERS", "[", "digit", "]", ")", ")", "except", "ValueError", ":", "print", "(", "'invalid month day'", ")", "return", "dates" ]
When the year is not explicitly mentioned in the utterance, the query assumes that it is 1993 so we do the same here. If there is no mention of the month or day then we do not return any dates from the utterance.
[ "When", "the", "year", "is", "not", "explicitly", "mentioned", "in", "the", "utterance", "the", "query", "assumes", "that", "it", "is", "1993", "so", "we", "do", "the", "same", "here", ".", "If", "there", "is", "no", "mention", "of", "the", "month", "or", "day", "then", "we", "do", "not", "return", "any", "dates", "from", "the", "utterance", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/contexts/atis_tables.py#L79-L126
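A usage sketch; the utterance is made up, and it assumes ``september`` and ``first`` appear in the module's MONTH_NUMBERS and DAY_NUMBERS tables, which is what lets the bigram branch fire.

from allennlp.data.tokenizers import Token
from allennlp.semparse.contexts.atis_tables import get_date_from_utterance

tokens = [Token(text) for text in "show flights on september first".split()]

# No year is mentioned, so the default of 1993 is used; the bigram
# ("september", "first") should yield datetime(1993, 9, 1).
print(get_date_from_utterance(tokens))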
23,153
allenai/allennlp
allennlp/semparse/contexts/atis_tables.py
get_numbers_from_utterance
def get_numbers_from_utterance(utterance: str, tokenized_utterance: List[Token]) -> Dict[str, List[int]]:
    """
    Given an utterance, this function finds all the numbers that are in the action space. Since we need to
    keep track of linking scores, we represent the numbers as a dictionary, where the keys are the string
    representation of the number and the values are lists of the token indices that trigger that number.
    """
    # When we use a regex to find numbers or strings, we need a mapping from
    # the character to which token triggered it.
    char_offset_to_token_index = {token.idx: token_index
                                  for token_index, token in enumerate(tokenized_utterance)}

    # We want to look up later for each time whether it appears after a word
    # such as "about" or "approximately".
    indices_of_approximate_words = {index for index, token in enumerate(tokenized_utterance)
                                    if token.text in APPROX_WORDS}

    indices_of_words_preceding_time = {index for index, token in enumerate(tokenized_utterance)
                                       if token.text in WORDS_PRECEDING_TIME}

    indices_of_am_pm = {index for index, token in enumerate(tokenized_utterance)
                        if token.text in {'am', 'pm'}}

    number_linking_dict: Dict[str, List[int]] = defaultdict(list)

    for token_index, token in enumerate(tokenized_utterance):
        if token.text.isdigit():
            if token_index - 1 in indices_of_words_preceding_time and token_index + 1 not in indices_of_am_pm:
                for time in digit_to_query_time(token.text):
                    number_linking_dict[str(time)].append(token_index)
    times_linking_dict = get_times_from_utterance(utterance,
                                                  char_offset_to_token_index,
                                                  indices_of_approximate_words)
    for key, value in times_linking_dict.items():
        number_linking_dict[key].extend(value)

    for index, token in enumerate(tokenized_utterance):
        for number in NUMBER_TRIGGER_DICT.get(token.text, []):
            if index - 1 in indices_of_approximate_words:
                for approx_time in get_approximate_times([int(number)]):
                    number_linking_dict[str(approx_time)].append(index)
            else:
                number_linking_dict[number].append(index)
    return number_linking_dict
python
def get_numbers_from_utterance(utterance: str, tokenized_utterance: List[Token]) -> Dict[str, List[int]]:
    """
    Given an utterance, this function finds all the numbers that are in the action space. Since we need to
    keep track of linking scores, we represent the numbers as a dictionary, where the keys are the string
    representation of the number and the values are lists of the token indices that trigger that number.
    """
    # When we use a regex to find numbers or strings, we need a mapping from
    # the character to which token triggered it.
    char_offset_to_token_index = {token.idx: token_index
                                  for token_index, token in enumerate(tokenized_utterance)}

    # We want to look up later for each time whether it appears after a word
    # such as "about" or "approximately".
    indices_of_approximate_words = {index for index, token in enumerate(tokenized_utterance)
                                    if token.text in APPROX_WORDS}

    indices_of_words_preceding_time = {index for index, token in enumerate(tokenized_utterance)
                                       if token.text in WORDS_PRECEDING_TIME}

    indices_of_am_pm = {index for index, token in enumerate(tokenized_utterance)
                        if token.text in {'am', 'pm'}}

    number_linking_dict: Dict[str, List[int]] = defaultdict(list)

    for token_index, token in enumerate(tokenized_utterance):
        if token.text.isdigit():
            if token_index - 1 in indices_of_words_preceding_time and token_index + 1 not in indices_of_am_pm:
                for time in digit_to_query_time(token.text):
                    number_linking_dict[str(time)].append(token_index)
    times_linking_dict = get_times_from_utterance(utterance,
                                                  char_offset_to_token_index,
                                                  indices_of_approximate_words)
    for key, value in times_linking_dict.items():
        number_linking_dict[key].extend(value)

    for index, token in enumerate(tokenized_utterance):
        for number in NUMBER_TRIGGER_DICT.get(token.text, []):
            if index - 1 in indices_of_approximate_words:
                for approx_time in get_approximate_times([int(number)]):
                    number_linking_dict[str(approx_time)].append(index)
            else:
                number_linking_dict[number].append(index)
    return number_linking_dict
[ "def", "get_numbers_from_utterance", "(", "utterance", ":", "str", ",", "tokenized_utterance", ":", "List", "[", "Token", "]", ")", "->", "Dict", "[", "str", ",", "List", "[", "int", "]", "]", ":", "# When we use a regex to find numbers or strings, we need a mapping from", "# the character to which token triggered it.", "char_offset_to_token_index", "=", "{", "token", ".", "idx", ":", "token_index", "for", "token_index", ",", "token", "in", "enumerate", "(", "tokenized_utterance", ")", "}", "# We want to look up later for each time whether it appears after a word", "# such as \"about\" or \"approximately\".", "indices_of_approximate_words", "=", "{", "index", "for", "index", ",", "token", "in", "enumerate", "(", "tokenized_utterance", ")", "if", "token", ".", "text", "in", "APPROX_WORDS", "}", "indices_of_words_preceding_time", "=", "{", "index", "for", "index", ",", "token", "in", "enumerate", "(", "tokenized_utterance", ")", "if", "token", ".", "text", "in", "WORDS_PRECEDING_TIME", "}", "indices_of_am_pm", "=", "{", "index", "for", "index", ",", "token", "in", "enumerate", "(", "tokenized_utterance", ")", "if", "token", ".", "text", "in", "{", "'am'", ",", "'pm'", "}", "}", "number_linking_dict", ":", "Dict", "[", "str", ",", "List", "[", "int", "]", "]", "=", "defaultdict", "(", "list", ")", "for", "token_index", ",", "token", "in", "enumerate", "(", "tokenized_utterance", ")", ":", "if", "token", ".", "text", ".", "isdigit", "(", ")", ":", "if", "token_index", "-", "1", "in", "indices_of_words_preceding_time", "and", "token_index", "+", "1", "not", "in", "indices_of_am_pm", ":", "for", "time", "in", "digit_to_query_time", "(", "token", ".", "text", ")", ":", "number_linking_dict", "[", "str", "(", "time", ")", "]", ".", "append", "(", "token_index", ")", "times_linking_dict", "=", "get_times_from_utterance", "(", "utterance", ",", "char_offset_to_token_index", ",", "indices_of_approximate_words", ")", "for", "key", ",", "value", "in", "times_linking_dict", ".", "items", "(", ")", ":", "number_linking_dict", "[", "key", "]", ".", "extend", "(", "value", ")", "for", "index", ",", "token", "in", "enumerate", "(", "tokenized_utterance", ")", ":", "for", "number", "in", "NUMBER_TRIGGER_DICT", ".", "get", "(", "token", ".", "text", ",", "[", "]", ")", ":", "if", "index", "-", "1", "in", "indices_of_approximate_words", ":", "for", "approx_time", "in", "get_approximate_times", "(", "[", "int", "(", "number", ")", "]", ")", ":", "number_linking_dict", "[", "str", "(", "approx_time", ")", "]", ".", "append", "(", "index", ")", "else", ":", "number_linking_dict", "[", "number", "]", ".", "append", "(", "index", ")", "return", "number_linking_dict" ]
Given an utterance, this function finds all the numbers that are in the action space. Since we need to keep track of linking scores, we represent the numbers as a dictionary, where the keys are the string representation of the number and the values are lists of the token indices that trigger that number.
[ "Given", "an", "utterance", "this", "function", "finds", "all", "the", "numbers", "that", "are", "in", "the", "action", "space", ".", "Since", "we", "need", "to", "keep", "track", "of", "linking", "scores", "we", "represent", "the", "numbers", "as", "a", "dictionary", "where", "the", "keys", "are", "the", "string", "representation", "of", "the", "number", "and", "the", "values", "are", "lists", "of", "the", "token", "indices", "that", "triggers", "that", "number", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/contexts/atis_tables.py#L128-L170
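A sketch of calling this with hand-built tokens. The function maps regex matches back to tokens through ``token.idx``, so each Token is given its character offset; the utterance is made up, and since the exact keys that come back depend on the module's trigger tables, the result is printed rather than asserted.

from allennlp.data.tokenizers import Token
from allennlp.semparse.contexts.atis_tables import get_numbers_from_utterance

utterance = "show flights arriving around 7pm"
tokens, offset = [], 0
for word in utterance.split():
    tokens.append(Token(word, offset))  # Token(text, idx)
    offset += len(word) + 1

number_linking_dict = get_numbers_from_utterance(utterance, tokens)
# Keys are query values (e.g. times such as '1900'); values are the
# indices of the tokens that triggered them.
print(dict(number_linking_dict))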
23,154
allenai/allennlp
allennlp/semparse/contexts/atis_tables.py
digit_to_query_time
def digit_to_query_time(digit: str) -> List[int]:
    """
    Given a digit in the utterance, return a list of the times that it corresponds to.
    """
    if len(digit) > 2:
        return [int(digit), int(digit) + TWELVE_TO_TWENTY_FOUR]
    elif int(digit) % 12 == 0:
        return [0, 1200, 2400]
    return [int(digit) * HOUR_TO_TWENTY_FOUR,
            (int(digit) * HOUR_TO_TWENTY_FOUR + TWELVE_TO_TWENTY_FOUR) % HOURS_IN_DAY]
python
def digit_to_query_time(digit: str) -> List[int]:
    """
    Given a digit in the utterance, return a list of the times that it corresponds to.
    """
    if len(digit) > 2:
        return [int(digit), int(digit) + TWELVE_TO_TWENTY_FOUR]
    elif int(digit) % 12 == 0:
        return [0, 1200, 2400]
    return [int(digit) * HOUR_TO_TWENTY_FOUR,
            (int(digit) * HOUR_TO_TWENTY_FOUR + TWELVE_TO_TWENTY_FOUR) % HOURS_IN_DAY]
[ "def", "digit_to_query_time", "(", "digit", ":", "str", ")", "->", "List", "[", "int", "]", ":", "if", "len", "(", "digit", ")", ">", "2", ":", "return", "[", "int", "(", "digit", ")", ",", "int", "(", "digit", ")", "+", "TWELVE_TO_TWENTY_FOUR", "]", "elif", "int", "(", "digit", ")", "%", "12", "==", "0", ":", "return", "[", "0", ",", "1200", ",", "2400", "]", "return", "[", "int", "(", "digit", ")", "*", "HOUR_TO_TWENTY_FOUR", ",", "(", "int", "(", "digit", ")", "*", "HOUR_TO_TWENTY_FOUR", "+", "TWELVE_TO_TWENTY_FOUR", ")", "%", "HOURS_IN_DAY", "]" ]
Given a digit in the utterance, return a list of the times that it corresponds to.
[ "Given", "a", "digit", "in", "the", "utterance", "return", "a", "list", "of", "the", "times", "that", "it", "corresponds", "to", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/contexts/atis_tables.py#L238-L247
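Worked examples of the arithmetic, assuming the module constants carry the values the logic implies (HOUR_TO_TWENTY_FOUR = 100, TWELVE_TO_TWENTY_FOUR = 1200, HOURS_IN_DAY = 2400):

from allennlp.semparse.contexts.atis_tables import digit_to_query_time

print(digit_to_query_time("7"))    # [700, 1900]: 7 * 100, and (700 + 1200) % 2400
print(digit_to_query_time("12"))   # [0, 1200, 2400]: 12 % 12 == 0
print(digit_to_query_time("830"))  # [830, 2030]: len > 2, so treated as hhmm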
23,155
allenai/allennlp
allennlp/semparse/contexts/atis_tables.py
get_approximate_times
def get_approximate_times(times: List[int]) -> List[int]:
    """
    Given a list of times that follow a word such as ``about``,
    we return a list of times that could appear in the query as a result
    of this. For example if ``about 7pm`` appears in the utterance, then
    we also want to add ``1830`` and ``1930``.
    """
    approximate_times = []
    for time in times:
        hour = int(time / HOUR_TO_TWENTY_FOUR) % 24
        minute = time % HOUR_TO_TWENTY_FOUR
        approximate_time = datetime.now()
        approximate_time = approximate_time.replace(hour=hour, minute=minute)

        start_time_range = approximate_time - timedelta(minutes=30)
        end_time_range = approximate_time + timedelta(minutes=30)
        approximate_times.extend([start_time_range.hour * HOUR_TO_TWENTY_FOUR + start_time_range.minute,
                                  end_time_range.hour * HOUR_TO_TWENTY_FOUR + end_time_range.minute])
    return approximate_times
python
def get_approximate_times(times: List[int]) -> List[int]:
    """
    Given a list of times that follow a word such as ``about``,
    we return a list of times that could appear in the query as a result
    of this. For example if ``about 7pm`` appears in the utterance, then
    we also want to add ``1830`` and ``1930``.
    """
    approximate_times = []
    for time in times:
        hour = int(time / HOUR_TO_TWENTY_FOUR) % 24
        minute = time % HOUR_TO_TWENTY_FOUR
        approximate_time = datetime.now()
        approximate_time = approximate_time.replace(hour=hour, minute=minute)

        start_time_range = approximate_time - timedelta(minutes=30)
        end_time_range = approximate_time + timedelta(minutes=30)
        approximate_times.extend([start_time_range.hour * HOUR_TO_TWENTY_FOUR + start_time_range.minute,
                                  end_time_range.hour * HOUR_TO_TWENTY_FOUR + end_time_range.minute])
    return approximate_times
[ "def", "get_approximate_times", "(", "times", ":", "List", "[", "int", "]", ")", "->", "List", "[", "int", "]", ":", "approximate_times", "=", "[", "]", "for", "time", "in", "times", ":", "hour", "=", "int", "(", "time", "/", "HOUR_TO_TWENTY_FOUR", ")", "%", "24", "minute", "=", "time", "%", "HOUR_TO_TWENTY_FOUR", "approximate_time", "=", "datetime", ".", "now", "(", ")", "approximate_time", "=", "approximate_time", ".", "replace", "(", "hour", "=", "hour", ",", "minute", "=", "minute", ")", "start_time_range", "=", "approximate_time", "-", "timedelta", "(", "minutes", "=", "30", ")", "end_time_range", "=", "approximate_time", "+", "timedelta", "(", "minutes", "=", "30", ")", "approximate_times", ".", "extend", "(", "[", "start_time_range", ".", "hour", "*", "HOUR_TO_TWENTY_FOUR", "+", "start_time_range", ".", "minute", ",", "end_time_range", ".", "hour", "*", "HOUR_TO_TWENTY_FOUR", "+", "end_time_range", ".", "minute", "]", ")", "return", "approximate_times" ]
Given a list of times that follow a word such as ``about``, we return a list of times that could appear in the query as a result of this. For example if ``about 7pm`` appears in the utterance, then we also want to add ``1830`` and ``1930``.
[ "Given", "a", "list", "of", "times", "that", "follow", "a", "word", "such", "as", "about", "we", "return", "a", "list", "of", "times", "that", "could", "appear", "in", "the", "query", "as", "a", "result", "of", "this", ".", "For", "example", "if", "about", "7pm", "appears", "in", "the", "utterance", "then", "we", "also", "want", "to", "add", "1830", "and", "1930", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/contexts/atis_tables.py#L249-L268
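The half-hour window from the docstring, checked directly (again assuming HOUR_TO_TWENTY_FOUR = 100):

from allennlp.semparse.contexts.atis_tables import get_approximate_times

# 1900 (7pm) becomes the endpoints of a +/- 30 minute window.
print(get_approximate_times([1900]))  # [1830, 1930]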
23,156
allenai/allennlp
allennlp/semparse/contexts/atis_tables.py
_time_regex_match
def _time_regex_match(regex: str,
                      utterance: str,
                      char_offset_to_token_index: Dict[int, int],
                      map_match_to_query_value: Callable[[str], List[int]],
                      indices_of_approximate_words: Set[int]) -> Dict[str, List[int]]:
    r"""
    Given a regex for matching times in the utterance, we want to convert the matches
    to the values that appear in the query and token indices they correspond to.

    ``char_offset_to_token_index`` is a dictionary that maps from the character offset to
    the token index; we use this to look up what token a regex match corresponds to.
    ``indices_of_approximate_words`` are the token indices of the words such as ``about`` or
    ``approximately``. We use this to check if a regex match is preceded by one of these words.
    If it is, we also want to add the times that define this approximate time range.

    ``map_match_to_query_value`` is a function that converts the regex matches to the values
    that appear in the query. For example, we may pass in a regex such as ``\d+pm`` that matches
    times such as ``7pm``. ``map_match_to_query_value`` would be a function that takes ``7pm``
    as input and returns ``1900``.
    """
    linking_scores_dict: Dict[str, List[int]] = defaultdict(list)
    number_regex = re.compile(regex)
    for match in number_regex.finditer(utterance):
        query_values = map_match_to_query_value(match.group())
        # If the time appears after a word like ``about`` then we also add
        # the times that mark the start and end of the allowed range.
        approximate_times = []
        if char_offset_to_token_index.get(match.start(), 0) - 1 in indices_of_approximate_words:
            approximate_times.extend(get_approximate_times(query_values))
        query_values.extend(approximate_times)
        if match.start() in char_offset_to_token_index:
            for query_value in query_values:
                linking_scores_dict[str(query_value)].extend([char_offset_to_token_index[match.start()],
                                                              char_offset_to_token_index[match.start()] + 1])
    return linking_scores_dict
python
def _time_regex_match(regex: str,
                      utterance: str,
                      char_offset_to_token_index: Dict[int, int],
                      map_match_to_query_value: Callable[[str], List[int]],
                      indices_of_approximate_words: Set[int]) -> Dict[str, List[int]]:
    r"""
    Given a regex for matching times in the utterance, we want to convert the matches
    to the values that appear in the query and token indices they correspond to.

    ``char_offset_to_token_index`` is a dictionary that maps from the character offset to
    the token index; we use this to look up what token a regex match corresponds to.
    ``indices_of_approximate_words`` are the token indices of the words such as ``about`` or
    ``approximately``. We use this to check if a regex match is preceded by one of these words.
    If it is, we also want to add the times that define this approximate time range.

    ``map_match_to_query_value`` is a function that converts the regex matches to the values
    that appear in the query. For example, we may pass in a regex such as ``\d+pm`` that matches
    times such as ``7pm``. ``map_match_to_query_value`` would be a function that takes ``7pm``
    as input and returns ``1900``.
    """
    linking_scores_dict: Dict[str, List[int]] = defaultdict(list)
    number_regex = re.compile(regex)
    for match in number_regex.finditer(utterance):
        query_values = map_match_to_query_value(match.group())
        # If the time appears after a word like ``about`` then we also add
        # the times that mark the start and end of the allowed range.
        approximate_times = []
        if char_offset_to_token_index.get(match.start(), 0) - 1 in indices_of_approximate_words:
            approximate_times.extend(get_approximate_times(query_values))
        query_values.extend(approximate_times)
        if match.start() in char_offset_to_token_index:
            for query_value in query_values:
                linking_scores_dict[str(query_value)].extend([char_offset_to_token_index[match.start()],
                                                              char_offset_to_token_index[match.start()] + 1])
    return linking_scores_dict
[ "def", "_time_regex_match", "(", "regex", ":", "str", ",", "utterance", ":", "str", ",", "char_offset_to_token_index", ":", "Dict", "[", "int", ",", "int", "]", ",", "map_match_to_query_value", ":", "Callable", "[", "[", "str", "]", ",", "List", "[", "int", "]", "]", ",", "indices_of_approximate_words", ":", "Set", "[", "int", "]", ")", "->", "Dict", "[", "str", ",", "List", "[", "int", "]", "]", ":", "linking_scores_dict", ":", "Dict", "[", "str", ",", "List", "[", "int", "]", "]", "=", "defaultdict", "(", "list", ")", "number_regex", "=", "re", ".", "compile", "(", "regex", ")", "for", "match", "in", "number_regex", ".", "finditer", "(", "utterance", ")", ":", "query_values", "=", "map_match_to_query_value", "(", "match", ".", "group", "(", ")", ")", "# If the time appears after a word like ``about`` then we also add", "# the times that mark the start and end of the allowed range.", "approximate_times", "=", "[", "]", "if", "char_offset_to_token_index", ".", "get", "(", "match", ".", "start", "(", ")", ",", "0", ")", "-", "1", "in", "indices_of_approximate_words", ":", "approximate_times", ".", "extend", "(", "get_approximate_times", "(", "query_values", ")", ")", "query_values", ".", "extend", "(", "approximate_times", ")", "if", "match", ".", "start", "(", ")", "in", "char_offset_to_token_index", ":", "for", "query_value", "in", "query_values", ":", "linking_scores_dict", "[", "str", "(", "query_value", ")", "]", ".", "extend", "(", "[", "char_offset_to_token_index", "[", "match", ".", "start", "(", ")", "]", ",", "char_offset_to_token_index", "[", "match", ".", "start", "(", ")", "]", "+", "1", "]", ")", "return", "linking_scores_dict" ]
r""" Given a regex for matching times in the utterance, we want to convert the matches to the values that appear in the query and token indices they correspond to. ``char_offset_to_token_index`` is a dictionary that maps from the character offset to the token index, we use this to look up what token a regex match corresponds to. ``indices_of_approximate_words`` are the token indices of the words such as ``about`` or ``approximately``. We use this to check if a regex match is preceded by one of these words. If it is, we also want to add the times that define this approximate time range. ``map_match_to_query_value`` is a function that converts the regex matches to the values that appear in the query. For example, we may pass in a regex such as ``\d+pm`` that matches times such as ``7pm``. ``map_match_to_query_value`` would be a function that takes ``7pm`` as input and returns ``1900``.
[ "r", "Given", "a", "regex", "for", "matching", "times", "in", "the", "utterance", "we", "want", "to", "convert", "the", "matches", "to", "the", "values", "that", "appear", "in", "the", "query", "and", "token", "indices", "they", "correspond", "to", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/contexts/atis_tables.py#L270-L304
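A sketch that drives the helper directly; the regex and the ``pm_to_query_value`` mapper below are illustrative stand-ins, not the ones atis_tables actually registers.

from allennlp.data.tokenizers import Token
from allennlp.semparse.contexts.atis_tables import _time_regex_match

utterance = "leave about 7pm"
tokens, offset = [], 0
for word in utterance.split():
    tokens.append(Token(word, offset))
    offset += len(word) + 1

char_offset_to_token_index = {token.idx: index for index, token in enumerate(tokens)}
indices_of_approximate_words = {index for index, token in enumerate(tokens)
                                if token.text in {'about', 'approximately'}}

def pm_to_query_value(match: str) -> list:
    # Hypothetical mapper for this sketch: "7pm" -> [1900].
    return [int(match.rstrip('pm')) * 100 + 1200]

linking_scores_dict = _time_regex_match(r'\d+pm', utterance,
                                        char_offset_to_token_index,
                                        pm_to_query_value,
                                        indices_of_approximate_words)
# "7pm" follows "about", so 1830 and 1930 should be linked alongside 1900.
print(dict(linking_scores_dict))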
23,157
allenai/allennlp
allennlp/semparse/executors/sql_executor.py
SqlExecutor._evaluate_sql_query_subprocess
def _evaluate_sql_query_subprocess(self, predicted_query: str, sql_query_labels: List[str]) -> int:
    """
    We evaluate here whether the predicted query and the query label evaluate to the
    exact same table. This method is only called by the subprocess, so we just exit with
    1 if it is correct and 0 otherwise.
    """

    postprocessed_predicted_query = self.postprocess_query_sqlite(predicted_query)

    try:
        self._cursor.execute(postprocessed_predicted_query)
        predicted_rows = self._cursor.fetchall()
    except sqlite3.Error as error:
        logger.warning(f'Error executing predicted: {error}')
        exit(0)

    # If predicted table matches any of the reference tables then it is counted as correct.
    target_rows = None
    for sql_query_label in sql_query_labels:
        postprocessed_sql_query_label = self.postprocess_query_sqlite(sql_query_label)
        try:
            self._cursor.execute(postprocessed_sql_query_label)
            target_rows = self._cursor.fetchall()
        except sqlite3.Error as error:
            logger.warning(f'Error executing query label: {error}')
        if predicted_rows == target_rows:
            exit(1)
    exit(0)
python
def _evaluate_sql_query_subprocess(self, predicted_query: str, sql_query_labels: List[str]) -> int:
    """
    We evaluate here whether the predicted query and the query label evaluate to the
    exact same table. This method is only called by the subprocess, so we just exit with
    1 if it is correct and 0 otherwise.
    """

    postprocessed_predicted_query = self.postprocess_query_sqlite(predicted_query)

    try:
        self._cursor.execute(postprocessed_predicted_query)
        predicted_rows = self._cursor.fetchall()
    except sqlite3.Error as error:
        logger.warning(f'Error executing predicted: {error}')
        exit(0)

    # If predicted table matches any of the reference tables then it is counted as correct.
    target_rows = None
    for sql_query_label in sql_query_labels:
        postprocessed_sql_query_label = self.postprocess_query_sqlite(sql_query_label)
        try:
            self._cursor.execute(postprocessed_sql_query_label)
            target_rows = self._cursor.fetchall()
        except sqlite3.Error as error:
            logger.warning(f'Error executing query label: {error}')
        if predicted_rows == target_rows:
            exit(1)
    exit(0)
[ "def", "_evaluate_sql_query_subprocess", "(", "self", ",", "predicted_query", ":", "str", ",", "sql_query_labels", ":", "List", "[", "str", "]", ")", "->", "int", ":", "postprocessed_predicted_query", "=", "self", ".", "postprocess_query_sqlite", "(", "predicted_query", ")", "try", ":", "self", ".", "_cursor", ".", "execute", "(", "postprocessed_predicted_query", ")", "predicted_rows", "=", "self", ".", "_cursor", ".", "fetchall", "(", ")", "except", "sqlite3", ".", "Error", "as", "error", ":", "logger", ".", "warning", "(", "f'Error executing predicted: {error}'", ")", "exit", "(", "0", ")", "# If predicted table matches any of the reference tables then it is counted as correct.", "target_rows", "=", "None", "for", "sql_query_label", "in", "sql_query_labels", ":", "postprocessed_sql_query_label", "=", "self", ".", "postprocess_query_sqlite", "(", "sql_query_label", ")", "try", ":", "self", ".", "_cursor", ".", "execute", "(", "postprocessed_sql_query_label", ")", "target_rows", "=", "self", ".", "_cursor", ".", "fetchall", "(", ")", "except", "sqlite3", ".", "Error", "as", "error", ":", "logger", ".", "warning", "(", "f'Error executing predicted: {error}'", ")", "if", "predicted_rows", "==", "target_rows", ":", "exit", "(", "1", ")", "exit", "(", "0", ")" ]
We evaluate here whether the predicted query and the query label evaluate to the exact same table. This method is only called by the subprocess, so we just exit with 1 if it is correct and 0 otherwise.
[ "We", "evaluate", "here", "whether", "the", "predicted", "query", "and", "the", "query", "label", "evaluate", "to", "the", "exact", "same", "table", ".", "This", "method", "is", "only", "called", "by", "the", "subprocess", "so", "we", "just", "exit", "with", "1", "if", "it", "is", "correct", "and", "0", "otherwise", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/executors/sql_executor.py#L52-L79
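The method communicates entirely through its exit code: exit(1) on a denotation match, exit(0) otherwise. A sketch of the parent-side protocol follows; the ``denotation_matches`` driver is illustrative, not SqlExecutor's actual code.

from multiprocessing import Process

def denotation_matches(executor, predicted_query, sql_query_labels) -> bool:
    process = Process(target=executor._evaluate_sql_query_subprocess,
                      args=(predicted_query, sql_query_labels))
    process.start()
    process.join()  # the child always terminates via exit(0) or exit(1)
    return process.exitcode == 1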
23,158
allenai/allennlp
allennlp/semparse/contexts/sql_context_utils.py
format_grammar_string
def format_grammar_string(grammar_dictionary: Dict[str, List[str]]) -> str:
    """
    Formats a dictionary of production rules into the string format expected
    by the Parsimonious Grammar class.
    """
    grammar_string = '\n'.join([f"{nonterminal} = {' / '.join(right_hand_side)}"
                                for nonterminal, right_hand_side in grammar_dictionary.items()])
    return grammar_string.replace("\\", "\\\\")
python
def format_grammar_string(grammar_dictionary: Dict[str, List[str]]) -> str:
    """
    Formats a dictionary of production rules into the string format expected
    by the Parsimonious Grammar class.
    """
    grammar_string = '\n'.join([f"{nonterminal} = {' / '.join(right_hand_side)}"
                                for nonterminal, right_hand_side in grammar_dictionary.items()])
    return grammar_string.replace("\\", "\\\\")
[ "def", "format_grammar_string", "(", "grammar_dictionary", ":", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", ")", "->", "str", ":", "grammar_string", "=", "'\\n'", ".", "join", "(", "[", "f\"{nonterminal} = {' / '.join(right_hand_side)}\"", "for", "nonterminal", ",", "right_hand_side", "in", "grammar_dictionary", ".", "items", "(", ")", "]", ")", "return", "grammar_string", ".", "replace", "(", "\"\\\\\"", ",", "\"\\\\\\\\\"", ")" ]
Formats a dictionary of production rules into the string format expected by the Parsimonious Grammar class.
[ "Formats", "a", "dictionary", "of", "production", "rules", "into", "the", "string", "format", "expected", "by", "the", "Parsimonious", "Grammar", "class", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/contexts/sql_context_utils.py#L16-L23
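A small example; the grammar dictionary below is illustrative, with each nonterminal's alternatives joined by " / " on one line.

from allennlp.semparse.contexts.sql_context_utils import format_grammar_string

grammar_dictionary = {
    'statement': ['(query ws ";")'],
    'query': ['(ws "SELECT" wsp col_ref ws "FROM" wsp table_name ws)'],
}
print(format_grammar_string(grammar_dictionary))
# statement = (query ws ";")
# query = (ws "SELECT" wsp col_ref ws "FROM" wsp table_name ws)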
23,159
allenai/allennlp
allennlp/semparse/contexts/sql_context_utils.py
initialize_valid_actions
def initialize_valid_actions(grammar: Grammar,
                             keywords_to_uppercase: List[str] = None) -> Dict[str, List[str]]:
    """
    We initialize the valid actions with the global actions. These include the
    valid actions that result from the grammar and also those that result from
    the tables provided. The keys represent the nonterminals in the grammar
    and the values are lists of the valid actions of that nonterminal.
    """
    valid_actions: Dict[str, Set[str]] = defaultdict(set)

    for key in grammar:
        rhs = grammar[key]

        # Sequence represents a series of expressions that match pieces of the text in order.
        # Eg. A -> B C
        if isinstance(rhs, Sequence):
            valid_actions[key].add(format_action(key, " ".join(rhs._unicode_members()),  # pylint: disable=protected-access
                                                 keywords_to_uppercase=keywords_to_uppercase))

        # OneOf represents a series of expressions, one of which matches the text.
        # Eg. A -> B / C
        elif isinstance(rhs, OneOf):
            for option in rhs._unicode_members():  # pylint: disable=protected-access
                valid_actions[key].add(format_action(key, option,
                                                     keywords_to_uppercase=keywords_to_uppercase))

        # A string literal, eg. "A"
        elif isinstance(rhs, Literal):
            if rhs.literal != "":
                valid_actions[key].add(format_action(key, repr(rhs.literal),
                                                     keywords_to_uppercase=keywords_to_uppercase))
            else:
                valid_actions[key] = set()

    valid_action_strings = {key: sorted(value) for key, value in valid_actions.items()}
    return valid_action_strings
python
def initialize_valid_actions(grammar: Grammar,
                             keywords_to_uppercase: List[str] = None) -> Dict[str, List[str]]:
    """
    We initialize the valid actions with the global actions. These include the
    valid actions that result from the grammar and also those that result from
    the tables provided. The keys represent the nonterminals in the grammar
    and the values are lists of the valid actions of that nonterminal.
    """
    valid_actions: Dict[str, Set[str]] = defaultdict(set)

    for key in grammar:
        rhs = grammar[key]

        # Sequence represents a series of expressions that match pieces of the text in order.
        # Eg. A -> B C
        if isinstance(rhs, Sequence):
            valid_actions[key].add(format_action(key, " ".join(rhs._unicode_members()),  # pylint: disable=protected-access
                                                 keywords_to_uppercase=keywords_to_uppercase))

        # OneOf represents a series of expressions, one of which matches the text.
        # Eg. A -> B / C
        elif isinstance(rhs, OneOf):
            for option in rhs._unicode_members():  # pylint: disable=protected-access
                valid_actions[key].add(format_action(key, option,
                                                     keywords_to_uppercase=keywords_to_uppercase))

        # A string literal, eg. "A"
        elif isinstance(rhs, Literal):
            if rhs.literal != "":
                valid_actions[key].add(format_action(key, repr(rhs.literal),
                                                     keywords_to_uppercase=keywords_to_uppercase))
            else:
                valid_actions[key] = set()

    valid_action_strings = {key: sorted(value) for key, value in valid_actions.items()}
    return valid_action_strings
[ "def", "initialize_valid_actions", "(", "grammar", ":", "Grammar", ",", "keywords_to_uppercase", ":", "List", "[", "str", "]", "=", "None", ")", "->", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", ":", "valid_actions", ":", "Dict", "[", "str", ",", "Set", "[", "str", "]", "]", "=", "defaultdict", "(", "set", ")", "for", "key", "in", "grammar", ":", "rhs", "=", "grammar", "[", "key", "]", "# Sequence represents a series of expressions that match pieces of the text in order.", "# Eg. A -> B C", "if", "isinstance", "(", "rhs", ",", "Sequence", ")", ":", "valid_actions", "[", "key", "]", ".", "add", "(", "format_action", "(", "key", ",", "\" \"", ".", "join", "(", "rhs", ".", "_unicode_members", "(", ")", ")", ",", "# pylint: disable=protected-access", "keywords_to_uppercase", "=", "keywords_to_uppercase", ")", ")", "# OneOf represents a series of expressions, one of which matches the text.", "# Eg. A -> B / C", "elif", "isinstance", "(", "rhs", ",", "OneOf", ")", ":", "for", "option", "in", "rhs", ".", "_unicode_members", "(", ")", ":", "# pylint: disable=protected-access", "valid_actions", "[", "key", "]", ".", "add", "(", "format_action", "(", "key", ",", "option", ",", "keywords_to_uppercase", "=", "keywords_to_uppercase", ")", ")", "# A string literal, eg. \"A\"", "elif", "isinstance", "(", "rhs", ",", "Literal", ")", ":", "if", "rhs", ".", "literal", "!=", "\"\"", ":", "valid_actions", "[", "key", "]", ".", "add", "(", "format_action", "(", "key", ",", "repr", "(", "rhs", ".", "literal", ")", ",", "keywords_to_uppercase", "=", "keywords_to_uppercase", ")", ")", "else", ":", "valid_actions", "[", "key", "]", "=", "set", "(", ")", "valid_action_strings", "=", "{", "key", ":", "sorted", "(", "value", ")", "for", "key", ",", "value", "in", "valid_actions", ".", "items", "(", ")", "}", "return", "valid_action_strings" ]
We initialize the valid actions with the global actions. These include the valid actions that result from the grammar and also those that result from the tables provided. The keys represent the nonterminals in the grammar and the values are lists of the valid actions of that nonterminal.
[ "We", "initialize", "the", "valid", "actions", "with", "the", "global", "actions", ".", "These", "include", "the", "valid", "actions", "that", "result", "from", "the", "grammar", "and", "also", "those", "that", "result", "from", "the", "tables", "provided", ".", "The", "keys", "represent", "the", "nonterminals", "in", "the", "grammar", "and", "the", "values", "are", "lists", "of", "the", "valid", "actions", "of", "that", "nonterminal", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/contexts/sql_context_utils.py#L26-L61
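A toy grammar (not the ATIS one) to show the shape of the returned dictionary; the exact formatting of each action string comes from format_action, described in the next record.

from parsimonious.grammar import Grammar
from allennlp.semparse.contexts.sql_context_utils import initialize_valid_actions

grammar = Grammar('expr = name / number\n'
                  'name = "a" / "b"\n'
                  'number = "1"')
valid_actions = initialize_valid_actions(grammar)
for nonterminal, actions in sorted(valid_actions.items()):
    print(nonterminal, actions)
# e.g. the OneOf rule yields: expr ['expr -> [name]', 'expr -> [number]']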
23,160
allenai/allennlp
allennlp/semparse/contexts/sql_context_utils.py
format_action
def format_action(nonterminal: str,
                  right_hand_side: str,
                  is_string: bool = False,
                  is_number: bool = False,
                  keywords_to_uppercase: List[str] = None) -> str:
    """
    This function formats an action as it appears in models. It
    splits productions based on the special `ws` and `wsp` rules,
    which are used in grammars to denote whitespace, and then
    rejoins these tokens into a formatted, comma separated list.
    Importantly, note that it `does not` split on spaces in the grammar
    string, because these might not correspond to spaces in the language
    the grammar recognises.

    Parameters
    ----------
    nonterminal : ``str``, required.
        The nonterminal in the action.
    right_hand_side : ``str``, required.
        The right hand side of the action
        (i.e the thing which is produced).
    is_string : ``bool``, optional (default = False).
        Whether the production produces a string.
        If it does, it is formatted as ``nonterminal -> ['string']``
    is_number : ``bool``, optional, (default = False).
        Whether the production produces a number.
        If it does, it is formatted as ``nonterminal -> ['number']``
    keywords_to_uppercase: ``List[str]``, optional, (default = None)
        Keywords in the grammar to uppercase. In the case of sql,
        this might be SELECT, MAX etc.
    """
    keywords_to_uppercase = keywords_to_uppercase or []
    if right_hand_side.upper() in keywords_to_uppercase:
        right_hand_side = right_hand_side.upper()

    if is_string:
        return f'{nonterminal} -> ["\'{right_hand_side}\'"]'

    elif is_number:
        return f'{nonterminal} -> ["{right_hand_side}"]'

    else:
        right_hand_side = right_hand_side.lstrip("(").rstrip(")")
        child_strings = [token for token in WHITESPACE_REGEX.split(right_hand_side) if token]
        child_strings = [tok.upper() if tok.upper() in keywords_to_uppercase else tok
                         for tok in child_strings]
        return f"{nonterminal} -> [{', '.join(child_strings)}]"
python
def format_action(nonterminal: str,
                  right_hand_side: str,
                  is_string: bool = False,
                  is_number: bool = False,
                  keywords_to_uppercase: List[str] = None) -> str:
    """
    This function formats an action as it appears in models. It
    splits productions based on the special `ws` and `wsp` rules,
    which are used in grammars to denote whitespace, and then
    rejoins these tokens into a formatted, comma separated list.
    Importantly, note that it `does not` split on spaces in the grammar
    string, because these might not correspond to spaces in the language
    the grammar recognises.

    Parameters
    ----------
    nonterminal : ``str``, required.
        The nonterminal in the action.
    right_hand_side : ``str``, required.
        The right hand side of the action
        (i.e the thing which is produced).
    is_string : ``bool``, optional (default = False).
        Whether the production produces a string.
        If it does, it is formatted as ``nonterminal -> ['string']``
    is_number : ``bool``, optional, (default = False).
        Whether the production produces a number.
        If it does, it is formatted as ``nonterminal -> ['number']``
    keywords_to_uppercase: ``List[str]``, optional, (default = None)
        Keywords in the grammar to uppercase. In the case of sql,
        this might be SELECT, MAX etc.
    """
    keywords_to_uppercase = keywords_to_uppercase or []
    if right_hand_side.upper() in keywords_to_uppercase:
        right_hand_side = right_hand_side.upper()

    if is_string:
        return f'{nonterminal} -> ["\'{right_hand_side}\'"]'

    elif is_number:
        return f'{nonterminal} -> ["{right_hand_side}"]'

    else:
        right_hand_side = right_hand_side.lstrip("(").rstrip(")")
        child_strings = [token for token in WHITESPACE_REGEX.split(right_hand_side) if token]
        child_strings = [tok.upper() if tok.upper() in keywords_to_uppercase else tok
                         for tok in child_strings]
        return f"{nonterminal} -> [{', '.join(child_strings)}]"
[ "def", "format_action", "(", "nonterminal", ":", "str", ",", "right_hand_side", ":", "str", ",", "is_string", ":", "bool", "=", "False", ",", "is_number", ":", "bool", "=", "False", ",", "keywords_to_uppercase", ":", "List", "[", "str", "]", "=", "None", ")", "->", "str", ":", "keywords_to_uppercase", "=", "keywords_to_uppercase", "or", "[", "]", "if", "right_hand_side", ".", "upper", "(", ")", "in", "keywords_to_uppercase", ":", "right_hand_side", "=", "right_hand_side", ".", "upper", "(", ")", "if", "is_string", ":", "return", "f'{nonterminal} -> [\"\\'{right_hand_side}\\'\"]'", "elif", "is_number", ":", "return", "f'{nonterminal} -> [\"{right_hand_side}\"]'", "else", ":", "right_hand_side", "=", "right_hand_side", ".", "lstrip", "(", "\"(\"", ")", ".", "rstrip", "(", "\")\"", ")", "child_strings", "=", "[", "token", "for", "token", "in", "WHITESPACE_REGEX", ".", "split", "(", "right_hand_side", ")", "if", "token", "]", "child_strings", "=", "[", "tok", ".", "upper", "(", ")", "if", "tok", ".", "upper", "(", ")", "in", "keywords_to_uppercase", "else", "tok", "for", "tok", "in", "child_strings", "]", "return", "f\"{nonterminal} -> [{', '.join(child_strings)}]\"" ]
This function formats an action as it appears in models. It
splits productions based on the special `ws` and `wsp` rules,
which are used in grammars to denote whitespace, and then
rejoins these tokens into a formatted, comma separated list.
Importantly, note that it `does not` split on spaces in the grammar
string, because these might not correspond to spaces in the language
the grammar recognises.

Parameters
----------
nonterminal : ``str``, required.
    The nonterminal in the action.
right_hand_side : ``str``, required.
    The right hand side of the action
    (i.e the thing which is produced).
is_string : ``bool``, optional (default = False).
    Whether the production produces a string.
    If it does, it is formatted as ``nonterminal -> ['string']``
is_number : ``bool``, optional, (default = False).
    Whether the production produces a number.
    If it does, it is formatted as ``nonterminal -> ['number']``
keywords_to_uppercase: ``List[str]``, optional, (default = None)
    Keywords in the grammar to uppercase. In the case of sql,
    this might be SELECT, MAX etc.
[ "This", "function", "formats", "an", "action", "as", "it", "appears", "in", "models", ".", "It", "splits", "productions", "based", "on", "the", "special", "ws", "and", "wsp", "rules", "which", "are", "used", "in", "grammars", "to", "denote", "whitespace", "and", "then", "rejoins", "these", "tokens", "a", "formatted", "comma", "separated", "list", ".", "Importantly", "note", "that", "it", "does", "not", "split", "on", "spaces", "in", "the", "grammar", "string", "because", "these", "might", "not", "correspond", "to", "spaces", "in", "the", "language", "the", "grammar", "recognises", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/contexts/sql_context_utils.py#L64-L109
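A few direct calls, assuming WHITESPACE_REGEX drops the ws/wsp markers as the docstring describes while leaving the other tokens intact:

from allennlp.semparse.contexts.sql_context_utils import format_action

print(format_action('query', '(ws "SELECT" wsp col_ref ws)'))
# query -> ["SELECT", col_ref]

print(format_action('string', 'flight', is_string=True))
# string -> ["'flight'"]

print(format_action('number', '1900', is_number=True))
# number -> ["1900"]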
23,161
allenai/allennlp
allennlp/semparse/contexts/sql_context_utils.py
SqlVisitor.add_action
def add_action(self, node: Node) -> None:
    """
    For each node, we accumulate the rules that generated its children in a list.
    """
    if node.expr.name and node.expr.name not in ['ws', 'wsp']:
        nonterminal = f'{node.expr.name} -> '

        if isinstance(node.expr, Literal):
            right_hand_side = f'["{node.text}"]'

        else:
            child_strings = []
            for child in node.__iter__():
                if child.expr.name in ['ws', 'wsp']:
                    continue
                if child.expr.name != '':
                    child_strings.append(child.expr.name)
                else:
                    child_right_side_string = child.expr._as_rhs().lstrip("(").rstrip(")")  # pylint: disable=protected-access
                    child_right_side_list = [tok for tok in
                                             WHITESPACE_REGEX.split(child_right_side_string) if tok]
                    child_right_side_list = [tok.upper() if tok.upper() in
                                             self.keywords_to_uppercase else tok
                                             for tok in child_right_side_list]
                    child_strings.extend(child_right_side_list)
            right_hand_side = "[" + ", ".join(child_strings) + "]"
        rule = nonterminal + right_hand_side
        self.action_sequence = [rule] + self.action_sequence
python
def add_action(self, node: Node) -> None: """ For each node, we accumulate the rules that generated its children in a list. """ if node.expr.name and node.expr.name not in ['ws', 'wsp']: nonterminal = f'{node.expr.name} -> ' if isinstance(node.expr, Literal): right_hand_side = f'["{node.text}"]' else: child_strings = [] for child in node.__iter__(): if child.expr.name in ['ws', 'wsp']: continue if child.expr.name != '': child_strings.append(child.expr.name) else: child_right_side_string = child.expr._as_rhs().lstrip("(").rstrip(")") # pylint: disable=protected-access child_right_side_list = [tok for tok in WHITESPACE_REGEX.split(child_right_side_string) if tok] child_right_side_list = [tok.upper() if tok.upper() in self.keywords_to_uppercase else tok for tok in child_right_side_list] child_strings.extend(child_right_side_list) right_hand_side = "[" + ", ".join(child_strings) + "]" rule = nonterminal + right_hand_side self.action_sequence = [rule] + self.action_sequence
[ "def", "add_action", "(", "self", ",", "node", ":", "Node", ")", "->", "None", ":", "if", "node", ".", "expr", ".", "name", "and", "node", ".", "expr", ".", "name", "not", "in", "[", "'ws'", ",", "'wsp'", "]", ":", "nonterminal", "=", "f'{node.expr.name} -> '", "if", "isinstance", "(", "node", ".", "expr", ",", "Literal", ")", ":", "right_hand_side", "=", "f'[\"{node.text}\"]'", "else", ":", "child_strings", "=", "[", "]", "for", "child", "in", "node", ".", "__iter__", "(", ")", ":", "if", "child", ".", "expr", ".", "name", "in", "[", "'ws'", ",", "'wsp'", "]", ":", "continue", "if", "child", ".", "expr", ".", "name", "!=", "''", ":", "child_strings", ".", "append", "(", "child", ".", "expr", ".", "name", ")", "else", ":", "child_right_side_string", "=", "child", ".", "expr", ".", "_as_rhs", "(", ")", ".", "lstrip", "(", "\"(\"", ")", ".", "rstrip", "(", "\")\"", ")", "# pylint: disable=protected-access", "child_right_side_list", "=", "[", "tok", "for", "tok", "in", "WHITESPACE_REGEX", ".", "split", "(", "child_right_side_string", ")", "if", "tok", "]", "child_right_side_list", "=", "[", "tok", ".", "upper", "(", ")", "if", "tok", ".", "upper", "(", ")", "in", "self", ".", "keywords_to_uppercase", "else", "tok", "for", "tok", "in", "child_right_side_list", "]", "child_strings", ".", "extend", "(", "child_right_side_list", ")", "right_hand_side", "=", "\"[\"", "+", "\", \"", ".", "join", "(", "child_strings", ")", "+", "\"]\"", "rule", "=", "nonterminal", "+", "right_hand_side", "self", ".", "action_sequence", "=", "[", "rule", "]", "+", "self", ".", "action_sequence" ]
For each node, we accumulate the rules that generated its children in a list.
[ "For", "each", "node", "we", "accumulate", "the", "rules", "that", "generated", "its", "children", "in", "a", "list", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/contexts/sql_context_utils.py#L164-L191
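A hedged sketch of how a visitor like this is typically driven: parse a string with a parsimonious ``Grammar``, then walk the resulting tree. This assumes parsimonious is installed; the toy grammar and the rule strings in the comments are illustrative assumptions, not the repository's real SQL grammar.

from parsimonious.grammar import Grammar

# Toy grammar with an explicit `ws` rule, which add_action skips over.
grammar = Grammar('''
    query = "SELECT" ws col
    col   = "city" / "state"
    ws    = " "
''')
tree = grammar.parse("SELECT city")
# Hypothetical driver code, assuming SqlVisitor is importable from the module above:
# visitor = SqlVisitor(grammar)          # keywords_to_uppercase is optional
# actions = visitor.visit(tree)          # e.g. ['query -> ["SELECT", col]', ...]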
23,162
allenai/allennlp
allennlp/semparse/contexts/sql_context_utils.py
SqlVisitor.visit
def visit(self, node): """ See the ``NodeVisitor`` visit method. This just changes the order in which we visit nonterminals from right-to-left to left-to-right. """ method = getattr(self, 'visit_' + node.expr_name, self.generic_visit) # Call that method, and show where in the tree it failed if it blows # up. try: # Changing this to reverse here! return method(node, [self.visit(child) for child in reversed(list(node))]) except (VisitationError, UndefinedLabel): # Don't catch and re-wrap already-wrapped exceptions. raise except self.unwrapped_exceptions: raise except Exception: # pylint: disable=broad-except # Catch any exception, and tack on a parse tree so it's easier to # see where it went wrong. exc_class, exc, traceback = exc_info() reraise(VisitationError, VisitationError(exc, exc_class, node), traceback)
python
def visit(self, node): """ See the ``NodeVisitor`` visit method. This just changes the order in which we visit nonterminals from right-to-left to left-to-right. """ method = getattr(self, 'visit_' + node.expr_name, self.generic_visit) # Call that method, and show where in the tree it failed if it blows # up. try: # Changing this to reverse here! return method(node, [self.visit(child) for child in reversed(list(node))]) except (VisitationError, UndefinedLabel): # Don't catch and re-wrap already-wrapped exceptions. raise except self.unwrapped_exceptions: raise except Exception: # pylint: disable=broad-except # Catch any exception, and tack on a parse tree so it's easier to # see where it went wrong. exc_class, exc, traceback = exc_info() reraise(VisitationError, VisitationError(exc, exc_class, node), traceback)
[ "def", "visit", "(", "self", ",", "node", ")", ":", "method", "=", "getattr", "(", "self", ",", "'visit_'", "+", "node", ".", "expr_name", ",", "self", ".", "generic_visit", ")", "# Call that method, and show where in the tree it failed if it blows", "# up.", "try", ":", "# Changing this to reverse here!", "return", "method", "(", "node", ",", "[", "self", ".", "visit", "(", "child", ")", "for", "child", "in", "reversed", "(", "list", "(", "node", ")", ")", "]", ")", "except", "(", "VisitationError", ",", "UndefinedLabel", ")", ":", "# Don't catch and re-wrap already-wrapped exceptions.", "raise", "except", "self", ".", "unwrapped_exceptions", ":", "raise", "except", "Exception", ":", "# pylint: disable=broad-except", "# Catch any exception, and tack on a parse tree so it's easier to", "# see where it went wrong.", "exc_class", ",", "exc", ",", "traceback", "=", "exc_info", "(", ")", "reraise", "(", "VisitationError", ",", "VisitationError", "(", "exc", ",", "exc_class", ",", "node", ")", ",", "traceback", ")" ]
See the ``NodeVisitor`` visit method. This just changes the order in which we visit nonterminals from right-to-left to left-to-right.
[ "See", "the", "NodeVisitor", "visit", "method", ".", "This", "just", "changes", "the", "order", "in", "which", "we", "visit", "nonterminals", "from", "right", "to", "left", "to", "left", "to", "right", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/contexts/sql_context_utils.py#L194-L215
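The reversed child iteration works hand in hand with the prepending in ``add_action`` above: the rightmost subtree finishes first and is pushed onto the front of the sequence first, so the final action sequence comes out in left-to-right, top-down order. A self-contained toy illustration, with plain tuples standing in for parsimonious nodes:

action_sequence = []

def toy_visit(node):
    # A node is (name, children); leaves carry empty child lists.
    name, children = node
    for child in reversed(children):       # mirrors the reversed() call above
        toy_visit(child)
    if children:                           # mirrors add_action's prepend
        rhs = ", ".join(child[0] for child in children)
        action_sequence.insert(0, f"{name} -> [{rhs}]")

tree = ("query",
        [("select_core", [("col_ref", []), ("table_name", [])]),
         ("orderby_clause", [("col_ref", [])])])
toy_visit(tree)
print(action_sequence)
# ['query -> [select_core, orderby_clause]',
#  'select_core -> [col_ref, table_name]',
#  'orderby_clause -> [col_ref]']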
23,163
allenai/allennlp
allennlp/semparse/contexts/text2sql_table_context.py
update_grammar_to_be_variable_free
def update_grammar_to_be_variable_free(grammar_dictionary: Dict[str, List[str]]): """ SQL is a predominantly variable free language in terms of simple usage, in the sense that most queries do not create references to variables which are not already static tables in a dataset. However, it is possible to do this via derived tables. If we don't require this functionality, we can tighten the grammar, because we don't need to support aliased tables. """ # Tables in variable free grammars cannot be aliased, so we # remove this functionality from the grammar. grammar_dictionary["select_result"] = ['"*"', '(table_name ws ".*")', 'expr'] # Similarly, collapse the definition of a source table # to not contain aliases and modify references to subqueries. grammar_dictionary["single_source"] = ['table_name', '("(" ws query ws ")")'] del grammar_dictionary["source_subq"] del grammar_dictionary["source_table"] grammar_dictionary["expr"] = ['in_expr', '(value wsp "LIKE" wsp string)', '(value ws "BETWEEN" wsp value ws "AND" wsp value)', '(value ws binaryop wsp expr)', '(unaryop ws expr)', '(col_ref ws "IS" ws "NOT" ws "NULL")', '(col_ref ws "IS" ws "NULL")', # This used to be source_subq - now # we don't need aliases, we can collapse it to queries. '("(" ws query ws ")")', 'value'] # Finally, remove the ability to reference an arbitrary name, # because now we don't have aliased tables, we don't need # to recognise new variables. del grammar_dictionary["name"]
python
def update_grammar_to_be_variable_free(grammar_dictionary: Dict[str, List[str]]): """ SQL is a predominantly variable free language in terms of simple usage, in the sense that most queries do not create references to variables which are not already static tables in a dataset. However, it is possible to do this via derived tables. If we don't require this functionality, we can tighten the grammar, because we don't need to support aliased tables. """ # Tables in variable free grammars cannot be aliased, so we # remove this functionality from the grammar. grammar_dictionary["select_result"] = ['"*"', '(table_name ws ".*")', 'expr'] # Similarly, collapse the definition of a source table # to not contain aliases and modify references to subqueries. grammar_dictionary["single_source"] = ['table_name', '("(" ws query ws ")")'] del grammar_dictionary["source_subq"] del grammar_dictionary["source_table"] grammar_dictionary["expr"] = ['in_expr', '(value wsp "LIKE" wsp string)', '(value ws "BETWEEN" wsp value ws "AND" wsp value)', '(value ws binaryop wsp expr)', '(unaryop ws expr)', '(col_ref ws "IS" ws "NOT" ws "NULL")', '(col_ref ws "IS" ws "NULL")', # This used to be source_subq - now # we don't need aliases, we can collapse it to queries. '("(" ws query ws ")")', 'value'] # Finally, remove the ability to reference an arbitrary name, # because now we don't have aliased tables, we don't need # to recognise new variables. del grammar_dictionary["name"]
[ "def", "update_grammar_to_be_variable_free", "(", "grammar_dictionary", ":", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", ")", ":", "# Tables in variable free grammars cannot be aliased, so we", "# remove this functionality from the grammar.", "grammar_dictionary", "[", "\"select_result\"", "]", "=", "[", "'\"*\"'", ",", "'(table_name ws \".*\")'", ",", "'expr'", "]", "# Similarly, collapse the definition of a source table", "# to not contain aliases and modify references to subqueries.", "grammar_dictionary", "[", "\"single_source\"", "]", "=", "[", "'table_name'", ",", "'(\"(\" ws query ws \")\")'", "]", "del", "grammar_dictionary", "[", "\"source_subq\"", "]", "del", "grammar_dictionary", "[", "\"source_table\"", "]", "grammar_dictionary", "[", "\"expr\"", "]", "=", "[", "'in_expr'", ",", "'(value wsp \"LIKE\" wsp string)'", ",", "'(value ws \"BETWEEN\" wsp value ws \"AND\" wsp value)'", ",", "'(value ws binaryop wsp expr)'", ",", "'(unaryop ws expr)'", ",", "'(col_ref ws \"IS\" ws \"NOT\" ws \"NULL\")'", ",", "'(col_ref ws \"IS\" ws \"NULL\")'", ",", "# This used to be source_subq - now", "# we don't need aliases, we can colapse it to queries.", "'(\"(\" ws query ws \")\")'", ",", "'value'", "]", "# Finally, remove the ability to reference an arbitrary name,", "# because now we don't have aliased tables, we don't need", "# to recognise new variables.", "del", "grammar_dictionary", "[", "\"name\"", "]" ]
SQL is a predominantly variable free language in terms of simple usage, in the sense that most queries do not create references to variables which are not already static tables in a dataset. However, it is possible to do this via derived tables. If we don't require this functionality, we can tighten the grammar, because we don't need to support aliased tables.
[ "SQL", "is", "a", "predominately", "variable", "free", "language", "in", "terms", "of", "simple", "usage", "in", "the", "sense", "that", "most", "queries", "do", "not", "create", "references", "to", "variables", "which", "are", "not", "already", "static", "tables", "in", "a", "dataset", ".", "However", "it", "is", "possible", "to", "do", "this", "via", "derived", "tables", ".", "If", "we", "don", "t", "require", "this", "functionality", "we", "can", "tighten", "the", "grammar", "because", "we", "don", "t", "need", "to", "support", "aliased", "tables", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/contexts/text2sql_table_context.py#L145-L179
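A usage sketch of the in-place surgery this function performs, assuming an allennlp checkout matching this commit; the toy grammar dictionary below is a minimal slice containing just the keys the function touches, not the full grammar.

from allennlp.semparse.contexts.text2sql_table_context import (
        update_grammar_to_be_variable_free)

grammar = {
    "select_result": ['"*"', '(expr ws "AS" wsp name)', 'expr'],
    "single_source": ['source_table', 'source_subq'],
    "source_subq": ['("(" ws query ws ")" ws "AS" ws name)'],
    "source_table": ['(table_name ws "AS" ws name)', 'table_name'],
    "expr": ['in_expr', 'value'],
    "name": ['~"[a-zA-Z]\\w*"i'],
}
update_grammar_to_be_variable_free(grammar)
assert "name" not in grammar and "source_subq" not in grammar
print(grammar["single_source"])   # ['table_name', '("(" ws query ws ")")']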
23,164
allenai/allennlp
allennlp/semparse/contexts/text2sql_table_context.py
update_grammar_with_untyped_entities
def update_grammar_with_untyped_entities(grammar_dictionary: Dict[str, List[str]]) -> None: """ Variables can be treated as numbers or strings if their type can be inferred - however, that can be difficult, so instead, we can just treat them all as values and be a bit looser on the typing we allow in our grammar. Here we just remove all references to number and string from the grammar, replacing them with value. """ grammar_dictionary["string_set_vals"] = ['(value ws "," ws string_set_vals)', 'value'] grammar_dictionary["value"].remove('string') grammar_dictionary["value"].remove('number') grammar_dictionary["limit"] = ['("LIMIT" ws "1")', '("LIMIT" ws value)'] grammar_dictionary["expr"][1] = '(value wsp "LIKE" wsp value)' del grammar_dictionary["string"] del grammar_dictionary["number"]
python
def update_grammar_with_untyped_entities(grammar_dictionary: Dict[str, List[str]]) -> None: """ Variables can be treated as numbers or strings if their type can be inferred - however, that can be difficult, so instead, we can just treat them all as values and be a bit looser on the typing we allow in our grammar. Here we just remove all references to number and string from the grammar, replacing them with value. """ grammar_dictionary["string_set_vals"] = ['(value ws "," ws string_set_vals)', 'value'] grammar_dictionary["value"].remove('string') grammar_dictionary["value"].remove('number') grammar_dictionary["limit"] = ['("LIMIT" ws "1")', '("LIMIT" ws value)'] grammar_dictionary["expr"][1] = '(value wsp "LIKE" wsp value)' del grammar_dictionary["string"] del grammar_dictionary["number"]
[ "def", "update_grammar_with_untyped_entities", "(", "grammar_dictionary", ":", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", ")", "->", "None", ":", "grammar_dictionary", "[", "\"string_set_vals\"", "]", "=", "[", "'(value ws \",\" ws string_set_vals)'", ",", "'value'", "]", "grammar_dictionary", "[", "\"value\"", "]", ".", "remove", "(", "'string'", ")", "grammar_dictionary", "[", "\"value\"", "]", ".", "remove", "(", "'number'", ")", "grammar_dictionary", "[", "\"limit\"", "]", "=", "[", "'(\"LIMIT\" ws \"1\")'", ",", "'(\"LIMIT\" ws value)'", "]", "grammar_dictionary", "[", "\"expr\"", "]", "[", "1", "]", "=", "'(value wsp \"LIKE\" wsp value)'", "del", "grammar_dictionary", "[", "\"string\"", "]", "del", "grammar_dictionary", "[", "\"number\"", "]" ]
Variables can be treated as numbers or strings if their type can be inferred - however, that can be difficult, so instead, we can just treat them all as values and be a bit looser on the typing we allow in our grammar. Here we just remove all references to number and string from the grammar, replacing them with value.
[ "Variables", "can", "be", "treated", "as", "numbers", "or", "strings", "if", "their", "type", "can", "be", "inferred", "-", "however", "that", "can", "be", "difficult", "so", "instead", "we", "can", "just", "treat", "them", "all", "as", "values", "and", "be", "a", "bit", "looser", "on", "the", "typing", "we", "allow", "in", "our", "grammar", ".", "Here", "we", "just", "remove", "all", "references", "to", "number", "and", "string", "from", "the", "grammar", "replacing", "them", "with", "value", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/contexts/text2sql_table_context.py#L181-L194
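A companion sketch under the same assumptions. Note the positional coupling: the function overwrites ``grammar_dictionary["expr"][1]``, so it relies on the LIKE production sitting at index 1 of the "expr" rule list.

from allennlp.semparse.contexts.text2sql_table_context import (
        update_grammar_with_untyped_entities)

grammar = {
    "string_set_vals": ['(string ws "," ws string_set_vals)', 'string'],
    "value": ['parenval', 'string', 'number', 'col_ref'],
    "limit": ['("LIMIT" ws number)'],
    "expr": ['in_expr', '(value wsp "LIKE" wsp string)', 'value'],
    "string": ['~"\'.*?\'"i'],
    "number": ['~"\\d+"'],
}
update_grammar_with_untyped_entities(grammar)
print(grammar["value"])     # ['parenval', 'col_ref']
print(grammar["expr"][1])   # (value wsp "LIKE" wsp value)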
23,165
allenai/allennlp
allennlp/models/ensemble.py
Ensemble._load
def _load(cls, config: Params, serialization_dir: str, weights_file: str = None, cuda_device: int = -1) -> 'Model': """ Ensembles don't have vocabularies or weights of their own, so they override _load. """ model_params = config.get('model') # The experiment config tells us how to _train_ a model, including where to get pre-trained # embeddings from. We're now _loading_ the model, so those embeddings will already be # stored in our weights. We don't need any pretrained weight file anymore, and we don't # want the code to look for it, so we remove it from the parameters here. remove_pretrained_embedding_params(model_params) model = Model.from_params(vocab=None, params=model_params) # Force model to cpu or gpu, as appropriate, to make sure that the embeddings are # in sync with the weights if cuda_device >= 0: model.cuda(cuda_device) else: model.cpu() return model
python
def _load(cls, config: Params, serialization_dir: str, weights_file: str = None, cuda_device: int = -1) -> 'Model': """ Ensembles don't have vocabularies or weights of their own, so they override _load. """ model_params = config.get('model') # The experiment config tells us how to _train_ a model, including where to get pre-trained # embeddings from. We're now _loading_ the model, so those embeddings will already be # stored in our weights. We don't need any pretrained weight file anymore, and we don't # want the code to look for it, so we remove it from the parameters here. remove_pretrained_embedding_params(model_params) model = Model.from_params(vocab=None, params=model_params) # Force model to cpu or gpu, as appropriate, to make sure that the embeddings are # in sync with the weights if cuda_device >= 0: model.cuda(cuda_device) else: model.cpu() return model
[ "def", "_load", "(", "cls", ",", "config", ":", "Params", ",", "serialization_dir", ":", "str", ",", "weights_file", ":", "str", "=", "None", ",", "cuda_device", ":", "int", "=", "-", "1", ")", "->", "'Model'", ":", "model_params", "=", "config", ".", "get", "(", "'model'", ")", "# The experiment config tells us how to _train_ a model, including where to get pre-trained", "# embeddings from. We're now _loading_ the model, so those embeddings will already be", "# stored in our weights. We don't need any pretrained weight file anymore, and we don't", "# want the code to look for it, so we remove it from the parameters here.", "remove_pretrained_embedding_params", "(", "model_params", ")", "model", "=", "Model", ".", "from_params", "(", "vocab", "=", "None", ",", "params", "=", "model_params", ")", "# Force model to cpu or gpu, as appropriate, to make sure that the embeddings are", "# in sync with the weights", "if", "cuda_device", ">=", "0", ":", "model", ".", "cuda", "(", "cuda_device", ")", "else", ":", "model", ".", "cpu", "(", ")", "return", "model" ]
Ensembles don't have vocabularies or weights of their own, so they override _load.
[ "Ensembles", "don", "t", "have", "vocabularies", "or", "weights", "of", "their", "own", "so", "they", "override", "_load", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/models/ensemble.py#L34-L58
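In practice this override is reached through ``Model.load``, which dispatches on the model type named in the config. A hedged usage sketch; the paths are invented placeholders.

from allennlp.common import Params
from allennlp.models import Model

config = Params.from_file("/path/to/serialization_dir/config.json")
model = Model.load(config,
                   serialization_dir="/path/to/serialization_dir",
                   cuda_device=-1)   # an ensemble's _load ignores weights_file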
23,166
allenai/allennlp
allennlp/semparse/domain_languages/quarel_language.py
QuaRelLanguage.infer
def infer(self, setup: QuaRelType, answer_0: QuaRelType, answer_1: QuaRelType) -> int: """ Take the question and check if it is compatible with either of the answer choices. """ if self._check_quarels_compatible(setup, answer_0): if self._check_quarels_compatible(setup, answer_1): # Found two answers return -2 else: return 0 elif self._check_quarels_compatible(setup, answer_1): return 1 else: return -1
python
def infer(self, setup: QuaRelType, answer_0: QuaRelType, answer_1: QuaRelType) -> int: """ Take the question and check if it is compatible with either of the answer choices. """ if self._check_quarels_compatible(setup, answer_0): if self._check_quarels_compatible(setup, answer_1): # Found two answers return -2 else: return 0 elif self._check_quarels_compatible(setup, answer_1): return 1 else: return -1
[ "def", "infer", "(", "self", ",", "setup", ":", "QuaRelType", ",", "answer_0", ":", "QuaRelType", ",", "answer_1", ":", "QuaRelType", ")", "->", "int", ":", "if", "self", ".", "_check_quarels_compatible", "(", "setup", ",", "answer_0", ")", ":", "if", "self", ".", "_check_quarels_compatible", "(", "setup", ",", "answer_1", ")", ":", "# Found two answers", "return", "-", "2", "else", ":", "return", "0", "elif", "self", ".", "_check_quarels_compatible", "(", "setup", ",", "answer_1", ")", ":", "return", "1", "else", ":", "return", "-", "1" ]
Take the question and check if it is compatible with either of the answer choices.
[ "Take", "the", "question", "and", "check", "if", "it", "is", "compatible", "with", "either", "of", "the", "answer", "choices", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/quarel_language.py#L97-L110
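The integer return values are sentinel codes rather than scores. A tiny self-contained helper that spells out the convention, mirroring the branches above:

def interpret_infer(result: int) -> str:
    # Maps infer()'s sentinel codes onto readable outcomes.
    outcomes = {
        0: "answer 0 is compatible with the setup",
        1: "answer 1 is compatible with the setup",
        -1: "neither answer is compatible",
        -2: "both answers are compatible (ambiguous)",
    }
    return outcomes.get(result, "unexpected code")

print(interpret_infer(-2))   # both answers are compatible (ambiguous)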
23,167
allenai/allennlp
allennlp/service/server_simple.py
make_app
def make_app(predictor: Predictor, field_names: List[str] = None, static_dir: str = None, sanitizer: Callable[[JsonDict], JsonDict] = None, title: str = "AllenNLP Demo") -> Flask: """ Creates a Flask app that serves up the provided ``Predictor`` along with a front-end for interacting with it. If you want to use the built-in bare-bones HTML, you must provide the field names for the inputs (which will be used both as labels and as the keys in the JSON that gets sent to the predictor). If you would rather create your own HTML, call it index.html and provide its directory as ``static_dir``. In that case you don't need to supply the field names -- that information should be implicit in your demo site. (Probably the easiest thing to do is just start with the bare-bones HTML and modify it.) In addition, if you want to somehow transform the JSON prediction (e.g. by removing probabilities or logits) you can do that by passing in a ``sanitizer`` function. """ if static_dir is not None: static_dir = os.path.abspath(static_dir) if not os.path.exists(static_dir): logger.error("app directory %s does not exist, aborting", static_dir) sys.exit(-1) elif static_dir is None and field_names is None: print("Neither build_dir nor field_names passed. Demo won't render on this port.\n" "You must use nodejs + react app to interact with the server.") app = Flask(__name__) # pylint: disable=invalid-name @app.errorhandler(ServerError) def handle_invalid_usage(error: ServerError) -> Response: # pylint: disable=unused-variable response = jsonify(error.to_dict()) response.status_code = error.status_code return response @app.route('/') def index() -> Response: # pylint: disable=unused-variable if static_dir is not None: return send_file(os.path.join(static_dir, 'index.html')) else: html = _html(title, field_names) return Response(response=html, status=200) @app.route('/predict', methods=['POST', 'OPTIONS']) def predict() -> Response: # pylint: disable=unused-variable """make a prediction using the specified model and return the results""" if request.method == "OPTIONS": return Response(response="", status=200) data = request.get_json() prediction = predictor.predict_json(data) if sanitizer is not None: prediction = sanitizer(prediction) log_blob = {"inputs": data, "outputs": prediction} logger.info("prediction: %s", json.dumps(log_blob)) return jsonify(prediction) @app.route('/predict_batch', methods=['POST', 'OPTIONS']) def predict_batch() -> Response: # pylint: disable=unused-variable """make a prediction using the specified model and return the results""" if request.method == "OPTIONS": return Response(response="", status=200) data = request.get_json() prediction = predictor.predict_batch_json(data) if sanitizer is not None: prediction = [sanitizer(p) for p in prediction] return jsonify(prediction) @app.route('/<path:path>') def static_proxy(path: str) -> Response: # pylint: disable=unused-variable if static_dir is not None: return send_from_directory(static_dir, path) else: raise ServerError("static_dir not specified", 404) return app
python
def make_app(predictor: Predictor, field_names: List[str] = None, static_dir: str = None, sanitizer: Callable[[JsonDict], JsonDict] = None, title: str = "AllenNLP Demo") -> Flask: """ Creates a Flask app that serves up the provided ``Predictor`` along with a front-end for interacting with it. If you want to use the built-in bare-bones HTML, you must provide the field names for the inputs (which will be used both as labels and as the keys in the JSON that gets sent to the predictor). If you would rather create your own HTML, call it index.html and provide its directory as ``static_dir``. In that case you don't need to supply the field names -- that information should be implicit in your demo site. (Probably the easiest thing to do is just start with the bare-bones HTML and modify it.) In addition, if you want to somehow transform the JSON prediction (e.g. by removing probabilities or logits) you can do that by passing in a ``sanitizer`` function. """ if static_dir is not None: static_dir = os.path.abspath(static_dir) if not os.path.exists(static_dir): logger.error("app directory %s does not exist, aborting", static_dir) sys.exit(-1) elif static_dir is None and field_names is None: print("Neither build_dir nor field_names passed. Demo won't render on this port.\n" "You must use nodejs + react app to interact with the server.") app = Flask(__name__) # pylint: disable=invalid-name @app.errorhandler(ServerError) def handle_invalid_usage(error: ServerError) -> Response: # pylint: disable=unused-variable response = jsonify(error.to_dict()) response.status_code = error.status_code return response @app.route('/') def index() -> Response: # pylint: disable=unused-variable if static_dir is not None: return send_file(os.path.join(static_dir, 'index.html')) else: html = _html(title, field_names) return Response(response=html, status=200) @app.route('/predict', methods=['POST', 'OPTIONS']) def predict() -> Response: # pylint: disable=unused-variable """make a prediction using the specified model and return the results""" if request.method == "OPTIONS": return Response(response="", status=200) data = request.get_json() prediction = predictor.predict_json(data) if sanitizer is not None: prediction = sanitizer(prediction) log_blob = {"inputs": data, "outputs": prediction} logger.info("prediction: %s", json.dumps(log_blob)) return jsonify(prediction) @app.route('/predict_batch', methods=['POST', 'OPTIONS']) def predict_batch() -> Response: # pylint: disable=unused-variable """make a prediction using the specified model and return the results""" if request.method == "OPTIONS": return Response(response="", status=200) data = request.get_json() prediction = predictor.predict_batch_json(data) if sanitizer is not None: prediction = [sanitizer(p) for p in prediction] return jsonify(prediction) @app.route('/<path:path>') def static_proxy(path: str) -> Response: # pylint: disable=unused-variable if static_dir is not None: return send_from_directory(static_dir, path) else: raise ServerError("static_dir not specified", 404) return app
[ "def", "make_app", "(", "predictor", ":", "Predictor", ",", "field_names", ":", "List", "[", "str", "]", "=", "None", ",", "static_dir", ":", "str", "=", "None", ",", "sanitizer", ":", "Callable", "[", "[", "JsonDict", "]", ",", "JsonDict", "]", "=", "None", ",", "title", ":", "str", "=", "\"AllenNLP Demo\"", ")", "->", "Flask", ":", "if", "static_dir", "is", "not", "None", ":", "static_dir", "=", "os", ".", "path", ".", "abspath", "(", "static_dir", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "static_dir", ")", ":", "logger", ".", "error", "(", "\"app directory %s does not exist, aborting\"", ",", "static_dir", ")", "sys", ".", "exit", "(", "-", "1", ")", "elif", "static_dir", "is", "None", "and", "field_names", "is", "None", ":", "print", "(", "\"Neither build_dir nor field_names passed. Demo won't render on this port.\\n\"", "\"You must use nodejs + react app to interact with the server.\"", ")", "app", "=", "Flask", "(", "__name__", ")", "# pylint: disable=invalid-name", "@", "app", ".", "errorhandler", "(", "ServerError", ")", "def", "handle_invalid_usage", "(", "error", ":", "ServerError", ")", "->", "Response", ":", "# pylint: disable=unused-variable", "response", "=", "jsonify", "(", "error", ".", "to_dict", "(", ")", ")", "response", ".", "status_code", "=", "error", ".", "status_code", "return", "response", "@", "app", ".", "route", "(", "'/'", ")", "def", "index", "(", ")", "->", "Response", ":", "# pylint: disable=unused-variable", "if", "static_dir", "is", "not", "None", ":", "return", "send_file", "(", "os", ".", "path", ".", "join", "(", "static_dir", ",", "'index.html'", ")", ")", "else", ":", "html", "=", "_html", "(", "title", ",", "field_names", ")", "return", "Response", "(", "response", "=", "html", ",", "status", "=", "200", ")", "@", "app", ".", "route", "(", "'/predict'", ",", "methods", "=", "[", "'POST'", ",", "'OPTIONS'", "]", ")", "def", "predict", "(", ")", "->", "Response", ":", "# pylint: disable=unused-variable", "\"\"\"make a prediction using the specified model and return the results\"\"\"", "if", "request", ".", "method", "==", "\"OPTIONS\"", ":", "return", "Response", "(", "response", "=", "\"\"", ",", "status", "=", "200", ")", "data", "=", "request", ".", "get_json", "(", ")", "prediction", "=", "predictor", ".", "predict_json", "(", "data", ")", "if", "sanitizer", "is", "not", "None", ":", "prediction", "=", "sanitizer", "(", "prediction", ")", "log_blob", "=", "{", "\"inputs\"", ":", "data", ",", "\"outputs\"", ":", "prediction", "}", "logger", ".", "info", "(", "\"prediction: %s\"", ",", "json", ".", "dumps", "(", "log_blob", ")", ")", "return", "jsonify", "(", "prediction", ")", "@", "app", ".", "route", "(", "'/predict_batch'", ",", "methods", "=", "[", "'POST'", ",", "'OPTIONS'", "]", ")", "def", "predict_batch", "(", ")", "->", "Response", ":", "# pylint: disable=unused-variable", "\"\"\"make a prediction using the specified model and return the results\"\"\"", "if", "request", ".", "method", "==", "\"OPTIONS\"", ":", "return", "Response", "(", "response", "=", "\"\"", ",", "status", "=", "200", ")", "data", "=", "request", ".", "get_json", "(", ")", "prediction", "=", "predictor", ".", "predict_batch_json", "(", "data", ")", "if", "sanitizer", "is", "not", "None", ":", "prediction", "=", "[", "sanitizer", "(", "p", ")", "for", "p", "in", "prediction", "]", "return", "jsonify", "(", "prediction", ")", "@", "app", ".", "route", "(", "'/<path:path>'", ")", "def", "static_proxy", "(", "path", ":", "str", ")", "->", "Response", ":", 
"# pylint: disable=unused-variable", "if", "static_dir", "is", "not", "None", ":", "return", "send_from_directory", "(", "static_dir", ",", "path", ")", "else", ":", "raise", "ServerError", "(", "\"static_dir not specified\"", ",", "404", ")", "return", "app" ]
Creates a Flask app that serves up the provided ``Predictor`` along with a front-end for interacting with it. If you want to use the built-in bare-bones HTML, you must provide the field names for the inputs (which will be used both as labels and as the keys in the JSON that gets sent to the predictor). If you would rather create your own HTML, call it index.html and provide its directory as ``static_dir``. In that case you don't need to supply the field names -- that information should be implicit in your demo site. (Probably the easiest thing to do is just start with the bare-bones HTML and modify it.) In addition, if you want to somehow transform the JSON prediction (e.g. by removing probabilities or logits) you can do that by passing in a ``sanitizer`` function.
[ "Creates", "a", "Flask", "app", "that", "serves", "up", "the", "provided", "Predictor", "along", "with", "a", "front", "-", "end", "for", "interacting", "with", "it", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/service/server_simple.py#L53-L139
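A hedged end-to-end usage sketch: load a trained archive into a ``Predictor``, build the app, and serve it. The archive path and field names are invented placeholders.

from allennlp.predictors import Predictor
from allennlp.service.server_simple import make_app

predictor = Predictor.from_path("/path/to/model.tar.gz")
app = make_app(predictor=predictor,
               field_names=["premise", "hypothesis"],
               title="My Demo")
app.run(host="0.0.0.0", port=8000)   # plain Flask serving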
23,168
allenai/allennlp
allennlp/service/server_simple.py
_html
def _html(title: str, field_names: List[str]) -> str: """ Returns bare bones HTML for serving up an input form with the specified fields that can render predictions from the configured model. """ inputs = ''.join(_SINGLE_INPUT_TEMPLATE.substitute(field_name=field_name) for field_name in field_names) quoted_field_names = [f"'{field_name}'" for field_name in field_names] quoted_field_list = f"[{','.join(quoted_field_names)}]" return _PAGE_TEMPLATE.substitute(title=title, css=_CSS, inputs=inputs, qfl=quoted_field_list)
python
def _html(title: str, field_names: List[str]) -> str: """ Returns bare bones HTML for serving up an input form with the specified fields that can render predictions from the configured model. """ inputs = ''.join(_SINGLE_INPUT_TEMPLATE.substitute(field_name=field_name) for field_name in field_names) quoted_field_names = [f"'{field_name}'" for field_name in field_names] quoted_field_list = f"[{','.join(quoted_field_names)}]" return _PAGE_TEMPLATE.substitute(title=title, css=_CSS, inputs=inputs, qfl=quoted_field_list)
[ "def", "_html", "(", "title", ":", "str", ",", "field_names", ":", "List", "[", "str", "]", ")", "->", "str", ":", "inputs", "=", "''", ".", "join", "(", "_SINGLE_INPUT_TEMPLATE", ".", "substitute", "(", "field_name", "=", "field_name", ")", "for", "field_name", "in", "field_names", ")", "quoted_field_names", "=", "[", "f\"'{field_name}'\"", "for", "field_name", "in", "field_names", "]", "quoted_field_list", "=", "f\"[{','.join(quoted_field_names)}]\"", "return", "_PAGE_TEMPLATE", ".", "substitute", "(", "title", "=", "title", ",", "css", "=", "_CSS", ",", "inputs", "=", "inputs", ",", "qfl", "=", "quoted_field_list", ")" ]
Returns bare bones HTML for serving up an input form with the specified fields that can render predictions from the configured model.
[ "Returns", "bare", "bones", "HTML", "for", "serving", "up", "an", "input", "form", "with", "the", "specified", "fields", "that", "can", "render", "predictions", "from", "the", "configured", "model", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/service/server_simple.py#L741-L755
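The substitution machinery is plain ``string.Template``. A self-contained miniature of the same pattern, with toy templates standing in for the module's ``_PAGE_TEMPLATE`` and ``_SINGLE_INPUT_TEMPLATE``:

from string import Template

single_input = Template('<input type="text" name="$field_name"/>\n')
page = Template('<html><body><h1>$title</h1>$inputs</body></html>')

inputs = ''.join(single_input.substitute(field_name=f)
                 for f in ["premise", "hypothesis"])
print(page.substitute(title="Demo", inputs=inputs))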
23,169
allenai/allennlp
allennlp/state_machines/states/lambda_grammar_statelet.py
LambdaGrammarStatelet.get_valid_actions
def get_valid_actions(self) -> Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]: """ Returns the valid actions in the current grammar state. See the class docstring for a description of what we're returning here. """ actions = self._valid_actions[self._nonterminal_stack[-1]] context_actions = [] for type_, variable in self._lambda_stacks: if self._nonterminal_stack[-1] == type_: production_string = f"{type_} -> {variable}" context_actions.append(self._context_actions[production_string]) if context_actions: input_tensor, output_tensor, action_ids = actions['global'] new_inputs = [input_tensor] + [x[0] for x in context_actions] input_tensor = torch.cat(new_inputs, dim=0) new_outputs = [output_tensor] + [x[1] for x in context_actions] output_tensor = torch.cat(new_outputs, dim=0) new_action_ids = action_ids + [x[2] for x in context_actions] # We can't just reassign to actions['global'], because that would modify the state of # self._valid_actions. Instead, we need to construct a new actions dictionary. new_actions = {**actions} new_actions['global'] = (input_tensor, output_tensor, new_action_ids) actions = new_actions return actions
python
def get_valid_actions(self) -> Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]: """ Returns the valid actions in the current grammar state. See the class docstring for a description of what we're returning here. """ actions = self._valid_actions[self._nonterminal_stack[-1]] context_actions = [] for type_, variable in self._lambda_stacks: if self._nonterminal_stack[-1] == type_: production_string = f"{type_} -> {variable}" context_actions.append(self._context_actions[production_string]) if context_actions: input_tensor, output_tensor, action_ids = actions['global'] new_inputs = [input_tensor] + [x[0] for x in context_actions] input_tensor = torch.cat(new_inputs, dim=0) new_outputs = [output_tensor] + [x[1] for x in context_actions] output_tensor = torch.cat(new_outputs, dim=0) new_action_ids = action_ids + [x[2] for x in context_actions] # We can't just reassign to actions['global'], because that would modify the state of # self._valid_actions. Instead, we need to construct a new actions dictionary. new_actions = {**actions} new_actions['global'] = (input_tensor, output_tensor, new_action_ids) actions = new_actions return actions
[ "def", "get_valid_actions", "(", "self", ")", "->", "Dict", "[", "str", ",", "Tuple", "[", "torch", ".", "Tensor", ",", "torch", ".", "Tensor", ",", "List", "[", "int", "]", "]", "]", ":", "actions", "=", "self", ".", "_valid_actions", "[", "self", ".", "_nonterminal_stack", "[", "-", "1", "]", "]", "context_actions", "=", "[", "]", "for", "type_", ",", "variable", "in", "self", ".", "_lambda_stacks", ":", "if", "self", ".", "_nonterminal_stack", "[", "-", "1", "]", "==", "type_", ":", "production_string", "=", "f\"{type_} -> {variable}\"", "context_actions", ".", "append", "(", "self", ".", "_context_actions", "[", "production_string", "]", ")", "if", "context_actions", ":", "input_tensor", ",", "output_tensor", ",", "action_ids", "=", "actions", "[", "'global'", "]", "new_inputs", "=", "[", "input_tensor", "]", "+", "[", "x", "[", "0", "]", "for", "x", "in", "context_actions", "]", "input_tensor", "=", "torch", ".", "cat", "(", "new_inputs", ",", "dim", "=", "0", ")", "new_outputs", "=", "[", "output_tensor", "]", "+", "[", "x", "[", "1", "]", "for", "x", "in", "context_actions", "]", "output_tensor", "=", "torch", ".", "cat", "(", "new_outputs", ",", "dim", "=", "0", ")", "new_action_ids", "=", "action_ids", "+", "[", "x", "[", "2", "]", "for", "x", "in", "context_actions", "]", "# We can't just reassign to actions['global'], because that would modify the state of", "# self._valid_actions. Instead, we need to construct a new actions dictionary.", "new_actions", "=", "{", "*", "*", "actions", "}", "new_actions", "[", "'global'", "]", "=", "(", "input_tensor", ",", "output_tensor", ",", "new_action_ids", ")", "actions", "=", "new_actions", "return", "actions" ]
Returns the valid actions in the current grammar state. See the class docstring for a description of what we're returning here.
[ "Returns", "the", "valid", "actions", "in", "the", "current", "grammar", "state", ".", "See", "the", "class", "docstring", "for", "a", "description", "of", "what", "we", "re", "returning", "here", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/state_machines/states/lambda_grammar_statelet.py#L77-L100
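A self-contained miniature of the merge step, with invented shapes: matching context actions are concatenated onto the global actions in a fresh tuple, so the cached ``_valid_actions`` dictionary is never mutated.

import torch

global_inputs = torch.zeros(3, 4)     # 3 global actions, embedding dim 4
global_outputs = torch.zeros(3, 4)
global_ids = [0, 1, 2]

# One matching lambda-variable action: (input embedding, output embedding, id).
context_actions = [(torch.ones(1, 4), torch.ones(1, 4), 7)]

input_tensor = torch.cat([global_inputs] + [x[0] for x in context_actions], dim=0)
output_tensor = torch.cat([global_outputs] + [x[1] for x in context_actions], dim=0)
action_ids = global_ids + [x[2] for x in context_actions]
print(input_tensor.shape, action_ids)   # torch.Size([4, 4]) [0, 1, 2, 7]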
23,170
allenai/allennlp
allennlp/training/moving_average.py
MovingAverage.assign_average_value
def assign_average_value(self) -> None: """ Replace all the parameter values with the averages. Save the current parameter values to restore later. """ for name, parameter in self._parameters: self._backups[name].copy_(parameter.data) parameter.data.copy_(self._shadows[name])
python
def assign_average_value(self) -> None: """ Replace all the parameter values with the averages. Save the current parameter values to restore later. """ for name, parameter in self._parameters: self._backups[name].copy_(parameter.data) parameter.data.copy_(self._shadows[name])
[ "def", "assign_average_value", "(", "self", ")", "->", "None", ":", "for", "name", ",", "parameter", "in", "self", ".", "_parameters", ":", "self", ".", "_backups", "[", "name", "]", ".", "copy_", "(", "parameter", ".", "data", ")", "parameter", ".", "data", ".", "copy_", "(", "self", ".", "_shadows", "[", "name", "]", ")" ]
Replace all the parameter values with the averages. Save the current parameter values to restore later.
[ "Replace", "all", "the", "parameter", "values", "with", "the", "averages", ".", "Save", "the", "current", "parameter", "values", "to", "restore", "later", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/moving_average.py#L27-L34
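The backup/shadow dance is three in-place copies. A self-contained rehearsal on a single parameter; the final copy mirrors the companion restore step that brings the raw weights back for training.

import torch

param = torch.nn.Parameter(torch.tensor([1.0, 2.0]))
shadow = torch.tensor([0.9, 1.9])          # the running average
backup = torch.empty_like(param.data)

backup.copy_(param.data)                   # stash the raw weights
param.data.copy_(shadow)                   # evaluate with averaged weights
# ... run validation here ...
param.data.copy_(backup)                   # restore raw weights
print(param.data)                          # tensor([1., 2.])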
23,171
allenai/allennlp
allennlp/state_machines/trainers/expected_risk_minimization.py
ExpectedRiskMinimization._prune_beam
def _prune_beam(states: List[State], beam_size: int, sort_states: bool = False) -> List[State]: """ This method can be used to prune the set of unfinished states on a beam or finished states at the end of search. In the former case, the states need not be sorted because they all come from the same decoding step, which does the sorting. However, if the states are finished and this method is called at the end of the search, they need to be sorted because they come from different decoding steps. """ states_by_batch_index: Dict[int, List[State]] = defaultdict(list) for state in states: assert len(state.batch_indices) == 1 batch_index = state.batch_indices[0] states_by_batch_index[batch_index].append(state) pruned_states = [] for _, instance_states in states_by_batch_index.items(): if sort_states: scores = torch.cat([state.score[0].view(-1) for state in instance_states]) _, sorted_indices = scores.sort(-1, descending=True) sorted_states = [instance_states[i] for i in sorted_indices.detach().cpu().numpy()] instance_states = sorted_states for state in instance_states[:beam_size]: pruned_states.append(state) return pruned_states
python
def _prune_beam(states: List[State], beam_size: int, sort_states: bool = False) -> List[State]: """ This method can be used to prune the set of unfinished states on a beam or finished states at the end of search. In the former case, the states need not be sorted because they all come from the same decoding step, which does the sorting. However, if the states are finished and this method is called at the end of the search, they need to be sorted because they come from different decoding steps. """ states_by_batch_index: Dict[int, List[State]] = defaultdict(list) for state in states: assert len(state.batch_indices) == 1 batch_index = state.batch_indices[0] states_by_batch_index[batch_index].append(state) pruned_states = [] for _, instance_states in states_by_batch_index.items(): if sort_states: scores = torch.cat([state.score[0].view(-1) for state in instance_states]) _, sorted_indices = scores.sort(-1, descending=True) sorted_states = [instance_states[i] for i in sorted_indices.detach().cpu().numpy()] instance_states = sorted_states for state in instance_states[:beam_size]: pruned_states.append(state) return pruned_states
[ "def", "_prune_beam", "(", "states", ":", "List", "[", "State", "]", ",", "beam_size", ":", "int", ",", "sort_states", ":", "bool", "=", "False", ")", "->", "List", "[", "State", "]", ":", "states_by_batch_index", ":", "Dict", "[", "int", ",", "List", "[", "State", "]", "]", "=", "defaultdict", "(", "list", ")", "for", "state", "in", "states", ":", "assert", "len", "(", "state", ".", "batch_indices", ")", "==", "1", "batch_index", "=", "state", ".", "batch_indices", "[", "0", "]", "states_by_batch_index", "[", "batch_index", "]", ".", "append", "(", "state", ")", "pruned_states", "=", "[", "]", "for", "_", ",", "instance_states", "in", "states_by_batch_index", ".", "items", "(", ")", ":", "if", "sort_states", ":", "scores", "=", "torch", ".", "cat", "(", "[", "state", ".", "score", "[", "0", "]", ".", "view", "(", "-", "1", ")", "for", "state", "in", "instance_states", "]", ")", "_", ",", "sorted_indices", "=", "scores", ".", "sort", "(", "-", "1", ",", "descending", "=", "True", ")", "sorted_states", "=", "[", "instance_states", "[", "i", "]", "for", "i", "in", "sorted_indices", ".", "detach", "(", ")", ".", "cpu", "(", ")", ".", "numpy", "(", ")", "]", "instance_states", "=", "sorted_states", "for", "state", "in", "instance_states", "[", ":", "beam_size", "]", ":", "pruned_states", ".", "append", "(", "state", ")", "return", "pruned_states" ]
This method can be used to prune the set of unfinished states on a beam or finished states at the end of search. In the former case, the states need not be sorted because they all come from the same decoding step, which does the sorting. However, if the states are finished and this method is called at the end of the search, they need to be sorted because they come from different decoding steps.
[ "This", "method", "can", "be", "used", "to", "prune", "the", "set", "of", "unfinished", "states", "on", "a", "beam", "or", "finished", "states", "at", "the", "end", "of", "search", ".", "In", "the", "former", "case", "the", "states", "need", "not", "be", "sorted", "because", "the", "all", "come", "from", "the", "same", "decoding", "step", "which", "does", "the", "sorting", ".", "However", "if", "the", "states", "are", "finished", "and", "this", "method", "is", "called", "at", "the", "end", "of", "the", "search", "they", "need", "to", "be", "sorted", "because", "they", "come", "from", "different", "decoding", "steps", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/state_machines/trainers/expected_risk_minimization.py#L101-L125
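A tensor-free rehearsal of the same group/sort/truncate logic, with toy states as ``(batch_index, score)`` tuples:

from collections import defaultdict

states = [(0, 0.1), (1, 0.7), (0, 0.9), (0, 0.4), (1, 0.2)]
by_batch = defaultdict(list)
for state in states:
    by_batch[state[0]].append(state)

beam_size = 2
pruned = []
for _, instance_states in by_batch.items():
    instance_states.sort(key=lambda s: -s[1])   # the sort_states=True path
    pruned.extend(instance_states[:beam_size])
print(pruned)   # [(0, 0.9), (0, 0.4), (1, 0.7), (1, 0.2)]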
23,172
allenai/allennlp
allennlp/state_machines/trainers/expected_risk_minimization.py
ExpectedRiskMinimization._get_best_final_states
def _get_best_final_states(self, finished_states: List[StateType]) -> Dict[int, List[StateType]]: """ Returns the best finished states for each batch instance based on model scores. We return at most ``self._beam_size`` sequences per instance. """ batch_states: Dict[int, List[StateType]] = defaultdict(list) for state in finished_states: batch_states[state.batch_indices[0]].append(state) best_states: Dict[int, List[StateType]] = {} for batch_index, states in batch_states.items(): # The time this sort takes is pretty negligible, no particular need to optimize this # yet. Maybe with a larger beam size... finished_to_sort = [(-state.score[0].item(), state) for state in states] finished_to_sort.sort(key=lambda x: x[0]) best_states[batch_index] = [state[1] for state in finished_to_sort[:self._beam_size]] return best_states
python
def _get_best_final_states(self, finished_states: List[StateType]) -> Dict[int, List[StateType]]: """ Returns the best finished states for each batch instance based on model scores. We return at most ``self._beam_size`` sequences per instance. """ batch_states: Dict[int, List[StateType]] = defaultdict(list) for state in finished_states: batch_states[state.batch_indices[0]].append(state) best_states: Dict[int, List[StateType]] = {} for batch_index, states in batch_states.items(): # The time this sort takes is pretty negligible, no particular need to optimize this # yet. Maybe with a larger beam size... finished_to_sort = [(-state.score[0].item(), state) for state in states] finished_to_sort.sort(key=lambda x: x[0]) best_states[batch_index] = [state[1] for state in finished_to_sort[:self._beam_size]] return best_states
[ "def", "_get_best_final_states", "(", "self", ",", "finished_states", ":", "List", "[", "StateType", "]", ")", "->", "Dict", "[", "int", ",", "List", "[", "StateType", "]", "]", ":", "batch_states", ":", "Dict", "[", "int", ",", "List", "[", "StateType", "]", "]", "=", "defaultdict", "(", "list", ")", "for", "state", "in", "finished_states", ":", "batch_states", "[", "state", ".", "batch_indices", "[", "0", "]", "]", ".", "append", "(", "state", ")", "best_states", ":", "Dict", "[", "int", ",", "List", "[", "StateType", "]", "]", "=", "{", "}", "for", "batch_index", ",", "states", "in", "batch_states", ".", "items", "(", ")", ":", "# The time this sort takes is pretty negligible, no particular need to optimize this", "# yet. Maybe with a larger beam size...", "finished_to_sort", "=", "[", "(", "-", "state", ".", "score", "[", "0", "]", ".", "item", "(", ")", ",", "state", ")", "for", "state", "in", "states", "]", "finished_to_sort", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", "best_states", "[", "batch_index", "]", "=", "[", "state", "[", "1", "]", "for", "state", "in", "finished_to_sort", "[", ":", "self", ".", "_beam_size", "]", "]", "return", "best_states" ]
Returns the best finished states for each batch instance based on model scores. We return at most ``self._beam_size`` sequences per instance.
[ "Returns", "the", "best", "finished", "states", "for", "each", "batch", "instance", "based", "on", "model", "scores", ".", "We", "return", "at", "most", "self", ".", "_max_num_decoded_sequences", "number", "of", "sequences", "per", "instance", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/state_machines/trainers/expected_risk_minimization.py#L151-L166
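The negate-then-sort idiom above is just a descending sort on score expressed with an ascending ``sort``. A one-liner check:

scores = [0.2, 0.9, 0.5]
order = [i for _, i in sorted((-s, i) for i, s in enumerate(scores))]
print(order)   # [1, 2, 0] -- best scores first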
23,173
allenai/allennlp
allennlp/modules/token_embedders/embedding.py
_read_pretrained_embeddings_file
def _read_pretrained_embeddings_file(file_uri: str, embedding_dim: int, vocab: Vocabulary, namespace: str = "tokens") -> torch.FloatTensor: """ Returns an embedding matrix for the given vocabulary using the pretrained embeddings contained in the given file. Embeddings for tokens not found in the pretrained embedding file are randomly initialized using a normal distribution with mean and standard deviation equal to those of the pretrained embeddings. We support two file formats: * text format - utf-8 encoded text file with space separated fields: [word] [dim 1] [dim 2] ... The text file can optionally be compressed, and may even reside in an archive with multiple files. If the file resides in an archive with other files, then ``embeddings_filename`` must be a URI "(archive_uri)#file_path_inside_the_archive" * hdf5 format - hdf5 file containing an embedding matrix in the form of a torch.Tensor. If the filename ends with '.hdf5' or '.h5' then we load from hdf5, otherwise we assume text format. Parameters ---------- file_uri : str, required. It can be: * a file system path or a URL of a possibly compressed text file or a zip/tar archive containing a single file. * URI of the type ``(archive_path_or_url)#file_path_inside_archive`` if the text file is contained in a multi-file archive. embedding_dim : int, required. The dimensionality of the embeddings to read. vocab : Vocabulary, required. A Vocabulary object. namespace : str, (optional, default=tokens) The namespace of the vocabulary to find pretrained embeddings for. Returns ------- A weight matrix with embeddings initialized from the read file. The matrix has shape ``(vocab.get_vocab_size(namespace), embedding_dim)``, where the indices of words appearing in the pretrained embedding file are initialized to the pretrained embedding value. """ file_ext = get_file_extension(file_uri) if file_ext in ['.h5', '.hdf5']: return _read_embeddings_from_hdf5(file_uri, embedding_dim, vocab, namespace) return _read_embeddings_from_text_file(file_uri, embedding_dim, vocab, namespace)
python
def _read_pretrained_embeddings_file(file_uri: str, embedding_dim: int, vocab: Vocabulary, namespace: str = "tokens") -> torch.FloatTensor: """ Returns an embedding matrix for the given vocabulary using the pretrained embeddings contained in the given file. Embeddings for tokens not found in the pretrained embedding file are randomly initialized using a normal distribution with mean and standard deviation equal to those of the pretrained embeddings. We support two file formats: * text format - utf-8 encoded text file with space separated fields: [word] [dim 1] [dim 2] ... The text file can optionally be compressed, and may even reside in an archive with multiple files. If the file resides in an archive with other files, then ``embeddings_filename`` must be a URI "(archive_uri)#file_path_inside_the_archive" * hdf5 format - hdf5 file containing an embedding matrix in the form of a torch.Tensor. If the filename ends with '.hdf5' or '.h5' then we load from hdf5, otherwise we assume text format. Parameters ---------- file_uri : str, required. It can be: * a file system path or a URL of a possibly compressed text file or a zip/tar archive containing a single file. * URI of the type ``(archive_path_or_url)#file_path_inside_archive`` if the text file is contained in a multi-file archive. embedding_dim : int, required. The dimensionality of the embeddings to read. vocab : Vocabulary, required. A Vocabulary object. namespace : str, (optional, default=tokens) The namespace of the vocabulary to find pretrained embeddings for. Returns ------- A weight matrix with embeddings initialized from the read file. The matrix has shape ``(vocab.get_vocab_size(namespace), embedding_dim)``, where the indices of words appearing in the pretrained embedding file are initialized to the pretrained embedding value. """ file_ext = get_file_extension(file_uri) if file_ext in ['.h5', '.hdf5']: return _read_embeddings_from_hdf5(file_uri, embedding_dim, vocab, namespace) return _read_embeddings_from_text_file(file_uri, embedding_dim, vocab, namespace)
[ "def", "_read_pretrained_embeddings_file", "(", "file_uri", ":", "str", ",", "embedding_dim", ":", "int", ",", "vocab", ":", "Vocabulary", ",", "namespace", ":", "str", "=", "\"tokens\"", ")", "->", "torch", ".", "FloatTensor", ":", "file_ext", "=", "get_file_extension", "(", "file_uri", ")", "if", "file_ext", "in", "[", "'.h5'", ",", "'.hdf5'", "]", ":", "return", "_read_embeddings_from_hdf5", "(", "file_uri", ",", "embedding_dim", ",", "vocab", ",", "namespace", ")", "return", "_read_embeddings_from_text_file", "(", "file_uri", ",", "embedding_dim", ",", "vocab", ",", "namespace", ")" ]
Returns an embedding matrix for the given vocabulary using the pretrained embeddings contained in the given file. Embeddings for tokens not found in the pretrained embedding file are randomly initialized using a normal distribution with mean and standard deviation equal to those of the pretrained embeddings. We support two file formats: * text format - utf-8 encoded text file with space separated fields: [word] [dim 1] [dim 2] ... The text file can optionally be compressed, and may even reside in an archive with multiple files. If the file resides in an archive with other files, then ``embeddings_filename`` must be a URI "(archive_uri)#file_path_inside_the_archive" * hdf5 format - hdf5 file containing an embedding matrix in the form of a torch.Tensor. If the filename ends with '.hdf5' or '.h5' then we load from hdf5, otherwise we assume text format. Parameters ---------- file_uri : str, required. It can be: * a file system path or a URL of a possibly compressed text file or a zip/tar archive containing a single file. * URI of the type ``(archive_path_or_url)#file_path_inside_archive`` if the text file is contained in a multi-file archive. embedding_dim : int, required. The dimensionality of the embeddings to read. vocab : Vocabulary, required. A Vocabulary object. namespace : str, (optional, default=tokens) The namespace of the vocabulary to find pretrained embeddings for. Returns ------- A weight matrix with embeddings initialized from the read file. The matrix has shape ``(vocab.get_vocab_size(namespace), embedding_dim)``, where the indices of words appearing in the pretrained embedding file are initialized to the pretrained embedding value.
[ "Returns", "and", "embedding", "matrix", "for", "the", "given", "vocabulary", "using", "the", "pretrained", "embeddings", "contained", "in", "the", "given", "file", ".", "Embeddings", "for", "tokens", "not", "found", "in", "the", "pretrained", "embedding", "file", "are", "randomly", "initialized", "using", "a", "normal", "distribution", "with", "mean", "and", "standard", "deviation", "equal", "to", "those", "of", "the", "pretrained", "embeddings", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/token_embedders/embedding.py#L317-L371
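The dispatch itself is a simple extension check. A simplified, self-contained stand-in (the real ``get_file_extension`` helper may handle more cases, such as compression suffixes):

import os

def dispatch(file_uri: str) -> str:
    # Route on the final extension, mirroring the ['.h5', '.hdf5'] test above.
    ext = os.path.splitext(file_uri)[1]
    return "hdf5 reader" if ext in (".h5", ".hdf5") else "text reader"

print(dispatch("glove.6B.100d.txt.gz"))   # text reader
print(dispatch("embeddings.hdf5"))        # hdf5 reader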
23,174
allenai/allennlp
allennlp/modules/token_embedders/embedding.py
EmbeddingsTextFile._get_num_tokens_from_first_line
def _get_num_tokens_from_first_line(line: str) -> Optional[int]: """ This function takes a string as input and, if it contains 1 or 2 integers, assumes the largest one is the number of tokens. Returns None if the line doesn't match that pattern. """ fields = line.split(' ') if 1 <= len(fields) <= 2: try: int_fields = [int(x) for x in fields] except ValueError: return None else: num_tokens = max(int_fields) logger.info('Recognized a header line in the embedding file with number of tokens: %d', num_tokens) return num_tokens return None
python
def _get_num_tokens_from_first_line(line: str) -> Optional[int]: """ This function takes a string as input and, if it contains 1 or 2 integers, assumes the largest one is the number of tokens. Returns None if the line doesn't match that pattern. """ fields = line.split(' ') if 1 <= len(fields) <= 2: try: int_fields = [int(x) for x in fields] except ValueError: return None else: num_tokens = max(int_fields) logger.info('Recognized a header line in the embedding file with number of tokens: %d', num_tokens) return num_tokens return None
[ "def", "_get_num_tokens_from_first_line", "(", "line", ":", "str", ")", "->", "Optional", "[", "int", "]", ":", "fields", "=", "line", ".", "split", "(", "' '", ")", "if", "1", "<=", "len", "(", "fields", ")", "<=", "2", ":", "try", ":", "int_fields", "=", "[", "int", "(", "x", ")", "for", "x", "in", "fields", "]", "except", "ValueError", ":", "return", "None", "else", ":", "num_tokens", "=", "max", "(", "int_fields", ")", "logger", ".", "info", "(", "'Recognized a header line in the embedding file with number of tokens: %d'", ",", "num_tokens", ")", "return", "num_tokens", "return", "None" ]
This function takes a string as input and, if it contains 1 or 2 integers, it assumes the largest one is the number of tokens. Returns None if the line doesn't match that pattern.
[ "This", "function", "takes", "in", "input", "a", "string", "and", "if", "it", "contains", "1", "or", "2", "integers", "it", "assumes", "the", "largest", "one", "it", "the", "number", "of", "tokens", ".", "Returns", "None", "if", "the", "line", "doesn", "t", "match", "that", "pattern", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/token_embedders/embedding.py#L632-L646
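The header-detection behaviour is easy to exercise in isolation; this sketch re-creates the logic above (minus logging) and shows the expected outputs on a few invented header lines.

from typing import Optional

def get_num_tokens_from_first_line(line: str) -> Optional[int]:
    # Same logic as the function above: one or two integer fields means a
    # header, and the larger integer is taken to be the token count.
    fields = line.split(' ')
    if 1 <= len(fields) <= 2:
        try:
            int_fields = [int(x) for x in fields]
        except ValueError:
            return None
        return max(int_fields)
    return None

print(get_num_tokens_from_first_line("400000 300"))   # 400000 (GloVe-style header)
print(get_num_tokens_from_first_line("400000"))       # 400000
print(get_num_tokens_from_first_line("the 0.1 0.2"))  # None: a regular embedding line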
23,175
allenai/allennlp
allennlp/state_machines/transition_functions/coverage_transition_function.py
CoverageTransitionFunction._get_predicted_embedding_addition
def _get_predicted_embedding_addition(self, checklist_state: ChecklistStatelet, action_ids: List[int], action_embeddings: torch.Tensor) -> torch.Tensor: """ Gets the embeddings of desired terminal actions yet to be produced by the decoder, and returns their sum for the decoder to add it to the predicted embedding to bias the prediction towards missing actions. """ # Our basic approach here will be to figure out which actions we want to bias, by doing # some fancy indexing work, then multiply the action embeddings by a mask for those # actions, and return the sum of the result. # Shape: (num_terminal_actions, 1). This is 1 if we still want to predict something on the # checklist, and 0 otherwise. checklist_balance = checklist_state.get_balance().clamp(min=0) # (num_terminal_actions, 1) actions_in_agenda = checklist_state.terminal_actions # (1, num_current_actions) action_id_tensor = checklist_balance.new(action_ids).long().unsqueeze(0) # Shape: (num_terminal_actions, num_current_actions). Will have a value of 1 if the # terminal action i is our current action j, and a value of 0 otherwise. Because both sets # of actions are free of duplicates, there will be at most one non-zero value per current # action, and per terminal action. current_agenda_actions = (actions_in_agenda == action_id_tensor).float() # Shape: (num_current_actions,). With the inner multiplication, we remove any current # agenda actions that are not in our checklist balance, then we sum over the terminal # action dimension, which will have a sum of at most one. So this will be a 0/1 tensor, # where a 1 means to encourage the current action in that position. actions_to_encourage = torch.sum(current_agenda_actions * checklist_balance, dim=0) # Shape: (action_embedding_dim,). This is the sum of the action embeddings that we want # the model to prefer. embedding_addition = torch.sum(action_embeddings * actions_to_encourage.unsqueeze(1), dim=0, keepdim=False) if self._add_action_bias: # If we're adding an action bias, the last dimension of the action embedding is a bias # weight. We don't want this addition to affect the bias (TODO(mattg): or do we?), so # we zero out that dimension here. embedding_addition[-1] = 0 return embedding_addition
python
def _get_predicted_embedding_addition(self, checklist_state: ChecklistStatelet, action_ids: List[int], action_embeddings: torch.Tensor) -> torch.Tensor: """ Gets the embeddings of desired terminal actions yet to be produced by the decoder, and returns their sum for the decoder to add it to the predicted embedding to bias the prediction towards missing actions. """ # Our basic approach here will be to figure out which actions we want to bias, by doing # some fancy indexing work, then multiply the action embeddings by a mask for those # actions, and return the sum of the result. # Shape: (num_terminal_actions, 1). This is 1 if we still want to predict something on the # checklist, and 0 otherwise. checklist_balance = checklist_state.get_balance().clamp(min=0) # (num_terminal_actions, 1) actions_in_agenda = checklist_state.terminal_actions # (1, num_current_actions) action_id_tensor = checklist_balance.new(action_ids).long().unsqueeze(0) # Shape: (num_terminal_actions, num_current_actions). Will have a value of 1 if the # terminal action i is our current action j, and a value of 0 otherwise. Because both sets # of actions are free of duplicates, there will be at most one non-zero value per current # action, and per terminal action. current_agenda_actions = (actions_in_agenda == action_id_tensor).float() # Shape: (num_current_actions,). With the inner multiplication, we remove any current # agenda actions that are not in our checklist balance, then we sum over the terminal # action dimension, which will have a sum of at most one. So this will be a 0/1 tensor, # where a 1 means to encourage the current action in that position. actions_to_encourage = torch.sum(current_agenda_actions * checklist_balance, dim=0) # Shape: (action_embedding_dim,). This is the sum of the action embeddings that we want # the model to prefer. embedding_addition = torch.sum(action_embeddings * actions_to_encourage.unsqueeze(1), dim=0, keepdim=False) if self._add_action_bias: # If we're adding an action bias, the last dimension of the action embedding is a bias # weight. We don't want this addition to affect the bias (TODO(mattg): or do we?), so # we zero out that dimension here. embedding_addition[-1] = 0 return embedding_addition
[ "def", "_get_predicted_embedding_addition", "(", "self", ",", "checklist_state", ":", "ChecklistStatelet", ",", "action_ids", ":", "List", "[", "int", "]", ",", "action_embeddings", ":", "torch", ".", "Tensor", ")", "->", "torch", ".", "Tensor", ":", "# Our basic approach here will be to figure out which actions we want to bias, by doing", "# some fancy indexing work, then multiply the action embeddings by a mask for those", "# actions, and return the sum of the result.", "# Shape: (num_terminal_actions, 1). This is 1 if we still want to predict something on the", "# checklist, and 0 otherwise.", "checklist_balance", "=", "checklist_state", ".", "get_balance", "(", ")", ".", "clamp", "(", "min", "=", "0", ")", "# (num_terminal_actions, 1)", "actions_in_agenda", "=", "checklist_state", ".", "terminal_actions", "# (1, num_current_actions)", "action_id_tensor", "=", "checklist_balance", ".", "new", "(", "action_ids", ")", ".", "long", "(", ")", ".", "unsqueeze", "(", "0", ")", "# Shape: (num_terminal_actions, num_current_actions). Will have a value of 1 if the", "# terminal action i is our current action j, and a value of 0 otherwise. Because both sets", "# of actions are free of duplicates, there will be at most one non-zero value per current", "# action, and per terminal action.", "current_agenda_actions", "=", "(", "actions_in_agenda", "==", "action_id_tensor", ")", ".", "float", "(", ")", "# Shape: (num_current_actions,). With the inner multiplication, we remove any current", "# agenda actions that are not in our checklist balance, then we sum over the terminal", "# action dimension, which will have a sum of at most one. So this will be a 0/1 tensor,", "# where a 1 means to encourage the current action in that position.", "actions_to_encourage", "=", "torch", ".", "sum", "(", "current_agenda_actions", "*", "checklist_balance", ",", "dim", "=", "0", ")", "# Shape: (action_embedding_dim,). This is the sum of the action embeddings that we want", "# the model to prefer.", "embedding_addition", "=", "torch", ".", "sum", "(", "action_embeddings", "*", "actions_to_encourage", ".", "unsqueeze", "(", "1", ")", ",", "dim", "=", "0", ",", "keepdim", "=", "False", ")", "if", "self", ".", "_add_action_bias", ":", "# If we're adding an action bias, the last dimension of the action embedding is a bias", "# weight. We don't want this addition to affect the bias (TODO(mattg): or do we?), so", "# we zero out that dimension here.", "embedding_addition", "[", "-", "1", "]", "=", "0", "return", "embedding_addition" ]
Gets the embeddings of desired terminal actions yet to be produced by the decoder, and returns their sum for the decoder to add it to the predicted embedding to bias the prediction towards missing actions.
[ "Gets", "the", "embeddings", "of", "desired", "terminal", "actions", "yet", "to", "be", "produced", "by", "the", "decoder", "and", "returns", "their", "sum", "for", "the", "decoder", "to", "add", "it", "to", "the", "predicted", "embedding", "to", "bias", "the", "prediction", "towards", "missing", "actions", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/state_machines/transition_functions/coverage_transition_function.py#L115-L160
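To make the indexing trick above concrete, here is a toy walk-through with invented action ids and balances; the shapes mirror the comments in the function.

import torch

# Toy shapes: 4 terminal actions on the agenda, 3 actions currently available.
actions_in_agenda = torch.tensor([[2], [5], [7], [9]])      # (num_terminal_actions, 1)
checklist_balance = torch.tensor([[1.], [0.], [1.], [0.]])  # 1 = still wanted
action_ids = [5, 7, 11]
action_id_tensor = torch.tensor(action_ids).long().unsqueeze(0)  # (1, num_current_actions)

# 1 where terminal action i equals current action j, 0 otherwise.
current_agenda_actions = (actions_in_agenda == action_id_tensor).float()

# Zero out agenda actions that are already satisfied, then collapse the
# terminal dimension; each column sums to at most one.
actions_to_encourage = torch.sum(current_agenda_actions * checklist_balance, dim=0)
print(actions_to_encourage)  # tensor([0., 1., 0.]): only action 7 is still wanted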
23,176
allenai/allennlp
allennlp/data/iterators/multiprocess_iterator.py
_create_tensor_dicts
def _create_tensor_dicts(input_queue: Queue, output_queue: Queue, iterator: DataIterator, shuffle: bool, index: int) -> None: """ Pulls at most ``max_instances_in_memory`` from the input_queue, groups them into batches of size ``batch_size``, converts them to ``TensorDict`` s, and puts them on the ``output_queue``. """ def instances() -> Iterator[Instance]: instance = input_queue.get() while instance is not None: yield instance instance = input_queue.get() for tensor_dict in iterator(instances(), num_epochs=1, shuffle=shuffle): output_queue.put(tensor_dict) output_queue.put(index)
python
def _create_tensor_dicts(input_queue: Queue, output_queue: Queue, iterator: DataIterator, shuffle: bool, index: int) -> None: """ Pulls at most ``max_instances_in_memory`` from the input_queue, groups them into batches of size ``batch_size``, converts them to ``TensorDict`` s, and puts them on the ``output_queue``. """ def instances() -> Iterator[Instance]: instance = input_queue.get() while instance is not None: yield instance instance = input_queue.get() for tensor_dict in iterator(instances(), num_epochs=1, shuffle=shuffle): output_queue.put(tensor_dict) output_queue.put(index)
[ "def", "_create_tensor_dicts", "(", "input_queue", ":", "Queue", ",", "output_queue", ":", "Queue", ",", "iterator", ":", "DataIterator", ",", "shuffle", ":", "bool", ",", "index", ":", "int", ")", "->", "None", ":", "def", "instances", "(", ")", "->", "Iterator", "[", "Instance", "]", ":", "instance", "=", "input_queue", ".", "get", "(", ")", "while", "instance", "is", "not", "None", ":", "yield", "instance", "instance", "=", "input_queue", ".", "get", "(", ")", "for", "tensor_dict", "in", "iterator", "(", "instances", "(", ")", ",", "num_epochs", "=", "1", ",", "shuffle", "=", "shuffle", ")", ":", "output_queue", ".", "put", "(", "tensor_dict", ")", "output_queue", ".", "put", "(", "index", ")" ]
Pulls at most ``max_instances_in_memory`` from the input_queue, groups them into batches of size ``batch_size``, converts them to ``TensorDict`` s, and puts them on the ``output_queue``.
[ "Pulls", "at", "most", "max_instances_in_memory", "from", "the", "input_queue", "groups", "them", "into", "batches", "of", "size", "batch_size", "converts", "them", "to", "TensorDict", "s", "and", "puts", "them", "on", "the", "output_queue", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/iterators/multiprocess_iterator.py#L15-L34
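The ``instances()`` generator above is a sentinel-terminated drain of a queue; here is a minimal standalone sketch of that pattern (single-threaded, with invented data).

from queue import Queue
from typing import Iterator

def drain(q: "Queue") -> Iterator[str]:
    # Keep yielding items from the queue until the None sentinel arrives.
    item = q.get()
    while item is not None:
        yield item
        item = q.get()

q: Queue = Queue()
for x in ["a", "b", None]:
    q.put(x)
print(list(drain(q)))  # ['a', 'b']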
23,177
allenai/allennlp
allennlp/data/iterators/multiprocess_iterator.py
_queuer
def _queuer(instances: Iterable[Instance], input_queue: Queue, num_workers: int, num_epochs: Optional[int]) -> None: """ Reads Instances from the iterable and puts them in the input_queue. """ epoch = 0 while num_epochs is None or epoch < num_epochs: epoch += 1 for instance in instances: input_queue.put(instance) # Now put a None for each worker, since each needs to receive one # to know that it's done. for _ in range(num_workers): input_queue.put(None)
python
def _queuer(instances: Iterable[Instance], input_queue: Queue, num_workers: int, num_epochs: Optional[int]) -> None: """ Reads Instances from the iterable and puts them in the input_queue. """ epoch = 0 while num_epochs is None or epoch < num_epochs: epoch += 1 for instance in instances: input_queue.put(instance) # Now put a None for each worker, since each needs to receive one # to know that it's done. for _ in range(num_workers): input_queue.put(None)
[ "def", "_queuer", "(", "instances", ":", "Iterable", "[", "Instance", "]", ",", "input_queue", ":", "Queue", ",", "num_workers", ":", "int", ",", "num_epochs", ":", "Optional", "[", "int", "]", ")", "->", "None", ":", "epoch", "=", "0", "while", "num_epochs", "is", "None", "or", "epoch", "<", "num_epochs", ":", "epoch", "+=", "1", "for", "instance", "in", "instances", ":", "input_queue", ".", "put", "(", "instance", ")", "# Now put a None for each worker, since each needs to receive one", "# to know that it's done.", "for", "_", "in", "range", "(", "num_workers", ")", ":", "input_queue", ".", "put", "(", "None", ")" ]
Reads Instances from the iterable and puts them in the input_queue.
[ "Reads", "Instances", "from", "the", "iterable", "and", "puts", "them", "in", "the", "input_queue", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/iterators/multiprocess_iterator.py#L36-L53
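The producer side mirrors that drain: after the last epoch it enqueues one ``None`` per worker, so every consumer sees exactly one sentinel. A simplified sketch with invented items:

from queue import Queue

def queue_epochs(items, input_queue: "Queue", num_workers: int, num_epochs: int) -> None:
    # Enqueue every item once per epoch, then one sentinel per worker.
    for _ in range(num_epochs):
        for item in items:
            input_queue.put(item)
    for _ in range(num_workers):
        input_queue.put(None)

q: Queue = Queue()
queue_epochs(["a", "b"], q, num_workers=2, num_epochs=2)
print([q.get() for _ in range(6)])  # ['a', 'b', 'a', 'b', None, None]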
23,178
allenai/allennlp
allennlp/state_machines/states/grammar_based_state.py
GrammarBasedState.get_valid_actions
def get_valid_actions(self) -> List[Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]]: """ Returns a list of valid actions for each element of the group. """ return [state.get_valid_actions() for state in self.grammar_state]
python
def get_valid_actions(self) -> List[Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]]: """ Returns a list of valid actions for each element of the group. """ return [state.get_valid_actions() for state in self.grammar_state]
[ "def", "get_valid_actions", "(", "self", ")", "->", "List", "[", "Dict", "[", "str", ",", "Tuple", "[", "torch", ".", "Tensor", ",", "torch", ".", "Tensor", ",", "List", "[", "int", "]", "]", "]", "]", ":", "return", "[", "state", ".", "get_valid_actions", "(", ")", "for", "state", "in", "self", ".", "grammar_state", "]" ]
Returns a list of valid actions for each element of the group.
[ "Returns", "a", "list", "of", "valid", "actions", "for", "each", "element", "of", "the", "group", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/state_machines/states/grammar_based_state.py#L110-L114
23,179
allenai/allennlp
allennlp/data/dataset_readers/multiprocess_dataset_reader.py
_worker
def _worker(reader: DatasetReader, input_queue: Queue, output_queue: Queue, index: int) -> None: """ A worker that pulls filenames off the input queue, uses the dataset reader to read them, and places the generated instances on the output queue. When there are no filenames left on the input queue, it puts its ``index`` on the output queue and doesn't do anything else. """ # Keep going until you get a file_path that's None. while True: file_path = input_queue.get() if file_path is None: # Put my index on the queue to signify that I'm finished output_queue.put(index) break logger.info(f"reading instances from {file_path}") for instance in reader.read(file_path): output_queue.put(instance)
python
def _worker(reader: DatasetReader, input_queue: Queue, output_queue: Queue, index: int) -> None: """ A worker that pulls filenames off the input queue, uses the dataset reader to read them, and places the generated instances on the output queue. When there are no filenames left on the input queue, it puts its ``index`` on the output queue and doesn't do anything else. """ # Keep going until you get a file_path that's None. while True: file_path = input_queue.get() if file_path is None: # Put my index on the queue to signify that I'm finished output_queue.put(index) break logger.info(f"reading instances from {file_path}") for instance in reader.read(file_path): output_queue.put(instance)
[ "def", "_worker", "(", "reader", ":", "DatasetReader", ",", "input_queue", ":", "Queue", ",", "output_queue", ":", "Queue", ",", "index", ":", "int", ")", "->", "None", ":", "# Keep going until you get a file_path that's None.", "while", "True", ":", "file_path", "=", "input_queue", ".", "get", "(", ")", "if", "file_path", "is", "None", ":", "# Put my index on the queue to signify that I'm finished", "output_queue", ".", "put", "(", "index", ")", "break", "logger", ".", "info", "(", "f\"reading instances from {file_path}\"", ")", "for", "instance", "in", "reader", ".", "read", "(", "file_path", ")", ":", "output_queue", ".", "put", "(", "instance", ")" ]
A worker that pulls filenames off the input queue, uses the dataset reader to read them, and places the generated instances on the output queue. When there are no filenames left on the input queue, it puts its ``index`` on the output queue and doesn't do anything else.
[ "A", "worker", "that", "pulls", "filenames", "off", "the", "input", "queue", "uses", "the", "dataset", "reader", "to", "read", "them", "and", "places", "the", "generated", "instances", "on", "the", "output", "queue", ".", "When", "there", "are", "no", "filenames", "left", "on", "the", "input", "queue", "it", "puts", "its", "index", "on", "the", "output", "queue", "and", "doesn", "t", "do", "anything", "else", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/multiprocess_dataset_reader.py#L30-L50
23,180
allenai/allennlp
allennlp/modules/conditional_random_field.py
allowed_transitions
def allowed_transitions(constraint_type: str, labels: Dict[int, str]) -> List[Tuple[int, int]]: """ Given labels and a constraint type, returns the allowed transitions. It will additionally include transitions for the start and end states, which are used by the conditional random field. Parameters ---------- constraint_type : ``str``, required Indicates which constraint to apply. Current choices are "BIO", "IOB1", "BIOUL", and "BMES". labels : ``Dict[int, str]``, required A mapping {label_id -> label}. Most commonly this would be the value from Vocabulary.get_index_to_token_vocabulary() Returns ------- ``List[Tuple[int, int]]`` The allowed transitions (from_label_id, to_label_id). """ num_labels = len(labels) start_tag = num_labels end_tag = num_labels + 1 labels_with_boundaries = list(labels.items()) + [(start_tag, "START"), (end_tag, "END")] allowed = [] for from_label_index, from_label in labels_with_boundaries: if from_label in ("START", "END"): from_tag = from_label from_entity = "" else: from_tag = from_label[0] from_entity = from_label[1:] for to_label_index, to_label in labels_with_boundaries: if to_label in ("START", "END"): to_tag = to_label to_entity = "" else: to_tag = to_label[0] to_entity = to_label[1:] if is_transition_allowed(constraint_type, from_tag, from_entity, to_tag, to_entity): allowed.append((from_label_index, to_label_index)) return allowed
python
def allowed_transitions(constraint_type: str, labels: Dict[int, str]) -> List[Tuple[int, int]]: """ Given labels and a constraint type, returns the allowed transitions. It will additionally include transitions for the start and end states, which are used by the conditional random field. Parameters ---------- constraint_type : ``str``, required Indicates which constraint to apply. Current choices are "BIO", "IOB1", "BIOUL", and "BMES". labels : ``Dict[int, str]``, required A mapping {label_id -> label}. Most commonly this would be the value from Vocabulary.get_index_to_token_vocabulary() Returns ------- ``List[Tuple[int, int]]`` The allowed transitions (from_label_id, to_label_id). """ num_labels = len(labels) start_tag = num_labels end_tag = num_labels + 1 labels_with_boundaries = list(labels.items()) + [(start_tag, "START"), (end_tag, "END")] allowed = [] for from_label_index, from_label in labels_with_boundaries: if from_label in ("START", "END"): from_tag = from_label from_entity = "" else: from_tag = from_label[0] from_entity = from_label[1:] for to_label_index, to_label in labels_with_boundaries: if to_label in ("START", "END"): to_tag = to_label to_entity = "" else: to_tag = to_label[0] to_entity = to_label[1:] if is_transition_allowed(constraint_type, from_tag, from_entity, to_tag, to_entity): allowed.append((from_label_index, to_label_index)) return allowed
[ "def", "allowed_transitions", "(", "constraint_type", ":", "str", ",", "labels", ":", "Dict", "[", "int", ",", "str", "]", ")", "->", "List", "[", "Tuple", "[", "int", ",", "int", "]", "]", ":", "num_labels", "=", "len", "(", "labels", ")", "start_tag", "=", "num_labels", "end_tag", "=", "num_labels", "+", "1", "labels_with_boundaries", "=", "list", "(", "labels", ".", "items", "(", ")", ")", "+", "[", "(", "start_tag", ",", "\"START\"", ")", ",", "(", "end_tag", ",", "\"END\"", ")", "]", "allowed", "=", "[", "]", "for", "from_label_index", ",", "from_label", "in", "labels_with_boundaries", ":", "if", "from_label", "in", "(", "\"START\"", ",", "\"END\"", ")", ":", "from_tag", "=", "from_label", "from_entity", "=", "\"\"", "else", ":", "from_tag", "=", "from_label", "[", "0", "]", "from_entity", "=", "from_label", "[", "1", ":", "]", "for", "to_label_index", ",", "to_label", "in", "labels_with_boundaries", ":", "if", "to_label", "in", "(", "\"START\"", ",", "\"END\"", ")", ":", "to_tag", "=", "to_label", "to_entity", "=", "\"\"", "else", ":", "to_tag", "=", "to_label", "[", "0", "]", "to_entity", "=", "to_label", "[", "1", ":", "]", "if", "is_transition_allowed", "(", "constraint_type", ",", "from_tag", ",", "from_entity", ",", "to_tag", ",", "to_entity", ")", ":", "allowed", ".", "append", "(", "(", "from_label_index", ",", "to_label_index", ")", ")", "return", "allowed" ]
Given labels and a constraint type, returns the allowed transitions. It will additionally include transitions for the start and end states, which are used by the conditional random field. Parameters ---------- constraint_type : ``str``, required Indicates which constraint to apply. Current choices are "BIO", "IOB1", "BIOUL", and "BMES". labels : ``Dict[int, str]``, required A mapping {label_id -> label}. Most commonly this would be the value from Vocabulary.get_index_to_token_vocabulary() Returns ------- ``List[Tuple[int, int]]`` The allowed transitions (from_label_id, to_label_id).
[ "Given", "labels", "and", "a", "constraint", "type", "returns", "the", "allowed", "transitions", ".", "It", "will", "additionally", "include", "transitions", "for", "the", "start", "and", "end", "states", "which", "are", "used", "by", "the", "conditional", "random", "field", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/conditional_random_field.py#L12-L55
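A hedged usage sketch, assuming an AllenNLP checkout of roughly this commit; the label map is invented.

from allennlp.modules.conditional_random_field import allowed_transitions

labels = {0: "O", 1: "B-PER", 2: "I-PER"}
transitions = allowed_transitions("BIO", labels)
# Label ids 3 and 4 are the implicit START and END states.
print((1, 2) in transitions)  # True:  B-PER -> I-PER is allowed
print((0, 2) in transitions)  # False: O -> I-PER is not allowed in BIO
print((3, 2) in transitions)  # False: a sequence cannot start with I-PER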
23,181
allenai/allennlp
allennlp/modules/conditional_random_field.py
is_transition_allowed
def is_transition_allowed(constraint_type: str, from_tag: str, from_entity: str, to_tag: str, to_entity: str): """ Given a constraint type and strings ``from_tag`` and ``to_tag`` that represent the origin and destination of the transition, return whether the transition is allowed under the given constraint type. Parameters ---------- constraint_type : ``str``, required Indicates which constraint to apply. Current choices are "BIO", "IOB1", "BIOUL", and "BMES". from_tag : ``str``, required The tag that the transition originates from. For example, if the label is ``I-PER``, the ``from_tag`` is ``I``. from_entity: ``str``, required The entity corresponding to the ``from_tag``. For example, if the label is ``I-PER``, the ``from_entity`` is ``PER``. to_tag : ``str``, required The tag that the transition leads to. For example, if the label is ``I-PER``, the ``to_tag`` is ``I``. to_entity: ``str``, required The entity corresponding to the ``to_tag``. For example, if the label is ``I-PER``, the ``to_entity`` is ``PER``. Returns ------- ``bool`` Whether the transition is allowed under the given ``constraint_type``. """ # pylint: disable=too-many-return-statements if to_tag == "START" or from_tag == "END": # Cannot transition into START or from END return False if constraint_type == "BIOUL": if from_tag == "START": return to_tag in ('O', 'B', 'U') if to_tag == "END": return from_tag in ('O', 'L', 'U') return any([ # O can transition to O, B-* or U-* # L-x can transition to O, B-*, or U-* # U-x can transition to O, B-*, or U-* from_tag in ('O', 'L', 'U') and to_tag in ('O', 'B', 'U'), # B-x can only transition to I-x or L-x # I-x can only transition to I-x or L-x from_tag in ('B', 'I') and to_tag in ('I', 'L') and from_entity == to_entity ]) elif constraint_type == "BIO": if from_tag == "START": return to_tag in ('O', 'B') if to_tag == "END": return from_tag in ('O', 'B', 'I') return any([ # Can always transition to O or B-x to_tag in ('O', 'B'), # Can only transition to I-x from B-x or I-x to_tag == 'I' and from_tag in ('B', 'I') and from_entity == to_entity ]) elif constraint_type == "IOB1": if from_tag == "START": return to_tag in ('O', 'I') if to_tag == "END": return from_tag in ('O', 'B', 'I') return any([ # Can always transition to O or I-x to_tag in ('O', 'I'), # Can only transition to B-x from B-x or I-x, where # x is the same tag. to_tag == 'B' and from_tag in ('B', 'I') and from_entity == to_entity ]) elif constraint_type == "BMES": if from_tag == "START": return to_tag in ('B', 'S') if to_tag == "END": return from_tag in ('E', 'S') return any([ # Can only transition to B or S from E or S. to_tag in ('B', 'S') and from_tag in ('E', 'S'), # Can only transition to M-x from B-x, where # x is the same tag. to_tag == 'M' and from_tag in ('B', 'M') and from_entity == to_entity, # Can only transition to E-x from B-x or M-x, where # x is the same tag. to_tag == 'E' and from_tag in ('B', 'M') and from_entity == to_entity, ]) else: raise ConfigurationError(f"Unknown constraint type: {constraint_type}")
python
def is_transition_allowed(constraint_type: str, from_tag: str, from_entity: str, to_tag: str, to_entity: str): """ Given a constraint type and strings ``from_tag`` and ``to_tag`` that represent the origin and destination of the transition, return whether the transition is allowed under the given constraint type. Parameters ---------- constraint_type : ``str``, required Indicates which constraint to apply. Current choices are "BIO", "IOB1", "BIOUL", and "BMES". from_tag : ``str``, required The tag that the transition originates from. For example, if the label is ``I-PER``, the ``from_tag`` is ``I``. from_entity: ``str``, required The entity corresponding to the ``from_tag``. For example, if the label is ``I-PER``, the ``from_entity`` is ``PER``. to_tag : ``str``, required The tag that the transition leads to. For example, if the label is ``I-PER``, the ``to_tag`` is ``I``. to_entity: ``str``, required The entity corresponding to the ``to_tag``. For example, if the label is ``I-PER``, the ``to_entity`` is ``PER``. Returns ------- ``bool`` Whether the transition is allowed under the given ``constraint_type``. """ # pylint: disable=too-many-return-statements if to_tag == "START" or from_tag == "END": # Cannot transition into START or from END return False if constraint_type == "BIOUL": if from_tag == "START": return to_tag in ('O', 'B', 'U') if to_tag == "END": return from_tag in ('O', 'L', 'U') return any([ # O can transition to O, B-* or U-* # L-x can transition to O, B-*, or U-* # U-x can transition to O, B-*, or U-* from_tag in ('O', 'L', 'U') and to_tag in ('O', 'B', 'U'), # B-x can only transition to I-x or L-x # I-x can only transition to I-x or L-x from_tag in ('B', 'I') and to_tag in ('I', 'L') and from_entity == to_entity ]) elif constraint_type == "BIO": if from_tag == "START": return to_tag in ('O', 'B') if to_tag == "END": return from_tag in ('O', 'B', 'I') return any([ # Can always transition to O or B-x to_tag in ('O', 'B'), # Can only transition to I-x from B-x or I-x to_tag == 'I' and from_tag in ('B', 'I') and from_entity == to_entity ]) elif constraint_type == "IOB1": if from_tag == "START": return to_tag in ('O', 'I') if to_tag == "END": return from_tag in ('O', 'B', 'I') return any([ # Can always transition to O or I-x to_tag in ('O', 'I'), # Can only transition to B-x from B-x or I-x, where # x is the same tag. to_tag == 'B' and from_tag in ('B', 'I') and from_entity == to_entity ]) elif constraint_type == "BMES": if from_tag == "START": return to_tag in ('B', 'S') if to_tag == "END": return from_tag in ('E', 'S') return any([ # Can only transition to B or S from E or S. to_tag in ('B', 'S') and from_tag in ('E', 'S'), # Can only transition to M-x from B-x, where # x is the same tag. to_tag == 'M' and from_tag in ('B', 'M') and from_entity == to_entity, # Can only transition to E-x from B-x or M-x, where # x is the same tag. to_tag == 'E' and from_tag in ('B', 'M') and from_entity == to_entity, ]) else: raise ConfigurationError(f"Unknown constraint type: {constraint_type}")
[ "def", "is_transition_allowed", "(", "constraint_type", ":", "str", ",", "from_tag", ":", "str", ",", "from_entity", ":", "str", ",", "to_tag", ":", "str", ",", "to_entity", ":", "str", ")", ":", "# pylint: disable=too-many-return-statements", "if", "to_tag", "==", "\"START\"", "or", "from_tag", "==", "\"END\"", ":", "# Cannot transition into START or from END", "return", "False", "if", "constraint_type", "==", "\"BIOUL\"", ":", "if", "from_tag", "==", "\"START\"", ":", "return", "to_tag", "in", "(", "'O'", ",", "'B'", ",", "'U'", ")", "if", "to_tag", "==", "\"END\"", ":", "return", "from_tag", "in", "(", "'O'", ",", "'L'", ",", "'U'", ")", "return", "any", "(", "[", "# O can transition to O, B-* or U-*", "# L-x can transition to O, B-*, or U-*", "# U-x can transition to O, B-*, or U-*", "from_tag", "in", "(", "'O'", ",", "'L'", ",", "'U'", ")", "and", "to_tag", "in", "(", "'O'", ",", "'B'", ",", "'U'", ")", ",", "# B-x can only transition to I-x or L-x", "# I-x can only transition to I-x or L-x", "from_tag", "in", "(", "'B'", ",", "'I'", ")", "and", "to_tag", "in", "(", "'I'", ",", "'L'", ")", "and", "from_entity", "==", "to_entity", "]", ")", "elif", "constraint_type", "==", "\"BIO\"", ":", "if", "from_tag", "==", "\"START\"", ":", "return", "to_tag", "in", "(", "'O'", ",", "'B'", ")", "if", "to_tag", "==", "\"END\"", ":", "return", "from_tag", "in", "(", "'O'", ",", "'B'", ",", "'I'", ")", "return", "any", "(", "[", "# Can always transition to O or B-x", "to_tag", "in", "(", "'O'", ",", "'B'", ")", ",", "# Can only transition to I-x from B-x or I-x", "to_tag", "==", "'I'", "and", "from_tag", "in", "(", "'B'", ",", "'I'", ")", "and", "from_entity", "==", "to_entity", "]", ")", "elif", "constraint_type", "==", "\"IOB1\"", ":", "if", "from_tag", "==", "\"START\"", ":", "return", "to_tag", "in", "(", "'O'", ",", "'I'", ")", "if", "to_tag", "==", "\"END\"", ":", "return", "from_tag", "in", "(", "'O'", ",", "'B'", ",", "'I'", ")", "return", "any", "(", "[", "# Can always transition to O or I-x", "to_tag", "in", "(", "'O'", ",", "'I'", ")", ",", "# Can only transition to B-x from B-x or I-x, where", "# x is the same tag.", "to_tag", "==", "'B'", "and", "from_tag", "in", "(", "'B'", ",", "'I'", ")", "and", "from_entity", "==", "to_entity", "]", ")", "elif", "constraint_type", "==", "\"BMES\"", ":", "if", "from_tag", "==", "\"START\"", ":", "return", "to_tag", "in", "(", "'B'", ",", "'S'", ")", "if", "to_tag", "==", "\"END\"", ":", "return", "from_tag", "in", "(", "'E'", ",", "'S'", ")", "return", "any", "(", "[", "# Can only transition to B or S from E or S.", "to_tag", "in", "(", "'B'", ",", "'S'", ")", "and", "from_tag", "in", "(", "'E'", ",", "'S'", ")", ",", "# Can only transition to M-x from B-x, where", "# x is the same tag.", "to_tag", "==", "'M'", "and", "from_tag", "in", "(", "'B'", ",", "'M'", ")", "and", "from_entity", "==", "to_entity", ",", "# Can only transition to E-x from B-x or M-x, where", "# x is the same tag.", "to_tag", "==", "'E'", "and", "from_tag", "in", "(", "'B'", ",", "'M'", ")", "and", "from_entity", "==", "to_entity", ",", "]", ")", "else", ":", "raise", "ConfigurationError", "(", "f\"Unknown constraint type: {constraint_type}\"", ")" ]
Given a constraint type and strings ``from_tag`` and ``to_tag`` that represent the origin and destination of the transition, return whether the transition is allowed under the given constraint type. Parameters ---------- constraint_type : ``str``, required Indicates which constraint to apply. Current choices are "BIO", "IOB1", "BIOUL", and "BMES". from_tag : ``str``, required The tag that the transition originates from. For example, if the label is ``I-PER``, the ``from_tag`` is ``I``. from_entity: ``str``, required The entity corresponding to the ``from_tag``. For example, if the label is ``I-PER``, the ``from_entity`` is ``PER``. to_tag : ``str``, required The tag that the transition leads to. For example, if the label is ``I-PER``, the ``to_tag`` is ``I``. to_entity: ``str``, required The entity corresponding to the ``to_tag``. For example, if the label is ``I-PER``, the ``to_entity`` is ``PER``. Returns ------- ``bool`` Whether the transition is allowed under the given ``constraint_type``.
[ "Given", "a", "constraint", "type", "and", "strings", "from_tag", "and", "to_tag", "that", "represent", "the", "origin", "and", "destination", "of", "the", "transition", "return", "whether", "the", "transition", "is", "allowed", "under", "the", "given", "constraint", "type", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/conditional_random_field.py#L58-L149
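The per-transition helper can be exercised directly as well, assuming the same AllenNLP checkout; per the docstring, tags and entities arrive already split apart (e.g. "I-PER" becomes ("I", "PER")).

from allennlp.modules.conditional_random_field import is_transition_allowed

print(is_transition_allowed("BIO", "B", "PER", "I", "PER"))    # True
print(is_transition_allowed("BIO", "B", "PER", "I", "LOC"))    # False: entity changes
print(is_transition_allowed("BIOUL", "B", "PER", "O", ""))     # False: B must continue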
23,182
allenai/allennlp
allennlp/modules/conditional_random_field.py
ConditionalRandomField.viterbi_tags
def viterbi_tags(self, logits: torch.Tensor, mask: torch.Tensor) -> List[Tuple[List[int], float]]: """ Uses viterbi algorithm to find most likely tags for the given inputs. If constraints are applied, disallows all other transitions. """ _, max_seq_length, num_tags = logits.size() # Get the tensors out of the variables logits, mask = logits.data, mask.data # Augment transitions matrix with start and end transitions start_tag = num_tags end_tag = num_tags + 1 transitions = torch.Tensor(num_tags + 2, num_tags + 2).fill_(-10000.) # Apply transition constraints constrained_transitions = ( self.transitions * self._constraint_mask[:num_tags, :num_tags] + -10000.0 * (1 - self._constraint_mask[:num_tags, :num_tags]) ) transitions[:num_tags, :num_tags] = constrained_transitions.data if self.include_start_end_transitions: transitions[start_tag, :num_tags] = ( self.start_transitions.detach() * self._constraint_mask[start_tag, :num_tags].data + -10000.0 * (1 - self._constraint_mask[start_tag, :num_tags].detach()) ) transitions[:num_tags, end_tag] = ( self.end_transitions.detach() * self._constraint_mask[:num_tags, end_tag].data + -10000.0 * (1 - self._constraint_mask[:num_tags, end_tag].detach()) ) else: transitions[start_tag, :num_tags] = (-10000.0 * (1 - self._constraint_mask[start_tag, :num_tags].detach())) transitions[:num_tags, end_tag] = -10000.0 * (1 - self._constraint_mask[:num_tags, end_tag].detach()) best_paths = [] # Pad the max sequence length by 2 to account for start_tag + end_tag. tag_sequence = torch.Tensor(max_seq_length + 2, num_tags + 2) for prediction, prediction_mask in zip(logits, mask): sequence_length = torch.sum(prediction_mask) # Start with everything totally unlikely tag_sequence.fill_(-10000.) # At timestep 0 we must have the START_TAG tag_sequence[0, start_tag] = 0. # At steps 1, ..., sequence_length we just use the incoming prediction tag_sequence[1:(sequence_length + 1), :num_tags] = prediction[:sequence_length] # And at the last timestep we must have the END_TAG tag_sequence[sequence_length + 1, end_tag] = 0. # We pass the tags and the transitions to ``viterbi_decode``. viterbi_path, viterbi_score = util.viterbi_decode(tag_sequence[:(sequence_length + 2)], transitions) # Get rid of START and END sentinels and append. viterbi_path = viterbi_path[1:-1] best_paths.append((viterbi_path, viterbi_score.item())) return best_paths
python
def viterbi_tags(self, logits: torch.Tensor, mask: torch.Tensor) -> List[Tuple[List[int], float]]: """ Uses viterbi algorithm to find most likely tags for the given inputs. If constraints are applied, disallows all other transitions. """ _, max_seq_length, num_tags = logits.size() # Get the tensors out of the variables logits, mask = logits.data, mask.data # Augment transitions matrix with start and end transitions start_tag = num_tags end_tag = num_tags + 1 transitions = torch.Tensor(num_tags + 2, num_tags + 2).fill_(-10000.) # Apply transition constraints constrained_transitions = ( self.transitions * self._constraint_mask[:num_tags, :num_tags] + -10000.0 * (1 - self._constraint_mask[:num_tags, :num_tags]) ) transitions[:num_tags, :num_tags] = constrained_transitions.data if self.include_start_end_transitions: transitions[start_tag, :num_tags] = ( self.start_transitions.detach() * self._constraint_mask[start_tag, :num_tags].data + -10000.0 * (1 - self._constraint_mask[start_tag, :num_tags].detach()) ) transitions[:num_tags, end_tag] = ( self.end_transitions.detach() * self._constraint_mask[:num_tags, end_tag].data + -10000.0 * (1 - self._constraint_mask[:num_tags, end_tag].detach()) ) else: transitions[start_tag, :num_tags] = (-10000.0 * (1 - self._constraint_mask[start_tag, :num_tags].detach())) transitions[:num_tags, end_tag] = -10000.0 * (1 - self._constraint_mask[:num_tags, end_tag].detach()) best_paths = [] # Pad the max sequence length by 2 to account for start_tag + end_tag. tag_sequence = torch.Tensor(max_seq_length + 2, num_tags + 2) for prediction, prediction_mask in zip(logits, mask): sequence_length = torch.sum(prediction_mask) # Start with everything totally unlikely tag_sequence.fill_(-10000.) # At timestep 0 we must have the START_TAG tag_sequence[0, start_tag] = 0. # At steps 1, ..., sequence_length we just use the incoming prediction tag_sequence[1:(sequence_length + 1), :num_tags] = prediction[:sequence_length] # And at the last timestep we must have the END_TAG tag_sequence[sequence_length + 1, end_tag] = 0. # We pass the tags and the transitions to ``viterbi_decode``. viterbi_path, viterbi_score = util.viterbi_decode(tag_sequence[:(sequence_length + 2)], transitions) # Get rid of START and END sentinels and append. viterbi_path = viterbi_path[1:-1] best_paths.append((viterbi_path, viterbi_score.item())) return best_paths
[ "def", "viterbi_tags", "(", "self", ",", "logits", ":", "torch", ".", "Tensor", ",", "mask", ":", "torch", ".", "Tensor", ")", "->", "List", "[", "Tuple", "[", "List", "[", "int", "]", ",", "float", "]", "]", ":", "_", ",", "max_seq_length", ",", "num_tags", "=", "logits", ".", "size", "(", ")", "# Get the tensors out of the variables", "logits", ",", "mask", "=", "logits", ".", "data", ",", "mask", ".", "data", "# Augment transitions matrix with start and end transitions", "start_tag", "=", "num_tags", "end_tag", "=", "num_tags", "+", "1", "transitions", "=", "torch", ".", "Tensor", "(", "num_tags", "+", "2", ",", "num_tags", "+", "2", ")", ".", "fill_", "(", "-", "10000.", ")", "# Apply transition constraints", "constrained_transitions", "=", "(", "self", ".", "transitions", "*", "self", ".", "_constraint_mask", "[", ":", "num_tags", ",", ":", "num_tags", "]", "+", "-", "10000.0", "*", "(", "1", "-", "self", ".", "_constraint_mask", "[", ":", "num_tags", ",", ":", "num_tags", "]", ")", ")", "transitions", "[", ":", "num_tags", ",", ":", "num_tags", "]", "=", "constrained_transitions", ".", "data", "if", "self", ".", "include_start_end_transitions", ":", "transitions", "[", "start_tag", ",", ":", "num_tags", "]", "=", "(", "self", ".", "start_transitions", ".", "detach", "(", ")", "*", "self", ".", "_constraint_mask", "[", "start_tag", ",", ":", "num_tags", "]", ".", "data", "+", "-", "10000.0", "*", "(", "1", "-", "self", ".", "_constraint_mask", "[", "start_tag", ",", ":", "num_tags", "]", ".", "detach", "(", ")", ")", ")", "transitions", "[", ":", "num_tags", ",", "end_tag", "]", "=", "(", "self", ".", "end_transitions", ".", "detach", "(", ")", "*", "self", ".", "_constraint_mask", "[", ":", "num_tags", ",", "end_tag", "]", ".", "data", "+", "-", "10000.0", "*", "(", "1", "-", "self", ".", "_constraint_mask", "[", ":", "num_tags", ",", "end_tag", "]", ".", "detach", "(", ")", ")", ")", "else", ":", "transitions", "[", "start_tag", ",", ":", "num_tags", "]", "=", "(", "-", "10000.0", "*", "(", "1", "-", "self", ".", "_constraint_mask", "[", "start_tag", ",", ":", "num_tags", "]", ".", "detach", "(", ")", ")", ")", "transitions", "[", ":", "num_tags", ",", "end_tag", "]", "=", "-", "10000.0", "*", "(", "1", "-", "self", ".", "_constraint_mask", "[", ":", "num_tags", ",", "end_tag", "]", ".", "detach", "(", ")", ")", "best_paths", "=", "[", "]", "# Pad the max sequence length by 2 to account for start_tag + end_tag.", "tag_sequence", "=", "torch", ".", "Tensor", "(", "max_seq_length", "+", "2", ",", "num_tags", "+", "2", ")", "for", "prediction", ",", "prediction_mask", "in", "zip", "(", "logits", ",", "mask", ")", ":", "sequence_length", "=", "torch", ".", "sum", "(", "prediction_mask", ")", "# Start with everything totally unlikely", "tag_sequence", ".", "fill_", "(", "-", "10000.", ")", "# At timestep 0 we must have the START_TAG", "tag_sequence", "[", "0", ",", "start_tag", "]", "=", "0.", "# At steps 1, ..., sequence_length we just use the incoming prediction", "tag_sequence", "[", "1", ":", "(", "sequence_length", "+", "1", ")", ",", ":", "num_tags", "]", "=", "prediction", "[", ":", "sequence_length", "]", "# And at the last timestep we must have the END_TAG", "tag_sequence", "[", "sequence_length", "+", "1", ",", "end_tag", "]", "=", "0.", "# We pass the tags and the transitions to ``viterbi_decode``.", "viterbi_path", ",", "viterbi_score", "=", "util", ".", "viterbi_decode", "(", "tag_sequence", "[", ":", "(", "sequence_length", "+", "2", ")", "]", ",", "transitions", ")", "# 
Get rid of START and END sentinels and append.", "viterbi_path", "=", "viterbi_path", "[", "1", ":", "-", "1", "]", "best_paths", ".", "append", "(", "(", "viterbi_path", ",", "viterbi_score", ".", "item", "(", ")", ")", ")", "return", "best_paths" ]
Uses viterbi algorithm to find most likely tags for the given inputs. If constraints are applied, disallows all other transitions.
[ "Uses", "viterbi", "algorithm", "to", "find", "most", "likely", "tags", "for", "the", "given", "inputs", ".", "If", "constraints", "are", "applied", "disallows", "all", "other", "transitions", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/conditional_random_field.py#L324-L384
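A hedged usage sketch for ``viterbi_tags``, assuming an AllenNLP checkout of roughly this commit; the label set and random logits are invented.

import torch
from allennlp.modules.conditional_random_field import (
    ConditionalRandomField, allowed_transitions)

labels = {0: "O", 1: "B-PER", 2: "I-PER"}
crf = ConditionalRandomField(num_tags=3,
                             constraints=allowed_transitions("BIO", labels))
logits = torch.randn(2, 5, 3)   # (batch_size, max_seq_length, num_tags)
mask = torch.ones(2, 5).long()  # all tokens are real (no padding)
for tags, score in crf.viterbi_tags(logits, mask):
    print(tags, score)  # a length-5 tag-id list and its (unnormalized) score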
23,183
allenai/allennlp
allennlp/common/from_params.py
takes_arg
def takes_arg(obj, arg: str) -> bool: """ Checks whether the provided obj takes a certain arg. If it's a class, we're really checking whether its constructor does. If it's a function or method, we're checking the object itself. Otherwise, we raise an error. """ if inspect.isclass(obj): signature = inspect.signature(obj.__init__) elif inspect.ismethod(obj) or inspect.isfunction(obj): signature = inspect.signature(obj) else: raise ConfigurationError(f"object {obj} is not callable") return arg in signature.parameters
python
def takes_arg(obj, arg: str) -> bool: """ Checks whether the provided obj takes a certain arg. If it's a class, we're really checking whether its constructor does. If it's a function or method, we're checking the object itself. Otherwise, we raise an error. """ if inspect.isclass(obj): signature = inspect.signature(obj.__init__) elif inspect.ismethod(obj) or inspect.isfunction(obj): signature = inspect.signature(obj) else: raise ConfigurationError(f"object {obj} is not callable") return arg in signature.parameters
[ "def", "takes_arg", "(", "obj", ",", "arg", ":", "str", ")", "->", "bool", ":", "if", "inspect", ".", "isclass", "(", "obj", ")", ":", "signature", "=", "inspect", ".", "signature", "(", "obj", ".", "__init__", ")", "elif", "inspect", ".", "ismethod", "(", "obj", ")", "or", "inspect", ".", "isfunction", "(", "obj", ")", ":", "signature", "=", "inspect", ".", "signature", "(", "obj", ")", "else", ":", "raise", "ConfigurationError", "(", "f\"object {obj} is not callable\"", ")", "return", "arg", "in", "signature", ".", "parameters" ]
Checks whether the provided obj takes a certain arg. If it's a class, we're really checking whether its constructor does. If it's a function or method, we're checking the object itself. Otherwise, we raise an error.
[ "Checks", "whether", "the", "provided", "obj", "takes", "a", "certain", "arg", ".", "If", "it", "s", "a", "class", "we", "re", "really", "checking", "whether", "its", "constructor", "does", ".", "If", "it", "s", "a", "function", "or", "method", "we", "re", "checking", "the", "object", "itself", ".", "Otherwise", "we", "raise", "an", "error", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/from_params.py#L59-L72
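``takes_arg`` is straightforward to try; the ``Classifier`` class below is invented for the example.

from allennlp.common.from_params import takes_arg

class Classifier:  # invented example class
    def __init__(self, hidden_size: int, dropout: float = 0.2) -> None:
        self.hidden_size, self.dropout = hidden_size, dropout

print(takes_arg(Classifier, "hidden_size"))       # True (checks __init__)
print(takes_arg(Classifier, "learning_rate"))     # False
print(takes_arg(Classifier.__init__, "dropout"))  # True (functions work too)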
23,184
allenai/allennlp
allennlp/common/from_params.py
create_kwargs
def create_kwargs(cls: Type[T], params: Params, **extras) -> Dict[str, Any]: """ Given some class, a `Params` object, and potentially other keyword arguments, create a dict of keyword args suitable for passing to the class's constructor. The function does this by finding the class's constructor, matching the constructor arguments to entries in the `params` object, and instantiating values for the parameters using the type annotation and possibly a from_params method. Any values that are provided in the `extras` will just be used as is. For instance, you might provide an existing `Vocabulary` this way. """ # Get the signature of the constructor. signature = inspect.signature(cls.__init__) kwargs: Dict[str, Any] = {} # Iterate over all the constructor parameters and their annotations. for name, param in signature.parameters.items(): # Skip "self". You're not *required* to call the first parameter "self", # so in theory this logic is fragile, but if you don't call the self parameter # "self" you kind of deserve what happens. if name == "self": continue # If the annotation is a compound type like typing.Dict[str, int], # it will have an __origin__ field indicating `typing.Dict` # and an __args__ field indicating `(str, int)`. We capture both. annotation = remove_optional(param.annotation) kwargs[name] = construct_arg(cls, name, annotation, param.default, params, **extras) params.assert_empty(cls.__name__) return kwargs
python
def create_kwargs(cls: Type[T], params: Params, **extras) -> Dict[str, Any]: """ Given some class, a `Params` object, and potentially other keyword arguments, create a dict of keyword args suitable for passing to the class's constructor. The function does this by finding the class's constructor, matching the constructor arguments to entries in the `params` object, and instantiating values for the parameters using the type annotation and possibly a from_params method. Any values that are provided in the `extras` will just be used as is. For instance, you might provide an existing `Vocabulary` this way. """ # Get the signature of the constructor. signature = inspect.signature(cls.__init__) kwargs: Dict[str, Any] = {} # Iterate over all the constructor parameters and their annotations. for name, param in signature.parameters.items(): # Skip "self". You're not *required* to call the first parameter "self", # so in theory this logic is fragile, but if you don't call the self parameter # "self" you kind of deserve what happens. if name == "self": continue # If the annotation is a compound type like typing.Dict[str, int], # it will have an __origin__ field indicating `typing.Dict` # and an __args__ field indicating `(str, int)`. We capture both. annotation = remove_optional(param.annotation) kwargs[name] = construct_arg(cls, name, annotation, param.default, params, **extras) params.assert_empty(cls.__name__) return kwargs
[ "def", "create_kwargs", "(", "cls", ":", "Type", "[", "T", "]", ",", "params", ":", "Params", ",", "*", "*", "extras", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "# Get the signature of the constructor.", "signature", "=", "inspect", ".", "signature", "(", "cls", ".", "__init__", ")", "kwargs", ":", "Dict", "[", "str", ",", "Any", "]", "=", "{", "}", "# Iterate over all the constructor parameters and their annotations.", "for", "name", ",", "param", "in", "signature", ".", "parameters", ".", "items", "(", ")", ":", "# Skip \"self\". You're not *required* to call the first parameter \"self\",", "# so in theory this logic is fragile, but if you don't call the self parameter", "# \"self\" you kind of deserve what happens.", "if", "name", "==", "\"self\"", ":", "continue", "# If the annotation is a compound type like typing.Dict[str, int],", "# it will have an __origin__ field indicating `typing.Dict`", "# and an __args__ field indicating `(str, int)`. We capture both.", "annotation", "=", "remove_optional", "(", "param", ".", "annotation", ")", "kwargs", "[", "name", "]", "=", "construct_arg", "(", "cls", ",", "name", ",", "annotation", ",", "param", ".", "default", ",", "params", ",", "*", "*", "extras", ")", "params", ".", "assert_empty", "(", "cls", ".", "__name__", ")", "return", "kwargs" ]
Given some class, a `Params` object, and potentially other keyword arguments, create a dict of keyword args suitable for passing to the class's constructor. The function does this by finding the class's constructor, matching the constructor arguments to entries in the `params` object, and instantiating values for the parameters using the type annotation and possibly a from_params method. Any values that are provided in the `extras` will just be used as is. For instance, you might provide an existing `Vocabulary` this way.
[ "Given", "some", "class", "a", "Params", "object", "and", "potentially", "other", "keyword", "arguments", "create", "a", "dict", "of", "keyword", "args", "suitable", "for", "passing", "to", "the", "class", "s", "constructor", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/from_params.py#L105-L136
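A hedged sketch of ``create_kwargs`` in isolation, assuming this version's ``from_params`` machinery; ``Widget`` is an invented class and the expected output is our reading of the code above.

from allennlp.common import Params
from allennlp.common.from_params import create_kwargs

class Widget:  # invented example class
    def __init__(self, size: int, name: str = "widget") -> None:
        self.size, self.name = size, name

# "size" is pulled from the params; "name" falls back to its default.
kwargs = create_kwargs(Widget, Params({"size": 10}))
print(kwargs)  # expected: {'size': 10, 'name': 'widget'}
widget = Widget(**kwargs)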
23,185
allenai/allennlp
allennlp/state_machines/transition_functions/transition_function.py
TransitionFunction.take_step
def take_step(self, state: StateType, max_actions: int = None, allowed_actions: List[Set] = None) -> List[StateType]: """ The main method in the ``TransitionFunction`` API. This function defines the computation done at each step of decoding and returns a ranked list of next states. The input state is `grouped`, to allow for efficient computation, but the output states should all have a ``group_size`` of 1, to make things easier on the decoding algorithm. They will get regrouped later as needed. Because of the way we handle grouping in the decoder states, constructing a new state is actually a relatively expensive operation. If you know a priori that only some of the states will be needed (either because you have a set of gold action sequences, or you have a fixed beam size), passing that information into this function will keep us from constructing more states than we need, which will greatly speed up your computation. IMPORTANT: This method `must` return states already sorted by their score, otherwise ``BeamSearch`` and other methods will break. For efficiency, we do not perform an additional sort in those methods. ALSO IMPORTANT: When ``allowed_actions`` is given and ``max_actions`` is not, we assume you want to evaluate all possible states and do not need any sorting (e.g., this is true for maximum marginal likelihood training that does not use a beam search). In this case, we may skip the sorting step for efficiency reasons. Parameters ---------- state : ``State`` The current state of the decoder, which we will take a step `from`. We may be grouping together computation for several states here. Because we can have several states for each instance in the original batch being evaluated at the same time, we use ``group_size`` for this kind of batching, and ``batch_size`` for the `original` batch in ``model.forward``. max_actions : ``int``, optional If you know that you will only need a certain number of states out of this (e.g., in a beam search), you can pass in the max number of actions that you need, and we will only construct that many states (for each `batch` instance - `not` for each `group` instance!). This can save a whole lot of computation if you have an action space that's much larger than your beam size. allowed_actions : ``List[Set]``, optional If the ``DecoderTrainer`` has constraints on which actions need to be evaluated (e.g., maximum marginal likelihood only needs to evaluate action sequences in a given set), you can pass those constraints here, to avoid constructing state objects unnecessarily. If there are no constraints from the trainer, passing a value of ``None`` here will allow all actions to be considered. This is a list because it is `batched` - every instance in the batch has a set of allowed actions. Note that the size of this list is the ``group_size`` in the ``State``, `not` the ``batch_size`` of ``model.forward``. The training algorithm needs to convert from the `batched` allowed action sequences that it has to a `grouped` allowed action sequence list. Returns ------- next_states : ``List[State]`` A list of next states, ordered by score. """ raise NotImplementedError
python
def take_step(self, state: StateType, max_actions: int = None, allowed_actions: List[Set] = None) -> List[StateType]: """ The main method in the ``TransitionFunction`` API. This function defines the computation done at each step of decoding and returns a ranked list of next states. The input state is `grouped`, to allow for efficient computation, but the output states should all have a ``group_size`` of 1, to make things easier on the decoding algorithm. They will get regrouped later as needed. Because of the way we handle grouping in the decoder states, constructing a new state is actually a relatively expensive operation. If you know a priori that only some of the states will be needed (either because you have a set of gold action sequences, or you have a fixed beam size), passing that information into this function will keep us from constructing more states than we need, which will greatly speed up your computation. IMPORTANT: This method `must` return states already sorted by their score, otherwise ``BeamSearch`` and other methods will break. For efficiency, we do not perform an additional sort in those methods. ALSO IMPORTANT: When ``allowed_actions`` is given and ``max_actions`` is not, we assume you want to evaluate all possible states and do not need any sorting (e.g., this is true for maximum marginal likelihood training that does not use a beam search). In this case, we may skip the sorting step for efficiency reasons. Parameters ---------- state : ``State`` The current state of the decoder, which we will take a step `from`. We may be grouping together computation for several states here. Because we can have several states for each instance in the original batch being evaluated at the same time, we use ``group_size`` for this kind of batching, and ``batch_size`` for the `original` batch in ``model.forward``. max_actions : ``int``, optional If you know that you will only need a certain number of states out of this (e.g., in a beam search), you can pass in the max number of actions that you need, and we will only construct that many states (for each `batch` instance - `not` for each `group` instance!). This can save a whole lot of computation if you have an action space that's much larger than your beam size. allowed_actions : ``List[Set]``, optional If the ``DecoderTrainer`` has constraints on which actions need to be evaluated (e.g., maximum marginal likelihood only needs to evaluate action sequences in a given set), you can pass those constraints here, to avoid constructing state objects unnecessarily. If there are no constraints from the trainer, passing a value of ``None`` here will allow all actions to be considered. This is a list because it is `batched` - every instance in the batch has a set of allowed actions. Note that the size of this list is the ``group_size`` in the ``State``, `not` the ``batch_size`` of ``model.forward``. The training algorithm needs to convert from the `batched` allowed action sequences that it has to a `grouped` allowed action sequence list. Returns ------- next_states : ``List[State]`` A list of next states, ordered by score. """ raise NotImplementedError
[ "def", "take_step", "(", "self", ",", "state", ":", "StateType", ",", "max_actions", ":", "int", "=", "None", ",", "allowed_actions", ":", "List", "[", "Set", "]", "=", "None", ")", "->", "List", "[", "StateType", "]", ":", "raise", "NotImplementedError" ]
The main method in the ``TransitionFunction`` API. This function defines the computation done at each step of decoding and returns a ranked list of next states. The input state is `grouped`, to allow for efficient computation, but the output states should all have a ``group_size`` of 1, to make things easier on the decoding algorithm. They will get regrouped later as needed. Because of the way we handle grouping in the decoder states, constructing a new state is actually a relatively expensive operation. If you know a priori that only some of the states will be needed (either because you have a set of gold action sequences, or you have a fixed beam size), passing that information into this function will keep us from constructing more states than we need, which will greatly speed up your computation. IMPORTANT: This method `must` return states already sorted by their score, otherwise ``BeamSearch`` and other methods will break. For efficiency, we do not perform an additional sort in those methods. ALSO IMPORTANT: When ``allowed_actions`` is given and ``max_actions`` is not, we assume you want to evaluate all possible states and do not need any sorting (e.g., this is true for maximum marginal likelihood training that does not use a beam search). In this case, we may skip the sorting step for efficiency reasons. Parameters ---------- state : ``State`` The current state of the decoder, which we will take a step `from`. We may be grouping together computation for several states here. Because we can have several states for each instance in the original batch being evaluated at the same time, we use ``group_size`` for this kind of batching, and ``batch_size`` for the `original` batch in ``model.forward``. max_actions : ``int``, optional If you know that you will only need a certain number of states out of this (e.g., in a beam search), you can pass in the max number of actions that you need, and we will only construct that many states (for each `batch` instance - `not` for each `group` instance!). This can save a whole lot of computation if you have an action space that's much larger than your beam size. allowed_actions : ``List[Set]``, optional If the ``DecoderTrainer`` has constraints on which actions need to be evaluated (e.g., maximum marginal likelihood only needs to evaluate action sequences in a given set), you can pass those constraints here, to avoid constructing state objects unnecessarily. If there are no constraints from the trainer, passing a value of ``None`` here will allow all actions to be considered. This is a list because it is `batched` - every instance in the batch has a set of allowed actions. Note that the size of this list is the ``group_size`` in the ``State``, `not` the ``batch_size`` of ``model.forward``. The training algorithm needs to convert from the `batched` allowed action sequences that it has to a `grouped` allowed action sequence list. Returns ------- next_states : ``List[State]`` A list of next states, ordered by score.
[ "The", "main", "method", "in", "the", "TransitionFunction", "API", ".", "This", "function", "defines", "the", "computation", "done", "at", "each", "step", "of", "decoding", "and", "returns", "a", "ranked", "list", "of", "next", "states", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/state_machines/transition_functions/transition_function.py#L23-L82
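A minimal sketch of how a decoding loop might consume ``take_step``, assuming a concrete ``TransitionFunction`` subclass and the usual ``State`` API with an ``is_finished()`` method (both are assumptions here, not shown in the record above):

def greedy_decode(transition_function, initial_state, max_steps: int = 50):
    # Relies on the contract documented above: take_step returns next
    # states already sorted by score, best first.
    state = initial_state
    for _ in range(max_steps):
        if state.is_finished():
            break
        # max_actions=1 asks the transition function to construct only the
        # highest-scoring next state, avoiding wasted state construction.
        next_states = transition_function.take_step(state, max_actions=1)
        if not next_states:
            break
        state = next_states[0]
    return state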
23,186
allenai/allennlp
allennlp/data/dataset_readers/semantic_dependency_parsing.py
parse_sentence
def parse_sentence(sentence_blob: str) -> Tuple[List[Dict[str, str]], List[Tuple[int, int]], List[str]]: """ Parses a chunk of text in the SemEval SDP format. Each word in the sentence is returned as a dictionary with the following format: 'id': '1', 'form': 'Pierre', 'lemma': 'Pierre', 'pos': 'NNP', 'head': '2', # Note that this is the `syntactic` head. 'deprel': 'nn', 'top': '-', 'pred': '+', 'frame': 'named:x-c' Along with a list of arcs and their corresponding tags. Note that in semantic dependency parsing words can have more than one head (it is not a tree), meaning that the list of arcs and tags are not tied to the length of the sentence. """ annotated_sentence = [] arc_indices = [] arc_tags = [] predicates = [] lines = [line.split("\t") for line in sentence_blob.split("\n") if line and not line.strip().startswith("#")] for line_idx, line in enumerate(lines): annotated_token = {k:v for k, v in zip(FIELDS, line)} if annotated_token['pred'] == "+": predicates.append(line_idx) annotated_sentence.append(annotated_token) for line_idx, line in enumerate(lines): for predicate_idx, arg in enumerate(line[len(FIELDS):]): if arg != "_": arc_indices.append((line_idx, predicates[predicate_idx])) arc_tags.append(arg) return annotated_sentence, arc_indices, arc_tags
python
def parse_sentence(sentence_blob: str) -> Tuple[List[Dict[str, str]], List[Tuple[int, int]], List[str]]: """ Parses a chunk of text in the SemEval SDP format. Each word in the sentence is returned as a dictionary with the following format: 'id': '1', 'form': 'Pierre', 'lemma': 'Pierre', 'pos': 'NNP', 'head': '2', # Note that this is the `syntactic` head. 'deprel': 'nn', 'top': '-', 'pred': '+', 'frame': 'named:x-c' Along with a list of arcs and their corresponding tags. Note that in semantic dependency parsing words can have more than one head (it is not a tree), meaning that the list of arcs and tags are not tied to the length of the sentence. """ annotated_sentence = [] arc_indices = [] arc_tags = [] predicates = [] lines = [line.split("\t") for line in sentence_blob.split("\n") if line and not line.strip().startswith("#")] for line_idx, line in enumerate(lines): annotated_token = {k:v for k, v in zip(FIELDS, line)} if annotated_token['pred'] == "+": predicates.append(line_idx) annotated_sentence.append(annotated_token) for line_idx, line in enumerate(lines): for predicate_idx, arg in enumerate(line[len(FIELDS):]): if arg != "_": arc_indices.append((line_idx, predicates[predicate_idx])) arc_tags.append(arg) return annotated_sentence, arc_indices, arc_tags
[ "def", "parse_sentence", "(", "sentence_blob", ":", "str", ")", "->", "Tuple", "[", "List", "[", "Dict", "[", "str", ",", "str", "]", "]", ",", "List", "[", "Tuple", "[", "int", ",", "int", "]", "]", ",", "List", "[", "str", "]", "]", ":", "annotated_sentence", "=", "[", "]", "arc_indices", "=", "[", "]", "arc_tags", "=", "[", "]", "predicates", "=", "[", "]", "lines", "=", "[", "line", ".", "split", "(", "\"\\t\"", ")", "for", "line", "in", "sentence_blob", ".", "split", "(", "\"\\n\"", ")", "if", "line", "and", "not", "line", ".", "strip", "(", ")", ".", "startswith", "(", "\"#\"", ")", "]", "for", "line_idx", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "annotated_token", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "zip", "(", "FIELDS", ",", "line", ")", "}", "if", "annotated_token", "[", "'pred'", "]", "==", "\"+\"", ":", "predicates", ".", "append", "(", "line_idx", ")", "annotated_sentence", ".", "append", "(", "annotated_token", ")", "for", "line_idx", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "for", "predicate_idx", ",", "arg", "in", "enumerate", "(", "line", "[", "len", "(", "FIELDS", ")", ":", "]", ")", ":", "if", "arg", "!=", "\"_\"", ":", "arc_indices", ".", "append", "(", "(", "line_idx", ",", "predicates", "[", "predicate_idx", "]", ")", ")", "arc_tags", ".", "append", "(", "arg", ")", "return", "annotated_sentence", ",", "arc_indices", ",", "arc_tags" ]
Parses a chunk of text in the SemEval SDP format. Each word in the sentence is returned as a dictionary with the following format: 'id': '1', 'form': 'Pierre', 'lemma': 'Pierre', 'pos': 'NNP', 'head': '2', # Note that this is the `syntactic` head. 'deprel': 'nn', 'top': '-', 'pred': '+', 'frame': 'named:x-c' Along with a list of arcs and their corresponding tags. Note that in semantic dependency parsing words can have more than one head (it is not a tree), meaning that the list of arcs and tags are not tied to the length of the sentence.
[ "Parses", "a", "chunk", "of", "text", "in", "the", "SemEval", "SDP", "format", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/semantic_dependency_parsing.py#L17-L56
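A small usage sketch, assuming ``parse_sentence`` and the module-level ``FIELDS`` constant (the nine column names shown in the docstring: id, form, lemma, pos, head, deprel, top, pred, frame) are imported from the reader module above; trailing columns carry one argument slot per predicate:

blob = ("1\tPierre\tPierre\tNNP\t2\tnn\t-\t-\t_\tARG1\n"
        "2\tworks\twork\tVBZ\t0\troot\t+\t+\tv:e-h\t_")
annotated, arc_indices, arc_tags = parse_sentence(blob)
print(len(annotated))  # 2
print(arc_indices)     # [(0, 1)] (token 0 is an argument of the predicate at token 1)
print(arc_tags)        # ['ARG1']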
23,187
allenai/allennlp
allennlp/common/checks.py
parse_cuda_device
def parse_cuda_device(cuda_device: Union[str, int, List[int]]) -> Union[int, List[int]]: """ Disambiguates single GPU and multiple GPU settings for cuda_device param. """ def from_list(strings): if len(strings) > 1: return [int(d) for d in strings] elif len(strings) == 1: return int(strings[0]) else: return -1 if isinstance(cuda_device, str): return from_list(re.split(r',\s*', cuda_device)) elif isinstance(cuda_device, int): return cuda_device elif isinstance(cuda_device, list): return from_list(cuda_device) else: # TODO(brendanr): Determine why mypy can't tell that this matches the Union. return int(cuda_device)
python
def parse_cuda_device(cuda_device: Union[str, int, List[int]]) -> Union[int, List[int]]: """ Disambiguates single GPU and multiple GPU settings for cuda_device param. """ def from_list(strings): if len(strings) > 1: return [int(d) for d in strings] elif len(strings) == 1: return int(strings[0]) else: return -1 if isinstance(cuda_device, str): return from_list(re.split(r',\s*', cuda_device)) elif isinstance(cuda_device, int): return cuda_device elif isinstance(cuda_device, list): return from_list(cuda_device) else: # TODO(brendanr): Determine why mypy can't tell that this matches the Union. return int(cuda_device)
[ "def", "parse_cuda_device", "(", "cuda_device", ":", "Union", "[", "str", ",", "int", ",", "List", "[", "int", "]", "]", ")", "->", "Union", "[", "int", ",", "List", "[", "int", "]", "]", ":", "def", "from_list", "(", "strings", ")", ":", "if", "len", "(", "strings", ")", ">", "1", ":", "return", "[", "int", "(", "d", ")", "for", "d", "in", "strings", "]", "elif", "len", "(", "strings", ")", "==", "1", ":", "return", "int", "(", "strings", "[", "0", "]", ")", "else", ":", "return", "-", "1", "if", "isinstance", "(", "cuda_device", ",", "str", ")", ":", "return", "from_list", "(", "re", ".", "split", "(", "r',\\s*'", ",", "cuda_device", ")", ")", "elif", "isinstance", "(", "cuda_device", ",", "int", ")", ":", "return", "cuda_device", "elif", "isinstance", "(", "cuda_device", ",", "list", ")", ":", "return", "from_list", "(", "cuda_device", ")", "else", ":", "# TODO(brendanr): Determine why mypy can't tell that this matches the Union.", "return", "int", "(", "cuda_device", ")" ]
Disambiguates single GPU and multiple GPU settings for cuda_device param.
[ "Disambiguates", "single", "GPU", "and", "multiple", "GPU", "settings", "for", "cuda_device", "param", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/checks.py#L51-L71
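A quick check of the accepted shapes; note that a one-element string collapses to a plain int, and an empty list maps to -1 (CPU):

assert parse_cuda_device(0) == 0
assert parse_cuda_device("0") == 0
assert parse_cuda_device("0,1") == [0, 1]
assert parse_cuda_device("0, 1") == [0, 1]
assert parse_cuda_device([0, 1]) == [0, 1]
assert parse_cuda_device([]) == -1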
23,188
allenai/allennlp
allennlp/data/iterators/data_iterator.py
add_epoch_number
def add_epoch_number(batch: Batch, epoch: int) -> Batch: """ Add the epoch number to the batch instances as a MetadataField. """ for instance in batch.instances: instance.fields['epoch_num'] = MetadataField(epoch) return batch
python
def add_epoch_number(batch: Batch, epoch: int) -> Batch: """ Add the epoch number to the batch instances as a MetadataField. """ for instance in batch.instances: instance.fields['epoch_num'] = MetadataField(epoch) return batch
[ "def", "add_epoch_number", "(", "batch", ":", "Batch", ",", "epoch", ":", "int", ")", "->", "Batch", ":", "for", "instance", "in", "batch", ".", "instances", ":", "instance", ".", "fields", "[", "'epoch_num'", "]", "=", "MetadataField", "(", "epoch", ")", "return", "batch" ]
Add the epoch number to the batch instances as a MetadataField.
[ "Add", "the", "epoch", "number", "to", "the", "batch", "instances", "as", "a", "MetadataField", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/iterators/data_iterator.py#L22-L28
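Usage is direct; a sketch, assuming the standard ``Instance``/``Batch`` import paths for this version of the library:

from allennlp.data import Instance
from allennlp.data.dataset import Batch

batch = Batch([Instance({}), Instance({})])
batch = add_epoch_number(batch, epoch=3)
# Each instance now carries the epoch as metadata the model can read back.
print(batch.instances[0].fields['epoch_num'].metadata)  # 3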
23,189
allenai/allennlp
allennlp/data/iterators/data_iterator.py
DataIterator._take_instances
def _take_instances(self, instances: Iterable[Instance], max_instances: Optional[int] = None) -> Iterator[Instance]: """ Take the next `max_instances` instances from the given dataset. If `max_instances` is `None`, then just take all instances from the dataset. If `max_instances` is not `None`, each call resumes where the previous one left off, and when you get to the end of the dataset you start again from the beginning. """ # If max_instances isn't specified, just iterate once over the whole dataset if max_instances is None: yield from iter(instances) else: # If we don't have a cursor for this dataset, create one. We use ``id()`` # for the key because ``instances`` could be a list, which can't be used as a key. key = id(instances) iterator = self._cursors.get(key, iter(instances)) while max_instances > 0: try: # If there are instances left on this iterator, # yield one and decrement max_instances. yield next(iterator) max_instances -= 1 except StopIteration: # None left, so start over again at the beginning of the dataset. iterator = iter(instances) # We may have a new iterator, so update the cursor. self._cursors[key] = iterator
python
def _take_instances(self, instances: Iterable[Instance], max_instances: Optional[int] = None) -> Iterator[Instance]: """ Take the next `max_instances` instances from the given dataset. If `max_instances` is `None`, then just take all instances from the dataset. If `max_instances` is not `None`, each call resumes where the previous one left off, and when you get to the end of the dataset you start again from the beginning. """ # If max_instances isn't specified, just iterate once over the whole dataset if max_instances is None: yield from iter(instances) else: # If we don't have a cursor for this dataset, create one. We use ``id()`` # for the key because ``instances`` could be a list, which can't be used as a key. key = id(instances) iterator = self._cursors.get(key, iter(instances)) while max_instances > 0: try: # If there are instances left on this iterator, # yield one and decrement max_instances. yield next(iterator) max_instances -= 1 except StopIteration: # None left, so start over again at the beginning of the dataset. iterator = iter(instances) # We may have a new iterator, so update the cursor. self._cursors[key] = iterator
[ "def", "_take_instances", "(", "self", ",", "instances", ":", "Iterable", "[", "Instance", "]", ",", "max_instances", ":", "Optional", "[", "int", "]", "=", "None", ")", "->", "Iterator", "[", "Instance", "]", ":", "# If max_instances isn't specified, just iterate once over the whole dataset", "if", "max_instances", "is", "None", ":", "yield", "from", "iter", "(", "instances", ")", "else", ":", "# If we don't have a cursor for this dataset, create one. We use ``id()``", "# for the key because ``instances`` could be a list, which can't be used as a key.", "key", "=", "id", "(", "instances", ")", "iterator", "=", "self", ".", "_cursors", ".", "get", "(", "key", ",", "iter", "(", "instances", ")", ")", "while", "max_instances", ">", "0", ":", "try", ":", "# If there are instances left on this iterator,", "# yield one and decrement max_instances.", "yield", "next", "(", "iterator", ")", "max_instances", "-=", "1", "except", "StopIteration", ":", "# None left, so start over again at the beginning of the dataset.", "iterator", "=", "iter", "(", "instances", ")", "# We may have a new iterator, so update the cursor.", "self", ".", "_cursors", "[", "key", "]", "=", "iterator" ]
Take the next `max_instances` instances from the given dataset. If `max_instances` is `None`, then just take all instances from the dataset. If `max_instances` is not `None`, each call resumes where the previous one left off, and when you get to the end of the dataset you start again from the beginning.
[ "Take", "the", "next", "max_instances", "instances", "from", "the", "given", "dataset", ".", "If", "max_instances", "is", "None", "then", "just", "take", "all", "instances", "from", "the", "dataset", ".", "If", "max_instances", "is", "not", "None", "each", "call", "resumes", "where", "the", "previous", "one", "left", "off", "and", "when", "you", "get", "to", "the", "end", "of", "the", "dataset", "you", "start", "again", "from", "the", "beginning", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/iterators/data_iterator.py#L163-L192
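The resume-and-wrap behaviour is easiest to see in isolation; a standalone sketch of the same cursor pattern (plain Python, AllenNLP types stripped out):

from typing import Dict, Iterable, Iterator

_cursors: Dict[int, Iterator[int]] = {}

def take(instances: Iterable[int], max_instances: int) -> Iterator[int]:
    key = id(instances)
    iterator = _cursors.get(key, iter(instances))
    while max_instances > 0:
        try:
            yield next(iterator)
            max_instances -= 1
        except StopIteration:
            # Exhausted: wrap around to the beginning of the dataset.
            iterator = iter(instances)
    _cursors[key] = iterator

data = [1, 2, 3]
print(list(take(data, 2)))  # [1, 2]
print(list(take(data, 2)))  # [3, 1] (resumes where the last call stopped)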
23,190
allenai/allennlp
allennlp/data/iterators/data_iterator.py
DataIterator._memory_sized_lists
def _memory_sized_lists(self, instances: Iterable[Instance]) -> Iterable[List[Instance]]: """ Breaks the dataset into "memory-sized" lists of instances, which it yields up one at a time until it gets through a full epoch. For example, if the dataset is already an in-memory list, and each epoch represents one pass through the dataset, it just yields back the dataset. Whereas if the dataset is lazily read from disk and we've specified to load 1000 instances at a time, then it yields lists of 1000 instances each. """ lazy = is_lazy(instances) # Get an iterator over the next epoch worth of instances. iterator = self._take_instances(instances, self._instances_per_epoch) # We have four different cases to deal with: # With lazy instances and no guidance about how many to load into memory, # we just load ``batch_size`` instances at a time: if lazy and self._max_instances_in_memory is None: yield from lazy_groups_of(iterator, self._batch_size) # If we specified max instances in memory, lazy or not, we just # load ``max_instances_in_memory`` instances at a time: elif self._max_instances_in_memory is not None: yield from lazy_groups_of(iterator, self._max_instances_in_memory) # If we have non-lazy instances, and we want all instances each epoch, # then we just yield back the list of instances: elif self._instances_per_epoch is None: yield ensure_list(instances) # In the final case we have non-lazy instances, we want a specific number # of instances each epoch, and we didn't specify how many instances to load # into memory. So we convert the whole iterator to a list: else: yield list(iterator)
python
def _memory_sized_lists(self, instances: Iterable[Instance]) -> Iterable[List[Instance]]: """ Breaks the dataset into "memory-sized" lists of instances, which it yields up one at a time until it gets through a full epoch. For example, if the dataset is already an in-memory list, and each epoch represents one pass through the dataset, it just yields back the dataset. Whereas if the dataset is lazily read from disk and we've specified to load 1000 instances at a time, then it yields lists of 1000 instances each. """ lazy = is_lazy(instances) # Get an iterator over the next epoch worth of instances. iterator = self._take_instances(instances, self._instances_per_epoch) # We have four different cases to deal with: # With lazy instances and no guidance about how many to load into memory, # we just load ``batch_size`` instances at a time: if lazy and self._max_instances_in_memory is None: yield from lazy_groups_of(iterator, self._batch_size) # If we specified max instances in memory, lazy or not, we just # load ``max_instances_in_memory`` instances at a time: elif self._max_instances_in_memory is not None: yield from lazy_groups_of(iterator, self._max_instances_in_memory) # If we have non-lazy instances, and we want all instances each epoch, # then we just yield back the list of instances: elif self._instances_per_epoch is None: yield ensure_list(instances) # In the final case we have non-lazy instances, we want a specific number # of instances each epoch, and we didn't specify how many instances to load # into memory. So we convert the whole iterator to a list: else: yield list(iterator)
[ "def", "_memory_sized_lists", "(", "self", ",", "instances", ":", "Iterable", "[", "Instance", "]", ")", "->", "Iterable", "[", "List", "[", "Instance", "]", "]", ":", "lazy", "=", "is_lazy", "(", "instances", ")", "# Get an iterator over the next epoch worth of instances.", "iterator", "=", "self", ".", "_take_instances", "(", "instances", ",", "self", ".", "_instances_per_epoch", ")", "# We have four different cases to deal with:", "# With lazy instances and no guidance about how many to load into memory,", "# we just load ``batch_size`` instances at a time:", "if", "lazy", "and", "self", ".", "_max_instances_in_memory", "is", "None", ":", "yield", "from", "lazy_groups_of", "(", "iterator", ",", "self", ".", "_batch_size", ")", "# If we specified max instances in memory, lazy or not, we just", "# load ``max_instances_in_memory`` instances at a time:", "elif", "self", ".", "_max_instances_in_memory", "is", "not", "None", ":", "yield", "from", "lazy_groups_of", "(", "iterator", ",", "self", ".", "_max_instances_in_memory", ")", "# If we have non-lazy instances, and we want all instances each epoch,", "# then we just yield back the list of instances:", "elif", "self", ".", "_instances_per_epoch", "is", "None", ":", "yield", "ensure_list", "(", "instances", ")", "# In the final case we have non-lazy instances, we want a specific number", "# of instances each epoch, and we didn't specify how to many instances to load", "# into memory. So we convert the whole iterator to a list:", "else", ":", "yield", "list", "(", "iterator", ")" ]
Breaks the dataset into "memory-sized" lists of instances, which it yields up one at a time until it gets through a full epoch. For example, if the dataset is already an in-memory list, and each epoch represents one pass through the dataset, it just yields back the dataset. Whereas if the dataset is lazily read from disk and we've specified to load 1000 instances at a time, then it yields lists of 1000 instances each.
[ "Breaks", "the", "dataset", "into", "memory", "-", "sized", "lists", "of", "instances", "which", "it", "yields", "up", "one", "at", "a", "time", "until", "it", "gets", "through", "a", "full", "epoch", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/iterators/data_iterator.py#L194-L228
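``lazy_groups_of`` does the chunking in the two lazy branches; a minimal stand-in for it (the real helper lives in allennlp.common.util) behaves like this:

from itertools import islice
from typing import Iterator, List, TypeVar

T = TypeVar('T')

def lazy_groups_of(iterator: Iterator[T], group_size: int) -> Iterator[List[T]]:
    # Pull group_size items at a time until the iterator runs dry.
    while True:
        group = list(islice(iterator, group_size))
        if not group:
            return
        yield group

print(list(lazy_groups_of(iter(range(7)), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]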
23,191
allenai/allennlp
allennlp/data/iterators/data_iterator.py
DataIterator._ensure_batch_is_sufficiently_small
def _ensure_batch_is_sufficiently_small( self, batch_instances: Iterable[Instance], excess: Deque[Instance]) -> List[List[Instance]]: """ If self._maximum_samples_per_batch is specified, then split the batch into smaller sub-batches if it exceeds the maximum size. Parameters ---------- batch_instances : ``Iterable[Instance]`` A candidate batch. excess : ``Deque[Instance]`` Instances that were not sufficient to form an entire batch previously. They will be used as part of the first sub-batch. This will be populated with instances from the end of batch_instances that do not consist of more than self._maximum_samples_per_batch samples or self._batch_size instances. It is the caller's responsibility to place these in a batch too, which may, of course, be done in part with subsequent calls to this method. WARNING: Mutated in place! """ if self._maximum_samples_per_batch is None: assert not excess return [list(batch_instances)] key, limit = self._maximum_samples_per_batch batches: List[List[Instance]] = [] batch: List[Instance] = [] padding_length = -1 excess.extend(batch_instances) while excess: instance = excess.popleft() if self.vocab is not None: # we index here to ensure that shape information is available, # as in some cases (with self._maximum_samples_per_batch) # we need access to shaping information before batches are constructed instance.index_fields(self.vocab) field_lengths = instance.get_padding_lengths() for _, lengths in field_lengths.items(): try: padding_length = max(padding_length, lengths[key]) except KeyError: pass proposed_batch_size = len(batch) + 1 # Adding the current instance would exceed the batch size or sample size. if proposed_batch_size >= self._batch_size or padding_length * proposed_batch_size > limit: # Output the already existing batch batches.append(batch) # Put the current instance back, reset state. excess.appendleft(instance) batch = [] padding_length = -1 else: batch.append(instance) # Keep the current batch as excess. excess.extend(batch) return batches
python
def _ensure_batch_is_sufficiently_small( self, batch_instances: Iterable[Instance], excess: Deque[Instance]) -> List[List[Instance]]: """ If self._maximum_samples_per_batch is specified, then split the batch into smaller sub-batches if it exceeds the maximum size. Parameters ---------- batch_instances : ``Iterable[Instance]`` A candidate batch. excess : ``Deque[Instance]`` Instances that were not sufficient to form an entire batch previously. They will be used as part of the first sub-batch. This will be populated with instances from the end of batch_instances that do not consist of more than self._maximum_samples_per_batch samples or self._batch_size instances. It is the caller's responsibility to place these in a batch too, which may, of course, be done in part with subsequent calls to this method. WARNING: Mutated in place! """ if self._maximum_samples_per_batch is None: assert not excess return [list(batch_instances)] key, limit = self._maximum_samples_per_batch batches: List[List[Instance]] = [] batch: List[Instance] = [] padding_length = -1 excess.extend(batch_instances) while excess: instance = excess.popleft() if self.vocab is not None: # we index here to ensure that shape information is available, # as in some cases (with self._maximum_samples_per_batch) # we need access to shaping information before batches are constructed instance.index_fields(self.vocab) field_lengths = instance.get_padding_lengths() for _, lengths in field_lengths.items(): try: padding_length = max(padding_length, lengths[key]) except KeyError: pass proposed_batch_size = len(batch) + 1 # Adding the current instance would exceed the batch size or sample size. if proposed_batch_size >= self._batch_size or padding_length * proposed_batch_size > limit: # Output the already existing batch batches.append(batch) # Put the current instance back, reset state. excess.appendleft(instance) batch = [] padding_length = -1 else: batch.append(instance) # Keep the current batch as excess. excess.extend(batch) return batches
[ "def", "_ensure_batch_is_sufficiently_small", "(", "self", ",", "batch_instances", ":", "Iterable", "[", "Instance", "]", ",", "excess", ":", "Deque", "[", "Instance", "]", ")", "->", "List", "[", "List", "[", "Instance", "]", "]", ":", "if", "self", ".", "_maximum_samples_per_batch", "is", "None", ":", "assert", "not", "excess", "return", "[", "list", "(", "batch_instances", ")", "]", "key", ",", "limit", "=", "self", ".", "_maximum_samples_per_batch", "batches", ":", "List", "[", "List", "[", "Instance", "]", "]", "=", "[", "]", "batch", ":", "List", "[", "Instance", "]", "=", "[", "]", "padding_length", "=", "-", "1", "excess", ".", "extend", "(", "batch_instances", ")", "while", "excess", ":", "instance", "=", "excess", ".", "popleft", "(", ")", "if", "self", ".", "vocab", "is", "not", "None", ":", "# we index here to ensure that shape information is available,", "# as in some cases (with self._maximum_samples_per_batch)", "# we need access to shaping information before batches are constructed)", "instance", ".", "index_fields", "(", "self", ".", "vocab", ")", "field_lengths", "=", "instance", ".", "get_padding_lengths", "(", ")", "for", "_", ",", "lengths", "in", "field_lengths", ".", "items", "(", ")", ":", "try", ":", "padding_length", "=", "max", "(", "padding_length", ",", "lengths", "[", "key", "]", ")", "except", "KeyError", ":", "pass", "proposed_batch_size", "=", "len", "(", "batch", ")", "+", "1", "# Adding the current instance would exceed the batch size or sample size.", "if", "proposed_batch_size", ">=", "self", ".", "_batch_size", "or", "padding_length", "*", "proposed_batch_size", ">", "limit", ":", "# Output the already existing batch", "batches", ".", "append", "(", "batch", ")", "# Put the current instance back, reset state.", "excess", ".", "appendleft", "(", "instance", ")", "batch", "=", "[", "]", "padding_length", "=", "-", "1", "else", ":", "batch", ".", "append", "(", "instance", ")", "# Keep the current batch as excess.", "excess", ".", "extend", "(", "batch", ")", "return", "batches" ]
If self._maximum_samples_per_batch is specified, then split the batch into smaller sub-batches if it exceeds the maximum size. Parameters ---------- batch_instances : ``Iterable[Instance]`` A candidate batch. excess : ``Deque[Instance]`` Instances that were not sufficient to form an entire batch previously. They will be used as part of the first sub-batch. This will be populated with instances from the end of batch_instances that do not consist of more than self._maximum_samples_per_batch samples or self._batch_size instances. It is the caller's responsibility to place these in a batch too, which may, of course, be done in part with subsequent calls to this method. WARNING: Mutated in place!
[ "If", "self", ".", "_maximum_samples_per_batch", "is", "specified", "then", "split", "the", "batch", "into", "smaller", "sub", "-", "batches", "if", "it", "exceeds", "the", "maximum", "size", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/iterators/data_iterator.py#L230-L297
23,192
allenai/allennlp
allennlp/data/iterators/data_iterator.py
DataIterator._create_batches
def _create_batches(self, instances: Iterable[Instance], shuffle: bool) -> Iterable[Batch]: """ This method should return one epoch worth of batches. """ raise NotImplementedError
python
def _create_batches(self, instances: Iterable[Instance], shuffle: bool) -> Iterable[Batch]: """ This method should return one epoch worth of batches. """ raise NotImplementedError
[ "def", "_create_batches", "(", "self", ",", "instances", ":", "Iterable", "[", "Instance", "]", ",", "shuffle", ":", "bool", ")", "->", "Iterable", "[", "Batch", "]", ":", "raise", "NotImplementedError" ]
This method should return one epoch worth of batches.
[ "This", "method", "should", "return", "one", "epoch", "worth", "of", "batches", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/iterators/data_iterator.py#L314-L318
23,193
allenai/allennlp
allennlp/modules/seq2seq_encoders/bidirectional_language_model_transformer.py
attention
def attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: torch.Tensor = None, dropout: Callable = None) -> Tuple[torch.Tensor, torch.Tensor]: """Compute 'Scaled Dot Product Attention'""" d_k = query.size(-1) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k) if mask is not None: scores = scores.masked_fill(mask == 0, -1e9) p_attn = F.softmax(scores, dim=-1) if dropout is not None: p_attn = dropout(p_attn) return torch.matmul(p_attn, value), p_attn
python
def attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: torch.Tensor = None, dropout: Callable = None) -> Tuple[torch.Tensor, torch.Tensor]: """Compute 'Scaled Dot Product Attention'""" d_k = query.size(-1) scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k) if mask is not None: scores = scores.masked_fill(mask == 0, -1e9) p_attn = F.softmax(scores, dim=-1) if dropout is not None: p_attn = dropout(p_attn) return torch.matmul(p_attn, value), p_attn
[ "def", "attention", "(", "query", ":", "torch", ".", "Tensor", ",", "key", ":", "torch", ".", "Tensor", ",", "value", ":", "torch", ".", "Tensor", ",", "mask", ":", "torch", ".", "Tensor", "=", "None", ",", "dropout", ":", "Callable", "=", "None", ")", "->", "Tuple", "[", "torch", ".", "Tensor", ",", "torch", ".", "Tensor", "]", ":", "d_k", "=", "query", ".", "size", "(", "-", "1", ")", "scores", "=", "torch", ".", "matmul", "(", "query", ",", "key", ".", "transpose", "(", "-", "2", ",", "-", "1", ")", ")", "/", "math", ".", "sqrt", "(", "d_k", ")", "if", "mask", "is", "not", "None", ":", "scores", "=", "scores", ".", "masked_fill", "(", "mask", "==", "0", ",", "-", "1e9", ")", "p_attn", "=", "F", ".", "softmax", "(", "scores", ",", "dim", "=", "-", "1", ")", "if", "dropout", "is", "not", "None", ":", "p_attn", "=", "dropout", "(", "p_attn", ")", "return", "torch", ".", "matmul", "(", "p_attn", ",", "value", ")", ",", "p_attn" ]
Compute 'Scaled Dot Product Attention'
[ "Compute", "Scaled", "Dot", "Product", "Attention" ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/seq2seq_encoders/bidirectional_language_model_transformer.py#L24-L37
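A shape check, assuming the ``attention`` function above is importable, the usual (batch, heads, seq_len, d_k) layout, and a broadcastable mask where 1 means "may attend":

import torch

batch, heads, seq_len, d_k = 2, 4, 7, 16
q = torch.randn(batch, heads, seq_len, d_k)
k = torch.randn(batch, heads, seq_len, d_k)
v = torch.randn(batch, heads, seq_len, d_k)
mask = torch.ones(batch, 1, seq_len, seq_len)  # broadcasts over heads

output, p_attn = attention(q, k, v, mask=mask)
print(output.shape)  # torch.Size([2, 4, 7, 16])
print(p_attn.shape)  # torch.Size([2, 4, 7, 7])
# Each row of p_attn is a softmax distribution over the keys.
print(torch.allclose(p_attn.sum(-1), torch.ones(batch, heads, seq_len)))  # True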
23,194
allenai/allennlp
allennlp/modules/seq2seq_encoders/bidirectional_language_model_transformer.py
subsequent_mask
def subsequent_mask(size: int, device: str = 'cpu') -> torch.Tensor: """Mask out subsequent positions.""" mask = torch.tril(torch.ones(size, size, device=device, dtype=torch.int32)).unsqueeze(0) return mask
python
def subsequent_mask(size: int, device: str = 'cpu') -> torch.Tensor: """Mask out subsequent positions.""" mask = torch.tril(torch.ones(size, size, device=device, dtype=torch.int32)).unsqueeze(0) return mask
[ "def", "subsequent_mask", "(", "size", ":", "int", ",", "device", ":", "str", "=", "'cpu'", ")", "->", "torch", ".", "Tensor", ":", "mask", "=", "torch", ".", "tril", "(", "torch", ".", "ones", "(", "size", ",", "size", ",", "device", "=", "device", ",", "dtype", "=", "torch", ".", "int32", ")", ")", ".", "unsqueeze", "(", "0", ")", "return", "mask" ]
Mask out subsequent positions.
[ "Mask", "out", "subsequent", "positions", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/seq2seq_encoders/bidirectional_language_model_transformer.py#L40-L43
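For size=4 the mask is lower triangular, so position i can only attend to positions 0..i:

import torch

print(subsequent_mask(4).squeeze(0))
# tensor([[1, 0, 0, 0],
#         [1, 1, 0, 0],
#         [1, 1, 1, 0],
#         [1, 1, 1, 1]], dtype=torch.int32)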
23,195
allenai/allennlp
allennlp/modules/seq2seq_encoders/bidirectional_language_model_transformer.py
SublayerConnection.forward
def forward(self, x: torch.Tensor, sublayer: Callable[[torch.Tensor], torch.Tensor]) -> torch.Tensor: """Apply residual connection to any sublayer with the same size.""" return x + self.dropout(sublayer(self.norm(x)))
python
def forward(self, x: torch.Tensor, sublayer: Callable[[torch.Tensor], torch.Tensor]) -> torch.Tensor: """Apply residual connection to any sublayer with the same size.""" return x + self.dropout(sublayer(self.norm(x)))
[ "def", "forward", "(", "self", ",", "x", ":", "torch", ".", "Tensor", ",", "sublayer", ":", "Callable", "[", "[", "torch", ".", "Tensor", "]", ",", "torch", ".", "Tensor", "]", ")", "->", "torch", ".", "Tensor", ":", "return", "x", "+", "self", ".", "dropout", "(", "sublayer", "(", "self", ".", "norm", "(", "x", ")", ")", ")" ]
Apply residual connection to any sublayer with the same size.
[ "Apply", "residual", "connection", "to", "any", "sublayer", "with", "the", "same", "size", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/seq2seq_encoders/bidirectional_language_model_transformer.py#L114-L116
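The record only shows ``forward``; below is a plausible reconstruction of the whole module (the real class may use a custom norm implementation, so treat the constructor as an assumption):

import torch
from torch import nn
from typing import Callable

class SublayerConnection(nn.Module):
    # Constructor reconstructed by assumption: LayerNorm + Dropout,
    # matching what the forward pass requires.
    def __init__(self, size: int, dropout: float) -> None:
        super().__init__()
        self.norm = nn.LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x: torch.Tensor,
                sublayer: Callable[[torch.Tensor], torch.Tensor]) -> torch.Tensor:
        # Pre-norm residual: normalize, run the sublayer, drop out, add back.
        return x + self.dropout(sublayer(self.norm(x)))

layer = SublayerConnection(size=16, dropout=0.1)
x = torch.randn(2, 5, 16)
out = layer(x, sublayer=lambda h: h * 2.0)  # any shape-preserving callable
print(out.shape)  # torch.Size([2, 5, 16])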
23,196
allenai/allennlp
allennlp/nn/initializers.py
block_orthogonal
def block_orthogonal(tensor: torch.Tensor, split_sizes: List[int], gain: float = 1.0) -> None: """ An initializer which allows initializing model parameters in "blocks". This is helpful in the case of recurrent models which use multiple gates applied to linear projections, which can be computed efficiently if they are concatenated together. However, they are separate parameters which should be initialized independently. Parameters ---------- tensor : ``torch.Tensor``, required. A tensor to initialize. split_sizes : List[int], required. A list of length ``tensor.ndim()`` specifying the size of the blocks along that particular dimension. E.g. ``[10, 20]`` would result in the tensor being split into chunks of size 10 along the first dimension and 20 along the second. gain : float, optional (default = 1.0) The gain (scaling) applied to the orthogonal initialization. """ data = tensor.data sizes = list(tensor.size()) if any([a % b != 0 for a, b in zip(sizes, split_sizes)]): raise ConfigurationError("tensor dimensions must be divisible by their respective " "split_sizes. Found size: {} and split_sizes: {}".format(sizes, split_sizes)) indexes = [list(range(0, max_size, split)) for max_size, split in zip(sizes, split_sizes)] # Iterate over all possible blocks within the tensor. for block_start_indices in itertools.product(*indexes): # A list of tuples containing the index to start at for this block # and the appropriate step size (i.e. split_sizes[i] for dimension i). index_and_step_tuples = zip(block_start_indices, split_sizes) # This is a tuple of slices corresponding to: # tensor[index: index + step_size, ...]. This is # required because we could have an arbitrary number # of dimensions. The actual slices we need are the # start_index: start_index + step for each dimension in the tensor. block_slice = tuple([slice(start_index, start_index + step) for start_index, step in index_and_step_tuples]) data[block_slice] = torch.nn.init.orthogonal_(tensor[block_slice].contiguous(), gain=gain)
python
def block_orthogonal(tensor: torch.Tensor, split_sizes: List[int], gain: float = 1.0) -> None: """ An initializer which allows initializing model parameters in "blocks". This is helpful in the case of recurrent models which use multiple gates applied to linear projections, which can be computed efficiently if they are concatenated together. However, they are separate parameters which should be initialized independently. Parameters ---------- tensor : ``torch.Tensor``, required. A tensor to initialize. split_sizes : List[int], required. A list of length ``tensor.ndim()`` specifying the size of the blocks along that particular dimension. E.g. ``[10, 20]`` would result in the tensor being split into chunks of size 10 along the first dimension and 20 along the second. gain : float, optional (default = 1.0) The gain (scaling) applied to the orthogonal initialization. """ data = tensor.data sizes = list(tensor.size()) if any([a % b != 0 for a, b in zip(sizes, split_sizes)]): raise ConfigurationError("tensor dimensions must be divisible by their respective " "split_sizes. Found size: {} and split_sizes: {}".format(sizes, split_sizes)) indexes = [list(range(0, max_size, split)) for max_size, split in zip(sizes, split_sizes)] # Iterate over all possible blocks within the tensor. for block_start_indices in itertools.product(*indexes): # A list of tuples containing the index to start at for this block # and the appropriate step size (i.e. split_sizes[i] for dimension i). index_and_step_tuples = zip(block_start_indices, split_sizes) # This is a tuple of slices corresponding to: # tensor[index: index + step_size, ...]. This is # required because we could have an arbitrary number # of dimensions. The actual slices we need are the # start_index: start_index + step for each dimension in the tensor. block_slice = tuple([slice(start_index, start_index + step) for start_index, step in index_and_step_tuples]) data[block_slice] = torch.nn.init.orthogonal_(tensor[block_slice].contiguous(), gain=gain)
[ "def", "block_orthogonal", "(", "tensor", ":", "torch", ".", "Tensor", ",", "split_sizes", ":", "List", "[", "int", "]", ",", "gain", ":", "float", "=", "1.0", ")", "->", "None", ":", "data", "=", "tensor", ".", "data", "sizes", "=", "list", "(", "tensor", ".", "size", "(", ")", ")", "if", "any", "(", "[", "a", "%", "b", "!=", "0", "for", "a", ",", "b", "in", "zip", "(", "sizes", ",", "split_sizes", ")", "]", ")", ":", "raise", "ConfigurationError", "(", "\"tensor dimensions must be divisible by their respective \"", "\"split_sizes. Found size: {} and split_sizes: {}\"", ".", "format", "(", "sizes", ",", "split_sizes", ")", ")", "indexes", "=", "[", "list", "(", "range", "(", "0", ",", "max_size", ",", "split", ")", ")", "for", "max_size", ",", "split", "in", "zip", "(", "sizes", ",", "split_sizes", ")", "]", "# Iterate over all possible blocks within the tensor.", "for", "block_start_indices", "in", "itertools", ".", "product", "(", "*", "indexes", ")", ":", "# A list of tuples containing the index to start at for this block", "# and the appropriate step size (i.e split_size[i] for dimension i).", "index_and_step_tuples", "=", "zip", "(", "block_start_indices", ",", "split_sizes", ")", "# This is a tuple of slices corresponding to:", "# tensor[index: index + step_size, ...]. This is", "# required because we could have an arbitrary number", "# of dimensions. The actual slices we need are the", "# start_index: start_index + step for each dimension in the tensor.", "block_slice", "=", "tuple", "(", "[", "slice", "(", "start_index", ",", "start_index", "+", "step", ")", "for", "start_index", ",", "step", "in", "index_and_step_tuples", "]", ")", "data", "[", "block_slice", "]", "=", "torch", ".", "nn", ".", "init", ".", "orthogonal_", "(", "tensor", "[", "block_slice", "]", ".", "contiguous", "(", ")", ",", "gain", "=", "gain", ")" ]
An initializer which allows initializing model parameters in "blocks". This is helpful in the case of recurrent models which use multiple gates applied to linear projections, which can be computed efficiently if they are concatenated together. However, they are separate parameters which should be initialized independently. Parameters ---------- tensor : ``torch.Tensor``, required. A tensor to initialize. split_sizes : List[int], required. A list of length ``tensor.ndim()`` specifying the size of the blocks along that particular dimension. E.g. ``[10, 20]`` would result in the tensor being split into chunks of size 10 along the first dimension and 20 along the second. gain : float, optional (default = 1.0) The gain (scaling) applied to the orthogonal initialization.
[ "An", "initializer", "which", "allows", "initializing", "model", "parameters", "in", "blocks", ".", "This", "is", "helpful", "in", "the", "case", "of", "recurrent", "models", "which", "use", "multiple", "gates", "applied", "to", "linear", "projections", "which", "can", "be", "computed", "efficiently", "if", "they", "are", "concatenated", "together", ".", "However", "they", "are", "separate", "parameters", "which", "should", "be", "initialized", "independently", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/nn/initializers.py#L98-L138
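A usage sketch on an LSTM-style stacked weight; each 100x50 gate block gets its own independent orthogonal initialization:

import torch

weight = torch.empty(400, 50)  # four gate blocks stacked along dim 0
block_orthogonal(weight, split_sizes=[100, 50])

block = weight[:100, :]  # the first gate's block
# With more rows than columns, each block has orthonormal columns
# (up to floating point error).
print(torch.allclose(block.t() @ block, torch.eye(50), atol=1e-5))  # True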
23,197
allenai/allennlp
allennlp/nn/initializers.py
lstm_hidden_bias
def lstm_hidden_bias(tensor: torch.Tensor) -> None: """ Initialize the biases of the forget gate to 1, and all other gates to 0, following Jozefowicz et al., An Empirical Exploration of Recurrent Network Architectures """ # gates are (b_hi|b_hf|b_hg|b_ho) of shape (4*hidden_size) tensor.data.zero_() hidden_size = tensor.shape[0] // 4 tensor.data[hidden_size:(2 * hidden_size)] = 1.0
python
def lstm_hidden_bias(tensor: torch.Tensor) -> None: """ Initialize the biases of the forget gate to 1, and all other gates to 0, following Jozefowicz et al., An Empirical Exploration of Recurrent Network Architectures """ # gates are (b_hi|b_hf|b_hg|b_ho) of shape (4*hidden_size) tensor.data.zero_() hidden_size = tensor.shape[0] // 4 tensor.data[hidden_size:(2 * hidden_size)] = 1.0
[ "def", "lstm_hidden_bias", "(", "tensor", ":", "torch", ".", "Tensor", ")", "->", "None", ":", "# gates are (b_hi|b_hf|b_hg|b_ho) of shape (4*hidden_size)", "tensor", ".", "data", ".", "zero_", "(", ")", "hidden_size", "=", "tensor", ".", "shape", "[", "0", "]", "//", "4", "tensor", ".", "data", "[", "hidden_size", ":", "(", "2", "*", "hidden_size", ")", "]", "=", "1.0" ]
Initialize the biases of the forget gate to 1, and all other gates to 0, following Jozefowicz et al., An Empirical Exploration of Recurrent Network Architectures
[ "Initialize", "the", "biases", "of", "the", "forget", "gate", "to", "1", "and", "all", "other", "gates", "to", "0", "following", "Jozefowicz", "et", "al", ".", "An", "Empirical", "Exploration", "of", "Recurrent", "Network", "Architectures" ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/nn/initializers.py#L144-L152
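A quick check that only the forget-gate quarter of the bias vector is set:

import torch

hidden_size = 8
bias = torch.randn(4 * hidden_size)  # (b_hi | b_hf | b_hg | b_ho)
lstm_hidden_bias(bias)
print(bias[hidden_size:2 * hidden_size].tolist())  # all 1.0 (forget gate)
print(bias[:hidden_size].abs().sum().item())       # 0.0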
23,198
allenai/allennlp
allennlp/semparse/contexts/table_question_knowledge_graph.py
TableQuestionKnowledgeGraph._should_split_column_cells
def _should_split_column_cells(cls, column_cells: List[str]) -> bool: """ Returns true if there is any cell in this column that can be split. """ return any(cls._should_split_cell(cell_text) for cell_text in column_cells)
python
def _should_split_column_cells(cls, column_cells: List[str]) -> bool: """ Returns true if there is any cell in this column that can be split. """ return any(cls._should_split_cell(cell_text) for cell_text in column_cells)
[ "def", "_should_split_column_cells", "(", "cls", ",", "column_cells", ":", "List", "[", "str", "]", ")", "->", "bool", ":", "return", "any", "(", "cls", ".", "_should_split_cell", "(", "cell_text", ")", "for", "cell_text", "in", "column_cells", ")" ]
Returns true if there is any cell in this column that can be split.
[ "Returns", "true", "if", "there", "is", "any", "cell", "in", "this", "column", "that", "can", "be", "split", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/contexts/table_question_knowledge_graph.py#L329-L333
23,199
allenai/allennlp
allennlp/semparse/contexts/table_question_knowledge_graph.py
TableQuestionKnowledgeGraph._should_split_cell
def _should_split_cell(cls, cell_text: str) -> bool: """ Checks whether the cell should be split. We're just doing the same thing that SEMPRE did here. """ if ', ' in cell_text or '\n' in cell_text or '/' in cell_text: return True return False
python
def _should_split_cell(cls, cell_text: str) -> bool: """ Checks whether the cell should be split. We're just doing the same thing that SEMPRE did here. """ if ', ' in cell_text or '\n' in cell_text or '/' in cell_text: return True return False
[ "def", "_should_split_cell", "(", "cls", ",", "cell_text", ":", "str", ")", "->", "bool", ":", "if", "', '", "in", "cell_text", "or", "'\\n'", "in", "cell_text", "or", "'/'", "in", "cell_text", ":", "return", "True", "return", "False" ]
Checks whether the cell should be split. We're just doing the same thing that SEMPRE did here.
[ "Checks", "whether", "the", "cell", "should", "be", "split", ".", "We", "re", "just", "doing", "the", "same", "thing", "that", "SEMPRE", "did", "here", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/contexts/table_question_knowledge_graph.py#L336-L343