Dataset schema (column, dtype, min/max):

column             dtype    min    max
id                 int32    0      252k
repo               string   7      55 (chars)
path               string   4      127 (chars)
func_name          string   1      88 (chars)
original_string    string   75     19.8k (chars)
language           string   1 distinct value ("python")
code               string   75     19.8k (chars)
code_tokens        list
docstring          string   3      17.3k (chars)
docstring_tokens   list
sha                string   40     40 (chars)
url                string   87     242 (chars)
23,000
allenai/allennlp
allennlp/semparse/domain_languages/wikitables_language.py
WikiTablesLanguage.first
[original_string: identical to the code field below]
python
def first(self, rows: List[Row]) -> List[Row]:
    """
    Takes an expression that evaluates to a list of rows, and returns the first
    one in that list.
    """
    if not rows:
        logger.warning("Trying to get first row from an empty list")
        return []
    return [rows[0]]
[ "def", "first", "(", "self", ",", "rows", ":", "List", "[", "Row", "]", ")", "->", "List", "[", "Row", "]", ":", "if", "not", "rows", ":", "logger", ".", "warning", "(", "\"Trying to get first row from an empty list\"", ")", "return", "[", "]", "return", "[", "rows", "[", "0", "]", "]" ]
Takes an expression that evaluates to a list of rows, and returns the first one in that list.
[ "Takes", "an", "expression", "that", "evaluates", "to", "a", "list", "of", "rows", "and", "returns", "the", "first", "one", "in", "that", "list", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/wikitables_language.py#L410-L418
23,001
allenai/allennlp
allennlp/semparse/domain_languages/wikitables_language.py
WikiTablesLanguage.last
[original_string: identical to the code field below]
python
def last(self, rows: List[Row]) -> List[Row]:
    """
    Takes an expression that evaluates to a list of rows, and returns the last
    one in that list.
    """
    if not rows:
        logger.warning("Trying to get last row from an empty list")
        return []
    return [rows[-1]]
[ "def", "last", "(", "self", ",", "rows", ":", "List", "[", "Row", "]", ")", "->", "List", "[", "Row", "]", ":", "if", "not", "rows", ":", "logger", ".", "warning", "(", "\"Trying to get last row from an empty list\"", ")", "return", "[", "]", "return", "[", "rows", "[", "-", "1", "]", "]" ]
Takes an expression that evaluates to a list of rows, and returns the last one in that list.
[ "Takes", "an", "expression", "that", "evaluates", "to", "a", "list", "of", "rows", "and", "returns", "the", "last", "one", "in", "that", "list", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/wikitables_language.py#L421-L429
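Aside: `first` and `last` reduce to plain list indexing with an empty-list guard. A minimal standalone sketch of the same behavior, with dicts standing in for the `Row` class (everything below is illustrative, not taken from the repository):

from typing import Dict, List

Row = Dict[str, str]  # stand-in for allennlp's Row

def first(rows: List[Row]) -> List[Row]:
    # An empty input yields an empty result instead of an IndexError.
    return [rows[0]] if rows else []

def last(rows: List[Row]) -> List[Row]:
    return [rows[-1]] if rows else []

table = [{"name": "a"}, {"name": "b"}, {"name": "c"}]
assert first(table) == [{"name": "a"}]
assert last(table) == [{"name": "c"}]
assert first([]) == []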
23,002
allenai/allennlp
allennlp/semparse/domain_languages/wikitables_language.py
WikiTablesLanguage.previous
[original_string: identical to the code field below]
python
def previous(self, rows: List[Row]) -> List[Row]:
    """
    Takes an expression that evaluates to a single row, and returns the row that occurs
    before the input row in the original set of rows. If the input row happens to be the
    top row, we will return an empty list.
    """
    if not rows:
        return []
    input_row_index = self._get_row_index(rows[0])
    if input_row_index > 0:
        return [self.table_data[input_row_index - 1]]
    return []
[ "def", "previous", "(", "self", ",", "rows", ":", "List", "[", "Row", "]", ")", "->", "List", "[", "Row", "]", ":", "if", "not", "rows", ":", "return", "[", "]", "input_row_index", "=", "self", ".", "_get_row_index", "(", "rows", "[", "0", "]", ")", "if", "input_row_index", ">", "0", ":", "return", "[", "self", ".", "table_data", "[", "input_row_index", "-", "1", "]", "]", "return", "[", "]" ]
Takes an expression that evaluates to a single row, and returns the row that occurs before the input row in the original set of rows. If the input row happens to be the top row, we will return an empty list.
[ "Takes", "an", "expression", "that", "evaluates", "to", "a", "single", "row", "and", "returns", "the", "row", "that", "occurs", "before", "the", "input", "row", "in", "the", "original", "set", "of", "rows", ".", "If", "the", "input", "row", "happens", "to", "be", "the", "top", "row", "we", "will", "return", "an", "empty", "list", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/wikitables_language.py#L432-L443
23,003
allenai/allennlp
allennlp/semparse/domain_languages/wikitables_language.py
WikiTablesLanguage.next
[original_string: identical to the code field below]
python
def next(self, rows: List[Row]) -> List[Row]:
    """
    Takes an expression that evaluates to a single row, and returns the row that occurs
    after the input row in the original set of rows. If the input row happens to be the
    last row, we will return an empty list.
    """
    if not rows:
        return []
    input_row_index = self._get_row_index(rows[0])
    if input_row_index < len(self.table_data) - 1 and input_row_index != -1:
        return [self.table_data[input_row_index + 1]]
    return []
[ "def", "next", "(", "self", ",", "rows", ":", "List", "[", "Row", "]", ")", "->", "List", "[", "Row", "]", ":", "if", "not", "rows", ":", "return", "[", "]", "input_row_index", "=", "self", ".", "_get_row_index", "(", "rows", "[", "0", "]", ")", "if", "input_row_index", "<", "len", "(", "self", ".", "table_data", ")", "-", "1", "and", "input_row_index", "!=", "-", "1", ":", "return", "[", "self", ".", "table_data", "[", "input_row_index", "+", "1", "]", "]", "return", "[", "]" ]
Takes an expression that evaluates to a single row, and returns the row that occurs after the input row in the original set of rows. If the input row happens to be the last row, we will return an empty list.
[ "Takes", "an", "expression", "that", "evaluates", "to", "a", "single", "row", "and", "returns", "the", "row", "that", "occurs", "after", "the", "input", "row", "in", "the", "original", "set", "of", "rows", ".", "If", "the", "input", "row", "happens", "to", "be", "the", "last", "row", "we", "will", "return", "an", "empty", "list", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/wikitables_language.py#L446-L457
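Aside: `previous` and `next` both locate the input row in the full table and step one index backward or forward. A sketch of the same index arithmetic on a plain list; `table_data` mirrors the attribute name in the snippets, the rest is illustrative:

from typing import Dict, List

Row = Dict[str, str]
table_data: List[Row] = [{"year": "2001"}, {"year": "2002"}, {"year": "2003"}]

def previous(rows: List[Row]) -> List[Row]:
    if not rows:
        return []
    index = table_data.index(rows[0])  # stands in for self._get_row_index
    return [table_data[index - 1]] if index > 0 else []

def next_row(rows: List[Row]) -> List[Row]:
    if not rows:
        return []
    # The original also guards `input_row_index != -1` (row missing from the table);
    # list.index raises ValueError in that case instead.
    index = table_data.index(rows[0])
    return [table_data[index + 1]] if index < len(table_data) - 1 else []

assert previous([table_data[1]]) == [{"year": "2001"}]
assert next_row([table_data[2]]) == []  # the last row has no successor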
23,004
allenai/allennlp
allennlp/semparse/domain_languages/wikitables_language.py
WikiTablesLanguage.average
[original_string: identical to the code field below]
python
def average(self, rows: List[Row], column: NumberColumn) -> Number:
    """
    Takes a list of rows and a column and returns the mean of the values under
    that column in those rows.
    """
    cell_values = [row.values[column.name] for row in rows]
    if not cell_values:
        return 0.0  # type: ignore
    return sum(cell_values) / len(cell_values)
[ "def", "average", "(", "self", ",", "rows", ":", "List", "[", "Row", "]", ",", "column", ":", "NumberColumn", ")", "->", "Number", ":", "cell_values", "=", "[", "row", ".", "values", "[", "column", ".", "name", "]", "for", "row", "in", "rows", "]", "if", "not", "cell_values", ":", "return", "0.0", "# type: ignore", "return", "sum", "(", "cell_values", ")", "/", "len", "(", "cell_values", ")" ]
Takes a list of rows and a column and returns the mean of the values under that column in those rows.
[ "Takes", "a", "list", "of", "rows", "and", "a", "column", "and", "returns", "the", "mean", "of", "the", "values", "under", "that", "column", "in", "those", "rows", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/wikitables_language.py#L737-L745
23,005
allenai/allennlp
allennlp/semparse/domain_languages/wikitables_language.py
WikiTablesLanguage.diff
[original_string: identical to the code field below]
python
def diff(self, first_row: List[Row], second_row: List[Row], column: NumberColumn) -> Number:
    """
    Takes two rows and a number column and returns the difference between the
    values under that column in those two rows.
    """
    if not first_row or not second_row:
        return 0.0  # type: ignore
    first_value = first_row[0].values[column.name]
    second_value = second_row[0].values[column.name]
    if isinstance(first_value, float) and isinstance(second_value, float):
        return first_value - second_value  # type: ignore
    else:
        raise ExecutionError(f"Invalid column for diff: {column.name}")
[ "def", "diff", "(", "self", ",", "first_row", ":", "List", "[", "Row", "]", ",", "second_row", ":", "List", "[", "Row", "]", ",", "column", ":", "NumberColumn", ")", "->", "Number", ":", "if", "not", "first_row", "or", "not", "second_row", ":", "return", "0.0", "# type: ignore", "first_value", "=", "first_row", "[", "0", "]", ".", "values", "[", "column", ".", "name", "]", "second_value", "=", "second_row", "[", "0", "]", ".", "values", "[", "column", ".", "name", "]", "if", "isinstance", "(", "first_value", ",", "float", ")", "and", "isinstance", "(", "second_value", ",", "float", ")", ":", "return", "first_value", "-", "second_value", "# type: ignore", "else", ":", "raise", "ExecutionError", "(", "f\"Invalid column for diff: {column.name}\"", ")" ]
Takes two rows and a number column and returns the difference between the values under that column in those two rows.
[ "Takes", "a", "two", "rows", "and", "a", "number", "column", "and", "returns", "the", "difference", "between", "the", "values", "under", "that", "column", "in", "those", "two", "rows", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/wikitables_language.py#L747-L759
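Aside: both aggregations are ordinary arithmetic once the cell values are pulled out of the rows; a worked example with made-up scores:

scores = [88.0, 92.0, 95.0]           # values under a number column, one per row
mean = sum(scores) / len(scores)      # average(...) -> 91.666...
delta = scores[0] - scores[-1]        # diff(first row, last row, ...) -> -7.0
print(mean, delta)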
23,006
allenai/allennlp
allennlp/semparse/worlds/world.py
World.is_terminal
[original_string: identical to the code field below]
python
def is_terminal(self, symbol: str) -> bool:
    """
    This function will be called on nodes of a logical form tree, which are either
    non-terminal symbols that can be expanded or terminal symbols that must be leaf
    nodes. Returns ``True`` if the given symbol is a terminal symbol.
    """
    # We special-case 'lambda' here because it behaves weirdly in action sequences.
    return (symbol in self.global_name_mapping or
            symbol in self.local_name_mapping or
            'lambda' in symbol)
[ "def", "is_terminal", "(", "self", ",", "symbol", ":", "str", ")", "->", "bool", ":", "# We special-case 'lambda' here because it behaves weirdly in action sequences.", "return", "(", "symbol", "in", "self", ".", "global_name_mapping", "or", "symbol", "in", "self", ".", "local_name_mapping", "or", "'lambda'", "in", "symbol", ")" ]
This function will be called on nodes of a logical form tree, which are either non-terminal symbols that can be expanded or terminal symbols that must be leaf nodes. Returns ``True`` if the given symbol is a terminal symbol.
[ "This", "function", "will", "be", "called", "on", "nodes", "of", "a", "logical", "form", "tree", "which", "are", "either", "non", "-", "terminal", "symbols", "that", "can", "be", "expanded", "or", "terminal", "symbols", "that", "must", "be", "leaf", "nodes", ".", "Returns", "True", "if", "the", "given", "symbol", "is", "a", "terminal", "symbol", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/worlds/world.py#L76-L85
23,007
allenai/allennlp
allennlp/semparse/worlds/world.py
World.get_multi_match_mapping
[original_string: identical to the code field below]
python
def get_multi_match_mapping(self) -> Dict[Type, List[Type]]:
    """
    Returns a mapping from each `MultiMatchNamedBasicType` to all the `NamedBasicTypes`
    that it matches.
    """
    if self._multi_match_mapping is None:
        self._multi_match_mapping = {}
        basic_types = self.get_basic_types()
        for basic_type in basic_types:
            if isinstance(basic_type, types.MultiMatchNamedBasicType):
                matched_types: List[str] = []
                # We need to check if each type in the `types_to_match` field for the given
                # MultiMatchNamedBasicType is itself in the set of basic types allowed in this
                # world, and add it to the mapping only if it is. Some basic types that the
                # multi match type can match with may be disallowed in the world due to the
                # instance-specific context.
                for type_ in basic_type.types_to_match:
                    if type_ in basic_types:
                        matched_types.append(type_)
                self._multi_match_mapping[basic_type] = matched_types
    return self._multi_match_mapping
[ "def", "get_multi_match_mapping", "(", "self", ")", "->", "Dict", "[", "Type", ",", "List", "[", "Type", "]", "]", ":", "if", "self", ".", "_multi_match_mapping", "is", "None", ":", "self", ".", "_multi_match_mapping", "=", "{", "}", "basic_types", "=", "self", ".", "get_basic_types", "(", ")", "for", "basic_type", "in", "basic_types", ":", "if", "isinstance", "(", "basic_type", ",", "types", ".", "MultiMatchNamedBasicType", ")", ":", "matched_types", ":", "List", "[", "str", "]", "=", "[", "]", "# We need to check if each type in the `types_to_match` field for the given", "# MultiMatchNamedBasic type is itself in the set of basic types allowed in this", "# world, and add it to the mapping only if it is. Some basic types that the", "# multi match type can match with may be diallowed in the world due to the", "# instance-specific context.", "for", "type_", "in", "basic_type", ".", "types_to_match", ":", "if", "type_", "in", "basic_types", ":", "matched_types", ".", "append", "(", "type_", ")", "self", ".", "_multi_match_mapping", "[", "basic_type", "]", "=", "matched_types", "return", "self", ".", "_multi_match_mapping" ]
Returns a mapping from each `MultiMatchNamedBasicType` to all the `NamedBasicTypes` that it matches.
[ "Returns", "a", "mapping", "from", "each", "MultiMatchNamedBasicType", "to", "all", "the", "NamedBasicTypes", "that", "it", "matches", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/worlds/world.py#L184-L204
23,008
allenai/allennlp
allennlp/semparse/worlds/world.py
World.parse_logical_form
[original_string: identical to the code field below]
python
def parse_logical_form(self,
                       logical_form: str,
                       remove_var_function: bool = True) -> Expression:
    """
    Takes a logical form as a string, maps its tokens using the mapping and returns
    a parsed expression.

    Parameters
    ----------
    logical_form : ``str``
        Logical form to parse
    remove_var_function : ``bool`` (optional)
        ``var`` is a special function that some languages use within lambda functions to
        indicate the usage of a variable. If your language uses it, and you do not want to
        include it in the parsed expression, set this flag. You may want to do this if you
        are generating an action sequence from this parsed expression, because it is easier
        to let the decoder not produce this function due to the way constrained decoding is
        currently implemented.
    """
    if not logical_form.startswith("("):
        logical_form = f"({logical_form})"
    if remove_var_function:
        # Replace "(x)" with "x"
        logical_form = re.sub(r'\(([x-z])\)', r'\1', logical_form)
        # Replace "(var x)" with "(x)"
        logical_form = re.sub(r'\(var ([x-z])\)', r'(\1)', logical_form)
    parsed_lisp = semparse_util.lisp_to_nested_expression(logical_form)
    translated_string = self._process_nested_expression(parsed_lisp)
    type_signature = self.local_type_signatures.copy()
    type_signature.update(self.global_type_signatures)
    return self._logic_parser.parse(translated_string, signature=type_signature)
[ "def", "parse_logical_form", "(", "self", ",", "logical_form", ":", "str", ",", "remove_var_function", ":", "bool", "=", "True", ")", "->", "Expression", ":", "if", "not", "logical_form", ".", "startswith", "(", "\"(\"", ")", ":", "logical_form", "=", "f\"({logical_form})\"", "if", "remove_var_function", ":", "# Replace \"(x)\" with \"x\"", "logical_form", "=", "re", ".", "sub", "(", "r'\\(([x-z])\\)'", ",", "r'\\1'", ",", "logical_form", ")", "# Replace \"(var x)\" with \"(x)\"", "logical_form", "=", "re", ".", "sub", "(", "r'\\(var ([x-z])\\)'", ",", "r'(\\1)'", ",", "logical_form", ")", "parsed_lisp", "=", "semparse_util", ".", "lisp_to_nested_expression", "(", "logical_form", ")", "translated_string", "=", "self", ".", "_process_nested_expression", "(", "parsed_lisp", ")", "type_signature", "=", "self", ".", "local_type_signatures", ".", "copy", "(", ")", "type_signature", ".", "update", "(", "self", ".", "global_type_signatures", ")", "return", "self", ".", "_logic_parser", ".", "parse", "(", "translated_string", ",", "signature", "=", "type_signature", ")" ]
Takes a logical form as a string, maps its tokens using the mapping and returns a parsed expression. Parameters ---------- logical_form : ``str`` Logical form to parse remove_var_function : ``bool`` (optional) ``var`` is a special function that some languages use within lambda functions to indicate the usage of a variable. If your language uses it, and you do not want to include it in the parsed expression, set this flag. You may want to do this if you are generating an action sequence from this parsed expression, because it is easier to let the decoder not produce this function due to the way constrained decoding is currently implemented.
[ "Takes", "a", "logical", "form", "as", "a", "string", "maps", "its", "tokens", "using", "the", "mapping", "and", "returns", "a", "parsed", "expression", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/worlds/world.py#L206-L235
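Aside: the two `re.sub` calls in `parse_logical_form` can be checked in isolation; the input logical form here is made up for illustration:

import re

logical_form = "(lambda x ((reverse fb:row.row.year) (var x)))"
# First replace bare "(x)" with "x" (a no-op on this input), then "(var x)" with "(x)".
logical_form = re.sub(r'\(([x-z])\)', r'\1', logical_form)
logical_form = re.sub(r'\(var ([x-z])\)', r'(\1)', logical_form)
print(logical_form)  # (lambda x ((reverse fb:row.row.year) (x)))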
23,009
allenai/allennlp
allennlp/semparse/worlds/world.py
World.get_logical_form
[original_string: identical to the code field below]
python
def get_logical_form(self,
                     action_sequence: List[str],
                     add_var_function: bool = True) -> str:
    """
    Takes an action sequence and constructs a logical form from it. This is useful if you
    want to get a logical form from a decoded sequence of actions generated by a transition
    based semantic parser.

    Parameters
    ----------
    action_sequence : ``List[str]``
        The sequence of actions as strings (eg.: ``['{START_SYMBOL} -> t', 't -> <e,t>', ...]``).
    add_var_function : ``bool`` (optional)
        ``var`` is a special function that some languages use within lambda functions to
        indicate the use of a variable (eg.: ``(lambda x (fb:row.row.year (var x)))``).
        Due to the way constrained decoding is currently implemented, it is easier for the
        decoder to not produce these functions. In that case, setting this flag adds the
        function in the logical form even though it is not present in the action sequence.
    """
    # Basic outline: we assume that the bracketing that we get in the RHS of each action
    # is the correct bracketing for reconstructing the logical form. This is true when
    # there is no currying in the action sequence. Given this assumption, we just need to
    # construct a tree from the action sequence, then output all of the leaves in the
    # tree, with brackets around the children of all non-terminal nodes.
    remaining_actions = [action.split(" -> ") for action in action_sequence]
    tree = Tree(remaining_actions[0][1], [])
    try:
        remaining_actions = self._construct_node_from_actions(tree,
                                                              remaining_actions[1:],
                                                              add_var_function)
    except ParsingError:
        logger.error("Error parsing action sequence: %s", action_sequence)
        raise
    if remaining_actions:
        logger.error("Error parsing action sequence: %s", action_sequence)
        logger.error("Remaining actions were: %s", remaining_actions)
        raise ParsingError("Extra actions in action sequence")
    return nltk_tree_to_logical_form(tree)
[ "def", "get_logical_form", "(", "self", ",", "action_sequence", ":", "List", "[", "str", "]", ",", "add_var_function", ":", "bool", "=", "True", ")", "->", "str", ":", "# Basic outline: we assume that the bracketing that we get in the RHS of each action is the", "# correct bracketing for reconstructing the logical form. This is true when there is no", "# currying in the action sequence. Given this assumption, we just need to construct a tree", "# from the action sequence, then output all of the leaves in the tree, with brackets around", "# the children of all non-terminal nodes.", "remaining_actions", "=", "[", "action", ".", "split", "(", "\" -> \"", ")", "for", "action", "in", "action_sequence", "]", "tree", "=", "Tree", "(", "remaining_actions", "[", "0", "]", "[", "1", "]", ",", "[", "]", ")", "try", ":", "remaining_actions", "=", "self", ".", "_construct_node_from_actions", "(", "tree", ",", "remaining_actions", "[", "1", ":", "]", ",", "add_var_function", ")", "except", "ParsingError", ":", "logger", ".", "error", "(", "\"Error parsing action sequence: %s\"", ",", "action_sequence", ")", "raise", "if", "remaining_actions", ":", "logger", ".", "error", "(", "\"Error parsing action sequence: %s\"", ",", "action_sequence", ")", "logger", ".", "error", "(", "\"Remaining actions were: %s\"", ",", "remaining_actions", ")", "raise", "ParsingError", "(", "\"Extra actions in action sequence\"", ")", "return", "nltk_tree_to_logical_form", "(", "tree", ")" ]
Takes an action sequence and constructs a logical form from it. This is useful if you want to get a logical form from a decoded sequence of actions generated by a transition based semantic parser. Parameters ---------- action_sequence : ``List[str]`` The sequence of actions as strings (eg.: ``['{START_SYMBOL} -> t', 't -> <e,t>', ...]``). add_var_function : ``bool`` (optional) ``var`` is a special function that some languages use within lambda functions to indicate the use of a variable (eg.: ``(lambda x (fb:row.row.year (var x)))``). Due to the way constrained decoding is currently implemented, it is easier for the decoder to not produce these functions. In that case, setting this flag adds the function in the logical form even though it is not present in the action sequence.
[ "Takes", "an", "action", "sequence", "and", "constructs", "a", "logical", "form", "from", "it", ".", "This", "is", "useful", "if", "you", "want", "to", "get", "a", "logical", "form", "from", "a", "decoded", "sequence", "of", "actions", "generated", "by", "a", "transition", "based", "semantic", "parser", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/worlds/world.py#L245-L285
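Aside: each action string encodes one grammar production, and splitting on " -> " recovers the (LHS, RHS) pairs that drive the tree construction. A toy action sequence (invented for illustration, not a real grammar):

actions = ["@start@ -> r", "r -> [<r,r>, r]", "<r,r> -> first", "r -> all_rows"]
pairs = [action.split(" -> ") for action in actions]
print(pairs[0][1])  # 'r', the label of the root node of the reconstructed tree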
23,010
allenai/allennlp
allennlp/semparse/worlds/world.py
World._process_nested_expression
[original_string: identical to the code field below]
python
def _process_nested_expression(self, nested_expression) -> str:
    """
    ``nested_expression`` is the result of parsing a logical form in Lisp format.
    We process it recursively and return a string in the format that NLTK's
    ``LogicParser`` would understand.
    """
    expression_is_list = isinstance(nested_expression, list)
    expression_size = len(nested_expression)
    if expression_is_list and expression_size == 1 and isinstance(nested_expression[0], list):
        return self._process_nested_expression(nested_expression[0])
    elements_are_leaves = [isinstance(element, str) for element in nested_expression]
    if all(elements_are_leaves):
        mapped_names = [self._map_name(name) for name in nested_expression]
    else:
        mapped_names = []
        for element, is_leaf in zip(nested_expression, elements_are_leaves):
            if is_leaf:
                mapped_names.append(self._map_name(element))
            else:
                mapped_names.append(self._process_nested_expression(element))
    if mapped_names[0] == "\\":
        # This means the predicate is lambda. NLTK wants the variable name to not be
        # within parentheses. Adding parentheses after the variable.
        arguments = [mapped_names[1]] + [f"({name})" for name in mapped_names[2:]]
    else:
        arguments = [f"({name})" for name in mapped_names[1:]]
    return f'({mapped_names[0]} {" ".join(arguments)})'
[ "def", "_process_nested_expression", "(", "self", ",", "nested_expression", ")", "->", "str", ":", "expression_is_list", "=", "isinstance", "(", "nested_expression", ",", "list", ")", "expression_size", "=", "len", "(", "nested_expression", ")", "if", "expression_is_list", "and", "expression_size", "==", "1", "and", "isinstance", "(", "nested_expression", "[", "0", "]", ",", "list", ")", ":", "return", "self", ".", "_process_nested_expression", "(", "nested_expression", "[", "0", "]", ")", "elements_are_leaves", "=", "[", "isinstance", "(", "element", ",", "str", ")", "for", "element", "in", "nested_expression", "]", "if", "all", "(", "elements_are_leaves", ")", ":", "mapped_names", "=", "[", "self", ".", "_map_name", "(", "name", ")", "for", "name", "in", "nested_expression", "]", "else", ":", "mapped_names", "=", "[", "]", "for", "element", ",", "is_leaf", "in", "zip", "(", "nested_expression", ",", "elements_are_leaves", ")", ":", "if", "is_leaf", ":", "mapped_names", ".", "append", "(", "self", ".", "_map_name", "(", "element", ")", ")", "else", ":", "mapped_names", ".", "append", "(", "self", ".", "_process_nested_expression", "(", "element", ")", ")", "if", "mapped_names", "[", "0", "]", "==", "\"\\\\\"", ":", "# This means the predicate is lambda. NLTK wants the variable name to not be within parantheses.", "# Adding parentheses after the variable.", "arguments", "=", "[", "mapped_names", "[", "1", "]", "]", "+", "[", "f\"({name})\"", "for", "name", "in", "mapped_names", "[", "2", ":", "]", "]", "else", ":", "arguments", "=", "[", "f\"({name})\"", "for", "name", "in", "mapped_names", "[", "1", ":", "]", "]", "return", "f'({mapped_names[0]} {\" \".join(arguments)})'" ]
``nested_expression`` is the result of parsing a logical form in Lisp format. We process it recursively and return a string in the format that NLTK's ``LogicParser`` would understand.
[ "nested_expression", "is", "the", "result", "of", "parsing", "a", "logical", "form", "in", "Lisp", "format", ".", "We", "process", "it", "recursively", "and", "return", "a", "string", "in", "the", "format", "that", "NLTK", "s", "LogicParser", "would", "understand", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/worlds/world.py#L382-L408
23,011
allenai/allennlp
allennlp/semparse/worlds/world.py
World._add_name_mapping
[original_string: identical to the code field below]
python
def _add_name_mapping(self, name: str, translated_name: str, name_type: Type = None):
    """
    Utility method to add a name and its translation to the local name mapping, and the
    corresponding signature, if available, to the local type signatures. This method also
    updates the reverse name mapping.
    """
    self.local_name_mapping[name] = translated_name
    self.reverse_name_mapping[translated_name] = name
    if name_type:
        self.local_type_signatures[translated_name] = name_type
[ "def", "_add_name_mapping", "(", "self", ",", "name", ":", "str", ",", "translated_name", ":", "str", ",", "name_type", ":", "Type", "=", "None", ")", ":", "self", ".", "local_name_mapping", "[", "name", "]", "=", "translated_name", "self", ".", "reverse_name_mapping", "[", "translated_name", "]", "=", "name", "if", "name_type", ":", "self", ".", "local_type_signatures", "[", "translated_name", "]", "=", "name_type" ]
Utility method to add a name and its translation to the local name mapping, and the corresponding signature, if available, to the local type signatures. This method also updates the reverse name mapping.
[ "Utility", "method", "to", "add", "a", "name", "and", "its", "translation", "to", "the", "local", "name", "mapping", "and", "the", "corresponding", "signature", "if", "available", "to", "the", "local", "type", "signatures", ".", "This", "method", "also", "updates", "the", "reverse", "name", "mapping", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/worlds/world.py#L434-L443
23,012
allenai/allennlp
allennlp/semparse/executors/wikitables_sempre_executor.py
WikiTablesSempreExecutor._create_sempre_executor
[original_string: identical to the code field below]
python
def _create_sempre_executor(self) -> None:
    """
    Creates a server running SEMPRE that we can send logical forms to for evaluation.
    This uses inter-process communication, because SEMPRE is java code. We also need to
    be careful to clean up the process when our program exits.
    """
    if self._executor_process:
        return
    # It'd be much nicer to just use `cached_path` for these files. However, the SEMPRE
    # jar that we're using expects to find these files in a particular location, so we
    # need to make sure we put the files in that location.
    os.makedirs(SEMPRE_DIR, exist_ok=True)
    abbreviations_path = os.path.join(SEMPRE_DIR, 'abbreviations.tsv')
    if not os.path.exists(abbreviations_path):
        result = requests.get(ABBREVIATIONS_FILE)
        with open(abbreviations_path, 'wb') as downloaded_file:
            downloaded_file.write(result.content)
    grammar_path = os.path.join(SEMPRE_DIR, 'grow.grammar')
    if not os.path.exists(grammar_path):
        result = requests.get(GROW_FILE)
        with open(grammar_path, 'wb') as downloaded_file:
            downloaded_file.write(result.content)
    if not check_for_java():
        raise RuntimeError('Java is not installed properly.')
    args = ['java', '-jar', cached_path(SEMPRE_EXECUTOR_JAR), 'serve', self._table_directory]
    self._executor_process = subprocess.Popen(args, stdin=subprocess.PIPE,
                                              stdout=subprocess.PIPE, bufsize=1)
    lines = []
    for _ in range(6):
        # SEMPRE outputs six lines of stuff when it loads that I can't disable. So, we
        # clear that here.
        lines.append(str(self._executor_process.stdout.readline()))
    assert 'Parser' in lines[-1], "SEMPRE server output unexpected; the server may have changed"
    logger.info("Started SEMPRE server for evaluating logical forms")
    # This is supposed to ensure that the subprocess gets killed when python exits.
    atexit.register(self._stop_sempre_executor)
[ "def", "_create_sempre_executor", "(", "self", ")", "->", "None", ":", "if", "self", ".", "_executor_process", ":", "return", "# It'd be much nicer to just use `cached_path` for these files. However, the SEMPRE jar", "# that we're using expects to find these files in a particular location, so we need to make", "# sure we put the files in that location.", "os", ".", "makedirs", "(", "SEMPRE_DIR", ",", "exist_ok", "=", "True", ")", "abbreviations_path", "=", "os", ".", "path", ".", "join", "(", "SEMPRE_DIR", ",", "'abbreviations.tsv'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "abbreviations_path", ")", ":", "result", "=", "requests", ".", "get", "(", "ABBREVIATIONS_FILE", ")", "with", "open", "(", "abbreviations_path", ",", "'wb'", ")", "as", "downloaded_file", ":", "downloaded_file", ".", "write", "(", "result", ".", "content", ")", "grammar_path", "=", "os", ".", "path", ".", "join", "(", "SEMPRE_DIR", ",", "'grow.grammar'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "grammar_path", ")", ":", "result", "=", "requests", ".", "get", "(", "GROW_FILE", ")", "with", "open", "(", "grammar_path", ",", "'wb'", ")", "as", "downloaded_file", ":", "downloaded_file", ".", "write", "(", "result", ".", "content", ")", "if", "not", "check_for_java", "(", ")", ":", "raise", "RuntimeError", "(", "'Java is not installed properly.'", ")", "args", "=", "[", "'java'", ",", "'-jar'", ",", "cached_path", "(", "SEMPRE_EXECUTOR_JAR", ")", ",", "'serve'", ",", "self", ".", "_table_directory", "]", "self", ".", "_executor_process", "=", "subprocess", ".", "Popen", "(", "args", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "bufsize", "=", "1", ")", "lines", "=", "[", "]", "for", "_", "in", "range", "(", "6", ")", ":", "# SEMPRE outputs six lines of stuff when it loads that I can't disable. So, we clear", "# that here.", "lines", ".", "append", "(", "str", "(", "self", ".", "_executor_process", ".", "stdout", ".", "readline", "(", ")", ")", ")", "assert", "'Parser'", "in", "lines", "[", "-", "1", "]", ",", "\"SEMPRE server output unexpected; the server may have changed\"", "logger", ".", "info", "(", "\"Started SEMPRE server for evaluating logical forms\"", ")", "# This is supposed to ensure that the subprocess gets killed when python exits.", "atexit", ".", "register", "(", "self", ".", "_stop_sempre_executor", ")" ]
Creates a server running SEMPRE that we can send logical forms to for evaluation. This uses inter-process communication, because SEMPRE is java code. We also need to be careful to clean up the process when our program exits.
[ "Creates", "a", "server", "running", "SEMPRE", "that", "we", "can", "send", "logical", "forms", "to", "for", "evaluation", ".", "This", "uses", "inter", "-", "process", "communication", "because", "SEMPRE", "is", "java", "code", ".", "We", "also", "need", "to", "be", "careful", "to", "clean", "up", "the", "process", "when", "our", "program", "exits", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/executors/wikitables_sempre_executor.py#L63-L105
23,013
allenai/allennlp
allennlp/training/metrics/conll_coref_scores.py
Scorer.phi4
[original_string: identical to the code field below]
python
def phi4(gold_clustering, predicted_clustering):
    """
    Subroutine for ceafe. Computes the mention F measure between gold and
    predicted mentions in a cluster.
    """
    return 2 * len([mention for mention in gold_clustering if mention in predicted_clustering]) \
           / float(len(gold_clustering) + len(predicted_clustering))
[ "def", "phi4", "(", "gold_clustering", ",", "predicted_clustering", ")", ":", "return", "2", "*", "len", "(", "[", "mention", "for", "mention", "in", "gold_clustering", "if", "mention", "in", "predicted_clustering", "]", ")", "/", "float", "(", "len", "(", "gold_clustering", ")", "+", "len", "(", "predicted_clustering", ")", ")" ]
Subroutine for ceafe. Computes the mention F measure between gold and predicted mentions in a cluster.
[ "Subroutine", "for", "ceafe", ".", "Computes", "the", "mention", "F", "measure", "between", "gold", "and", "predicted", "mentions", "in", "a", "cluster", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/metrics/conll_coref_scores.py#L208-L214
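Aside: `phi4` is an F measure over cluster membership, 2 * |gold ∩ predicted| / (|gold| + |predicted|). A quick check with made-up mention ids:

def phi4(gold_clustering, predicted_clustering):
    overlap = len([m for m in gold_clustering if m in predicted_clustering])
    return 2 * overlap / float(len(gold_clustering) + len(predicted_clustering))

print(phi4([1, 2, 3], [2, 3, 4]))  # 2 * 2 / 6 = 0.666...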
23,014
allenai/allennlp
allennlp/training/util.py
sparse_clip_norm
[original_string: identical to the code field below]
python
def sparse_clip_norm(parameters, max_norm, norm_type=2) -> float:
    """Clips gradient norm of an iterable of parameters.

    The norm is computed over all gradients together, as if they were
    concatenated into a single vector. Gradients are modified in-place.
    Supports sparse gradients.

    Parameters
    ----------
    parameters : ``(Iterable[torch.Tensor])``
        An iterable of Tensors that will have gradients normalized.
    max_norm : ``float``
        The max norm of the gradients.
    norm_type : ``float``
        The type of the used p-norm. Can be ``'inf'`` for infinity norm.

    Returns
    -------
    Total norm of the parameters (viewed as a single vector).
    """
    # pylint: disable=invalid-name,protected-access
    parameters = list(filter(lambda p: p.grad is not None, parameters))
    max_norm = float(max_norm)
    norm_type = float(norm_type)
    if norm_type == float('inf'):
        total_norm = max(p.grad.data.abs().max() for p in parameters)
    else:
        total_norm = 0
        for p in parameters:
            if p.grad.is_sparse:
                # need to coalesce the repeated indices before finding norm
                grad = p.grad.data.coalesce()
                param_norm = grad._values().norm(norm_type)
            else:
                param_norm = p.grad.data.norm(norm_type)
            total_norm += param_norm ** norm_type
        total_norm = total_norm ** (1. / norm_type)
    clip_coef = max_norm / (total_norm + 1e-6)
    if clip_coef < 1:
        for p in parameters:
            if p.grad.is_sparse:
                p.grad.data._values().mul_(clip_coef)
            else:
                p.grad.data.mul_(clip_coef)
    return total_norm
[ "def", "sparse_clip_norm", "(", "parameters", ",", "max_norm", ",", "norm_type", "=", "2", ")", "->", "float", ":", "# pylint: disable=invalid-name,protected-access", "parameters", "=", "list", "(", "filter", "(", "lambda", "p", ":", "p", ".", "grad", "is", "not", "None", ",", "parameters", ")", ")", "max_norm", "=", "float", "(", "max_norm", ")", "norm_type", "=", "float", "(", "norm_type", ")", "if", "norm_type", "==", "float", "(", "'inf'", ")", ":", "total_norm", "=", "max", "(", "p", ".", "grad", ".", "data", ".", "abs", "(", ")", ".", "max", "(", ")", "for", "p", "in", "parameters", ")", "else", ":", "total_norm", "=", "0", "for", "p", "in", "parameters", ":", "if", "p", ".", "grad", ".", "is_sparse", ":", "# need to coalesce the repeated indices before finding norm", "grad", "=", "p", ".", "grad", ".", "data", ".", "coalesce", "(", ")", "param_norm", "=", "grad", ".", "_values", "(", ")", ".", "norm", "(", "norm_type", ")", "else", ":", "param_norm", "=", "p", ".", "grad", ".", "data", ".", "norm", "(", "norm_type", ")", "total_norm", "+=", "param_norm", "**", "norm_type", "total_norm", "=", "total_norm", "**", "(", "1.", "/", "norm_type", ")", "clip_coef", "=", "max_norm", "/", "(", "total_norm", "+", "1e-6", ")", "if", "clip_coef", "<", "1", ":", "for", "p", "in", "parameters", ":", "if", "p", ".", "grad", ".", "is_sparse", ":", "p", ".", "grad", ".", "data", ".", "_values", "(", ")", ".", "mul_", "(", "clip_coef", ")", "else", ":", "p", ".", "grad", ".", "data", ".", "mul_", "(", "clip_coef", ")", "return", "total_norm" ]
Clips gradient norm of an iterable of parameters. The norm is computed over all gradients together, as if they were concatenated into a single vector. Gradients are modified in-place. Supports sparse gradients. Parameters ---------- parameters : ``(Iterable[torch.Tensor])`` An iterable of Tensors that will have gradients normalized. max_norm : ``float`` The max norm of the gradients. norm_type : ``float`` The type of the used p-norm. Can be ``'inf'`` for infinity norm. Returns ------- Total norm of the parameters (viewed as a single vector).
[ "Clips", "gradient", "norm", "of", "an", "iterable", "of", "parameters", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/util.py#L34-L78
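Aside: a usage sketch, assuming `sparse_clip_norm` is imported from `allennlp.training.util` as in this snapshot; for purely dense gradients, `torch.nn.utils.clip_grad_norm_` performs the same rescaling:

import torch
from allennlp.training.util import sparse_clip_norm

model = torch.nn.Linear(10, 2)
model(torch.randn(4, 10)).sum().backward()
# Rescales all gradients in place so their combined L2 norm is at most 5.0.
total_norm = sparse_clip_norm(model.parameters(), max_norm=5.0)
print(total_norm)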
23,015
allenai/allennlp
allennlp/training/util.py
move_optimizer_to_cuda
[original_string: identical to the code field below]
python
def move_optimizer_to_cuda(optimizer):
    """
    Move the optimizer state to GPU, if necessary.
    After calling, any parameter specific state in the optimizer
    will be located on the same device as the parameter.
    """
    for param_group in optimizer.param_groups:
        for param in param_group['params']:
            if param.is_cuda:
                param_state = optimizer.state[param]
                for k in param_state.keys():
                    if isinstance(param_state[k], torch.Tensor):
                        param_state[k] = param_state[k].cuda(device=param.get_device())
[ "def", "move_optimizer_to_cuda", "(", "optimizer", ")", ":", "for", "param_group", "in", "optimizer", ".", "param_groups", ":", "for", "param", "in", "param_group", "[", "'params'", "]", ":", "if", "param", ".", "is_cuda", ":", "param_state", "=", "optimizer", ".", "state", "[", "param", "]", "for", "k", "in", "param_state", ".", "keys", "(", ")", ":", "if", "isinstance", "(", "param_state", "[", "k", "]", ",", "torch", ".", "Tensor", ")", ":", "param_state", "[", "k", "]", "=", "param_state", "[", "k", "]", ".", "cuda", "(", "device", "=", "param", ".", "get_device", "(", ")", ")" ]
Move the optimizer state to GPU, if necessary. After calling, any parameter specific state in the optimizer will be located on the same device as the parameter.
[ "Move", "the", "optimizer", "state", "to", "GPU", "if", "necessary", ".", "After", "calling", "any", "parameter", "specific", "state", "in", "the", "optimizer", "will", "be", "located", "on", "the", "same", "device", "as", "the", "parameter", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/util.py#L81-L93
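Aside: the typical trigger is loading a CPU checkpoint into an optimizer whose parameters have since moved to GPU. A hedged sketch (requires a CUDA device; the checkpoint path and layout in the comment are placeholders):

import torch
from allennlp.training.util import move_optimizer_to_cuda

model = torch.nn.Linear(10, 2).cuda()
optimizer = torch.optim.Adam(model.parameters())
# e.g. after optimizer.load_state_dict(torch.load("model.th", map_location="cpu"))
move_optimizer_to_cuda(optimizer)  # any tensor-valued state now lives next to its parameter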
23,016
allenai/allennlp
allennlp/training/util.py
get_batch_size
[original_string: identical to the code field below]
python
def get_batch_size(batch: Union[Dict, torch.Tensor]) -> int:
    """
    Returns the size of the batch dimension. Assumes a well-formed batch,
    returns 0 otherwise.
    """
    if isinstance(batch, torch.Tensor):
        return batch.size(0)  # type: ignore
    elif isinstance(batch, Dict):
        return get_batch_size(next(iter(batch.values())))
    else:
        return 0
[ "def", "get_batch_size", "(", "batch", ":", "Union", "[", "Dict", ",", "torch", ".", "Tensor", "]", ")", "->", "int", ":", "if", "isinstance", "(", "batch", ",", "torch", ".", "Tensor", ")", ":", "return", "batch", ".", "size", "(", "0", ")", "# type: ignore", "elif", "isinstance", "(", "batch", ",", "Dict", ")", ":", "return", "get_batch_size", "(", "next", "(", "iter", "(", "batch", ".", "values", "(", ")", ")", ")", ")", "else", ":", "return", "0" ]
Returns the size of the batch dimension. Assumes a well-formed batch, returns 0 otherwise.
[ "Returns", "the", "size", "of", "the", "batch", "dimension", ".", "Assumes", "a", "well", "-", "formed", "batch", "returns", "0", "otherwise", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/util.py#L96-L106
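Aside: the recursion bottoms out at the first tensor it finds, so nested token dicts report their leading dimension; anything else falls through to 0:

import torch
from allennlp.training.util import get_batch_size

batch = {"tokens": {"token_ids": torch.zeros(32, 50, dtype=torch.long)}}
print(get_batch_size(batch))          # 32
print(get_batch_size("not a batch"))  # 0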
23,017
allenai/allennlp
allennlp/training/util.py
time_to_str
[original_string: identical to the code field below]
python
def time_to_str(timestamp: int) -> str:
    """
    Convert seconds past Epoch to human readable string.
    """
    datetimestamp = datetime.datetime.fromtimestamp(timestamp)
    return '{:04d}-{:02d}-{:02d}-{:02d}-{:02d}-{:02d}'.format(
            datetimestamp.year, datetimestamp.month, datetimestamp.day,
            datetimestamp.hour, datetimestamp.minute, datetimestamp.second
    )
[ "def", "time_to_str", "(", "timestamp", ":", "int", ")", "->", "str", ":", "datetimestamp", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "timestamp", ")", "return", "'{:04d}-{:02d}-{:02d}-{:02d}-{:02d}-{:02d}'", ".", "format", "(", "datetimestamp", ".", "year", ",", "datetimestamp", ".", "month", ",", "datetimestamp", ".", "day", ",", "datetimestamp", ".", "hour", ",", "datetimestamp", ".", "minute", ",", "datetimestamp", ".", "second", ")" ]
Convert seconds past Epoch to human readable string.
[ "Convert", "seconds", "past", "Epoch", "to", "human", "readable", "string", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/util.py#L109-L117
23,018
allenai/allennlp
allennlp/training/util.py
str_to_time
[original_string: identical to the code field below]
python
def str_to_time(time_str: str) -> datetime.datetime:
    """
    Convert human readable string to datetime.datetime.
    """
    pieces: Any = [int(piece) for piece in time_str.split('-')]
    return datetime.datetime(*pieces)
[ "def", "str_to_time", "(", "time_str", ":", "str", ")", "->", "datetime", ".", "datetime", ":", "pieces", ":", "Any", "=", "[", "int", "(", "piece", ")", "for", "piece", "in", "time_str", ".", "split", "(", "'-'", ")", "]", "return", "datetime", ".", "datetime", "(", "*", "pieces", ")" ]
Convert human readable string to datetime.datetime.
[ "Convert", "human", "readable", "string", "to", "datetime", ".", "datetime", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/util.py#L120-L125
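Aside: the two helpers are intended as inverses, so a round trip should be exact for whole-second timestamps:

import datetime
from allennlp.training.util import str_to_time, time_to_str

now = int(datetime.datetime.now().timestamp())
stamp = time_to_str(now)  # e.g. '2019-05-07-13-45-09'
assert str_to_time(stamp) == datetime.datetime.fromtimestamp(now)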
23,019
allenai/allennlp
allennlp/training/util.py
datasets_from_params
def datasets_from_params(params: Params, cache_directory: str = None, cache_prefix: str = None) -> Dict[str, Iterable[Instance]]: """ Load all the datasets specified by the config. Parameters ---------- params : ``Params`` cache_directory : ``str``, optional If given, we will instruct the ``DatasetReaders`` that we construct to cache their instances in this location (or read their instances from caches in this location, if a suitable cache already exists). This is essentially a `base` directory for the cache, as we will additionally add the ``cache_prefix`` to this directory, giving an actual cache location of ``cache_directory + cache_prefix``. cache_prefix : ``str``, optional This works in conjunction with the ``cache_directory``. The idea is that the ``cache_directory`` contains caches for all different parameter settings, while the ``cache_prefix`` captures a specific set of parameters that led to a particular cache file. That is, if you change the tokenization settings inside your ``DatasetReader``, you don't want to read cached data that used the old settings. In order to avoid this, we compute a hash of the parameters used to construct each ``DatasetReader`` and use that as a "prefix" to the cache files inside the base ``cache_directory``. So, a given ``input_file`` would be cached essentially as ``cache_directory + cache_prefix + input_file``, where you specify a ``cache_directory``, the ``cache_prefix`` is based on the dataset reader parameters, and the ``input_file`` is whatever path you provided to ``DatasetReader.read()``. In order to allow you to give recognizable names to these prefixes if you want them, you can manually specify the ``cache_prefix``. Note that in some rare cases this can be dangerous, as we'll use the `same` prefix for both train and validation dataset readers. """ dataset_reader_params = params.pop('dataset_reader') validation_dataset_reader_params = params.pop('validation_dataset_reader', None) train_cache_dir, validation_cache_dir = _set_up_cache_files(dataset_reader_params, validation_dataset_reader_params, cache_directory, cache_prefix) dataset_reader = DatasetReader.from_params(dataset_reader_params) validation_and_test_dataset_reader: DatasetReader = dataset_reader if validation_dataset_reader_params is not None: logger.info("Using a separate dataset reader to load validation and test data.") validation_and_test_dataset_reader = DatasetReader.from_params(validation_dataset_reader_params) if train_cache_dir: dataset_reader.cache_data(train_cache_dir) validation_and_test_dataset_reader.cache_data(validation_cache_dir) train_data_path = params.pop('train_data_path') logger.info("Reading training data from %s", train_data_path) train_data = dataset_reader.read(train_data_path) datasets: Dict[str, Iterable[Instance]] = {"train": train_data} validation_data_path = params.pop('validation_data_path', None) if validation_data_path is not None: logger.info("Reading validation data from %s", validation_data_path) validation_data = validation_and_test_dataset_reader.read(validation_data_path) datasets["validation"] = validation_data test_data_path = params.pop("test_data_path", None) if test_data_path is not None: logger.info("Reading test data from %s", test_data_path) test_data = validation_and_test_dataset_reader.read(test_data_path) datasets["test"] = test_data return datasets
python
def datasets_from_params(params: Params,
                         cache_directory: str = None,
                         cache_prefix: str = None) -> Dict[str, Iterable[Instance]]:
    """
    Load all the datasets specified by the config.

    Parameters
    ----------
    params : ``Params``
    cache_directory : ``str``, optional
        If given, we will instruct the ``DatasetReaders`` that we construct to cache their
        instances in this location (or read their instances from caches in this location, if a
        suitable cache already exists).  This is essentially a `base` directory for the cache, as
        we will additionally add the ``cache_prefix`` to this directory, giving an actual cache
        location of ``cache_directory + cache_prefix``.
    cache_prefix : ``str``, optional
        This works in conjunction with the ``cache_directory``.  The idea is that the
        ``cache_directory`` contains caches for all different parameter settings, while the
        ``cache_prefix`` captures a specific set of parameters that led to a particular cache file.
        That is, if you change the tokenization settings inside your ``DatasetReader``, you don't
        want to read cached data that used the old settings.  In order to avoid this, we compute a
        hash of the parameters used to construct each ``DatasetReader`` and use that as a "prefix"
        to the cache files inside the base ``cache_directory``.  So, a given ``input_file`` would
        be cached essentially as ``cache_directory + cache_prefix + input_file``, where you specify
        a ``cache_directory``, the ``cache_prefix`` is based on the dataset reader parameters, and
        the ``input_file`` is whatever path you provided to ``DatasetReader.read()``.  In order to
        allow you to give recognizable names to these prefixes if you want them, you can manually
        specify the ``cache_prefix``.  Note that in some rare cases this can be dangerous, as we'll
        use the `same` prefix for both train and validation dataset readers.
    """
    dataset_reader_params = params.pop('dataset_reader')
    validation_dataset_reader_params = params.pop('validation_dataset_reader', None)

    train_cache_dir, validation_cache_dir = _set_up_cache_files(dataset_reader_params,
                                                                validation_dataset_reader_params,
                                                                cache_directory,
                                                                cache_prefix)

    dataset_reader = DatasetReader.from_params(dataset_reader_params)

    validation_and_test_dataset_reader: DatasetReader = dataset_reader
    if validation_dataset_reader_params is not None:
        logger.info("Using a separate dataset reader to load validation and test data.")
        validation_and_test_dataset_reader = DatasetReader.from_params(validation_dataset_reader_params)

    if train_cache_dir:
        dataset_reader.cache_data(train_cache_dir)
        validation_and_test_dataset_reader.cache_data(validation_cache_dir)

    train_data_path = params.pop('train_data_path')
    logger.info("Reading training data from %s", train_data_path)
    train_data = dataset_reader.read(train_data_path)

    datasets: Dict[str, Iterable[Instance]] = {"train": train_data}

    validation_data_path = params.pop('validation_data_path', None)
    if validation_data_path is not None:
        logger.info("Reading validation data from %s", validation_data_path)
        validation_data = validation_and_test_dataset_reader.read(validation_data_path)
        datasets["validation"] = validation_data

    test_data_path = params.pop("test_data_path", None)
    if test_data_path is not None:
        logger.info("Reading test data from %s", test_data_path)
        test_data = validation_and_test_dataset_reader.read(test_data_path)
        datasets["test"] = test_data

    return datasets
[ "def", "datasets_from_params", "(", "params", ":", "Params", ",", "cache_directory", ":", "str", "=", "None", ",", "cache_prefix", ":", "str", "=", "None", ")", "->", "Dict", "[", "str", ",", "Iterable", "[", "Instance", "]", "]", ":", "dataset_reader_params", "=", "params", ".", "pop", "(", "'dataset_reader'", ")", "validation_dataset_reader_params", "=", "params", ".", "pop", "(", "'validation_dataset_reader'", ",", "None", ")", "train_cache_dir", ",", "validation_cache_dir", "=", "_set_up_cache_files", "(", "dataset_reader_params", ",", "validation_dataset_reader_params", ",", "cache_directory", ",", "cache_prefix", ")", "dataset_reader", "=", "DatasetReader", ".", "from_params", "(", "dataset_reader_params", ")", "validation_and_test_dataset_reader", ":", "DatasetReader", "=", "dataset_reader", "if", "validation_dataset_reader_params", "is", "not", "None", ":", "logger", ".", "info", "(", "\"Using a separate dataset reader to load validation and test data.\"", ")", "validation_and_test_dataset_reader", "=", "DatasetReader", ".", "from_params", "(", "validation_dataset_reader_params", ")", "if", "train_cache_dir", ":", "dataset_reader", ".", "cache_data", "(", "train_cache_dir", ")", "validation_and_test_dataset_reader", ".", "cache_data", "(", "validation_cache_dir", ")", "train_data_path", "=", "params", ".", "pop", "(", "'train_data_path'", ")", "logger", ".", "info", "(", "\"Reading training data from %s\"", ",", "train_data_path", ")", "train_data", "=", "dataset_reader", ".", "read", "(", "train_data_path", ")", "datasets", ":", "Dict", "[", "str", ",", "Iterable", "[", "Instance", "]", "]", "=", "{", "\"train\"", ":", "train_data", "}", "validation_data_path", "=", "params", ".", "pop", "(", "'validation_data_path'", ",", "None", ")", "if", "validation_data_path", "is", "not", "None", ":", "logger", ".", "info", "(", "\"Reading validation data from %s\"", ",", "validation_data_path", ")", "validation_data", "=", "validation_and_test_dataset_reader", ".", "read", "(", "validation_data_path", ")", "datasets", "[", "\"validation\"", "]", "=", "validation_data", "test_data_path", "=", "params", ".", "pop", "(", "\"test_data_path\"", ",", "None", ")", "if", "test_data_path", "is", "not", "None", ":", "logger", ".", "info", "(", "\"Reading test data from %s\"", ",", "test_data_path", ")", "test_data", "=", "validation_and_test_dataset_reader", ".", "read", "(", "test_data_path", ")", "datasets", "[", "\"test\"", "]", "=", "test_data", "return", "datasets" ]
Load all the datasets specified by the config.

Parameters
----------
params : ``Params``
cache_directory : ``str``, optional
    If given, we will instruct the ``DatasetReaders`` that we construct to cache their
    instances in this location (or read their instances from caches in this location, if a
    suitable cache already exists).  This is essentially a `base` directory for the cache, as
    we will additionally add the ``cache_prefix`` to this directory, giving an actual cache
    location of ``cache_directory + cache_prefix``.
cache_prefix : ``str``, optional
    This works in conjunction with the ``cache_directory``.  The idea is that the
    ``cache_directory`` contains caches for all different parameter settings, while the
    ``cache_prefix`` captures a specific set of parameters that led to a particular cache file.
    That is, if you change the tokenization settings inside your ``DatasetReader``, you don't
    want to read cached data that used the old settings.  In order to avoid this, we compute a
    hash of the parameters used to construct each ``DatasetReader`` and use that as a "prefix"
    to the cache files inside the base ``cache_directory``.  So, a given ``input_file`` would
    be cached essentially as ``cache_directory + cache_prefix + input_file``, where you specify
    a ``cache_directory``, the ``cache_prefix`` is based on the dataset reader parameters, and
    the ``input_file`` is whatever path you provided to ``DatasetReader.read()``.  In order to
    allow you to give recognizable names to these prefixes if you want them, you can manually
    specify the ``cache_prefix``.  Note that in some rare cases this can be dangerous, as we'll
    use the `same` prefix for both train and validation dataset readers.
[ "Load", "all", "the", "datasets", "specified", "by", "the", "config", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/util.py#L128-L194
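A minimal usage sketch (not from the source): the config path and cache directory below are hypothetical placeholders, while Params.from_file and datasets_from_params are the real entry points shown above.

from allennlp.common.params import Params
from allennlp.training.util import datasets_from_params

# "experiment.jsonnet" stands in for any training config that defines at
# least 'dataset_reader' and 'train_data_path'.
params = Params.from_file("experiment.jsonnet")
# Instances are cached under cache_directory + a hash-derived cache_prefix.
datasets = datasets_from_params(params, cache_directory="/tmp/allennlp_cache")
print(sorted(datasets.keys()))  # ['train'], plus 'validation'/'test' if configured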
23,020
allenai/allennlp
allennlp/training/util.py
create_serialization_dir
def create_serialization_dir(
        params: Params,
        serialization_dir: str,
        recover: bool,
        force: bool) -> None:
    """
    This function creates the serialization directory if it doesn't exist.  If it already exists
    and is non-empty, then it verifies that we're recovering from a training with an identical configuration.

    Parameters
    ----------
    params: ``Params``
        A parameter object specifying an AllenNLP Experiment.
    serialization_dir: ``str``
        The directory in which to save results and logs.
    recover: ``bool``
        If ``True``, we will try to recover from an existing serialization directory, and crash if
        the directory doesn't exist, or doesn't match the configuration we're given.
    force: ``bool``
        If ``True``, we will overwrite the serialization directory if it already exists.
    """
    if recover and force:
        raise ConfigurationError("Illegal arguments: both force and recover are true.")

    if os.path.exists(serialization_dir) and force:
        shutil.rmtree(serialization_dir)

    if os.path.exists(serialization_dir) and os.listdir(serialization_dir):
        if not recover:
            raise ConfigurationError(f"Serialization directory ({serialization_dir}) already exists and is "
                                     f"not empty. Specify --recover to recover training from existing output.")

        logger.info(f"Recovering from prior training at {serialization_dir}.")

        recovered_config_file = os.path.join(serialization_dir, CONFIG_NAME)
        if not os.path.exists(recovered_config_file):
            raise ConfigurationError("The serialization directory already exists but doesn't "
                                     "contain a config.json. You probably gave the wrong directory.")
        else:
            loaded_params = Params.from_file(recovered_config_file)

            # Check whether any of the training configuration differs from the configuration we are
            # resuming. If so, warn the user that training may fail.
            fail = False
            flat_params = params.as_flat_dict()
            flat_loaded = loaded_params.as_flat_dict()
            for key in flat_params.keys() - flat_loaded.keys():
                logger.error(f"Key '{key}' found in training configuration but not in the serialization "
                             f"directory we're recovering from.")
                fail = True
            for key in flat_loaded.keys() - flat_params.keys():
                logger.error(f"Key '{key}' found in the serialization directory we're recovering from "
                             f"but not in the training config.")
                fail = True
            for key in flat_params.keys():
                if flat_params.get(key, None) != flat_loaded.get(key, None):
                    logger.error(f"Value for '{key}' in training configuration does not match the value in "
                                 f"the serialization directory we're recovering from: "
                                 f"{flat_params[key]} != {flat_loaded[key]}")
                    fail = True
            if fail:
                raise ConfigurationError("Training configuration does not match the configuration we're "
                                         "recovering from.")
    else:
        if recover:
            raise ConfigurationError(f"--recover specified but serialization_dir ({serialization_dir}) "
                                     "does not exist. There is nothing to recover from.")
        os.makedirs(serialization_dir, exist_ok=True)
python
def create_serialization_dir(
        params: Params,
        serialization_dir: str,
        recover: bool,
        force: bool) -> None:
    """
    This function creates the serialization directory if it doesn't exist.  If it already exists
    and is non-empty, then it verifies that we're recovering from a training with an identical configuration.

    Parameters
    ----------
    params: ``Params``
        A parameter object specifying an AllenNLP Experiment.
    serialization_dir: ``str``
        The directory in which to save results and logs.
    recover: ``bool``
        If ``True``, we will try to recover from an existing serialization directory, and crash if
        the directory doesn't exist, or doesn't match the configuration we're given.
    force: ``bool``
        If ``True``, we will overwrite the serialization directory if it already exists.
    """
    if recover and force:
        raise ConfigurationError("Illegal arguments: both force and recover are true.")

    if os.path.exists(serialization_dir) and force:
        shutil.rmtree(serialization_dir)

    if os.path.exists(serialization_dir) and os.listdir(serialization_dir):
        if not recover:
            raise ConfigurationError(f"Serialization directory ({serialization_dir}) already exists and is "
                                     f"not empty. Specify --recover to recover training from existing output.")

        logger.info(f"Recovering from prior training at {serialization_dir}.")

        recovered_config_file = os.path.join(serialization_dir, CONFIG_NAME)
        if not os.path.exists(recovered_config_file):
            raise ConfigurationError("The serialization directory already exists but doesn't "
                                     "contain a config.json. You probably gave the wrong directory.")
        else:
            loaded_params = Params.from_file(recovered_config_file)

            # Check whether any of the training configuration differs from the configuration we are
            # resuming. If so, warn the user that training may fail.
            fail = False
            flat_params = params.as_flat_dict()
            flat_loaded = loaded_params.as_flat_dict()
            for key in flat_params.keys() - flat_loaded.keys():
                logger.error(f"Key '{key}' found in training configuration but not in the serialization "
                             f"directory we're recovering from.")
                fail = True
            for key in flat_loaded.keys() - flat_params.keys():
                logger.error(f"Key '{key}' found in the serialization directory we're recovering from "
                             f"but not in the training config.")
                fail = True
            for key in flat_params.keys():
                if flat_params.get(key, None) != flat_loaded.get(key, None):
                    logger.error(f"Value for '{key}' in training configuration does not match the value in "
                                 f"the serialization directory we're recovering from: "
                                 f"{flat_params[key]} != {flat_loaded[key]}")
                    fail = True
            if fail:
                raise ConfigurationError("Training configuration does not match the configuration we're "
                                         "recovering from.")
    else:
        if recover:
            raise ConfigurationError(f"--recover specified but serialization_dir ({serialization_dir}) "
                                     "does not exist. There is nothing to recover from.")
        os.makedirs(serialization_dir, exist_ok=True)
[ "def", "create_serialization_dir", "(", "params", ":", "Params", ",", "serialization_dir", ":", "str", ",", "recover", ":", "bool", ",", "force", ":", "bool", ")", "->", "None", ":", "if", "recover", "and", "force", ":", "raise", "ConfigurationError", "(", "\"Illegal arguments: both force and recover are true.\"", ")", "if", "os", ".", "path", ".", "exists", "(", "serialization_dir", ")", "and", "force", ":", "shutil", ".", "rmtree", "(", "serialization_dir", ")", "if", "os", ".", "path", ".", "exists", "(", "serialization_dir", ")", "and", "os", ".", "listdir", "(", "serialization_dir", ")", ":", "if", "not", "recover", ":", "raise", "ConfigurationError", "(", "f\"Serialization directory ({serialization_dir}) already exists and is \"", "f\"not empty. Specify --recover to recover training from existing output.\"", ")", "logger", ".", "info", "(", "f\"Recovering from prior training at {serialization_dir}.\"", ")", "recovered_config_file", "=", "os", ".", "path", ".", "join", "(", "serialization_dir", ",", "CONFIG_NAME", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "recovered_config_file", ")", ":", "raise", "ConfigurationError", "(", "\"The serialization directory already exists but doesn't \"", "\"contain a config.json. You probably gave the wrong directory.\"", ")", "else", ":", "loaded_params", "=", "Params", ".", "from_file", "(", "recovered_config_file", ")", "# Check whether any of the training configuration differs from the configuration we are", "# resuming. If so, warn the user that training may fail.", "fail", "=", "False", "flat_params", "=", "params", ".", "as_flat_dict", "(", ")", "flat_loaded", "=", "loaded_params", ".", "as_flat_dict", "(", ")", "for", "key", "in", "flat_params", ".", "keys", "(", ")", "-", "flat_loaded", ".", "keys", "(", ")", ":", "logger", ".", "error", "(", "f\"Key '{key}' found in training configuration but not in the serialization \"", "f\"directory we're recovering from.\"", ")", "fail", "=", "True", "for", "key", "in", "flat_loaded", ".", "keys", "(", ")", "-", "flat_params", ".", "keys", "(", ")", ":", "logger", ".", "error", "(", "f\"Key '{key}' found in the serialization directory we're recovering from \"", "f\"but not in the training config.\"", ")", "fail", "=", "True", "for", "key", "in", "flat_params", ".", "keys", "(", ")", ":", "if", "flat_params", ".", "get", "(", "key", ",", "None", ")", "!=", "flat_loaded", ".", "get", "(", "key", ",", "None", ")", ":", "logger", ".", "error", "(", "f\"Value for '{key}' in training configuration does not match that the value in \"", "f\"the serialization directory we're recovering from: \"", "f\"{flat_params[key]} != {flat_loaded[key]}\"", ")", "fail", "=", "True", "if", "fail", ":", "raise", "ConfigurationError", "(", "\"Training configuration does not match the configuration we're \"", "\"recovering from.\"", ")", "else", ":", "if", "recover", ":", "raise", "ConfigurationError", "(", "f\"--recover specified but serialization_dir ({serialization_dir}) \"", "\"does not exist. There is nothing to recover from.\"", ")", "os", ".", "makedirs", "(", "serialization_dir", ",", "exist_ok", "=", "True", ")" ]
This function creates the serialization directory if it doesn't exist.  If it already exists
and is non-empty, then it verifies that we're recovering from a training with an identical configuration.

Parameters
----------
params: ``Params``
    A parameter object specifying an AllenNLP Experiment.
serialization_dir: ``str``
    The directory in which to save results and logs.
recover: ``bool``
    If ``True``, we will try to recover from an existing serialization directory, and crash if
    the directory doesn't exist, or doesn't match the configuration we're given.
force: ``bool``
    If ``True``, we will overwrite the serialization directory if it already exists.
[ "This", "function", "creates", "the", "serialization", "directory", "if", "it", "doesn", "t", "exist", ".", "If", "it", "already", "exists", "and", "is", "non", "-", "empty", "then", "it", "verifies", "that", "we", "re", "recovering", "from", "a", "training", "with", "an", "identical", "configuration", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/util.py#L242-L309
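A hedged sketch of how a training command might call create_serialization_dir; both paths below are placeholders.

from allennlp.common.params import Params
from allennlp.training.util import create_serialization_dir

params = Params.from_file("experiment.jsonnet")  # hypothetical config path
# Fresh run: creates /tmp/my_run. With recover=True it would instead require the
# directory to exist and its stored config.json to match `params`.
create_serialization_dir(params, serialization_dir="/tmp/my_run", recover=False, force=False)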
23,021
allenai/allennlp
allennlp/training/util.py
data_parallel
def data_parallel(batch_group: List[TensorDict],
                  model: Model,
                  cuda_devices: List) -> Dict[str, torch.Tensor]:
    """
    Performs a forward pass using multiple GPUs.  This is a simplification
    of torch.nn.parallel.data_parallel to support the allennlp model
    interface.
    """
    assert len(batch_group) <= len(cuda_devices)

    moved = [nn_util.move_to_device(batch, device)
             for batch, device in zip(batch_group, cuda_devices)]

    used_device_ids = cuda_devices[:len(moved)]
    # Counterintuitively, it appears replicate expects the source device id to be the first element
    # in the device id list. See torch.cuda.comm.broadcast_coalesced, which is called indirectly.
    replicas = replicate(model, used_device_ids)

    # We pass all our arguments as kwargs. Create a list of empty tuples of the
    # correct shape to serve as (non-existent) positional arguments.
    inputs = [()] * len(batch_group)
    outputs = parallel_apply(replicas, inputs, moved, used_device_ids)

    # Only the 'loss' is needed.
    # a (num_gpu, ) tensor with loss on each GPU
    losses = gather([output['loss'].unsqueeze(0) for output in outputs], used_device_ids[0], 0)
    return {'loss': losses.mean()}
python
def data_parallel(batch_group: List[TensorDict],
                  model: Model,
                  cuda_devices: List) -> Dict[str, torch.Tensor]:
    """
    Performs a forward pass using multiple GPUs.  This is a simplification
    of torch.nn.parallel.data_parallel to support the allennlp model
    interface.
    """
    assert len(batch_group) <= len(cuda_devices)

    moved = [nn_util.move_to_device(batch, device)
             for batch, device in zip(batch_group, cuda_devices)]

    used_device_ids = cuda_devices[:len(moved)]
    # Counterintuitively, it appears replicate expects the source device id to be the first element
    # in the device id list. See torch.cuda.comm.broadcast_coalesced, which is called indirectly.
    replicas = replicate(model, used_device_ids)

    # We pass all our arguments as kwargs. Create a list of empty tuples of the
    # correct shape to serve as (non-existent) positional arguments.
    inputs = [()] * len(batch_group)
    outputs = parallel_apply(replicas, inputs, moved, used_device_ids)

    # Only the 'loss' is needed.
    # a (num_gpu, ) tensor with loss on each GPU
    losses = gather([output['loss'].unsqueeze(0) for output in outputs], used_device_ids[0], 0)
    return {'loss': losses.mean()}
[ "def", "data_parallel", "(", "batch_group", ":", "List", "[", "TensorDict", "]", ",", "model", ":", "Model", ",", "cuda_devices", ":", "List", ")", "->", "Dict", "[", "str", ",", "torch", ".", "Tensor", "]", ":", "assert", "len", "(", "batch_group", ")", "<=", "len", "(", "cuda_devices", ")", "moved", "=", "[", "nn_util", ".", "move_to_device", "(", "batch", ",", "device", ")", "for", "batch", ",", "device", "in", "zip", "(", "batch_group", ",", "cuda_devices", ")", "]", "used_device_ids", "=", "cuda_devices", "[", ":", "len", "(", "moved", ")", "]", "# Counterintuitively, it appears replicate expects the source device id to be the first element", "# in the device id list. See torch.cuda.comm.broadcast_coalesced, which is called indirectly.", "replicas", "=", "replicate", "(", "model", ",", "used_device_ids", ")", "# We pass all our arguments as kwargs. Create a list of empty tuples of the", "# correct shape to serve as (non-existent) positional arguments.", "inputs", "=", "[", "(", ")", "]", "*", "len", "(", "batch_group", ")", "outputs", "=", "parallel_apply", "(", "replicas", ",", "inputs", ",", "moved", ",", "used_device_ids", ")", "# Only the 'loss' is needed.", "# a (num_gpu, ) tensor with loss on each GPU", "losses", "=", "gather", "(", "[", "output", "[", "'loss'", "]", ".", "unsqueeze", "(", "0", ")", "for", "output", "in", "outputs", "]", ",", "used_device_ids", "[", "0", "]", ",", "0", ")", "return", "{", "'loss'", ":", "losses", ".", "mean", "(", ")", "}" ]
Performs a forward pass using multiple GPUs. This is a simplification of torch.nn.parallel.data_parallel to support the allennlp model interface.
[ "Performs", "a", "forward", "pass", "using", "multiple", "GPUs", ".", "This", "is", "a", "simplification", "of", "torch", ".", "nn", ".", "parallel", ".", "data_parallel", "to", "support", "the", "allennlp", "model", "interface", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/util.py#L311-L337
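An illustrative call, assuming a multi-GPU setup this snippet cannot verify: `model` is any allennlp Model and `batch_group` is a list of TensorDicts, one per device, as a trainer would assemble them.

from allennlp.training.util import data_parallel

output = data_parallel(batch_group, model, cuda_devices=[0, 1])
# 'loss' is the mean of the per-GPU losses, gathered onto the first device.
output['loss'].backward()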
23,022
allenai/allennlp
allennlp/training/util.py
rescale_gradients
def rescale_gradients(model: Model, grad_norm: Optional[float] = None) -> Optional[float]:
    """
    Performs gradient rescaling. Is a no-op if gradient rescaling is not enabled.
    """
    if grad_norm:
        parameters_to_clip = [p for p in model.parameters()
                              if p.grad is not None]
        return sparse_clip_norm(parameters_to_clip, grad_norm)
    return None
python
def rescale_gradients(model: Model, grad_norm: Optional[float] = None) -> Optional[float]:
    """
    Performs gradient rescaling. Is a no-op if gradient rescaling is not enabled.
    """
    if grad_norm:
        parameters_to_clip = [p for p in model.parameters()
                              if p.grad is not None]
        return sparse_clip_norm(parameters_to_clip, grad_norm)
    return None
[ "def", "rescale_gradients", "(", "model", ":", "Model", ",", "grad_norm", ":", "Optional", "[", "float", "]", "=", "None", ")", "->", "Optional", "[", "float", "]", ":", "if", "grad_norm", ":", "parameters_to_clip", "=", "[", "p", "for", "p", "in", "model", ".", "parameters", "(", ")", "if", "p", ".", "grad", "is", "not", "None", "]", "return", "sparse_clip_norm", "(", "parameters_to_clip", ",", "grad_norm", ")", "return", "None" ]
Performs gradient rescaling. Is a no-op if gradient rescaling is not enabled.
[ "Performs", "gradient", "rescaling", ".", "Is", "a", "no", "-", "op", "if", "gradient", "rescaling", "is", "not", "enabled", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/util.py#L347-L355
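A short sketch of the intended call site, inside a training step after backward(); the threshold 5.0 is an arbitrary example, and `loss`, `model`, and `optimizer` are assumed to come from the surrounding loop.

from allennlp.training.util import rescale_gradients

loss.backward()
total_norm = rescale_gradients(model, grad_norm=5.0)
# total_norm is the pre-clipping gradient norm, or None when grad_norm is unset.
print("gradient norm before clipping:", total_norm)
optimizer.step()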
23,023
allenai/allennlp
allennlp/training/util.py
get_metrics
def get_metrics(model: Model,
                total_loss: float,
                num_batches: int,
                reset: bool = False) -> Dict[str, float]:
    """
    Gets the metrics but sets ``"loss"`` to
    the total loss divided by the ``num_batches`` so that
    the ``"loss"`` metric is "average loss per batch".
    """
    metrics = model.get_metrics(reset=reset)
    metrics["loss"] = float(total_loss / num_batches) if num_batches > 0 else 0.0
    return metrics
python
def get_metrics(model: Model,
                total_loss: float,
                num_batches: int,
                reset: bool = False) -> Dict[str, float]:
    """
    Gets the metrics but sets ``"loss"`` to
    the total loss divided by the ``num_batches`` so that
    the ``"loss"`` metric is "average loss per batch".
    """
    metrics = model.get_metrics(reset=reset)
    metrics["loss"] = float(total_loss / num_batches) if num_batches > 0 else 0.0
    return metrics
[ "def", "get_metrics", "(", "model", ":", "Model", ",", "total_loss", ":", "float", ",", "num_batches", ":", "int", ",", "reset", ":", "bool", "=", "False", ")", "->", "Dict", "[", "str", ",", "float", "]", ":", "metrics", "=", "model", ".", "get_metrics", "(", "reset", "=", "reset", ")", "metrics", "[", "\"loss\"", "]", "=", "float", "(", "total_loss", "/", "num_batches", ")", "if", "num_batches", ">", "0", "else", "0.0", "return", "metrics" ]
Gets the metrics but sets ``"loss"`` to the total loss divided by the ``num_batches`` so that the ``"loss"`` metric is "average loss per batch".
[ "Gets", "the", "metrics", "but", "sets", "loss", "to", "the", "total", "loss", "divided", "by", "the", "num_batches", "so", "that", "the", "loss", "metric", "is", "average", "loss", "per", "batch", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/util.py#L357-L365
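A hedged example with made-up numbers, just to show the averaging behaviour; `model` is assumed to be any allennlp Model tracking its own metrics.

from allennlp.training.util import get_metrics

metrics = get_metrics(model, total_loss=12.6, num_batches=4, reset=True)
print(metrics["loss"])  # 3.15 -- average loss per batch, not total loss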
23,024
allenai/allennlp
scripts/check_requirements_and_setup.py
parse_requirements
def parse_requirements() -> Tuple[PackagesType, PackagesType, Set[str]]:
    """Parse all dependencies out of the requirements.txt file."""
    essential_packages: PackagesType = {}
    other_packages: PackagesType = {}
    duplicates: Set[str] = set()
    with open("requirements.txt", "r") as req_file:
        section: str = ""
        for line in req_file:
            line = line.strip()

            if line.startswith("####"):
                # Line is a section name.
                section = parse_section_name(line)
                continue

            if not line or line.startswith("#"):
                # Line is empty or just regular comment.
                continue

            module, version = parse_package(line)
            if module in essential_packages or module in other_packages:
                duplicates.add(module)

            if section.startswith("ESSENTIAL"):
                essential_packages[module] = version
            else:
                other_packages[module] = version

    return essential_packages, other_packages, duplicates
python
def parse_requirements() -> Tuple[PackagesType, PackagesType, Set[str]]:
    """Parse all dependencies out of the requirements.txt file."""
    essential_packages: PackagesType = {}
    other_packages: PackagesType = {}
    duplicates: Set[str] = set()
    with open("requirements.txt", "r") as req_file:
        section: str = ""
        for line in req_file:
            line = line.strip()

            if line.startswith("####"):
                # Line is a section name.
                section = parse_section_name(line)
                continue

            if not line or line.startswith("#"):
                # Line is empty or just regular comment.
                continue

            module, version = parse_package(line)
            if module in essential_packages or module in other_packages:
                duplicates.add(module)

            if section.startswith("ESSENTIAL"):
                essential_packages[module] = version
            else:
                other_packages[module] = version

    return essential_packages, other_packages, duplicates
[ "def", "parse_requirements", "(", ")", "->", "Tuple", "[", "PackagesType", ",", "PackagesType", ",", "Set", "[", "str", "]", "]", ":", "essential_packages", ":", "PackagesType", "=", "{", "}", "other_packages", ":", "PackagesType", "=", "{", "}", "duplicates", ":", "Set", "[", "str", "]", "=", "set", "(", ")", "with", "open", "(", "\"requirements.txt\"", ",", "\"r\"", ")", "as", "req_file", ":", "section", ":", "str", "=", "\"\"", "for", "line", "in", "req_file", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", ".", "startswith", "(", "\"####\"", ")", ":", "# Line is a section name.", "section", "=", "parse_section_name", "(", "line", ")", "continue", "if", "not", "line", "or", "line", ".", "startswith", "(", "\"#\"", ")", ":", "# Line is empty or just regular comment.", "continue", "module", ",", "version", "=", "parse_package", "(", "line", ")", "if", "module", "in", "essential_packages", "or", "module", "in", "other_packages", ":", "duplicates", ".", "add", "(", "module", ")", "if", "section", ".", "startswith", "(", "\"ESSENTIAL\"", ")", ":", "essential_packages", "[", "module", "]", "=", "version", "else", ":", "other_packages", "[", "module", "]", "=", "version", "return", "essential_packages", ",", "other_packages", ",", "duplicates" ]
Parse all dependencies out of the requirements.txt file.
[ "Parse", "all", "dependencies", "out", "of", "the", "requirements", ".", "txt", "file", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/scripts/check_requirements_and_setup.py#L32-L60
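A sketch of running the check by hand; the script reads requirements.txt relative to the working directory, so this assumes it is executed from the repository root.

essential, other, duplicates = parse_requirements()
print(f"{len(essential)} essential and {len(other)} other packages")
if duplicates:
    print("listed more than once:", sorted(duplicates))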
23,025
allenai/allennlp
scripts/check_requirements_and_setup.py
parse_setup
def parse_setup() -> Tuple[PackagesType, PackagesType, Set[str], Set[str]]:
    """Parse all dependencies out of the setup.py script."""
    essential_packages: PackagesType = {}
    test_packages: PackagesType = {}
    essential_duplicates: Set[str] = set()
    test_duplicates: Set[str] = set()

    with open('setup.py') as setup_file:
        contents = setup_file.read()

    # Parse out essential packages.
    package_string = re.search(r"""install_requires=\[[\s\n]*['"](.*?)['"],?[\s\n]*\]""",
                               contents, re.DOTALL).groups()[0].strip()
    for package in re.split(r"""['"],[\s\n]+['"]""", package_string):
        module, version = parse_package(package)
        if module in essential_packages:
            essential_duplicates.add(module)
        else:
            essential_packages[module] = version

    # Parse packages only needed for testing.
    package_string = re.search(r"""tests_require=\[[\s\n]*['"](.*?)['"],?[\s\n]*\]""",
                               contents, re.DOTALL).groups()[0].strip()
    for package in re.split(r"""['"],[\s\n]+['"]""", package_string):
        module, version = parse_package(package)
        if module in test_packages:
            test_duplicates.add(module)
        else:
            test_packages[module] = version

    return essential_packages, test_packages, essential_duplicates, test_duplicates
python
def parse_setup() -> Tuple[PackagesType, PackagesType, Set[str], Set[str]]:
    """Parse all dependencies out of the setup.py script."""
    essential_packages: PackagesType = {}
    test_packages: PackagesType = {}
    essential_duplicates: Set[str] = set()
    test_duplicates: Set[str] = set()

    with open('setup.py') as setup_file:
        contents = setup_file.read()

    # Parse out essential packages.
    package_string = re.search(r"""install_requires=\[[\s\n]*['"](.*?)['"],?[\s\n]*\]""",
                               contents, re.DOTALL).groups()[0].strip()
    for package in re.split(r"""['"],[\s\n]+['"]""", package_string):
        module, version = parse_package(package)
        if module in essential_packages:
            essential_duplicates.add(module)
        else:
            essential_packages[module] = version

    # Parse packages only needed for testing.
    package_string = re.search(r"""tests_require=\[[\s\n]*['"](.*?)['"],?[\s\n]*\]""",
                               contents, re.DOTALL).groups()[0].strip()
    for package in re.split(r"""['"],[\s\n]+['"]""", package_string):
        module, version = parse_package(package)
        if module in test_packages:
            test_duplicates.add(module)
        else:
            test_packages[module] = version

    return essential_packages, test_packages, essential_duplicates, test_duplicates
[ "def", "parse_setup", "(", ")", "->", "Tuple", "[", "PackagesType", ",", "PackagesType", ",", "Set", "[", "str", "]", ",", "Set", "[", "str", "]", "]", ":", "essential_packages", ":", "PackagesType", "=", "{", "}", "test_packages", ":", "PackagesType", "=", "{", "}", "essential_duplicates", ":", "Set", "[", "str", "]", "=", "set", "(", ")", "test_duplicates", ":", "Set", "[", "str", "]", "=", "set", "(", ")", "with", "open", "(", "'setup.py'", ")", "as", "setup_file", ":", "contents", "=", "setup_file", ".", "read", "(", ")", "# Parse out essential packages.", "package_string", "=", "re", ".", "search", "(", "r\"\"\"install_requires=\\[[\\s\\n]*['\"](.*?)['\"],?[\\s\\n]*\\]\"\"\"", ",", "contents", ",", "re", ".", "DOTALL", ")", ".", "groups", "(", ")", "[", "0", "]", ".", "strip", "(", ")", "for", "package", "in", "re", ".", "split", "(", "r\"\"\"['\"],[\\s\\n]+['\"]\"\"\"", ",", "package_string", ")", ":", "module", ",", "version", "=", "parse_package", "(", "package", ")", "if", "module", "in", "essential_packages", ":", "essential_duplicates", ".", "add", "(", "module", ")", "else", ":", "essential_packages", "[", "module", "]", "=", "version", "# Parse packages only needed for testing.", "package_string", "=", "re", ".", "search", "(", "r\"\"\"tests_require=\\[[\\s\\n]*['\"](.*?)['\"],?[\\s\\n]*\\]\"\"\"", ",", "contents", ",", "re", ".", "DOTALL", ")", ".", "groups", "(", ")", "[", "0", "]", ".", "strip", "(", ")", "for", "package", "in", "re", ".", "split", "(", "r\"\"\"['\"],[\\s\\n]+['\"]\"\"\"", ",", "package_string", ")", ":", "module", ",", "version", "=", "parse_package", "(", "package", ")", "if", "module", "in", "test_packages", ":", "test_duplicates", ".", "add", "(", "module", ")", "else", ":", "test_packages", "[", "module", "]", "=", "version", "return", "essential_packages", ",", "test_packages", ",", "essential_duplicates", ",", "test_duplicates" ]
Parse all dependencies out of the setup.py script.
[ "Parse", "all", "dependencies", "out", "of", "the", "setup", ".", "py", "script", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/scripts/check_requirements_and_setup.py#L63-L93
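A sketch of how the two parsers might be compared; like parse_requirements, this assumes the working directory is the repository root, and the drift check is an illustration rather than the script's exact logic.

req_essential, _, _ = parse_requirements()
setup_essential, setup_test, _, _ = parse_setup()
# Packages pinned in requirements.txt but absent from install_requires:
print(sorted(set(req_essential) - set(setup_essential)))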
23,026
allenai/allennlp
allennlp/data/dataset_readers/dataset_utils/span_utils.py
enumerate_spans
def enumerate_spans(sentence: List[T],
                    offset: int = 0,
                    max_span_width: int = None,
                    min_span_width: int = 1,
                    filter_function: Callable[[List[T]], bool] = None) -> List[Tuple[int, int]]:
    """
    Given a sentence, return all token spans within the sentence. Spans are `inclusive`.
    Additionally, you can provide a maximum and minimum span width, which will be used
    to exclude spans outside of this range.

    Finally, you can provide a function mapping ``List[T] -> bool``, which will
    be applied to every span to decide whether that span should be included. This
    allows filtering by length, regex matches, pos tags or any Spacy ``Token``
    attributes, for example.

    Parameters
    ----------
    sentence : ``List[T]``, required.
        The sentence to generate spans for. The type is generic, as this function
        can be used with strings, or Spacy ``Tokens`` or other sequences.
    offset : ``int``, optional (default = 0)
        A numeric offset to add to all span start and end indices. This is helpful
        if the sentence is part of a larger structure, such as a document, which
        the indices need to respect.
    max_span_width : ``int``, optional (default = None)
        The maximum length of spans which should be included. Defaults to len(sentence).
    min_span_width : ``int``, optional (default = 1)
        The minimum length of spans which should be included. Defaults to 1.
    filter_function : ``Callable[[List[T]], bool]``, optional (default = None)
        A function mapping sequences of the passed type T to a boolean value.
        If ``True``, the span is included in the returned spans from the
        sentence, otherwise it is excluded.
    """
    max_span_width = max_span_width or len(sentence)
    filter_function = filter_function or (lambda x: True)
    spans: List[Tuple[int, int]] = []

    for start_index in range(len(sentence)):
        last_end_index = min(start_index + max_span_width, len(sentence))
        first_end_index = min(start_index + min_span_width - 1, len(sentence))
        for end_index in range(first_end_index, last_end_index):
            start = offset + start_index
            end = offset + end_index
            # add 1 to end index because span indices are inclusive.
            if filter_function(sentence[slice(start_index, end_index + 1)]):
                spans.append((start, end))
    return spans
python
def enumerate_spans(sentence: List[T],
                    offset: int = 0,
                    max_span_width: int = None,
                    min_span_width: int = 1,
                    filter_function: Callable[[List[T]], bool] = None) -> List[Tuple[int, int]]:
    """
    Given a sentence, return all token spans within the sentence. Spans are `inclusive`.
    Additionally, you can provide a maximum and minimum span width, which will be used
    to exclude spans outside of this range.

    Finally, you can provide a function mapping ``List[T] -> bool``, which will
    be applied to every span to decide whether that span should be included. This
    allows filtering by length, regex matches, pos tags or any Spacy ``Token``
    attributes, for example.

    Parameters
    ----------
    sentence : ``List[T]``, required.
        The sentence to generate spans for. The type is generic, as this function
        can be used with strings, or Spacy ``Tokens`` or other sequences.
    offset : ``int``, optional (default = 0)
        A numeric offset to add to all span start and end indices. This is helpful
        if the sentence is part of a larger structure, such as a document, which
        the indices need to respect.
    max_span_width : ``int``, optional (default = None)
        The maximum length of spans which should be included. Defaults to len(sentence).
    min_span_width : ``int``, optional (default = 1)
        The minimum length of spans which should be included. Defaults to 1.
    filter_function : ``Callable[[List[T]], bool]``, optional (default = None)
        A function mapping sequences of the passed type T to a boolean value.
        If ``True``, the span is included in the returned spans from the
        sentence, otherwise it is excluded.
    """
    max_span_width = max_span_width or len(sentence)
    filter_function = filter_function or (lambda x: True)
    spans: List[Tuple[int, int]] = []

    for start_index in range(len(sentence)):
        last_end_index = min(start_index + max_span_width, len(sentence))
        first_end_index = min(start_index + min_span_width - 1, len(sentence))
        for end_index in range(first_end_index, last_end_index):
            start = offset + start_index
            end = offset + end_index
            # add 1 to end index because span indices are inclusive.
            if filter_function(sentence[slice(start_index, end_index + 1)]):
                spans.append((start, end))
    return spans
[ "def", "enumerate_spans", "(", "sentence", ":", "List", "[", "T", "]", ",", "offset", ":", "int", "=", "0", ",", "max_span_width", ":", "int", "=", "None", ",", "min_span_width", ":", "int", "=", "1", ",", "filter_function", ":", "Callable", "[", "[", "List", "[", "T", "]", "]", ",", "bool", "]", "=", "None", ")", "->", "List", "[", "Tuple", "[", "int", ",", "int", "]", "]", ":", "max_span_width", "=", "max_span_width", "or", "len", "(", "sentence", ")", "filter_function", "=", "filter_function", "or", "(", "lambda", "x", ":", "True", ")", "spans", ":", "List", "[", "Tuple", "[", "int", ",", "int", "]", "]", "=", "[", "]", "for", "start_index", "in", "range", "(", "len", "(", "sentence", ")", ")", ":", "last_end_index", "=", "min", "(", "start_index", "+", "max_span_width", ",", "len", "(", "sentence", ")", ")", "first_end_index", "=", "min", "(", "start_index", "+", "min_span_width", "-", "1", ",", "len", "(", "sentence", ")", ")", "for", "end_index", "in", "range", "(", "first_end_index", ",", "last_end_index", ")", ":", "start", "=", "offset", "+", "start_index", "end", "=", "offset", "+", "end_index", "# add 1 to end index because span indices are inclusive.", "if", "filter_function", "(", "sentence", "[", "slice", "(", "start_index", ",", "end_index", "+", "1", ")", "]", ")", ":", "spans", ".", "append", "(", "(", "start", ",", "end", ")", ")", "return", "spans" ]
Given a sentence, return all token spans within the sentence. Spans are `inclusive`.
Additionally, you can provide a maximum and minimum span width, which will be used
to exclude spans outside of this range.

Finally, you can provide a function mapping ``List[T] -> bool``, which will
be applied to every span to decide whether that span should be included. This
allows filtering by length, regex matches, pos tags or any Spacy ``Token``
attributes, for example.

Parameters
----------
sentence : ``List[T]``, required.
    The sentence to generate spans for. The type is generic, as this function
    can be used with strings, or Spacy ``Tokens`` or other sequences.
offset : ``int``, optional (default = 0)
    A numeric offset to add to all span start and end indices. This is helpful
    if the sentence is part of a larger structure, such as a document, which
    the indices need to respect.
max_span_width : ``int``, optional (default = None)
    The maximum length of spans which should be included. Defaults to len(sentence).
min_span_width : ``int``, optional (default = 1)
    The minimum length of spans which should be included. Defaults to 1.
filter_function : ``Callable[[List[T]], bool]``, optional (default = None)
    A function mapping sequences of the passed type T to a boolean value.
    If ``True``, the span is included in the returned spans from the
    sentence, otherwise it is excluded.
[ "Given", "a", "sentence", "return", "all", "token", "spans", "within", "the", "sentence", ".", "Spans", "are", "inclusive", ".", "Additionally", "you", "can", "provide", "a", "maximum", "and", "minimum", "span", "width", "which", "will", "be", "used", "to", "exclude", "spans", "outside", "of", "this", "range", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/dataset_utils/span_utils.py#L20-L66
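A small worked example; the expected output below is computed by hand from the inclusive-span definition above.

from allennlp.data.dataset_readers.dataset_utils.span_utils import enumerate_spans

tokens = ["The", "quick", "brown", "fox"]
spans = enumerate_spans(tokens, offset=10, max_span_width=2)
print(spans)
# [(10, 10), (10, 11), (11, 11), (11, 12), (12, 12), (12, 13), (13, 13)]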
23,027
allenai/allennlp
allennlp/data/dataset_readers/dataset_utils/span_utils.py
to_bioul
def to_bioul(tag_sequence: List[str], encoding: str = "IOB1") -> List[str]:
    """
    Given a tag sequence encoded with IOB1 labels, recode to BIOUL.

    In the IOB1 scheme, I is a token inside a span, O is a token outside
    a span and B is the beginning of a span immediately following another
    span of the same type.
    In the BIO scheme, I is a token inside a span, O is a token outside
    a span and B is the beginning of a span.

    Parameters
    ----------
    tag_sequence : ``List[str]``, required.
        The tag sequence encoded in IOB1, e.g. ["I-PER", "I-PER", "O"].
    encoding : `str`, optional, (default = ``IOB1``).
        The encoding type to convert from. Must be either "IOB1" or "BIO".

    Returns
    -------
    bioul_sequence: ``List[str]``
        The tag sequence encoded in BIOUL, e.g. ["B-PER", "L-PER", "O"].
    """
    if not encoding in {"IOB1", "BIO"}:
        raise ConfigurationError(f"Invalid encoding {encoding} passed to 'to_bioul'.")

    # pylint: disable=len-as-condition

    def replace_label(full_label, new_label):
        # example: full_label = 'I-PER', new_label = 'U', returns 'U-PER'
        parts = list(full_label.partition('-'))
        parts[0] = new_label
        return ''.join(parts)

    def pop_replace_append(in_stack, out_stack, new_label):
        # pop the last element from in_stack, replace the label, append
        # to out_stack
        tag = in_stack.pop()
        new_tag = replace_label(tag, new_label)
        out_stack.append(new_tag)

    def process_stack(stack, out_stack):
        # process a stack of labels, add them to out_stack
        if len(stack) == 1:
            # just a U token
            pop_replace_append(stack, out_stack, 'U')
        else:
            # need to code as BIL
            recoded_stack = []
            pop_replace_append(stack, recoded_stack, 'L')
            while len(stack) >= 2:
                pop_replace_append(stack, recoded_stack, 'I')
            pop_replace_append(stack, recoded_stack, 'B')
            recoded_stack.reverse()
            out_stack.extend(recoded_stack)

    # Process the tag_sequence one tag at a time, adding spans to a stack,
    # then recode them.
    bioul_sequence = []
    stack: List[str] = []

    for label in tag_sequence:
        # need to make a dict like
        # token = {'token': 'Matt', "labels": {'conll2003': "B-PER"}
        #          'gold': 'I-PER'}
        # where 'gold' is the raw value from the CoNLL data set
        if label == 'O' and len(stack) == 0:
            bioul_sequence.append(label)
        elif label == 'O' and len(stack) > 0:
            # need to process the entries on the stack plus this one
            process_stack(stack, bioul_sequence)
            bioul_sequence.append(label)
        elif label[0] == 'I':
            # check if the previous type is the same as this one
            # if it is then append to stack
            # otherwise this starts a new entity if the type
            # is different
            if len(stack) == 0:
                if encoding == "BIO":
                    raise InvalidTagSequence(tag_sequence)
                stack.append(label)
            else:
                # check if the previous type is the same as this one
                this_type = label.partition('-')[2]
                prev_type = stack[-1].partition('-')[2]
                if this_type == prev_type:
                    stack.append(label)
                else:
                    if encoding == "BIO":
                        raise InvalidTagSequence(tag_sequence)
                    # a new entity
                    process_stack(stack, bioul_sequence)
                    stack.append(label)
        elif label[0] == 'B':
            if len(stack) > 0:
                process_stack(stack, bioul_sequence)
            stack.append(label)
        else:
            raise InvalidTagSequence(tag_sequence)

    # process the stack
    if len(stack) > 0:
        process_stack(stack, bioul_sequence)
    return bioul_sequence
python
def to_bioul(tag_sequence: List[str], encoding: str = "IOB1") -> List[str]:
    """
    Given a tag sequence encoded with IOB1 labels, recode to BIOUL.

    In the IOB1 scheme, I is a token inside a span, O is a token outside
    a span and B is the beginning of a span immediately following another
    span of the same type.
    In the BIO scheme, I is a token inside a span, O is a token outside
    a span and B is the beginning of a span.

    Parameters
    ----------
    tag_sequence : ``List[str]``, required.
        The tag sequence encoded in IOB1, e.g. ["I-PER", "I-PER", "O"].
    encoding : `str`, optional, (default = ``IOB1``).
        The encoding type to convert from. Must be either "IOB1" or "BIO".

    Returns
    -------
    bioul_sequence: ``List[str]``
        The tag sequence encoded in BIOUL, e.g. ["B-PER", "L-PER", "O"].
    """
    if not encoding in {"IOB1", "BIO"}:
        raise ConfigurationError(f"Invalid encoding {encoding} passed to 'to_bioul'.")

    # pylint: disable=len-as-condition

    def replace_label(full_label, new_label):
        # example: full_label = 'I-PER', new_label = 'U', returns 'U-PER'
        parts = list(full_label.partition('-'))
        parts[0] = new_label
        return ''.join(parts)

    def pop_replace_append(in_stack, out_stack, new_label):
        # pop the last element from in_stack, replace the label, append
        # to out_stack
        tag = in_stack.pop()
        new_tag = replace_label(tag, new_label)
        out_stack.append(new_tag)

    def process_stack(stack, out_stack):
        # process a stack of labels, add them to out_stack
        if len(stack) == 1:
            # just a U token
            pop_replace_append(stack, out_stack, 'U')
        else:
            # need to code as BIL
            recoded_stack = []
            pop_replace_append(stack, recoded_stack, 'L')
            while len(stack) >= 2:
                pop_replace_append(stack, recoded_stack, 'I')
            pop_replace_append(stack, recoded_stack, 'B')
            recoded_stack.reverse()
            out_stack.extend(recoded_stack)

    # Process the tag_sequence one tag at a time, adding spans to a stack,
    # then recode them.
    bioul_sequence = []
    stack: List[str] = []

    for label in tag_sequence:
        # need to make a dict like
        # token = {'token': 'Matt', "labels": {'conll2003': "B-PER"}
        #          'gold': 'I-PER'}
        # where 'gold' is the raw value from the CoNLL data set
        if label == 'O' and len(stack) == 0:
            bioul_sequence.append(label)
        elif label == 'O' and len(stack) > 0:
            # need to process the entries on the stack plus this one
            process_stack(stack, bioul_sequence)
            bioul_sequence.append(label)
        elif label[0] == 'I':
            # check if the previous type is the same as this one
            # if it is then append to stack
            # otherwise this starts a new entity if the type
            # is different
            if len(stack) == 0:
                if encoding == "BIO":
                    raise InvalidTagSequence(tag_sequence)
                stack.append(label)
            else:
                # check if the previous type is the same as this one
                this_type = label.partition('-')[2]
                prev_type = stack[-1].partition('-')[2]
                if this_type == prev_type:
                    stack.append(label)
                else:
                    if encoding == "BIO":
                        raise InvalidTagSequence(tag_sequence)
                    # a new entity
                    process_stack(stack, bioul_sequence)
                    stack.append(label)
        elif label[0] == 'B':
            if len(stack) > 0:
                process_stack(stack, bioul_sequence)
            stack.append(label)
        else:
            raise InvalidTagSequence(tag_sequence)

    # process the stack
    if len(stack) > 0:
        process_stack(stack, bioul_sequence)
    return bioul_sequence
[ "def", "to_bioul", "(", "tag_sequence", ":", "List", "[", "str", "]", ",", "encoding", ":", "str", "=", "\"IOB1\"", ")", "->", "List", "[", "str", "]", ":", "if", "not", "encoding", "in", "{", "\"IOB1\"", ",", "\"BIO\"", "}", ":", "raise", "ConfigurationError", "(", "f\"Invalid encoding {encoding} passed to 'to_bioul'.\"", ")", "# pylint: disable=len-as-condition", "def", "replace_label", "(", "full_label", ",", "new_label", ")", ":", "# example: full_label = 'I-PER', new_label = 'U', returns 'U-PER'", "parts", "=", "list", "(", "full_label", ".", "partition", "(", "'-'", ")", ")", "parts", "[", "0", "]", "=", "new_label", "return", "''", ".", "join", "(", "parts", ")", "def", "pop_replace_append", "(", "in_stack", ",", "out_stack", ",", "new_label", ")", ":", "# pop the last element from in_stack, replace the label, append", "# to out_stack", "tag", "=", "in_stack", ".", "pop", "(", ")", "new_tag", "=", "replace_label", "(", "tag", ",", "new_label", ")", "out_stack", ".", "append", "(", "new_tag", ")", "def", "process_stack", "(", "stack", ",", "out_stack", ")", ":", "# process a stack of labels, add them to out_stack", "if", "len", "(", "stack", ")", "==", "1", ":", "# just a U token", "pop_replace_append", "(", "stack", ",", "out_stack", ",", "'U'", ")", "else", ":", "# need to code as BIL", "recoded_stack", "=", "[", "]", "pop_replace_append", "(", "stack", ",", "recoded_stack", ",", "'L'", ")", "while", "len", "(", "stack", ")", ">=", "2", ":", "pop_replace_append", "(", "stack", ",", "recoded_stack", ",", "'I'", ")", "pop_replace_append", "(", "stack", ",", "recoded_stack", ",", "'B'", ")", "recoded_stack", ".", "reverse", "(", ")", "out_stack", ".", "extend", "(", "recoded_stack", ")", "# Process the tag_sequence one tag at a time, adding spans to a stack,", "# then recode them.", "bioul_sequence", "=", "[", "]", "stack", ":", "List", "[", "str", "]", "=", "[", "]", "for", "label", "in", "tag_sequence", ":", "# need to make a dict like", "# token = {'token': 'Matt', \"labels\": {'conll2003': \"B-PER\"}", "# 'gold': 'I-PER'}", "# where 'gold' is the raw value from the CoNLL data set", "if", "label", "==", "'O'", "and", "len", "(", "stack", ")", "==", "0", ":", "bioul_sequence", ".", "append", "(", "label", ")", "elif", "label", "==", "'O'", "and", "len", "(", "stack", ")", ">", "0", ":", "# need to process the entries on the stack plus this one", "process_stack", "(", "stack", ",", "bioul_sequence", ")", "bioul_sequence", ".", "append", "(", "label", ")", "elif", "label", "[", "0", "]", "==", "'I'", ":", "# check if the previous type is the same as this one", "# if it is then append to stack", "# otherwise this start a new entity if the type", "# is different", "if", "len", "(", "stack", ")", "==", "0", ":", "if", "encoding", "==", "\"BIO\"", ":", "raise", "InvalidTagSequence", "(", "tag_sequence", ")", "stack", ".", "append", "(", "label", ")", "else", ":", "# check if the previous type is the same as this one", "this_type", "=", "label", ".", "partition", "(", "'-'", ")", "[", "2", "]", "prev_type", "=", "stack", "[", "-", "1", "]", ".", "partition", "(", "'-'", ")", "[", "2", "]", "if", "this_type", "==", "prev_type", ":", "stack", ".", "append", "(", "label", ")", "else", ":", "if", "encoding", "==", "\"BIO\"", ":", "raise", "InvalidTagSequence", "(", "tag_sequence", ")", "# a new entity", "process_stack", "(", "stack", ",", "bioul_sequence", ")", "stack", ".", "append", "(", "label", ")", "elif", "label", "[", "0", "]", "==", "'B'", ":", "if", "len", "(", "stack", ")", ">", 
"0", ":", "process_stack", "(", "stack", ",", "bioul_sequence", ")", "stack", ".", "append", "(", "label", ")", "else", ":", "raise", "InvalidTagSequence", "(", "tag_sequence", ")", "# process the stack", "if", "len", "(", "stack", ")", ">", "0", ":", "process_stack", "(", "stack", ",", "bioul_sequence", ")", "return", "bioul_sequence" ]
Given a tag sequence encoded with IOB1 labels, recode to BIOUL.

In the IOB1 scheme, I is a token inside a span, O is a token outside
a span and B is the beginning of a span immediately following another
span of the same type.
In the BIO scheme, I is a token inside a span, O is a token outside
a span and B is the beginning of a span.

Parameters
----------
tag_sequence : ``List[str]``, required.
    The tag sequence encoded in IOB1, e.g. ["I-PER", "I-PER", "O"].
encoding : `str`, optional, (default = ``IOB1``).
    The encoding type to convert from. Must be either "IOB1" or "BIO".

Returns
-------
bioul_sequence: ``List[str]``
    The tag sequence encoded in BIOUL, e.g. ["B-PER", "L-PER", "O"].
[ "Given", "a", "tag", "sequence", "encoded", "with", "IOB1", "labels", "recode", "to", "BIOUL", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/dataset_utils/span_utils.py#L267-L373
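Two worked conversions, traced by hand against the code above.

from allennlp.data.dataset_readers.dataset_utils.span_utils import to_bioul

# IOB1 input: a lone I-LOC token becomes a unit (U) span.
print(to_bioul(["I-PER", "I-PER", "O", "I-LOC"]))
# -> ['B-PER', 'L-PER', 'O', 'U-LOC']
print(to_bioul(["B-PER", "I-PER", "O"], encoding="BIO"))
# -> ['B-PER', 'L-PER', 'O']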
23,028
allenai/allennlp
scripts/check_links.py
url_ok
def url_ok(match_tuple: MatchTuple) -> bool:
    """Check if a URL is reachable."""
    try:
        result = requests.get(match_tuple.link, timeout=5)
        return result.ok
    except (requests.ConnectionError, requests.Timeout):
        return False
python
def url_ok(match_tuple: MatchTuple) -> bool:
    """Check if a URL is reachable."""
    try:
        result = requests.get(match_tuple.link, timeout=5)
        return result.ok
    except (requests.ConnectionError, requests.Timeout):
        return False
[ "def", "url_ok", "(", "match_tuple", ":", "MatchTuple", ")", "->", "bool", ":", "try", ":", "result", "=", "requests", ".", "get", "(", "match_tuple", ".", "link", ",", "timeout", "=", "5", ")", "return", "result", ".", "ok", "except", "(", "requests", ".", "ConnectionError", ",", "requests", ".", "Timeout", ")", ":", "return", "False" ]
Check if a URL is reachable.
[ "Check", "if", "a", "URL", "is", "reachable", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/scripts/check_links.py#L24-L30
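In context (a sketch, not source): the script builds MatchTuple records while scanning markdown files, and only the .link field matters here.

if not url_ok(match):  # `match` is an assumed MatchTuple from the scan
    print(f"broken link: {match.link}")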
23,029
allenai/allennlp
scripts/check_links.py
path_ok
def path_ok(match_tuple: MatchTuple) -> bool:
    """Check if a file in this repository exists."""
    relative_path = match_tuple.link.split("#")[0]
    full_path = os.path.join(os.path.dirname(str(match_tuple.source)), relative_path)
    return os.path.exists(full_path)
python
def path_ok(match_tuple: MatchTuple) -> bool:
    """Check if a file in this repository exists."""
    relative_path = match_tuple.link.split("#")[0]
    full_path = os.path.join(os.path.dirname(str(match_tuple.source)), relative_path)
    return os.path.exists(full_path)
[ "def", "path_ok", "(", "match_tuple", ":", "MatchTuple", ")", "->", "bool", ":", "relative_path", "=", "match_tuple", ".", "link", ".", "split", "(", "\"#\"", ")", "[", "0", "]", "full_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "str", "(", "match_tuple", ".", "source", ")", ")", ",", "relative_path", ")", "return", "os", ".", "path", ".", "exists", "(", "full_path", ")" ]
Check if a file in this repository exists.
[ "Check", "if", "a", "file", "in", "this", "repository", "exists", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/scripts/check_links.py#L33-L37
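The filesystem counterpart, under the same assumed `match`; note the anchor after '#' is stripped before the existence check.

if not path_ok(match):
    print(f"missing file: {match.link} (referenced from {match.source})")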
23,030
allenai/allennlp
allennlp/common/params.py
_environment_variables
def _environment_variables() -> Dict[str, str]:
    """
    Wraps `os.environ` to filter out non-encodable values.
    """
    return {key: value
            for key, value in os.environ.items()
            if _is_encodable(value)}
python
def _environment_variables() -> Dict[str, str]:
    """
    Wraps `os.environ` to filter out non-encodable values.
    """
    return {key: value
            for key, value in os.environ.items()
            if _is_encodable(value)}
[ "def", "_environment_variables", "(", ")", "->", "Dict", "[", "str", ",", "str", "]", ":", "return", "{", "key", ":", "value", "for", "key", ",", "value", "in", "os", ".", "environ", ".", "items", "(", ")", "if", "_is_encodable", "(", "value", ")", "}" ]
Wraps `os.environ` to filter out non-encodable values.
[ "Wraps", "os", ".", "environ", "to", "filter", "out", "non", "-", "encodable", "values", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/params.py#L85-L91
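A sketch of the behaviour; MY_FLAG is a made-up variable, and _environment_variables is a private helper in allennlp.common.params. The point is that only values passing _is_encodable survive, since they are handed to Jsonnet as external variables.

import os
os.environ["MY_FLAG"] = "1"
env = _environment_variables()
assert env["MY_FLAG"] == "1"  # non-encodable entries are silently dropped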
23,031
allenai/allennlp
allennlp/common/params.py
with_fallback
def with_fallback(preferred: Dict[str, Any], fallback: Dict[str, Any]) -> Dict[str, Any]:
    """
    Deep merge two dicts, preferring values from `preferred`.
    """
    def merge(preferred_value: Any, fallback_value: Any) -> Any:
        if isinstance(preferred_value, dict) and isinstance(fallback_value, dict):
            return with_fallback(preferred_value, fallback_value)
        elif isinstance(preferred_value, dict) and isinstance(fallback_value, list):
            # treat preferred_value as a sparse list, where each key is an index to be overridden
            merged_list = fallback_value
            for elem_key, preferred_element in preferred_value.items():
                try:
                    index = int(elem_key)
                    merged_list[index] = merge(preferred_element, fallback_value[index])
                except ValueError:
                    raise ConfigurationError("could not merge dicts - the preferred dict contains "
                                             f"invalid keys (key {elem_key} is not a valid list index)")
                except IndexError:
                    raise ConfigurationError("could not merge dicts - the preferred dict contains "
                                             f"invalid keys (key {index} is out of bounds)")
            return merged_list
        else:
            return copy.deepcopy(preferred_value)

    preferred_keys = set(preferred.keys())
    fallback_keys = set(fallback.keys())
    common_keys = preferred_keys & fallback_keys

    merged: Dict[str, Any] = {}

    for key in preferred_keys - fallback_keys:
        merged[key] = copy.deepcopy(preferred[key])
    for key in fallback_keys - preferred_keys:
        merged[key] = copy.deepcopy(fallback[key])
    for key in common_keys:
        preferred_value = preferred[key]
        fallback_value = fallback[key]

        merged[key] = merge(preferred_value, fallback_value)
    return merged
python
def with_fallback(preferred: Dict[str, Any], fallback: Dict[str, Any]) -> Dict[str, Any]: """ Deep merge two dicts, preferring values from `preferred`. """ def merge(preferred_value: Any, fallback_value: Any) -> Any: if isinstance(preferred_value, dict) and isinstance(fallback_value, dict): return with_fallback(preferred_value, fallback_value) elif isinstance(preferred_value, dict) and isinstance(fallback_value, list): # treat preferred_value as a sparse list, where each key is an index to be overridden merged_list = fallback_value for elem_key, preferred_element in preferred_value.items(): try: index = int(elem_key) merged_list[index] = merge(preferred_element, fallback_value[index]) except ValueError: raise ConfigurationError("could not merge dicts - the preferred dict contains " f"invalid keys (key {elem_key} is not a valid list index)") except IndexError: raise ConfigurationError("could not merge dicts - the preferred dict contains " f"invalid keys (key {index} is out of bounds)") return merged_list else: return copy.deepcopy(preferred_value) preferred_keys = set(preferred.keys()) fallback_keys = set(fallback.keys()) common_keys = preferred_keys & fallback_keys merged: Dict[str, Any] = {} for key in preferred_keys - fallback_keys: merged[key] = copy.deepcopy(preferred[key]) for key in fallback_keys - preferred_keys: merged[key] = copy.deepcopy(fallback[key]) for key in common_keys: preferred_value = preferred[key] fallback_value = fallback[key] merged[key] = merge(preferred_value, fallback_value) return merged
[ "def", "with_fallback", "(", "preferred", ":", "Dict", "[", "str", ",", "Any", "]", ",", "fallback", ":", "Dict", "[", "str", ",", "Any", "]", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "def", "merge", "(", "preferred_value", ":", "Any", ",", "fallback_value", ":", "Any", ")", "->", "Any", ":", "if", "isinstance", "(", "preferred_value", ",", "dict", ")", "and", "isinstance", "(", "fallback_value", ",", "dict", ")", ":", "return", "with_fallback", "(", "preferred_value", ",", "fallback_value", ")", "elif", "isinstance", "(", "preferred_value", ",", "dict", ")", "and", "isinstance", "(", "fallback_value", ",", "list", ")", ":", "# treat preferred_value as a sparse list, where each key is an index to be overridden", "merged_list", "=", "fallback_value", "for", "elem_key", ",", "preferred_element", "in", "preferred_value", ".", "items", "(", ")", ":", "try", ":", "index", "=", "int", "(", "elem_key", ")", "merged_list", "[", "index", "]", "=", "merge", "(", "preferred_element", ",", "fallback_value", "[", "index", "]", ")", "except", "ValueError", ":", "raise", "ConfigurationError", "(", "\"could not merge dicts - the preferred dict contains \"", "f\"invalid keys (key {elem_key} is not a valid list index)\"", ")", "except", "IndexError", ":", "raise", "ConfigurationError", "(", "\"could not merge dicts - the preferred dict contains \"", "f\"invalid keys (key {index} is out of bounds)\"", ")", "return", "merged_list", "else", ":", "return", "copy", ".", "deepcopy", "(", "preferred_value", ")", "preferred_keys", "=", "set", "(", "preferred", ".", "keys", "(", ")", ")", "fallback_keys", "=", "set", "(", "fallback", ".", "keys", "(", ")", ")", "common_keys", "=", "preferred_keys", "&", "fallback_keys", "merged", ":", "Dict", "[", "str", ",", "Any", "]", "=", "{", "}", "for", "key", "in", "preferred_keys", "-", "fallback_keys", ":", "merged", "[", "key", "]", "=", "copy", ".", "deepcopy", "(", "preferred", "[", "key", "]", ")", "for", "key", "in", "fallback_keys", "-", "preferred_keys", ":", "merged", "[", "key", "]", "=", "copy", ".", "deepcopy", "(", "fallback", "[", "key", "]", ")", "for", "key", "in", "common_keys", ":", "preferred_value", "=", "preferred", "[", "key", "]", "fallback_value", "=", "fallback", "[", "key", "]", "merged", "[", "key", "]", "=", "merge", "(", "preferred_value", ",", "fallback_value", ")", "return", "merged" ]
Deep merge two dicts, preferring values from `preferred`.
[ "Deep", "merge", "two", "dicts", "preferring", "values", "from", "preferred", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/params.py#L121-L161
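A small demonstration of the merge semantics, assuming ``with_fallback`` from the entry above is in scope (it needs ``copy`` and a ``ConfigurationError`` class). Note the second override: a dict whose keys are list indices is treated as a sparse list, and the fallback list is mutated in place.

```python
fallback = {
    "model": {"embedding_dim": 50, "encoder": {"type": "lstm", "num_layers": 2}},
    "layers": [{"dim": 10}, {"dim": 20}],
}
preferred = {
    "model": {"encoder": {"num_layers": 4}},  # deep-merged into fallback["model"]
    "layers": {"1": {"dim": 99}},             # sparse-list override of index 1
}
merged = with_fallback(preferred=preferred, fallback=fallback)

assert merged["model"]["embedding_dim"] == 50         # untouched fallback value
assert merged["model"]["encoder"]["num_layers"] == 4  # preferred wins
assert merged["model"]["encoder"]["type"] == "lstm"   # kept from fallback
assert merged["layers"][1] == {"dim": 99}             # list index 1 overridden
```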
23,032
allenai/allennlp
allennlp/common/params.py
Params.add_file_to_archive
def add_file_to_archive(self, name: str) -> None: """ Any class in its ``from_params`` method can request that some of its input files be added to the archive by calling this method. For example, if some class ``A`` had an ``input_file`` parameter, it could call ``` params.add_file_to_archive("input_file") ``` which would store the supplied value for ``input_file`` at the key ``previous.history.and.then.input_file``. The ``files_to_archive`` dict is shared with child instances via the ``_check_is_dict`` method, so that the final mapping can be retrieved from the top-level ``Params`` object. NOTE: You must call ``add_file_to_archive`` before you ``pop()`` the parameter, because the ``Params`` instance looks up the value of the filename inside itself. If the ``loading_from_archive`` flag is True, this will be a no-op. """ if not self.loading_from_archive: self.files_to_archive[f"{self.history}{name}"] = cached_path(self.get(name))
python
def add_file_to_archive(self, name: str) -> None: """ Any class in its ``from_params`` method can request that some of its input files be added to the archive by calling this method. For example, if some class ``A`` had an ``input_file`` parameter, it could call ``` params.add_file_to_archive("input_file") ``` which would store the supplied value for ``input_file`` at the key ``previous.history.and.then.input_file``. The ``files_to_archive`` dict is shared with child instances via the ``_check_is_dict`` method, so that the final mapping can be retrieved from the top-level ``Params`` object. NOTE: You must call ``add_file_to_archive`` before you ``pop()`` the parameter, because the ``Params`` instance looks up the value of the filename inside itself. If the ``loading_from_archive`` flag is True, this will be a no-op. """ if not self.loading_from_archive: self.files_to_archive[f"{self.history}{name}"] = cached_path(self.get(name))
[ "def", "add_file_to_archive", "(", "self", ",", "name", ":", "str", ")", "->", "None", ":", "if", "not", "self", ".", "loading_from_archive", ":", "self", ".", "files_to_archive", "[", "f\"{self.history}{name}\"", "]", "=", "cached_path", "(", "self", ".", "get", "(", "name", ")", ")" ]
Any class in its ``from_params`` method can request that some of its input files be added to the archive by calling this method. For example, if some class ``A`` had an ``input_file`` parameter, it could call ``` params.add_file_to_archive("input_file") ``` which would store the supplied value for ``input_file`` at the key ``previous.history.and.then.input_file``. The ``files_to_archive`` dict is shared with child instances via the ``_check_is_dict`` method, so that the final mapping can be retrieved from the top-level ``Params`` object. NOTE: You must call ``add_file_to_archive`` before you ``pop()`` the parameter, because the ``Params`` instance looks up the value of the filename inside itself. If the ``loading_from_archive`` flag is True, this will be a no-op.
[ "Any", "class", "in", "its", "from_params", "method", "can", "request", "that", "some", "of", "its", "input", "files", "be", "added", "to", "the", "archive", "by", "calling", "this", "method", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/params.py#L209-L232
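A minimal sketch of the calling convention the docstring describes, using a hypothetical class ``A``; the one hard requirement is that ``add_file_to_archive`` runs before the ``pop``.

```python
from allennlp.common.params import Params

class A:
    def __init__(self, input_file: str) -> None:
        self.input_file = input_file

    @classmethod
    def from_params(cls, params: Params) -> "A":
        # Must happen before pop(): add_file_to_archive reads the current
        # value of "input_file" out of `params` to record it for archiving.
        params.add_file_to_archive("input_file")
        return cls(input_file=params.pop("input_file"))
```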
23,033
allenai/allennlp
allennlp/common/params.py
Params.pop_int
def pop_int(self, key: str, default: Any = DEFAULT) -> int: """ Performs a pop and coerces to an int. """ value = self.pop(key, default) if value is None: return None else: return int(value)
python
def pop_int(self, key: str, default: Any = DEFAULT) -> int: """ Performs a pop and coerces to an int. """ value = self.pop(key, default) if value is None: return None else: return int(value)
[ "def", "pop_int", "(", "self", ",", "key", ":", "str", ",", "default", ":", "Any", "=", "DEFAULT", ")", "->", "int", ":", "value", "=", "self", ".", "pop", "(", "key", ",", "default", ")", "if", "value", "is", "None", ":", "return", "None", "else", ":", "return", "int", "(", "value", ")" ]
Performs a pop and coerces to an int.
[ "Performs", "a", "pop", "and", "coerces", "to", "an", "int", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/params.py#L254-L262
23,034
allenai/allennlp
allennlp/common/params.py
Params.pop_float
def pop_float(self, key: str, default: Any = DEFAULT) -> float: """ Performs a pop and coerces to a float. """ value = self.pop(key, default) if value is None: return None else: return float(value)
python
def pop_float(self, key: str, default: Any = DEFAULT) -> float: """ Performs a pop and coerces to a float. """ value = self.pop(key, default) if value is None: return None else: return float(value)
[ "def", "pop_float", "(", "self", ",", "key", ":", "str", ",", "default", ":", "Any", "=", "DEFAULT", ")", "->", "float", ":", "value", "=", "self", ".", "pop", "(", "key", ",", "default", ")", "if", "value", "is", "None", ":", "return", "None", "else", ":", "return", "float", "(", "value", ")" ]
Performs a pop and coerces to a float.
[ "Performs", "a", "pop", "and", "coerces", "to", "a", "float", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/params.py#L264-L272
23,035
allenai/allennlp
allennlp/common/params.py
Params.pop_bool
def pop_bool(self, key: str, default: Any = DEFAULT) -> bool: """ Performs a pop and coerces to a bool. """ value = self.pop(key, default) if value is None: return None elif isinstance(value, bool): return value elif value == "true": return True elif value == "false": return False else: raise ValueError("Cannot convert variable to bool: " + value)
python
def pop_bool(self, key: str, default: Any = DEFAULT) -> bool: """ Performs a pop and coerces to a bool. """ value = self.pop(key, default) if value is None: return None elif isinstance(value, bool): return value elif value == "true": return True elif value == "false": return False else: raise ValueError("Cannot convert variable to bool: " + value)
[ "def", "pop_bool", "(", "self", ",", "key", ":", "str", ",", "default", ":", "Any", "=", "DEFAULT", ")", "->", "bool", ":", "value", "=", "self", ".", "pop", "(", "key", ",", "default", ")", "if", "value", "is", "None", ":", "return", "None", "elif", "isinstance", "(", "value", ",", "bool", ")", ":", "return", "value", "elif", "value", "==", "\"true\"", ":", "return", "True", "elif", "value", "==", "\"false\"", ":", "return", "False", "else", ":", "raise", "ValueError", "(", "\"Cannot convert variable to bool: \"", "+", "value", ")" ]
Performs a pop and coerces to a bool.
[ "Performs", "a", "pop", "and", "coerces", "to", "a", "bool", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/params.py#L274-L288
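The three typed pops above (``pop_int``, ``pop_float``, ``pop_bool``) share one shape: pop, then coerce unless the value is ``None``. A short usage sketch:

```python
from allennlp.common.params import Params

params = Params({"num_layers": "3", "dropout": "0.5", "bidirectional": "true"})

num_layers = params.pop_int("num_layers")         # 3, coerced from the string "3"
dropout = params.pop_float("dropout")             # 0.5
bidirectional = params.pop_bool("bidirectional")  # True; only "true"/"false" strings coerce
seed = params.pop_int("seed", None)               # missing key: a None default passes through
```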
23,036
allenai/allennlp
allennlp/common/params.py
Params.pop_choice
def pop_choice(self, key: str, choices: List[Any], default_to_first_choice: bool = False) -> Any: """ Gets the value of ``key`` in the ``params`` dictionary, ensuring that the value is one of the given choices. Note that this `pops` the key from params, modifying the dictionary, consistent with how parameters are processed in this codebase. Parameters ---------- key: str Key to get the value from in the param dictionary choices: List[Any] A list of valid options for values corresponding to ``key``. For example, if you're specifying the type of encoder to use for some part of your model, the choices might be the list of encoder classes we know about and can instantiate. If the value we find in the param dictionary is not in ``choices``, we raise a ``ConfigurationError``, because the user specified an invalid value in their parameter file. default_to_first_choice: bool, optional (default=False) If this is ``True``, we allow the ``key`` to not be present in the parameter dictionary. If the key is not present, we will use the return as the value the first choice in the ``choices`` list. If this is ``False``, we raise a ``ConfigurationError``, because specifying the ``key`` is required (e.g., you `have` to specify your model class when running an experiment, but you can feel free to use default settings for encoders if you want). """ default = choices[0] if default_to_first_choice else self.DEFAULT value = self.pop(key, default) if value not in choices: key_str = self.history + key message = '%s not in acceptable choices for %s: %s' % (value, key_str, str(choices)) raise ConfigurationError(message) return value
python
def pop_choice(self, key: str, choices: List[Any], default_to_first_choice: bool = False) -> Any: """ Gets the value of ``key`` in the ``params`` dictionary, ensuring that the value is one of the given choices. Note that this `pops` the key from params, modifying the dictionary, consistent with how parameters are processed in this codebase. Parameters ---------- key: str Key to get the value from in the param dictionary choices: List[Any] A list of valid options for values corresponding to ``key``. For example, if you're specifying the type of encoder to use for some part of your model, the choices might be the list of encoder classes we know about and can instantiate. If the value we find in the param dictionary is not in ``choices``, we raise a ``ConfigurationError``, because the user specified an invalid value in their parameter file. default_to_first_choice: bool, optional (default=False) If this is ``True``, we allow the ``key`` to not be present in the parameter dictionary. If the key is not present, we will use the return as the value the first choice in the ``choices`` list. If this is ``False``, we raise a ``ConfigurationError``, because specifying the ``key`` is required (e.g., you `have` to specify your model class when running an experiment, but you can feel free to use default settings for encoders if you want). """ default = choices[0] if default_to_first_choice else self.DEFAULT value = self.pop(key, default) if value not in choices: key_str = self.history + key message = '%s not in acceptable choices for %s: %s' % (value, key_str, str(choices)) raise ConfigurationError(message) return value
[ "def", "pop_choice", "(", "self", ",", "key", ":", "str", ",", "choices", ":", "List", "[", "Any", "]", ",", "default_to_first_choice", ":", "bool", "=", "False", ")", "->", "Any", ":", "default", "=", "choices", "[", "0", "]", "if", "default_to_first_choice", "else", "self", ".", "DEFAULT", "value", "=", "self", ".", "pop", "(", "key", ",", "default", ")", "if", "value", "not", "in", "choices", ":", "key_str", "=", "self", ".", "history", "+", "key", "message", "=", "'%s not in acceptable choices for %s: %s'", "%", "(", "value", ",", "key_str", ",", "str", "(", "choices", ")", ")", "raise", "ConfigurationError", "(", "message", ")", "return", "value" ]
Gets the value of ``key`` in the ``params`` dictionary, ensuring that the value is one of the given choices. Note that this `pops` the key from params, modifying the dictionary, consistent with how parameters are processed in this codebase. Parameters ---------- key: str Key to get the value from in the param dictionary choices: List[Any] A list of valid options for values corresponding to ``key``. For example, if you're specifying the type of encoder to use for some part of your model, the choices might be the list of encoder classes we know about and can instantiate. If the value we find in the param dictionary is not in ``choices``, we raise a ``ConfigurationError``, because the user specified an invalid value in their parameter file. default_to_first_choice: bool, optional (default=False) If this is ``True``, we allow the ``key`` to not be present in the parameter dictionary. If the key is not present, we will return the first choice in the ``choices`` list as the value. If this is ``False``, we raise a ``ConfigurationError``, because specifying the ``key`` is required (e.g., you `have` to specify your model class when running an experiment, but you can feel free to use default settings for encoders if you want).
[ "Gets", "the", "value", "of", "key", "in", "the", "params", "dictionary", "ensuring", "that", "the", "value", "is", "one", "of", "the", "given", "choices", ".", "Note", "that", "this", "pops", "the", "key", "from", "params", "modifying", "the", "dictionary", "consistent", "with", "how", "parameters", "are", "processed", "in", "this", "codebase", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/params.py#L305-L335
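Usage sketch for ``pop_choice``; the key and choices below are illustrative.

```python
from allennlp.common.params import Params

params = Params({"encoder_type": "lstm"})
encoder = params.pop_choice("encoder_type", ["lstm", "gru", "transformer"])
# -> "lstm"; any value outside `choices` raises ConfigurationError.

empty = Params({})
default = empty.pop_choice("encoder_type", ["lstm", "gru"],
                           default_to_first_choice=True)
# -> "lstm": the missing key falls back to choices[0].
```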
23,037
allenai/allennlp
allennlp/common/params.py
Params.as_dict
def as_dict(self, quiet: bool = False, infer_type_and_cast: bool = False): """ Sometimes we need to just represent the parameters as a dict, for instance when we pass them to PyTorch code. Parameters ---------- quiet: bool, optional (default = False) Whether to log the parameters before returning them as a dict. infer_type_and_cast : bool, optional (default = False) If True, we infer types and cast (e.g. things that look like floats to floats). """ if infer_type_and_cast: params_as_dict = infer_and_cast(self.params) else: params_as_dict = self.params if quiet: return params_as_dict def log_recursively(parameters, history): for key, value in parameters.items(): if isinstance(value, dict): new_local_history = history + key + "." log_recursively(value, new_local_history) else: logger.info(history + key + " = " + str(value)) logger.info("Converting Params object to dict; logging of default " "values will not occur when dictionary parameters are " "used subsequently.") logger.info("CURRENTLY DEFINED PARAMETERS: ") log_recursively(self.params, self.history) return params_as_dict
python
def as_dict(self, quiet: bool = False, infer_type_and_cast: bool = False): """ Sometimes we need to just represent the parameters as a dict, for instance when we pass them to PyTorch code. Parameters ---------- quiet: bool, optional (default = False) Whether to log the parameters before returning them as a dict. infer_type_and_cast : bool, optional (default = False) If True, we infer types and cast (e.g. things that look like floats to floats). """ if infer_type_and_cast: params_as_dict = infer_and_cast(self.params) else: params_as_dict = self.params if quiet: return params_as_dict def log_recursively(parameters, history): for key, value in parameters.items(): if isinstance(value, dict): new_local_history = history + key + "." log_recursively(value, new_local_history) else: logger.info(history + key + " = " + str(value)) logger.info("Converting Params object to dict; logging of default " "values will not occur when dictionary parameters are " "used subsequently.") logger.info("CURRENTLY DEFINED PARAMETERS: ") log_recursively(self.params, self.history) return params_as_dict
[ "def", "as_dict", "(", "self", ",", "quiet", ":", "bool", "=", "False", ",", "infer_type_and_cast", ":", "bool", "=", "False", ")", ":", "if", "infer_type_and_cast", ":", "params_as_dict", "=", "infer_and_cast", "(", "self", ".", "params", ")", "else", ":", "params_as_dict", "=", "self", ".", "params", "if", "quiet", ":", "return", "params_as_dict", "def", "log_recursively", "(", "parameters", ",", "history", ")", ":", "for", "key", ",", "value", "in", "parameters", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "new_local_history", "=", "history", "+", "key", "+", "\".\"", "log_recursively", "(", "value", ",", "new_local_history", ")", "else", ":", "logger", ".", "info", "(", "history", "+", "key", "+", "\" = \"", "+", "str", "(", "value", ")", ")", "logger", ".", "info", "(", "\"Converting Params object to dict; logging of default \"", "\"values will not occur when dictionary parameters are \"", "\"used subsequently.\"", ")", "logger", ".", "info", "(", "\"CURRENTLY DEFINED PARAMETERS: \"", ")", "log_recursively", "(", "self", ".", "params", ",", "self", ".", "history", ")", "return", "params_as_dict" ]
Sometimes we need to just represent the parameters as a dict, for instance when we pass them to PyTorch code. Parameters ---------- quiet: bool, optional (default = False) Whether to log the parameters before returning them as a dict. infer_type_and_cast : bool, optional (default = False) If True, we infer types and cast (e.g. things that look like floats to floats).
[ "Sometimes", "we", "need", "to", "just", "represent", "the", "parameters", "as", "a", "dict", "for", "instance", "when", "we", "pass", "them", "to", "PyTorch", "code", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/params.py#L337-L370
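A short sketch of the two flags. ``infer_and_cast`` is defined elsewhere in params.py; its exact casting rules are an assumption here beyond "numeric-looking strings become numbers".

```python
from allennlp.common.params import Params

params = Params({"lr": "0.001", "model": {"hidden": "128"}})

plain = params.as_dict(quiet=True)   # quiet=True skips the parameter logging
assert plain["lr"] == "0.001"        # values stay as-is

cast = params.as_dict(quiet=True, infer_type_and_cast=True)
assert cast["lr"] == 0.001           # numeric-looking strings are cast
assert cast["model"]["hidden"] == 128
```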
23,038
allenai/allennlp
allennlp/common/params.py
Params.as_flat_dict
def as_flat_dict(self): """ Returns the parameters of a flat dictionary from keys to values. Nested structure is collapsed with periods. """ flat_params = {} def recurse(parameters, path): for key, value in parameters.items(): newpath = path + [key] if isinstance(value, dict): recurse(value, newpath) else: flat_params['.'.join(newpath)] = value recurse(self.params, []) return flat_params
python
def as_flat_dict(self): """ Returns the parameters of a flat dictionary from keys to values. Nested structure is collapsed with periods. """ flat_params = {} def recurse(parameters, path): for key, value in parameters.items(): newpath = path + [key] if isinstance(value, dict): recurse(value, newpath) else: flat_params['.'.join(newpath)] = value recurse(self.params, []) return flat_params
[ "def", "as_flat_dict", "(", "self", ")", ":", "flat_params", "=", "{", "}", "def", "recurse", "(", "parameters", ",", "path", ")", ":", "for", "key", ",", "value", "in", "parameters", ".", "items", "(", ")", ":", "newpath", "=", "path", "+", "[", "key", "]", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "recurse", "(", "value", ",", "newpath", ")", "else", ":", "flat_params", "[", "'.'", ".", "join", "(", "newpath", ")", "]", "=", "value", "recurse", "(", "self", ".", "params", ",", "[", "]", ")", "return", "flat_params" ]
Returns the parameters as a flat dictionary from keys to values. Nested structure is collapsed with periods.
[ "Returns", "the", "parameters", "of", "a", "flat", "dictionary", "from", "keys", "to", "values", ".", "Nested", "structure", "is", "collapsed", "with", "periods", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/params.py#L372-L387
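Flattening in action; keys at any depth collapse into dotted paths.

```python
from allennlp.common.params import Params

params = Params({"model": {"encoder": {"type": "lstm"}},
                 "trainer": {"num_epochs": 10}})
print(params.as_flat_dict())
# {'model.encoder.type': 'lstm', 'trainer.num_epochs': 10}
```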
23,039
allenai/allennlp
allennlp/common/params.py
Params.from_file
def from_file(params_file: str, params_overrides: str = "", ext_vars: dict = None) -> 'Params': """ Load a `Params` object from a configuration file. Parameters ---------- params_file : ``str`` The path to the configuration file to load. params_overrides : ``str``, optional A dict of overrides that can be applied to final object. e.g. {"model.embedding_dim": 10} ext_vars : ``dict``, optional Our config files are Jsonnet, which allows specifying external variables for later substitution. Typically we substitute these using environment variables; however, you can also specify them here, in which case they take priority over environment variables. e.g. {"HOME_DIR": "/Users/allennlp/home"} """ if ext_vars is None: ext_vars = {} # redirect to cache, if necessary params_file = cached_path(params_file) ext_vars = {**_environment_variables(), **ext_vars} file_dict = json.loads(evaluate_file(params_file, ext_vars=ext_vars)) overrides_dict = parse_overrides(params_overrides) param_dict = with_fallback(preferred=overrides_dict, fallback=file_dict) return Params(param_dict)
python
def from_file(params_file: str, params_overrides: str = "", ext_vars: dict = None) -> 'Params': """ Load a `Params` object from a configuration file. Parameters ---------- params_file : ``str`` The path to the configuration file to load. params_overrides : ``str``, optional A dict of overrides that can be applied to final object. e.g. {"model.embedding_dim": 10} ext_vars : ``dict``, optional Our config files are Jsonnet, which allows specifying external variables for later substitution. Typically we substitute these using environment variables; however, you can also specify them here, in which case they take priority over environment variables. e.g. {"HOME_DIR": "/Users/allennlp/home"} """ if ext_vars is None: ext_vars = {} # redirect to cache, if necessary params_file = cached_path(params_file) ext_vars = {**_environment_variables(), **ext_vars} file_dict = json.loads(evaluate_file(params_file, ext_vars=ext_vars)) overrides_dict = parse_overrides(params_overrides) param_dict = with_fallback(preferred=overrides_dict, fallback=file_dict) return Params(param_dict)
[ "def", "from_file", "(", "params_file", ":", "str", ",", "params_overrides", ":", "str", "=", "\"\"", ",", "ext_vars", ":", "dict", "=", "None", ")", "->", "'Params'", ":", "if", "ext_vars", "is", "None", ":", "ext_vars", "=", "{", "}", "# redirect to cache, if necessary", "params_file", "=", "cached_path", "(", "params_file", ")", "ext_vars", "=", "{", "*", "*", "_environment_variables", "(", ")", ",", "*", "*", "ext_vars", "}", "file_dict", "=", "json", ".", "loads", "(", "evaluate_file", "(", "params_file", ",", "ext_vars", "=", "ext_vars", ")", ")", "overrides_dict", "=", "parse_overrides", "(", "params_overrides", ")", "param_dict", "=", "with_fallback", "(", "preferred", "=", "overrides_dict", ",", "fallback", "=", "file_dict", ")", "return", "Params", "(", "param_dict", ")" ]
Load a `Params` object from a configuration file. Parameters ---------- params_file : ``str`` The path to the configuration file to load. params_overrides : ``str``, optional A dict of overrides that can be applied to final object. e.g. {"model.embedding_dim": 10} ext_vars : ``dict``, optional Our config files are Jsonnet, which allows specifying external variables for later substitution. Typically we substitute these using environment variables; however, you can also specify them here, in which case they take priority over environment variables. e.g. {"HOME_DIR": "/Users/allennlp/home"}
[ "Load", "a", "Params", "object", "from", "a", "configuration", "file", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/params.py#L436-L466
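Putting the three arguments together; the config path and override key below are illustrative, not taken from the repository.

```python
from allennlp.common.params import Params

params = Params.from_file(
    "experiment.jsonnet",                          # hypothetical config path
    params_overrides='{"trainer.num_epochs": 2}',  # JSON string of flattened keys
    ext_vars={"DATA_DIR": "/tmp/data"},            # feeds Jsonnet std.extVar; beats env vars
)
```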
23,040
allenai/allennlp
allennlp/common/params.py
Params.as_ordered_dict
def as_ordered_dict(self, preference_orders: List[List[str]] = None) -> OrderedDict: """ Returns Ordered Dict of Params from list of partial order preferences. Parameters ---------- preference_orders: List[List[str]], optional ``preference_orders`` is list of partial preference orders. ["A", "B", "C"] means "A" > "B" > "C". For multiple preference_orders first will be considered first. Keys not found, will have last but alphabetical preference. Default Preferences: ``[["dataset_reader", "iterator", "model", "train_data_path", "validation_data_path", "test_data_path", "trainer", "vocabulary"], ["type"]]`` """ params_dict = self.as_dict(quiet=True) if not preference_orders: preference_orders = [] preference_orders.append(["dataset_reader", "iterator", "model", "train_data_path", "validation_data_path", "test_data_path", "trainer", "vocabulary"]) preference_orders.append(["type"]) def order_func(key): # Makes a tuple to use for ordering. The tuple is an index into each of the `preference_orders`, # followed by the key itself. This gives us integer sorting if you have a key in one of the # `preference_orders`, followed by alphabetical ordering if not. order_tuple = [order.index(key) if key in order else len(order) for order in preference_orders] return order_tuple + [key] def order_dict(dictionary, order_func): # Recursively orders dictionary according to scoring order_func result = OrderedDict() for key, val in sorted(dictionary.items(), key=lambda item: order_func(item[0])): result[key] = order_dict(val, order_func) if isinstance(val, dict) else val return result return order_dict(params_dict, order_func)
python
def as_ordered_dict(self, preference_orders: List[List[str]] = None) -> OrderedDict: """ Returns Ordered Dict of Params from list of partial order preferences. Parameters ---------- preference_orders: List[List[str]], optional ``preference_orders`` is list of partial preference orders. ["A", "B", "C"] means "A" > "B" > "C". For multiple preference_orders first will be considered first. Keys not found, will have last but alphabetical preference. Default Preferences: ``[["dataset_reader", "iterator", "model", "train_data_path", "validation_data_path", "test_data_path", "trainer", "vocabulary"], ["type"]]`` """ params_dict = self.as_dict(quiet=True) if not preference_orders: preference_orders = [] preference_orders.append(["dataset_reader", "iterator", "model", "train_data_path", "validation_data_path", "test_data_path", "trainer", "vocabulary"]) preference_orders.append(["type"]) def order_func(key): # Makes a tuple to use for ordering. The tuple is an index into each of the `preference_orders`, # followed by the key itself. This gives us integer sorting if you have a key in one of the # `preference_orders`, followed by alphabetical ordering if not. order_tuple = [order.index(key) if key in order else len(order) for order in preference_orders] return order_tuple + [key] def order_dict(dictionary, order_func): # Recursively orders dictionary according to scoring order_func result = OrderedDict() for key, val in sorted(dictionary.items(), key=lambda item: order_func(item[0])): result[key] = order_dict(val, order_func) if isinstance(val, dict) else val return result return order_dict(params_dict, order_func)
[ "def", "as_ordered_dict", "(", "self", ",", "preference_orders", ":", "List", "[", "List", "[", "str", "]", "]", "=", "None", ")", "->", "OrderedDict", ":", "params_dict", "=", "self", ".", "as_dict", "(", "quiet", "=", "True", ")", "if", "not", "preference_orders", ":", "preference_orders", "=", "[", "]", "preference_orders", ".", "append", "(", "[", "\"dataset_reader\"", ",", "\"iterator\"", ",", "\"model\"", ",", "\"train_data_path\"", ",", "\"validation_data_path\"", ",", "\"test_data_path\"", ",", "\"trainer\"", ",", "\"vocabulary\"", "]", ")", "preference_orders", ".", "append", "(", "[", "\"type\"", "]", ")", "def", "order_func", "(", "key", ")", ":", "# Makes a tuple to use for ordering. The tuple is an index into each of the `preference_orders`,", "# followed by the key itself. This gives us integer sorting if you have a key in one of the", "# `preference_orders`, followed by alphabetical ordering if not.", "order_tuple", "=", "[", "order", ".", "index", "(", "key", ")", "if", "key", "in", "order", "else", "len", "(", "order", ")", "for", "order", "in", "preference_orders", "]", "return", "order_tuple", "+", "[", "key", "]", "def", "order_dict", "(", "dictionary", ",", "order_func", ")", ":", "# Recursively orders dictionary according to scoring order_func", "result", "=", "OrderedDict", "(", ")", "for", "key", ",", "val", "in", "sorted", "(", "dictionary", ".", "items", "(", ")", ",", "key", "=", "lambda", "item", ":", "order_func", "(", "item", "[", "0", "]", ")", ")", ":", "result", "[", "key", "]", "=", "order_dict", "(", "val", ",", "order_func", ")", "if", "isinstance", "(", "val", ",", "dict", ")", "else", "val", "return", "result", "return", "order_dict", "(", "params_dict", ",", "order_func", ")" ]
Returns an OrderedDict of Params built from a list of partial order preferences. Parameters ---------- preference_orders: List[List[str]], optional ``preference_orders`` is a list of partial preference orders. ["A", "B", "C"] means "A" > "B" > "C". When multiple preference orders are given, earlier orders take precedence. Keys not found in any order sort last, alphabetically. Default Preferences: ``[["dataset_reader", "iterator", "model", "train_data_path", "validation_data_path", "test_data_path", "trainer", "vocabulary"], ["type"]]``
[ "Returns", "Ordered", "Dict", "of", "Params", "from", "list", "of", "partial", "order", "preferences", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/params.py#L472-L507
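A worked example of the default ordering: keys in the first preference list come first, in that list's order; among the rest, keys matching the ``["type"]`` list precede unlisted keys, which sort alphabetically.

```python
from allennlp.common.params import Params

params = Params({"zebra": 1, "type": "default", "trainer": {}, "model": {}})
ordered = params.as_ordered_dict()
print(list(ordered.keys()))
# ['model', 'trainer', 'type', 'zebra']
```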
23,041
allenai/allennlp
allennlp/training/metric_tracker.py
MetricTracker.clear
def clear(self) -> None: """ Clears out the tracked metrics, but keeps the patience and should_decrease settings. """ self._best_so_far = None self._epochs_with_no_improvement = 0 self._is_best_so_far = True self._epoch_number = 0 self.best_epoch = None
python
def clear(self) -> None: """ Clears out the tracked metrics, but keeps the patience and should_decrease settings. """ self._best_so_far = None self._epochs_with_no_improvement = 0 self._is_best_so_far = True self._epoch_number = 0 self.best_epoch = None
[ "def", "clear", "(", "self", ")", "->", "None", ":", "self", ".", "_best_so_far", "=", "None", "self", ".", "_epochs_with_no_improvement", "=", "0", "self", ".", "_is_best_so_far", "=", "True", "self", ".", "_epoch_number", "=", "0", "self", ".", "best_epoch", "=", "None" ]
Clears out the tracked metrics, but keeps the patience and should_decrease settings.
[ "Clears", "out", "the", "tracked", "metrics", "but", "keeps", "the", "patience", "and", "should_decrease", "settings", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/metric_tracker.py#L59-L67
23,042
allenai/allennlp
allennlp/training/metric_tracker.py
MetricTracker.state_dict
def state_dict(self) -> Dict[str, Any]: """ A ``Trainer`` can use this to serialize the state of the metric tracker. """ return { "best_so_far": self._best_so_far, "patience": self._patience, "epochs_with_no_improvement": self._epochs_with_no_improvement, "is_best_so_far": self._is_best_so_far, "should_decrease": self._should_decrease, "best_epoch_metrics": self.best_epoch_metrics, "epoch_number": self._epoch_number, "best_epoch": self.best_epoch }
python
def state_dict(self) -> Dict[str, Any]: """ A ``Trainer`` can use this to serialize the state of the metric tracker. """ return { "best_so_far": self._best_so_far, "patience": self._patience, "epochs_with_no_improvement": self._epochs_with_no_improvement, "is_best_so_far": self._is_best_so_far, "should_decrease": self._should_decrease, "best_epoch_metrics": self.best_epoch_metrics, "epoch_number": self._epoch_number, "best_epoch": self.best_epoch }
[ "def", "state_dict", "(", "self", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "return", "{", "\"best_so_far\"", ":", "self", ".", "_best_so_far", ",", "\"patience\"", ":", "self", ".", "_patience", ",", "\"epochs_with_no_improvement\"", ":", "self", ".", "_epochs_with_no_improvement", ",", "\"is_best_so_far\"", ":", "self", ".", "_is_best_so_far", ",", "\"should_decrease\"", ":", "self", ".", "_should_decrease", ",", "\"best_epoch_metrics\"", ":", "self", ".", "best_epoch_metrics", ",", "\"epoch_number\"", ":", "self", ".", "_epoch_number", ",", "\"best_epoch\"", ":", "self", ".", "best_epoch", "}" ]
A ``Trainer`` can use this to serialize the state of the metric tracker.
[ "A", "Trainer", "can", "use", "this", "to", "serialize", "the", "state", "of", "the", "metric", "tracker", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/metric_tracker.py#L69-L82
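A checkpoint round trip. Both the ``load_state_dict`` counterpart and the ``"+metric"`` constructor convention are assumptions here; neither appears in these entries.

```python
from allennlp.training.metric_tracker import MetricTracker

tracker = MetricTracker(patience=5, metric_name="+accuracy")  # assumed signature
tracker.add_metric(0.71)
state = tracker.state_dict()  # plain dict of Python scalars, checkpoint-safe

restored = MetricTracker(patience=5, metric_name="+accuracy")
restored.load_state_dict(state)  # assumed counterpart to state_dict()
assert restored.should_stop_early() == tracker.should_stop_early()
```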
23,043
allenai/allennlp
allennlp/training/metric_tracker.py
MetricTracker.add_metric
def add_metric(self, metric: float) -> None: """ Record a new value of the metric and update the various things that depend on it. """ new_best = ((self._best_so_far is None) or (self._should_decrease and metric < self._best_so_far) or (not self._should_decrease and metric > self._best_so_far)) if new_best: self.best_epoch = self._epoch_number self._is_best_so_far = True self._best_so_far = metric self._epochs_with_no_improvement = 0 else: self._is_best_so_far = False self._epochs_with_no_improvement += 1 self._epoch_number += 1
python
def add_metric(self, metric: float) -> None: """ Record a new value of the metric and update the various things that depend on it. """ new_best = ((self._best_so_far is None) or (self._should_decrease and metric < self._best_so_far) or (not self._should_decrease and metric > self._best_so_far)) if new_best: self.best_epoch = self._epoch_number self._is_best_so_far = True self._best_so_far = metric self._epochs_with_no_improvement = 0 else: self._is_best_so_far = False self._epochs_with_no_improvement += 1 self._epoch_number += 1
[ "def", "add_metric", "(", "self", ",", "metric", ":", "float", ")", "->", "None", ":", "new_best", "=", "(", "(", "self", ".", "_best_so_far", "is", "None", ")", "or", "(", "self", ".", "_should_decrease", "and", "metric", "<", "self", ".", "_best_so_far", ")", "or", "(", "not", "self", ".", "_should_decrease", "and", "metric", ">", "self", ".", "_best_so_far", ")", ")", "if", "new_best", ":", "self", ".", "best_epoch", "=", "self", ".", "_epoch_number", "self", ".", "_is_best_so_far", "=", "True", "self", ".", "_best_so_far", "=", "metric", "self", ".", "_epochs_with_no_improvement", "=", "0", "else", ":", "self", ".", "_is_best_so_far", "=", "False", "self", ".", "_epochs_with_no_improvement", "+=", "1", "self", ".", "_epoch_number", "+=", "1" ]
Record a new value of the metric and update the various things that depend on it.
[ "Record", "a", "new", "value", "of", "the", "metric", "and", "update", "the", "various", "things", "that", "depend", "on", "it", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/metric_tracker.py#L97-L113
23,044
allenai/allennlp
allennlp/training/metric_tracker.py
MetricTracker.add_metrics
def add_metrics(self, metrics: Iterable[float]) -> None: """ Helper to add multiple metrics at once. """ for metric in metrics: self.add_metric(metric)
python
def add_metrics(self, metrics: Iterable[float]) -> None: """ Helper to add multiple metrics at once. """ for metric in metrics: self.add_metric(metric)
[ "def", "add_metrics", "(", "self", ",", "metrics", ":", "Iterable", "[", "float", "]", ")", "->", "None", ":", "for", "metric", "in", "metrics", ":", "self", ".", "add_metric", "(", "metric", ")" ]
Helper to add multiple metrics at once.
[ "Helper", "to", "add", "multiple", "metrics", "at", "once", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/metric_tracker.py#L115-L120
23,045
allenai/allennlp
allennlp/training/metric_tracker.py
MetricTracker.should_stop_early
def should_stop_early(self) -> bool: """ Returns true if improvement has stopped for long enough. """ if self._patience is None: return False else: return self._epochs_with_no_improvement >= self._patience
python
def should_stop_early(self) -> bool: """ Returns true if improvement has stopped for long enough. """ if self._patience is None: return False else: return self._epochs_with_no_improvement >= self._patience
[ "def", "should_stop_early", "(", "self", ")", "->", "bool", ":", "if", "self", ".", "_patience", "is", "None", ":", "return", "False", "else", ":", "return", "self", ".", "_epochs_with_no_improvement", ">=", "self", ".", "_patience" ]
Returns true if improvement has stopped for long enough.
[ "Returns", "true", "if", "improvement", "has", "stopped", "for", "long", "enough", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/metric_tracker.py#L128-L135
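How the pieces above (``add_metric``, ``add_metrics``, ``should_stop_early``) interact over a training run. The constructor arguments are assumed; the rest follows the code shown.

```python
from allennlp.training.metric_tracker import MetricTracker

# "+accuracy" meaning higher-is-better is an assumed constructor convention.
tracker = MetricTracker(patience=2, metric_name="+accuracy")

for epoch_metric in [0.70, 0.75, 0.74, 0.73]:
    tracker.add_metric(epoch_metric)
    if tracker.should_stop_early():
        break

# 0.75 was the best epoch; 0.74 and 0.73 give two epochs with no
# improvement, which reaches patience=2 and stops the loop early.
assert tracker.should_stop_early()
```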
23,046
allenai/allennlp
allennlp/models/archival.py
archive_model
def archive_model(serialization_dir: str, weights: str = _DEFAULT_WEIGHTS, files_to_archive: Dict[str, str] = None, archive_path: str = None) -> None: """ Archive the model weights, its training configuration, and its vocabulary to `model.tar.gz`. Include the additional ``files_to_archive`` if provided. Parameters ---------- serialization_dir: ``str`` The directory where the weights and vocabulary are written out. weights: ``str``, optional (default=_DEFAULT_WEIGHTS) Which weights file to include in the archive. The default is ``best.th``. files_to_archive: ``Dict[str, str]``, optional (default=None) A mapping {flattened_key -> filename} of supplementary files to include in the archive. That is, if you wanted to include ``params['model']['weights']`` then you would specify the key as `"model.weights"`. archive_path : ``str``, optional, (default = None) A full path to serialize the model to. The default is "model.tar.gz" inside the serialization_dir. If you pass a directory here, we'll serialize the model to "model.tar.gz" inside the directory. """ weights_file = os.path.join(serialization_dir, weights) if not os.path.exists(weights_file): logger.error("weights file %s does not exist, unable to archive model", weights_file) return config_file = os.path.join(serialization_dir, CONFIG_NAME) if not os.path.exists(config_file): logger.error("config file %s does not exist, unable to archive model", config_file) # If there are files we want to archive, write out the mapping # so that we can use it during de-archiving. if files_to_archive: fta_filename = os.path.join(serialization_dir, _FTA_NAME) with open(fta_filename, 'w') as fta_file: fta_file.write(json.dumps(files_to_archive)) if archive_path is not None: archive_file = archive_path if os.path.isdir(archive_file): archive_file = os.path.join(archive_file, "model.tar.gz") else: archive_file = os.path.join(serialization_dir, "model.tar.gz") logger.info("archiving weights and vocabulary to %s", archive_file) with tarfile.open(archive_file, 'w:gz') as archive: archive.add(config_file, arcname=CONFIG_NAME) archive.add(weights_file, arcname=_WEIGHTS_NAME) archive.add(os.path.join(serialization_dir, "vocabulary"), arcname="vocabulary") # If there are supplemental files to archive: if files_to_archive: # Archive the { flattened_key -> original_filename } mapping. archive.add(fta_filename, arcname=_FTA_NAME) # And add each requested file to the archive. for key, filename in files_to_archive.items(): archive.add(filename, arcname=f"fta/{key}")
python
def archive_model(serialization_dir: str, weights: str = _DEFAULT_WEIGHTS, files_to_archive: Dict[str, str] = None, archive_path: str = None) -> None: """ Archive the model weights, its training configuration, and its vocabulary to `model.tar.gz`. Include the additional ``files_to_archive`` if provided. Parameters ---------- serialization_dir: ``str`` The directory where the weights and vocabulary are written out. weights: ``str``, optional (default=_DEFAULT_WEIGHTS) Which weights file to include in the archive. The default is ``best.th``. files_to_archive: ``Dict[str, str]``, optional (default=None) A mapping {flattened_key -> filename} of supplementary files to include in the archive. That is, if you wanted to include ``params['model']['weights']`` then you would specify the key as `"model.weights"`. archive_path : ``str``, optional, (default = None) A full path to serialize the model to. The default is "model.tar.gz" inside the serialization_dir. If you pass a directory here, we'll serialize the model to "model.tar.gz" inside the directory. """ weights_file = os.path.join(serialization_dir, weights) if not os.path.exists(weights_file): logger.error("weights file %s does not exist, unable to archive model", weights_file) return config_file = os.path.join(serialization_dir, CONFIG_NAME) if not os.path.exists(config_file): logger.error("config file %s does not exist, unable to archive model", config_file) # If there are files we want to archive, write out the mapping # so that we can use it during de-archiving. if files_to_archive: fta_filename = os.path.join(serialization_dir, _FTA_NAME) with open(fta_filename, 'w') as fta_file: fta_file.write(json.dumps(files_to_archive)) if archive_path is not None: archive_file = archive_path if os.path.isdir(archive_file): archive_file = os.path.join(archive_file, "model.tar.gz") else: archive_file = os.path.join(serialization_dir, "model.tar.gz") logger.info("archiving weights and vocabulary to %s", archive_file) with tarfile.open(archive_file, 'w:gz') as archive: archive.add(config_file, arcname=CONFIG_NAME) archive.add(weights_file, arcname=_WEIGHTS_NAME) archive.add(os.path.join(serialization_dir, "vocabulary"), arcname="vocabulary") # If there are supplemental files to archive: if files_to_archive: # Archive the { flattened_key -> original_filename } mapping. archive.add(fta_filename, arcname=_FTA_NAME) # And add each requested file to the archive. for key, filename in files_to_archive.items(): archive.add(filename, arcname=f"fta/{key}")
[ "def", "archive_model", "(", "serialization_dir", ":", "str", ",", "weights", ":", "str", "=", "_DEFAULT_WEIGHTS", ",", "files_to_archive", ":", "Dict", "[", "str", ",", "str", "]", "=", "None", ",", "archive_path", ":", "str", "=", "None", ")", "->", "None", ":", "weights_file", "=", "os", ".", "path", ".", "join", "(", "serialization_dir", ",", "weights", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "weights_file", ")", ":", "logger", ".", "error", "(", "\"weights file %s does not exist, unable to archive model\"", ",", "weights_file", ")", "return", "config_file", "=", "os", ".", "path", ".", "join", "(", "serialization_dir", ",", "CONFIG_NAME", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "config_file", ")", ":", "logger", ".", "error", "(", "\"config file %s does not exist, unable to archive model\"", ",", "config_file", ")", "# If there are files we want to archive, write out the mapping", "# so that we can use it during de-archiving.", "if", "files_to_archive", ":", "fta_filename", "=", "os", ".", "path", ".", "join", "(", "serialization_dir", ",", "_FTA_NAME", ")", "with", "open", "(", "fta_filename", ",", "'w'", ")", "as", "fta_file", ":", "fta_file", ".", "write", "(", "json", ".", "dumps", "(", "files_to_archive", ")", ")", "if", "archive_path", "is", "not", "None", ":", "archive_file", "=", "archive_path", "if", "os", ".", "path", ".", "isdir", "(", "archive_file", ")", ":", "archive_file", "=", "os", ".", "path", ".", "join", "(", "archive_file", ",", "\"model.tar.gz\"", ")", "else", ":", "archive_file", "=", "os", ".", "path", ".", "join", "(", "serialization_dir", ",", "\"model.tar.gz\"", ")", "logger", ".", "info", "(", "\"archiving weights and vocabulary to %s\"", ",", "archive_file", ")", "with", "tarfile", ".", "open", "(", "archive_file", ",", "'w:gz'", ")", "as", "archive", ":", "archive", ".", "add", "(", "config_file", ",", "arcname", "=", "CONFIG_NAME", ")", "archive", ".", "add", "(", "weights_file", ",", "arcname", "=", "_WEIGHTS_NAME", ")", "archive", ".", "add", "(", "os", ".", "path", ".", "join", "(", "serialization_dir", ",", "\"vocabulary\"", ")", ",", "arcname", "=", "\"vocabulary\"", ")", "# If there are supplemental files to archive:", "if", "files_to_archive", ":", "# Archive the { flattened_key -> original_filename } mapping.", "archive", ".", "add", "(", "fta_filename", ",", "arcname", "=", "_FTA_NAME", ")", "# And add each requested file to the archive.", "for", "key", ",", "filename", "in", "files_to_archive", ".", "items", "(", ")", ":", "archive", ".", "add", "(", "filename", ",", "arcname", "=", "f\"fta/{key}\"", ")" ]
Archive the model weights, its training configuration, and its vocabulary to `model.tar.gz`. Include the additional ``files_to_archive`` if provided. Parameters ---------- serialization_dir: ``str`` The directory where the weights and vocabulary are written out. weights: ``str``, optional (default=_DEFAULT_WEIGHTS) Which weights file to include in the archive. The default is ``best.th``. files_to_archive: ``Dict[str, str]``, optional (default=None) A mapping {flattened_key -> filename} of supplementary files to include in the archive. That is, if you wanted to include ``params['model']['weights']`` then you would specify the key as `"model.weights"`. archive_path : ``str``, optional, (default = None) A full path to serialize the model to. The default is "model.tar.gz" inside the serialization_dir. If you pass a directory here, we'll serialize the model to "model.tar.gz" inside the directory.
[ "Archive", "the", "model", "weights", "its", "training", "configuration", "and", "its", "vocabulary", "to", "model", ".", "tar", ".", "gz", ".", "Include", "the", "additional", "files_to_archive", "if", "provided", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/models/archival.py#L89-L148
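End-to-end archiving sketch; the directory layout and the flattened key are illustrative. In practice the ``files_to_archive`` mapping typically comes from ``Params.files_to_archive``, collected via ``add_file_to_archive`` during model construction.

```python
from allennlp.models.archival import archive_model

# Assumes training already wrote best.th, config.json and vocabulary/ into
# my_run/; this produces my_run/model.tar.gz. Paths and key are illustrative.
archive_model(
    serialization_dir="my_run",
    files_to_archive={
        "model.text_field_embedder.tokens.pretrained_file": "glove.txt",
    },
)
```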
23,047
allenai/allennlp
allennlp/models/archival.py
load_archive
def load_archive(archive_file: str, cuda_device: int = -1, overrides: str = "", weights_file: str = None) -> Archive: """ Instantiates an Archive from an archived `tar.gz` file. Parameters ---------- archive_file: ``str`` The archive file to load the model from. weights_file: ``str``, optional (default = None) The weights file to use. If unspecified, weights.th in the archive_file will be used. cuda_device: ``int``, optional (default = -1) If `cuda_device` is >= 0, the model will be loaded onto the corresponding GPU. Otherwise it will be loaded onto the CPU. overrides: ``str``, optional (default = "") JSON overrides to apply to the unarchived ``Params`` object. """ # redirect to the cache, if necessary resolved_archive_file = cached_path(archive_file) if resolved_archive_file == archive_file: logger.info(f"loading archive file {archive_file}") else: logger.info(f"loading archive file {archive_file} from cache at {resolved_archive_file}") if os.path.isdir(resolved_archive_file): serialization_dir = resolved_archive_file else: # Extract archive to temp dir tempdir = tempfile.mkdtemp() logger.info(f"extracting archive file {resolved_archive_file} to temp dir {tempdir}") with tarfile.open(resolved_archive_file, 'r:gz') as archive: archive.extractall(tempdir) # Postpone cleanup until exit in case the unarchived contents are needed outside # this function. atexit.register(_cleanup_archive_dir, tempdir) serialization_dir = tempdir # Check for supplemental files in archive fta_filename = os.path.join(serialization_dir, _FTA_NAME) if os.path.exists(fta_filename): with open(fta_filename, 'r') as fta_file: files_to_archive = json.loads(fta_file.read()) # Add these replacements to overrides replacements_dict: Dict[str, Any] = {} for key, original_filename in files_to_archive.items(): replacement_filename = os.path.join(serialization_dir, f"fta/{key}") if os.path.exists(replacement_filename): replacements_dict[key] = replacement_filename else: logger.warning(f"Archived file {replacement_filename} not found! At train time " f"this file was located at {original_filename}. This may be " "because you are loading a serialization directory. Attempting to " "load the file from its train-time location.") overrides_dict = parse_overrides(overrides) combined_dict = with_fallback(preferred=overrides_dict, fallback=unflatten(replacements_dict)) overrides = json.dumps(combined_dict) # Load config config = Params.from_file(os.path.join(serialization_dir, CONFIG_NAME), overrides) config.loading_from_archive = True if weights_file: weights_path = weights_file else: weights_path = os.path.join(serialization_dir, _WEIGHTS_NAME) # Fallback for serialization directories. if not os.path.exists(weights_path): weights_path = os.path.join(serialization_dir, _DEFAULT_WEIGHTS) # Instantiate model. Use a duplicate of the config, as it will get consumed. model = Model.load(config.duplicate(), weights_file=weights_path, serialization_dir=serialization_dir, cuda_device=cuda_device) return Archive(model=model, config=config)
python
def load_archive(archive_file: str, cuda_device: int = -1, overrides: str = "", weights_file: str = None) -> Archive: """ Instantiates an Archive from an archived `tar.gz` file. Parameters ---------- archive_file: ``str`` The archive file to load the model from. weights_file: ``str``, optional (default = None) The weights file to use. If unspecified, weights.th in the archive_file will be used. cuda_device: ``int``, optional (default = -1) If `cuda_device` is >= 0, the model will be loaded onto the corresponding GPU. Otherwise it will be loaded onto the CPU. overrides: ``str``, optional (default = "") JSON overrides to apply to the unarchived ``Params`` object. """ # redirect to the cache, if necessary resolved_archive_file = cached_path(archive_file) if resolved_archive_file == archive_file: logger.info(f"loading archive file {archive_file}") else: logger.info(f"loading archive file {archive_file} from cache at {resolved_archive_file}") if os.path.isdir(resolved_archive_file): serialization_dir = resolved_archive_file else: # Extract archive to temp dir tempdir = tempfile.mkdtemp() logger.info(f"extracting archive file {resolved_archive_file} to temp dir {tempdir}") with tarfile.open(resolved_archive_file, 'r:gz') as archive: archive.extractall(tempdir) # Postpone cleanup until exit in case the unarchived contents are needed outside # this function. atexit.register(_cleanup_archive_dir, tempdir) serialization_dir = tempdir # Check for supplemental files in archive fta_filename = os.path.join(serialization_dir, _FTA_NAME) if os.path.exists(fta_filename): with open(fta_filename, 'r') as fta_file: files_to_archive = json.loads(fta_file.read()) # Add these replacements to overrides replacements_dict: Dict[str, Any] = {} for key, original_filename in files_to_archive.items(): replacement_filename = os.path.join(serialization_dir, f"fta/{key}") if os.path.exists(replacement_filename): replacements_dict[key] = replacement_filename else: logger.warning(f"Archived file {replacement_filename} not found! At train time " f"this file was located at {original_filename}. This may be " "because you are loading a serialization directory. Attempting to " "load the file from its train-time location.") overrides_dict = parse_overrides(overrides) combined_dict = with_fallback(preferred=overrides_dict, fallback=unflatten(replacements_dict)) overrides = json.dumps(combined_dict) # Load config config = Params.from_file(os.path.join(serialization_dir, CONFIG_NAME), overrides) config.loading_from_archive = True if weights_file: weights_path = weights_file else: weights_path = os.path.join(serialization_dir, _WEIGHTS_NAME) # Fallback for serialization directories. if not os.path.exists(weights_path): weights_path = os.path.join(serialization_dir, _DEFAULT_WEIGHTS) # Instantiate model. Use a duplicate of the config, as it will get consumed. model = Model.load(config.duplicate(), weights_file=weights_path, serialization_dir=serialization_dir, cuda_device=cuda_device) return Archive(model=model, config=config)
[ "def", "load_archive", "(", "archive_file", ":", "str", ",", "cuda_device", ":", "int", "=", "-", "1", ",", "overrides", ":", "str", "=", "\"\"", ",", "weights_file", ":", "str", "=", "None", ")", "->", "Archive", ":", "# redirect to the cache, if necessary", "resolved_archive_file", "=", "cached_path", "(", "archive_file", ")", "if", "resolved_archive_file", "==", "archive_file", ":", "logger", ".", "info", "(", "f\"loading archive file {archive_file}\"", ")", "else", ":", "logger", ".", "info", "(", "f\"loading archive file {archive_file} from cache at {resolved_archive_file}\"", ")", "if", "os", ".", "path", ".", "isdir", "(", "resolved_archive_file", ")", ":", "serialization_dir", "=", "resolved_archive_file", "else", ":", "# Extract archive to temp dir", "tempdir", "=", "tempfile", ".", "mkdtemp", "(", ")", "logger", ".", "info", "(", "f\"extracting archive file {resolved_archive_file} to temp dir {tempdir}\"", ")", "with", "tarfile", ".", "open", "(", "resolved_archive_file", ",", "'r:gz'", ")", "as", "archive", ":", "archive", ".", "extractall", "(", "tempdir", ")", "# Postpone cleanup until exit in case the unarchived contents are needed outside", "# this function.", "atexit", ".", "register", "(", "_cleanup_archive_dir", ",", "tempdir", ")", "serialization_dir", "=", "tempdir", "# Check for supplemental files in archive", "fta_filename", "=", "os", ".", "path", ".", "join", "(", "serialization_dir", ",", "_FTA_NAME", ")", "if", "os", ".", "path", ".", "exists", "(", "fta_filename", ")", ":", "with", "open", "(", "fta_filename", ",", "'r'", ")", "as", "fta_file", ":", "files_to_archive", "=", "json", ".", "loads", "(", "fta_file", ".", "read", "(", ")", ")", "# Add these replacements to overrides", "replacements_dict", ":", "Dict", "[", "str", ",", "Any", "]", "=", "{", "}", "for", "key", ",", "original_filename", "in", "files_to_archive", ".", "items", "(", ")", ":", "replacement_filename", "=", "os", ".", "path", ".", "join", "(", "serialization_dir", ",", "f\"fta/{key}\"", ")", "if", "os", ".", "path", ".", "exists", "(", "replacement_filename", ")", ":", "replacements_dict", "[", "key", "]", "=", "replacement_filename", "else", ":", "logger", ".", "warning", "(", "f\"Archived file {replacement_filename} not found! At train time \"", "f\"this file was located at {original_filename}. This may be \"", "\"because you are loading a serialization directory. Attempting to \"", "\"load the file from its train-time location.\"", ")", "overrides_dict", "=", "parse_overrides", "(", "overrides", ")", "combined_dict", "=", "with_fallback", "(", "preferred", "=", "overrides_dict", ",", "fallback", "=", "unflatten", "(", "replacements_dict", ")", ")", "overrides", "=", "json", ".", "dumps", "(", "combined_dict", ")", "# Load config", "config", "=", "Params", ".", "from_file", "(", "os", ".", "path", ".", "join", "(", "serialization_dir", ",", "CONFIG_NAME", ")", ",", "overrides", ")", "config", ".", "loading_from_archive", "=", "True", "if", "weights_file", ":", "weights_path", "=", "weights_file", "else", ":", "weights_path", "=", "os", ".", "path", ".", "join", "(", "serialization_dir", ",", "_WEIGHTS_NAME", ")", "# Fallback for serialization directories.", "if", "not", "os", ".", "path", ".", "exists", "(", "weights_path", ")", ":", "weights_path", "=", "os", ".", "path", ".", "join", "(", "serialization_dir", ",", "_DEFAULT_WEIGHTS", ")", "# Instantiate model. 
Use a duplicate of the config, as it will get consumed.", "model", "=", "Model", ".", "load", "(", "config", ".", "duplicate", "(", ")", ",", "weights_file", "=", "weights_path", ",", "serialization_dir", "=", "serialization_dir", ",", "cuda_device", "=", "cuda_device", ")", "return", "Archive", "(", "model", "=", "model", ",", "config", "=", "config", ")" ]
Instantiates an Archive from an archived `tar.gz` file. Parameters ---------- archive_file: ``str`` The archive file to load the model from. weights_file: ``str``, optional (default = None) The weights file to use. If unspecified, weights.th in the archive_file will be used. cuda_device: ``int``, optional (default = -1) If `cuda_device` is >= 0, the model will be loaded onto the corresponding GPU. Otherwise it will be loaded onto the CPU. overrides: ``str``, optional (default = "") JSON overrides to apply to the unarchived ``Params`` object.
[ "Instantiates", "an", "Archive", "from", "an", "archived", "tar", ".", "gz", "file", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/models/archival.py#L150-L232
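
Editor's note: a minimal usage sketch for the ``load_archive`` record above. The archive path and the override key are illustrative placeholders, not values from the source; dotted override keys are assumed to be handled by the ``parse_overrides``/``unflatten`` machinery the code above uses.

from allennlp.models.archival import load_archive

# Load a trained model onto the CPU, overriding one config value.
archive = load_archive("model.tar.gz",                        # hypothetical archive path
                       cuda_device=-1,                        # -1 means CPU, per the docstring
                       overrides='{"model.dropout": 0.0}')    # hypothetical JSON override
model, config = archive.model, archive.config
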
23,048
allenai/allennlp
allennlp/models/archival.py
Archive.extract_module
def extract_module(self, path: str, freeze: bool = True) -> Module: """ This method can be used to load a module from the pretrained model archive. It is also used implicitly in FromParams based construction. So instead of using standard params to construct a module, you can instead load a pretrained module from the model archive directly. For eg, instead of using params like {"type": "module_type", ...}, you can use the following template:: { "_pretrained": { "archive_file": "../path/to/model.tar.gz", "path": "path.to.module.in.model", "freeze": False } } If you use this feature with FromParams, take care of the following caveat: Call to initializer(self) at end of model initializer can potentially wipe the transferred parameters by reinitializing them. This can happen if you have setup initializer regex that also matches parameters of the transferred module. To safe-guard against this, you can either update your initializer regex to prevent conflicting match or add extra initializer:: [ [".*transferred_module_name.*", "prevent"]] ] Parameters ---------- path : ``str``, required Path of target module to be loaded from the model. Eg. "_textfield_embedder.token_embedder_tokens" freeze : ``bool``, optional (default=True) Whether to freeze the module parameters or not. """ modules_dict = {path: module for path, module in self.model.named_modules()} module = modules_dict.get(path, None) if not module: raise ConfigurationError(f"You asked to transfer module at path {path} from " f"the model {type(self.model)}. But it's not present.") if not isinstance(module, Module): raise ConfigurationError(f"The transferred object from model {type(self.model)} at path " f"{path} is not a PyTorch Module.") for parameter in module.parameters(): # type: ignore parameter.requires_grad_(not freeze) return module
python
def extract_module(self, path: str, freeze: bool = True) -> Module: """ This method can be used to load a module from the pretrained model archive. It is also used implicitly in FromParams based construction. So instead of using standard params to construct a module, you can instead load a pretrained module from the model archive directly. For eg, instead of using params like {"type": "module_type", ...}, you can use the following template:: { "_pretrained": { "archive_file": "../path/to/model.tar.gz", "path": "path.to.module.in.model", "freeze": False } } If you use this feature with FromParams, take care of the following caveat: Call to initializer(self) at end of model initializer can potentially wipe the transferred parameters by reinitializing them. This can happen if you have setup initializer regex that also matches parameters of the transferred module. To safe-guard against this, you can either update your initializer regex to prevent conflicting match or add extra initializer:: [ [".*transferred_module_name.*", "prevent"]] ] Parameters ---------- path : ``str``, required Path of target module to be loaded from the model. Eg. "_textfield_embedder.token_embedder_tokens" freeze : ``bool``, optional (default=True) Whether to freeze the module parameters or not. """ modules_dict = {path: module for path, module in self.model.named_modules()} module = modules_dict.get(path, None) if not module: raise ConfigurationError(f"You asked to transfer module at path {path} from " f"the model {type(self.model)}. But it's not present.") if not isinstance(module, Module): raise ConfigurationError(f"The transferred object from model {type(self.model)} at path " f"{path} is not a PyTorch Module.") for parameter in module.parameters(): # type: ignore parameter.requires_grad_(not freeze) return module
[ "def", "extract_module", "(", "self", ",", "path", ":", "str", ",", "freeze", ":", "bool", "=", "True", ")", "->", "Module", ":", "modules_dict", "=", "{", "path", ":", "module", "for", "path", ",", "module", "in", "self", ".", "model", ".", "named_modules", "(", ")", "}", "module", "=", "modules_dict", ".", "get", "(", "path", ",", "None", ")", "if", "not", "module", ":", "raise", "ConfigurationError", "(", "f\"You asked to transfer module at path {path} from \"", "f\"the model {type(self.model)}. But it's not present.\"", ")", "if", "not", "isinstance", "(", "module", ",", "Module", ")", ":", "raise", "ConfigurationError", "(", "f\"The transferred object from model {type(self.model)} at path \"", "f\"{path} is not a PyTorch Module.\"", ")", "for", "parameter", "in", "module", ".", "parameters", "(", ")", ":", "# type: ignore", "parameter", ".", "requires_grad_", "(", "not", "freeze", ")", "return", "module" ]
This method can be used to load a module from the pretrained model archive. It is also used implicitly in FromParams-based construction. So instead of using standard params to construct a module, you can instead load a pretrained module from the model archive directly. For example, instead of using params like {"type": "module_type", ...}, you can use the following template:: { "_pretrained": { "archive_file": "../path/to/model.tar.gz", "path": "path.to.module.in.model", "freeze": False } } If you use this feature with FromParams, be aware of the following caveat: a call to initializer(self) at the end of the model initializer can potentially wipe the transferred parameters by reinitializing them. This can happen if you have set up an initializer regex that also matches parameters of the transferred module. To safeguard against this, you can either update your initializer regex to prevent a conflicting match, or add an extra initializer:: [[".*transferred_module_name.*", "prevent"]] Parameters ---------- path : ``str``, required Path of the target module to be loaded from the model. E.g. "_textfield_embedder.token_embedder_tokens" freeze : ``bool``, optional (default=True) Whether to freeze the module parameters or not.
[ "This", "method", "can", "be", "used", "to", "load", "a", "module", "from", "the", "pretrained", "model", "archive", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/models/archival.py#L28-L76
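
Editor's note: a hedged sketch of module transfer with ``Archive.extract_module`` above; the archive path and the module path are placeholders that depend on the archived model's structure.

from allennlp.models.archival import load_archive

archive = load_archive("model.tar.gz")  # hypothetical archive path
# Pull one sub-module out of the trained model, keeping its weights frozen.
embedder = archive.extract_module("_text_field_embedder", freeze=True)  # hypothetical module path
assert all(not p.requires_grad for p in embedder.parameters())
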
23,049
allenai/allennlp
allennlp/models/semantic_parsing/nlvr/nlvr_semantic_parser.py
NlvrSemanticParser._get_action_strings
def _get_action_strings(cls, possible_actions: List[List[ProductionRule]], action_indices: Dict[int, List[List[int]]]) -> List[List[List[str]]]: """ Takes a list of possible actions and indices of decoded actions into those possible actions for a batch and returns sequences of action strings. We assume ``action_indices`` is a dict mapping batch indices to k-best decoded sequence lists. """ all_action_strings: List[List[List[str]]] = [] batch_size = len(possible_actions) for i in range(batch_size): batch_actions = possible_actions[i] batch_best_sequences = action_indices[i] if i in action_indices else [] # This will append an empty list to ``all_action_strings`` if ``batch_best_sequences`` # is empty. action_strings = [[batch_actions[rule_id][0] for rule_id in sequence] for sequence in batch_best_sequences] all_action_strings.append(action_strings) return all_action_strings
python
def _get_action_strings(cls, possible_actions: List[List[ProductionRule]], action_indices: Dict[int, List[List[int]]]) -> List[List[List[str]]]: """ Takes a list of possible actions and indices of decoded actions into those possible actions for a batch and returns sequences of action strings. We assume ``action_indices`` is a dict mapping batch indices to k-best decoded sequence lists. """ all_action_strings: List[List[List[str]]] = [] batch_size = len(possible_actions) for i in range(batch_size): batch_actions = possible_actions[i] batch_best_sequences = action_indices[i] if i in action_indices else [] # This will append an empty list to ``all_action_strings`` if ``batch_best_sequences`` # is empty. action_strings = [[batch_actions[rule_id][0] for rule_id in sequence] for sequence in batch_best_sequences] all_action_strings.append(action_strings) return all_action_strings
[ "def", "_get_action_strings", "(", "cls", ",", "possible_actions", ":", "List", "[", "List", "[", "ProductionRule", "]", "]", ",", "action_indices", ":", "Dict", "[", "int", ",", "List", "[", "List", "[", "int", "]", "]", "]", ")", "->", "List", "[", "List", "[", "List", "[", "str", "]", "]", "]", ":", "all_action_strings", ":", "List", "[", "List", "[", "List", "[", "str", "]", "]", "]", "=", "[", "]", "batch_size", "=", "len", "(", "possible_actions", ")", "for", "i", "in", "range", "(", "batch_size", ")", ":", "batch_actions", "=", "possible_actions", "[", "i", "]", "batch_best_sequences", "=", "action_indices", "[", "i", "]", "if", "i", "in", "action_indices", "else", "[", "]", "# This will append an empty list to ``all_action_strings`` if ``batch_best_sequences``", "# is empty.", "action_strings", "=", "[", "[", "batch_actions", "[", "rule_id", "]", "[", "0", "]", "for", "rule_id", "in", "sequence", "]", "for", "sequence", "in", "batch_best_sequences", "]", "all_action_strings", ".", "append", "(", "action_strings", ")", "return", "all_action_strings" ]
Takes a list of possible actions and indices of decoded actions into those possible actions for a batch and returns sequences of action strings. We assume ``action_indices`` is a dict mapping batch indices to k-best decoded sequence lists.
[ "Takes", "a", "list", "of", "possible", "actions", "and", "indices", "of", "decoded", "actions", "into", "those", "possible", "actions", "for", "a", "batch", "and", "returns", "sequences", "of", "action", "strings", ".", "We", "assume", "action_indices", "is", "a", "dict", "mapping", "batch", "indices", "to", "k", "-", "best", "decoded", "sequence", "lists", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/models/semantic_parsing/nlvr/nlvr_semantic_parser.py#L122-L140
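
Editor's note: a self-contained illustration of the index-to-string mapping that ``_get_action_strings`` above performs, with toy tuples standing in for ``ProductionRule`` objects.

possible_actions = [[("S -> A",), ("A -> a",), ("A -> b",)]]  # one batch instance
action_indices = {0: [[0, 1], [0, 2]]}  # two decoded sequences for instance 0

all_action_strings = []
for i, batch_actions in enumerate(possible_actions):
    best_sequences = action_indices.get(i, [])  # empty list if nothing was decoded
    all_action_strings.append([[batch_actions[rule_id][0] for rule_id in sequence]
                               for sequence in best_sequences])
print(all_action_strings)  # [[['S -> A', 'A -> a'], ['S -> A', 'A -> b']]]
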
23,050
allenai/allennlp
allennlp/models/semantic_parsing/nlvr/nlvr_semantic_parser.py
NlvrSemanticParser.decode
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: """ This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test time, to finalize predictions. We only transform the action string sequences into logical forms here. """ best_action_strings = output_dict["best_action_strings"] # Instantiating an empty world for getting logical forms. world = NlvrLanguage(set()) logical_forms = [] for instance_action_sequences in best_action_strings: instance_logical_forms = [] for action_strings in instance_action_sequences: if action_strings: instance_logical_forms.append(world.action_sequence_to_logical_form(action_strings)) else: instance_logical_forms.append('') logical_forms.append(instance_logical_forms) action_mapping = output_dict['action_mapping'] best_actions = output_dict['best_action_strings'] debug_infos = output_dict['debug_info'] batch_action_info = [] for batch_index, (predicted_actions, debug_info) in enumerate(zip(best_actions, debug_infos)): instance_action_info = [] for predicted_action, action_debug_info in zip(predicted_actions[0], debug_info): action_info = {} action_info['predicted_action'] = predicted_action considered_actions = action_debug_info['considered_actions'] probabilities = action_debug_info['probabilities'] actions = [] for action, probability in zip(considered_actions, probabilities): if action != -1: actions.append((action_mapping[(batch_index, action)], probability)) actions.sort() considered_actions, probabilities = zip(*actions) action_info['considered_actions'] = considered_actions action_info['action_probabilities'] = probabilities action_info['question_attention'] = action_debug_info.get('question_attention', []) instance_action_info.append(action_info) batch_action_info.append(instance_action_info) output_dict["predicted_actions"] = batch_action_info output_dict["logical_form"] = logical_forms return output_dict
python
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: """ This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test time, to finalize predictions. We only transform the action string sequences into logical forms here. """ best_action_strings = output_dict["best_action_strings"] # Instantiating an empty world for getting logical forms. world = NlvrLanguage(set()) logical_forms = [] for instance_action_sequences in best_action_strings: instance_logical_forms = [] for action_strings in instance_action_sequences: if action_strings: instance_logical_forms.append(world.action_sequence_to_logical_form(action_strings)) else: instance_logical_forms.append('') logical_forms.append(instance_logical_forms) action_mapping = output_dict['action_mapping'] best_actions = output_dict['best_action_strings'] debug_infos = output_dict['debug_info'] batch_action_info = [] for batch_index, (predicted_actions, debug_info) in enumerate(zip(best_actions, debug_infos)): instance_action_info = [] for predicted_action, action_debug_info in zip(predicted_actions[0], debug_info): action_info = {} action_info['predicted_action'] = predicted_action considered_actions = action_debug_info['considered_actions'] probabilities = action_debug_info['probabilities'] actions = [] for action, probability in zip(considered_actions, probabilities): if action != -1: actions.append((action_mapping[(batch_index, action)], probability)) actions.sort() considered_actions, probabilities = zip(*actions) action_info['considered_actions'] = considered_actions action_info['action_probabilities'] = probabilities action_info['question_attention'] = action_debug_info.get('question_attention', []) instance_action_info.append(action_info) batch_action_info.append(instance_action_info) output_dict["predicted_actions"] = batch_action_info output_dict["logical_form"] = logical_forms return output_dict
[ "def", "decode", "(", "self", ",", "output_dict", ":", "Dict", "[", "str", ",", "torch", ".", "Tensor", "]", ")", "->", "Dict", "[", "str", ",", "torch", ".", "Tensor", "]", ":", "best_action_strings", "=", "output_dict", "[", "\"best_action_strings\"", "]", "# Instantiating an empty world for getting logical forms.", "world", "=", "NlvrLanguage", "(", "set", "(", ")", ")", "logical_forms", "=", "[", "]", "for", "instance_action_sequences", "in", "best_action_strings", ":", "instance_logical_forms", "=", "[", "]", "for", "action_strings", "in", "instance_action_sequences", ":", "if", "action_strings", ":", "instance_logical_forms", ".", "append", "(", "world", ".", "action_sequence_to_logical_form", "(", "action_strings", ")", ")", "else", ":", "instance_logical_forms", ".", "append", "(", "''", ")", "logical_forms", ".", "append", "(", "instance_logical_forms", ")", "action_mapping", "=", "output_dict", "[", "'action_mapping'", "]", "best_actions", "=", "output_dict", "[", "'best_action_strings'", "]", "debug_infos", "=", "output_dict", "[", "'debug_info'", "]", "batch_action_info", "=", "[", "]", "for", "batch_index", ",", "(", "predicted_actions", ",", "debug_info", ")", "in", "enumerate", "(", "zip", "(", "best_actions", ",", "debug_infos", ")", ")", ":", "instance_action_info", "=", "[", "]", "for", "predicted_action", ",", "action_debug_info", "in", "zip", "(", "predicted_actions", "[", "0", "]", ",", "debug_info", ")", ":", "action_info", "=", "{", "}", "action_info", "[", "'predicted_action'", "]", "=", "predicted_action", "considered_actions", "=", "action_debug_info", "[", "'considered_actions'", "]", "probabilities", "=", "action_debug_info", "[", "'probabilities'", "]", "actions", "=", "[", "]", "for", "action", ",", "probability", "in", "zip", "(", "considered_actions", ",", "probabilities", ")", ":", "if", "action", "!=", "-", "1", ":", "actions", ".", "append", "(", "(", "action_mapping", "[", "(", "batch_index", ",", "action", ")", "]", ",", "probability", ")", ")", "actions", ".", "sort", "(", ")", "considered_actions", ",", "probabilities", "=", "zip", "(", "*", "actions", ")", "action_info", "[", "'considered_actions'", "]", "=", "considered_actions", "action_info", "[", "'action_probabilities'", "]", "=", "probabilities", "action_info", "[", "'question_attention'", "]", "=", "action_debug_info", ".", "get", "(", "'question_attention'", ",", "[", "]", ")", "instance_action_info", ".", "append", "(", "action_info", ")", "batch_action_info", ".", "append", "(", "instance_action_info", ")", "output_dict", "[", "\"predicted_actions\"", "]", "=", "batch_action_info", "output_dict", "[", "\"logical_form\"", "]", "=", "logical_forms", "return", "output_dict" ]
This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test time, to finalize predictions. We only transform the action string sequences into logical forms here.
[ "This", "method", "overrides", "Model", ".", "decode", "which", "gets", "called", "after", "Model", ".", "forward", "at", "test", "time", "to", "finalize", "predictions", ".", "We", "only", "transform", "the", "action", "string", "sequences", "into", "logical", "forms", "here", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/models/semantic_parsing/nlvr/nlvr_semantic_parser.py#L201-L244
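
Editor's note: the sort-then-unzip idiom that ``decode`` above uses to pair considered actions with their probabilities, shown on toy data; masked actions (index -1) are dropped before pairing.

considered_actions = [3, -1, 1]
probabilities = [0.2, 0.0, 0.8]
action_mapping = {1: "int -> 2", 3: "int -> 3"}  # toy stand-in for the real mapping

actions = sorted((action_mapping[action], probability)
                 for action, probability in zip(considered_actions, probabilities)
                 if action != -1)
names, probs = zip(*actions)
print(names, probs)  # ('int -> 2', 'int -> 3') (0.8, 0.2)
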
23,051
allenai/allennlp
allennlp/models/semantic_parsing/nlvr/nlvr_semantic_parser.py
NlvrSemanticParser._check_state_denotations
def _check_state_denotations(self, state: GrammarBasedState, worlds: List[NlvrLanguage]) -> List[bool]: """ Returns whether action history in the state evaluates to the correct denotations over all worlds. Only defined when the state is finished. """ assert state.is_finished(), "Cannot compute denotations for unfinished states!" # Since this is a finished state, its group size must be 1. batch_index = state.batch_indices[0] instance_label_strings = state.extras[batch_index] history = state.action_history[0] all_actions = state.possible_actions[0] action_sequence = [all_actions[action][0] for action in history] return self._check_denotation(action_sequence, instance_label_strings, worlds)
python
def _check_state_denotations(self, state: GrammarBasedState, worlds: List[NlvrLanguage]) -> List[bool]: """ Returns whether action history in the state evaluates to the correct denotations over all worlds. Only defined when the state is finished. """ assert state.is_finished(), "Cannot compute denotations for unfinished states!" # Since this is a finished state, its group size must be 1. batch_index = state.batch_indices[0] instance_label_strings = state.extras[batch_index] history = state.action_history[0] all_actions = state.possible_actions[0] action_sequence = [all_actions[action][0] for action in history] return self._check_denotation(action_sequence, instance_label_strings, worlds)
[ "def", "_check_state_denotations", "(", "self", ",", "state", ":", "GrammarBasedState", ",", "worlds", ":", "List", "[", "NlvrLanguage", "]", ")", "->", "List", "[", "bool", "]", ":", "assert", "state", ".", "is_finished", "(", ")", ",", "\"Cannot compute denotations for unfinished states!\"", "# Since this is a finished state, its group size must be 1.", "batch_index", "=", "state", ".", "batch_indices", "[", "0", "]", "instance_label_strings", "=", "state", ".", "extras", "[", "batch_index", "]", "history", "=", "state", ".", "action_history", "[", "0", "]", "all_actions", "=", "state", ".", "possible_actions", "[", "0", "]", "action_sequence", "=", "[", "all_actions", "[", "action", "]", "[", "0", "]", "for", "action", "in", "history", "]", "return", "self", ".", "_check_denotation", "(", "action_sequence", ",", "instance_label_strings", ",", "worlds", ")" ]
Returns whether action history in the state evaluates to the correct denotations over all worlds. Only defined when the state is finished.
[ "Returns", "whether", "action", "history", "in", "the", "state", "evaluates", "to", "the", "correct", "denotations", "over", "all", "worlds", ".", "Only", "defined", "when", "the", "state", "is", "finished", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/models/semantic_parsing/nlvr/nlvr_semantic_parser.py#L246-L258
23,052
allenai/allennlp
allennlp/commands/find_learning_rate.py
find_learning_rate_from_args
def find_learning_rate_from_args(args: argparse.Namespace) -> None: """ Start learning rate finder for given args """ params = Params.from_file(args.param_path, args.overrides) find_learning_rate_model(params, args.serialization_dir, start_lr=args.start_lr, end_lr=args.end_lr, num_batches=args.num_batches, linear_steps=args.linear, stopping_factor=args.stopping_factor, force=args.force)
python
def find_learning_rate_from_args(args: argparse.Namespace) -> None: """ Start learning rate finder for given args """ params = Params.from_file(args.param_path, args.overrides) find_learning_rate_model(params, args.serialization_dir, start_lr=args.start_lr, end_lr=args.end_lr, num_batches=args.num_batches, linear_steps=args.linear, stopping_factor=args.stopping_factor, force=args.force)
[ "def", "find_learning_rate_from_args", "(", "args", ":", "argparse", ".", "Namespace", ")", "->", "None", ":", "params", "=", "Params", ".", "from_file", "(", "args", ".", "param_path", ",", "args", ".", "overrides", ")", "find_learning_rate_model", "(", "params", ",", "args", ".", "serialization_dir", ",", "start_lr", "=", "args", ".", "start_lr", ",", "end_lr", "=", "args", ".", "end_lr", ",", "num_batches", "=", "args", ".", "num_batches", ",", "linear_steps", "=", "args", ".", "linear", ",", "stopping_factor", "=", "args", ".", "stopping_factor", ",", "force", "=", "args", ".", "force", ")" ]
Start learning rate finder for given args
[ "Start", "learning", "rate", "finder", "for", "given", "args" ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/commands/find_learning_rate.py#L121-L132
23,053
allenai/allennlp
allennlp/commands/find_learning_rate.py
find_learning_rate_model
def find_learning_rate_model(params: Params, serialization_dir: str, start_lr: float = 1e-5, end_lr: float = 10, num_batches: int = 100, linear_steps: bool = False, stopping_factor: float = None, force: bool = False) -> None: """ Runs learning rate search for given `num_batches` and saves the results in ``serialization_dir`` Parameters ---------- params : ``Params`` A parameter object specifying an AllenNLP Experiment. serialization_dir : ``str`` The directory in which to save results. start_lr: ``float`` Learning rate to start the search. end_lr: ``float`` Learning rate upto which search is done. num_batches: ``int`` Number of mini-batches to run Learning rate finder. linear_steps: ``bool`` Increase learning rate linearly if False exponentially. stopping_factor: ``float`` Stop the search when the current loss exceeds the best loss recorded by multiple of stopping factor. If ``None`` search proceeds till the ``end_lr`` force: ``bool`` If True and the serialization directory already exists, everything in it will be removed prior to finding the learning rate. """ if os.path.exists(serialization_dir) and force: shutil.rmtree(serialization_dir) if os.path.exists(serialization_dir) and os.listdir(serialization_dir): raise ConfigurationError(f'Serialization directory {serialization_dir} already exists and is ' f'not empty.') else: os.makedirs(serialization_dir, exist_ok=True) prepare_environment(params) cuda_device = params.params.get('trainer').get('cuda_device', -1) check_for_gpu(cuda_device) all_datasets = datasets_from_params(params) datasets_for_vocab_creation = set(params.pop("datasets_for_vocab_creation", all_datasets)) for dataset in datasets_for_vocab_creation: if dataset not in all_datasets: raise ConfigurationError(f"invalid 'dataset_for_vocab_creation' {dataset}") logger.info("From dataset instances, %s will be considered for vocabulary creation.", ", ".join(datasets_for_vocab_creation)) vocab = Vocabulary.from_params( params.pop("vocabulary", {}), (instance for key, dataset in all_datasets.items() for instance in dataset if key in datasets_for_vocab_creation) ) model = Model.from_params(vocab=vocab, params=params.pop('model')) iterator = DataIterator.from_params(params.pop("iterator")) iterator.index_with(vocab) train_data = all_datasets['train'] trainer_params = params.pop("trainer") no_grad_regexes = trainer_params.pop("no_grad", ()) for name, parameter in model.named_parameters(): if any(re.search(regex, name) for regex in no_grad_regexes): parameter.requires_grad_(False) trainer_choice = trainer_params.pop("type", "default") if trainer_choice != "default": raise ConfigurationError("currently find-learning-rate only works with the default Trainer") trainer = Trainer.from_params(model=model, serialization_dir=serialization_dir, iterator=iterator, train_data=train_data, validation_data=None, params=trainer_params, validation_iterator=None) logger.info(f'Starting learning rate search from {start_lr} to {end_lr} in {num_batches} iterations.') learning_rates, losses = search_learning_rate(trainer, start_lr=start_lr, end_lr=end_lr, num_batches=num_batches, linear_steps=linear_steps, stopping_factor=stopping_factor) logger.info(f'Finished learning rate search.') losses = _smooth(losses, 0.98) _save_plot(learning_rates, losses, os.path.join(serialization_dir, 'lr-losses.png'))
python
def find_learning_rate_model(params: Params, serialization_dir: str, start_lr: float = 1e-5, end_lr: float = 10, num_batches: int = 100, linear_steps: bool = False, stopping_factor: float = None, force: bool = False) -> None: """ Runs learning rate search for given `num_batches` and saves the results in ``serialization_dir`` Parameters ---------- params : ``Params`` A parameter object specifying an AllenNLP Experiment. serialization_dir : ``str`` The directory in which to save results. start_lr: ``float`` Learning rate to start the search. end_lr: ``float`` Learning rate upto which search is done. num_batches: ``int`` Number of mini-batches to run Learning rate finder. linear_steps: ``bool`` Increase learning rate linearly if False exponentially. stopping_factor: ``float`` Stop the search when the current loss exceeds the best loss recorded by multiple of stopping factor. If ``None`` search proceeds till the ``end_lr`` force: ``bool`` If True and the serialization directory already exists, everything in it will be removed prior to finding the learning rate. """ if os.path.exists(serialization_dir) and force: shutil.rmtree(serialization_dir) if os.path.exists(serialization_dir) and os.listdir(serialization_dir): raise ConfigurationError(f'Serialization directory {serialization_dir} already exists and is ' f'not empty.') else: os.makedirs(serialization_dir, exist_ok=True) prepare_environment(params) cuda_device = params.params.get('trainer').get('cuda_device', -1) check_for_gpu(cuda_device) all_datasets = datasets_from_params(params) datasets_for_vocab_creation = set(params.pop("datasets_for_vocab_creation", all_datasets)) for dataset in datasets_for_vocab_creation: if dataset not in all_datasets: raise ConfigurationError(f"invalid 'dataset_for_vocab_creation' {dataset}") logger.info("From dataset instances, %s will be considered for vocabulary creation.", ", ".join(datasets_for_vocab_creation)) vocab = Vocabulary.from_params( params.pop("vocabulary", {}), (instance for key, dataset in all_datasets.items() for instance in dataset if key in datasets_for_vocab_creation) ) model = Model.from_params(vocab=vocab, params=params.pop('model')) iterator = DataIterator.from_params(params.pop("iterator")) iterator.index_with(vocab) train_data = all_datasets['train'] trainer_params = params.pop("trainer") no_grad_regexes = trainer_params.pop("no_grad", ()) for name, parameter in model.named_parameters(): if any(re.search(regex, name) for regex in no_grad_regexes): parameter.requires_grad_(False) trainer_choice = trainer_params.pop("type", "default") if trainer_choice != "default": raise ConfigurationError("currently find-learning-rate only works with the default Trainer") trainer = Trainer.from_params(model=model, serialization_dir=serialization_dir, iterator=iterator, train_data=train_data, validation_data=None, params=trainer_params, validation_iterator=None) logger.info(f'Starting learning rate search from {start_lr} to {end_lr} in {num_batches} iterations.') learning_rates, losses = search_learning_rate(trainer, start_lr=start_lr, end_lr=end_lr, num_batches=num_batches, linear_steps=linear_steps, stopping_factor=stopping_factor) logger.info(f'Finished learning rate search.') losses = _smooth(losses, 0.98) _save_plot(learning_rates, losses, os.path.join(serialization_dir, 'lr-losses.png'))
[ "def", "find_learning_rate_model", "(", "params", ":", "Params", ",", "serialization_dir", ":", "str", ",", "start_lr", ":", "float", "=", "1e-5", ",", "end_lr", ":", "float", "=", "10", ",", "num_batches", ":", "int", "=", "100", ",", "linear_steps", ":", "bool", "=", "False", ",", "stopping_factor", ":", "float", "=", "None", ",", "force", ":", "bool", "=", "False", ")", "->", "None", ":", "if", "os", ".", "path", ".", "exists", "(", "serialization_dir", ")", "and", "force", ":", "shutil", ".", "rmtree", "(", "serialization_dir", ")", "if", "os", ".", "path", ".", "exists", "(", "serialization_dir", ")", "and", "os", ".", "listdir", "(", "serialization_dir", ")", ":", "raise", "ConfigurationError", "(", "f'Serialization directory {serialization_dir} already exists and is '", "f'not empty.'", ")", "else", ":", "os", ".", "makedirs", "(", "serialization_dir", ",", "exist_ok", "=", "True", ")", "prepare_environment", "(", "params", ")", "cuda_device", "=", "params", ".", "params", ".", "get", "(", "'trainer'", ")", ".", "get", "(", "'cuda_device'", ",", "-", "1", ")", "check_for_gpu", "(", "cuda_device", ")", "all_datasets", "=", "datasets_from_params", "(", "params", ")", "datasets_for_vocab_creation", "=", "set", "(", "params", ".", "pop", "(", "\"datasets_for_vocab_creation\"", ",", "all_datasets", ")", ")", "for", "dataset", "in", "datasets_for_vocab_creation", ":", "if", "dataset", "not", "in", "all_datasets", ":", "raise", "ConfigurationError", "(", "f\"invalid 'dataset_for_vocab_creation' {dataset}\"", ")", "logger", ".", "info", "(", "\"From dataset instances, %s will be considered for vocabulary creation.\"", ",", "\", \"", ".", "join", "(", "datasets_for_vocab_creation", ")", ")", "vocab", "=", "Vocabulary", ".", "from_params", "(", "params", ".", "pop", "(", "\"vocabulary\"", ",", "{", "}", ")", ",", "(", "instance", "for", "key", ",", "dataset", "in", "all_datasets", ".", "items", "(", ")", "for", "instance", "in", "dataset", "if", "key", "in", "datasets_for_vocab_creation", ")", ")", "model", "=", "Model", ".", "from_params", "(", "vocab", "=", "vocab", ",", "params", "=", "params", ".", "pop", "(", "'model'", ")", ")", "iterator", "=", "DataIterator", ".", "from_params", "(", "params", ".", "pop", "(", "\"iterator\"", ")", ")", "iterator", ".", "index_with", "(", "vocab", ")", "train_data", "=", "all_datasets", "[", "'train'", "]", "trainer_params", "=", "params", ".", "pop", "(", "\"trainer\"", ")", "no_grad_regexes", "=", "trainer_params", ".", "pop", "(", "\"no_grad\"", ",", "(", ")", ")", "for", "name", ",", "parameter", "in", "model", ".", "named_parameters", "(", ")", ":", "if", "any", "(", "re", ".", "search", "(", "regex", ",", "name", ")", "for", "regex", "in", "no_grad_regexes", ")", ":", "parameter", ".", "requires_grad_", "(", "False", ")", "trainer_choice", "=", "trainer_params", ".", "pop", "(", "\"type\"", ",", "\"default\"", ")", "if", "trainer_choice", "!=", "\"default\"", ":", "raise", "ConfigurationError", "(", "\"currently find-learning-rate only works with the default Trainer\"", ")", "trainer", "=", "Trainer", ".", "from_params", "(", "model", "=", "model", ",", "serialization_dir", "=", "serialization_dir", ",", "iterator", "=", "iterator", ",", "train_data", "=", "train_data", ",", "validation_data", "=", "None", ",", "params", "=", "trainer_params", ",", "validation_iterator", "=", "None", ")", "logger", ".", "info", "(", "f'Starting learning rate search from {start_lr} to {end_lr} in {num_batches} iterations.'", ")", "learning_rates", ",", "losses", 
"=", "search_learning_rate", "(", "trainer", ",", "start_lr", "=", "start_lr", ",", "end_lr", "=", "end_lr", ",", "num_batches", "=", "num_batches", ",", "linear_steps", "=", "linear_steps", ",", "stopping_factor", "=", "stopping_factor", ")", "logger", ".", "info", "(", "f'Finished learning rate search.'", ")", "losses", "=", "_smooth", "(", "losses", ",", "0.98", ")", "_save_plot", "(", "learning_rates", ",", "losses", ",", "os", ".", "path", ".", "join", "(", "serialization_dir", ",", "'lr-losses.png'", ")", ")" ]
Runs learning rate search for given `num_batches` and saves the results in ``serialization_dir`` Parameters ---------- params : ``Params`` A parameter object specifying an AllenNLP Experiment. serialization_dir : ``str`` The directory in which to save results. start_lr: ``float`` Learning rate at which to start the search. end_lr: ``float`` Learning rate up to which the search is done. num_batches: ``int`` Number of mini-batches over which to run the learning rate finder. linear_steps: ``bool`` If True, increase the learning rate linearly; if False, exponentially. stopping_factor: ``float`` Stop the search when the current loss exceeds the best loss recorded by a multiple of the stopping factor. If ``None``, the search proceeds until ``end_lr``. force: ``bool`` If True and the serialization directory already exists, everything in it will be removed prior to finding the learning rate.
[ "Runs", "learning", "rate", "search", "for", "given", "num_batches", "and", "saves", "the", "results", "in", "serialization_dir" ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/commands/find_learning_rate.py#L134-L229
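
Editor's note: a hedged sketch of driving ``find_learning_rate_model`` above from Python rather than the CLI; the config path and output directory are placeholders.

from allennlp.common import Params
from allennlp.commands.find_learning_rate import find_learning_rate_model

params = Params.from_file("experiment.jsonnet")  # hypothetical experiment config
find_learning_rate_model(params, "lr_search_out",
                         start_lr=1e-5, end_lr=10, num_batches=100,
                         linear_steps=False, stopping_factor=None, force=True)
# Per the function above, this writes lr-losses.png into lr_search_out.
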
23,054
allenai/allennlp
allennlp/commands/find_learning_rate.py
_smooth
def _smooth(values: List[float], beta: float) -> List[float]: """ Exponential smoothing of values """ avg_value = 0. smoothed = [] for i, value in enumerate(values): avg_value = beta * avg_value + (1 - beta) * value smoothed.append(avg_value / (1 - beta ** (i + 1))) return smoothed
python
def _smooth(values: List[float], beta: float) -> List[float]: """ Exponential smoothing of values """ avg_value = 0. smoothed = [] for i, value in enumerate(values): avg_value = beta * avg_value + (1 - beta) * value smoothed.append(avg_value / (1 - beta ** (i + 1))) return smoothed
[ "def", "_smooth", "(", "values", ":", "List", "[", "float", "]", ",", "beta", ":", "float", ")", "->", "List", "[", "float", "]", ":", "avg_value", "=", "0.", "smoothed", "=", "[", "]", "for", "i", ",", "value", "in", "enumerate", "(", "values", ")", ":", "avg_value", "=", "beta", "*", "avg_value", "+", "(", "1", "-", "beta", ")", "*", "value", "smoothed", ".", "append", "(", "avg_value", "/", "(", "1", "-", "beta", "**", "(", "i", "+", "1", ")", ")", ")", "return", "smoothed" ]
Exponential smoothing of values
[ "Exponential", "smoothing", "of", "values" ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/commands/find_learning_rate.py#L315-L322
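
Editor's note: the bias-corrected exponential moving average computed by ``_smooth`` above, restated as a standalone snippet on toy losses.

def smooth(values, beta):
    avg, out = 0.0, []
    for i, value in enumerate(values):
        avg = beta * avg + (1 - beta) * value    # running exponential average
        out.append(avg / (1 - beta ** (i + 1)))  # undo the bias from the zero init
    return out

print(smooth([4.0, 3.0, 2.5], beta=0.9))  # [4.0, 3.473..., 3.114...]
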
23,055
allenai/allennlp
allennlp/modules/scalar_mix.py
ScalarMix.forward
def forward(self, tensors: List[torch.Tensor], # pylint: disable=arguments-differ mask: torch.Tensor = None) -> torch.Tensor: """ Compute a weighted average of the ``tensors``. The input tensors an be any shape with at least two dimensions, but must all be the same shape. When ``do_layer_norm=True``, the ``mask`` is required input. If the ``tensors`` are dimensioned ``(dim_0, ..., dim_{n-1}, dim_n)``, then the ``mask`` is dimensioned ``(dim_0, ..., dim_{n-1})``, as in the typical case with ``tensors`` of shape ``(batch_size, timesteps, dim)`` and ``mask`` of shape ``(batch_size, timesteps)``. When ``do_layer_norm=False`` the ``mask`` is ignored. """ if len(tensors) != self.mixture_size: raise ConfigurationError("{} tensors were passed, but the module was initialized to " "mix {} tensors.".format(len(tensors), self.mixture_size)) def _do_layer_norm(tensor, broadcast_mask, num_elements_not_masked): tensor_masked = tensor * broadcast_mask mean = torch.sum(tensor_masked) / num_elements_not_masked variance = torch.sum(((tensor_masked - mean) * broadcast_mask)**2) / num_elements_not_masked return (tensor - mean) / torch.sqrt(variance + 1E-12) normed_weights = torch.nn.functional.softmax(torch.cat([parameter for parameter in self.scalar_parameters]), dim=0) normed_weights = torch.split(normed_weights, split_size_or_sections=1) if not self.do_layer_norm: pieces = [] for weight, tensor in zip(normed_weights, tensors): pieces.append(weight * tensor) return self.gamma * sum(pieces) else: mask_float = mask.float() broadcast_mask = mask_float.unsqueeze(-1) input_dim = tensors[0].size(-1) num_elements_not_masked = torch.sum(mask_float) * input_dim pieces = [] for weight, tensor in zip(normed_weights, tensors): pieces.append(weight * _do_layer_norm(tensor, broadcast_mask, num_elements_not_masked)) return self.gamma * sum(pieces)
python
def forward(self, tensors: List[torch.Tensor], # pylint: disable=arguments-differ mask: torch.Tensor = None) -> torch.Tensor: """ Compute a weighted average of the ``tensors``. The input tensors an be any shape with at least two dimensions, but must all be the same shape. When ``do_layer_norm=True``, the ``mask`` is required input. If the ``tensors`` are dimensioned ``(dim_0, ..., dim_{n-1}, dim_n)``, then the ``mask`` is dimensioned ``(dim_0, ..., dim_{n-1})``, as in the typical case with ``tensors`` of shape ``(batch_size, timesteps, dim)`` and ``mask`` of shape ``(batch_size, timesteps)``. When ``do_layer_norm=False`` the ``mask`` is ignored. """ if len(tensors) != self.mixture_size: raise ConfigurationError("{} tensors were passed, but the module was initialized to " "mix {} tensors.".format(len(tensors), self.mixture_size)) def _do_layer_norm(tensor, broadcast_mask, num_elements_not_masked): tensor_masked = tensor * broadcast_mask mean = torch.sum(tensor_masked) / num_elements_not_masked variance = torch.sum(((tensor_masked - mean) * broadcast_mask)**2) / num_elements_not_masked return (tensor - mean) / torch.sqrt(variance + 1E-12) normed_weights = torch.nn.functional.softmax(torch.cat([parameter for parameter in self.scalar_parameters]), dim=0) normed_weights = torch.split(normed_weights, split_size_or_sections=1) if not self.do_layer_norm: pieces = [] for weight, tensor in zip(normed_weights, tensors): pieces.append(weight * tensor) return self.gamma * sum(pieces) else: mask_float = mask.float() broadcast_mask = mask_float.unsqueeze(-1) input_dim = tensors[0].size(-1) num_elements_not_masked = torch.sum(mask_float) * input_dim pieces = [] for weight, tensor in zip(normed_weights, tensors): pieces.append(weight * _do_layer_norm(tensor, broadcast_mask, num_elements_not_masked)) return self.gamma * sum(pieces)
[ "def", "forward", "(", "self", ",", "tensors", ":", "List", "[", "torch", ".", "Tensor", "]", ",", "# pylint: disable=arguments-differ", "mask", ":", "torch", ".", "Tensor", "=", "None", ")", "->", "torch", ".", "Tensor", ":", "if", "len", "(", "tensors", ")", "!=", "self", ".", "mixture_size", ":", "raise", "ConfigurationError", "(", "\"{} tensors were passed, but the module was initialized to \"", "\"mix {} tensors.\"", ".", "format", "(", "len", "(", "tensors", ")", ",", "self", ".", "mixture_size", ")", ")", "def", "_do_layer_norm", "(", "tensor", ",", "broadcast_mask", ",", "num_elements_not_masked", ")", ":", "tensor_masked", "=", "tensor", "*", "broadcast_mask", "mean", "=", "torch", ".", "sum", "(", "tensor_masked", ")", "/", "num_elements_not_masked", "variance", "=", "torch", ".", "sum", "(", "(", "(", "tensor_masked", "-", "mean", ")", "*", "broadcast_mask", ")", "**", "2", ")", "/", "num_elements_not_masked", "return", "(", "tensor", "-", "mean", ")", "/", "torch", ".", "sqrt", "(", "variance", "+", "1E-12", ")", "normed_weights", "=", "torch", ".", "nn", ".", "functional", ".", "softmax", "(", "torch", ".", "cat", "(", "[", "parameter", "for", "parameter", "in", "self", ".", "scalar_parameters", "]", ")", ",", "dim", "=", "0", ")", "normed_weights", "=", "torch", ".", "split", "(", "normed_weights", ",", "split_size_or_sections", "=", "1", ")", "if", "not", "self", ".", "do_layer_norm", ":", "pieces", "=", "[", "]", "for", "weight", ",", "tensor", "in", "zip", "(", "normed_weights", ",", "tensors", ")", ":", "pieces", ".", "append", "(", "weight", "*", "tensor", ")", "return", "self", ".", "gamma", "*", "sum", "(", "pieces", ")", "else", ":", "mask_float", "=", "mask", ".", "float", "(", ")", "broadcast_mask", "=", "mask_float", ".", "unsqueeze", "(", "-", "1", ")", "input_dim", "=", "tensors", "[", "0", "]", ".", "size", "(", "-", "1", ")", "num_elements_not_masked", "=", "torch", ".", "sum", "(", "mask_float", ")", "*", "input_dim", "pieces", "=", "[", "]", "for", "weight", ",", "tensor", "in", "zip", "(", "normed_weights", ",", "tensors", ")", ":", "pieces", ".", "append", "(", "weight", "*", "_do_layer_norm", "(", "tensor", ",", "broadcast_mask", ",", "num_elements_not_masked", ")", ")", "return", "self", ".", "gamma", "*", "sum", "(", "pieces", ")" ]
Compute a weighted average of the ``tensors``. The input tensors can be any shape with at least two dimensions, but must all be the same shape. When ``do_layer_norm=True``, the ``mask`` is a required input. If the ``tensors`` are dimensioned ``(dim_0, ..., dim_{n-1}, dim_n)``, then the ``mask`` is dimensioned ``(dim_0, ..., dim_{n-1})``, as in the typical case with ``tensors`` of shape ``(batch_size, timesteps, dim)`` and ``mask`` of shape ``(batch_size, timesteps)``. When ``do_layer_norm=False`` the ``mask`` is ignored.
[ "Compute", "a", "weighted", "average", "of", "the", "tensors", ".", "The", "input", "tensors", "an", "be", "any", "shape", "with", "at", "least", "two", "dimensions", "but", "must", "all", "be", "the", "same", "shape", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/scalar_mix.py#L38-L81
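
Editor's note: the no-layer-norm branch of ``ScalarMix.forward`` above, restated with explicit tensors; the scalar weights and gamma here are toy constants rather than learned parameters.

import torch

tensors = [torch.ones(2, 3, 4), 2 * torch.ones(2, 3, 4)]  # two layers to mix
scalar_parameters = torch.tensor([0.0, 0.0])              # unnormalized mixing weights
gamma = torch.tensor(1.0)

normed_weights = torch.nn.functional.softmax(scalar_parameters, dim=0)  # -> [0.5, 0.5]
mixed = gamma * sum(weight * tensor for weight, tensor in zip(normed_weights, tensors))
print(mixed[0, 0])  # tensor([1.5000, 1.5000, 1.5000, 1.5000])
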
23,056
allenai/allennlp
allennlp/semparse/domain_languages/domain_language.py
DomainLanguage.execute
def execute(self, logical_form: str): """Executes a logical form, using whatever predicates you have defined.""" if not hasattr(self, '_functions'): raise RuntimeError("You must call super().__init__() in your Language constructor") logical_form = logical_form.replace(",", " ") expression = util.lisp_to_nested_expression(logical_form) return self._execute_expression(expression)
python
def execute(self, logical_form: str): """Executes a logical form, using whatever predicates you have defined.""" if not hasattr(self, '_functions'): raise RuntimeError("You must call super().__init__() in your Language constructor") logical_form = logical_form.replace(",", " ") expression = util.lisp_to_nested_expression(logical_form) return self._execute_expression(expression)
[ "def", "execute", "(", "self", ",", "logical_form", ":", "str", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_functions'", ")", ":", "raise", "RuntimeError", "(", "\"You must call super().__init__() in your Language constructor\"", ")", "logical_form", "=", "logical_form", ".", "replace", "(", "\",\"", ",", "\" \"", ")", "expression", "=", "util", ".", "lisp_to_nested_expression", "(", "logical_form", ")", "return", "self", ".", "_execute_expression", "(", "expression", ")" ]
Executes a logical form, using whatever predicates you have defined.
[ "Executes", "a", "logical", "form", "using", "whatever", "predicates", "you", "have", "defined", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/domain_language.py#L307-L313
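
Editor's note: a hedged sketch of defining and executing a tiny language with ``DomainLanguage`` above, assuming the ``predicate`` decorator exported by the same module and that terminal numbers must be registered as constants.

from allennlp.semparse.domain_languages.domain_language import DomainLanguage, predicate

class Arithmetic(DomainLanguage):
    def __init__(self):
        # Register the terminals we want to use; int is the only start type.
        super().__init__(start_types={int}, allowed_constants={'2': 2, '3': 3})

    @predicate
    def add(self, num1: int, num2: int) -> int:
        return num1 + num2

language = Arithmetic()
print(language.execute("(add 2 3)"))  # 5
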
23,057
allenai/allennlp
allennlp/semparse/domain_languages/domain_language.py
DomainLanguage.get_nonterminal_productions
def get_nonterminal_productions(self) -> Dict[str, List[str]]: """ Induces a grammar from the defined collection of predicates in this language and returns all productions in that grammar, keyed by the non-terminal they are expanding. This includes terminal productions implied by each predicate as well as productions for the `return type` of each defined predicate. For example, defining a "multiply" predicate adds a "<int,int:int> -> multiply" terminal production to the grammar, and `also` a "int -> [<int,int:int>, int, int]" non-terminal production, because I can use the "multiply" predicate to produce an int. """ if not self._nonterminal_productions: actions: Dict[str, Set[str]] = defaultdict(set) # If you didn't give us a set of valid start types, we'll assume all types we know # about (including functional types) are valid start types. if self._start_types: start_types = self._start_types else: start_types = set() for type_list in self._function_types.values(): start_types.update(type_list) for start_type in start_types: actions[START_SYMBOL].add(f"{START_SYMBOL} -> {start_type}") for name, function_type_list in self._function_types.items(): for function_type in function_type_list: actions[str(function_type)].add(f"{function_type} -> {name}") if isinstance(function_type, FunctionType): return_type = function_type.return_type arg_types = function_type.argument_types right_side = f"[{function_type}, {', '.join(str(arg_type) for arg_type in arg_types)}]" actions[str(return_type)].add(f"{return_type} -> {right_side}") self._nonterminal_productions = {key: sorted(value) for key, value in actions.items()} return self._nonterminal_productions
python
def get_nonterminal_productions(self) -> Dict[str, List[str]]: """ Induces a grammar from the defined collection of predicates in this language and returns all productions in that grammar, keyed by the non-terminal they are expanding. This includes terminal productions implied by each predicate as well as productions for the `return type` of each defined predicate. For example, defining a "multiply" predicate adds a "<int,int:int> -> multiply" terminal production to the grammar, and `also` a "int -> [<int,int:int>, int, int]" non-terminal production, because I can use the "multiply" predicate to produce an int. """ if not self._nonterminal_productions: actions: Dict[str, Set[str]] = defaultdict(set) # If you didn't give us a set of valid start types, we'll assume all types we know # about (including functional types) are valid start types. if self._start_types: start_types = self._start_types else: start_types = set() for type_list in self._function_types.values(): start_types.update(type_list) for start_type in start_types: actions[START_SYMBOL].add(f"{START_SYMBOL} -> {start_type}") for name, function_type_list in self._function_types.items(): for function_type in function_type_list: actions[str(function_type)].add(f"{function_type} -> {name}") if isinstance(function_type, FunctionType): return_type = function_type.return_type arg_types = function_type.argument_types right_side = f"[{function_type}, {', '.join(str(arg_type) for arg_type in arg_types)}]" actions[str(return_type)].add(f"{return_type} -> {right_side}") self._nonterminal_productions = {key: sorted(value) for key, value in actions.items()} return self._nonterminal_productions
[ "def", "get_nonterminal_productions", "(", "self", ")", "->", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", ":", "if", "not", "self", ".", "_nonterminal_productions", ":", "actions", ":", "Dict", "[", "str", ",", "Set", "[", "str", "]", "]", "=", "defaultdict", "(", "set", ")", "# If you didn't give us a set of valid start types, we'll assume all types we know", "# about (including functional types) are valid start types.", "if", "self", ".", "_start_types", ":", "start_types", "=", "self", ".", "_start_types", "else", ":", "start_types", "=", "set", "(", ")", "for", "type_list", "in", "self", ".", "_function_types", ".", "values", "(", ")", ":", "start_types", ".", "update", "(", "type_list", ")", "for", "start_type", "in", "start_types", ":", "actions", "[", "START_SYMBOL", "]", ".", "add", "(", "f\"{START_SYMBOL} -> {start_type}\"", ")", "for", "name", ",", "function_type_list", "in", "self", ".", "_function_types", ".", "items", "(", ")", ":", "for", "function_type", "in", "function_type_list", ":", "actions", "[", "str", "(", "function_type", ")", "]", ".", "add", "(", "f\"{function_type} -> {name}\"", ")", "if", "isinstance", "(", "function_type", ",", "FunctionType", ")", ":", "return_type", "=", "function_type", ".", "return_type", "arg_types", "=", "function_type", ".", "argument_types", "right_side", "=", "f\"[{function_type}, {', '.join(str(arg_type) for arg_type in arg_types)}]\"", "actions", "[", "str", "(", "return_type", ")", "]", ".", "add", "(", "f\"{return_type} -> {right_side}\"", ")", "self", ".", "_nonterminal_productions", "=", "{", "key", ":", "sorted", "(", "value", ")", "for", "key", ",", "value", "in", "actions", ".", "items", "(", ")", "}", "return", "self", ".", "_nonterminal_productions" ]
Induces a grammar from the defined collection of predicates in this language and returns all productions in that grammar, keyed by the non-terminal they are expanding. This includes terminal productions implied by each predicate as well as productions for the `return type` of each defined predicate. For example, defining a "multiply" predicate adds a "<int,int:int> -> multiply" terminal production to the grammar, and `also` a "int -> [<int,int:int>, int, int]" non-terminal production, because I can use the "multiply" predicate to produce an int.
[ "Induces", "a", "grammar", "from", "the", "defined", "collection", "of", "predicates", "in", "this", "language", "and", "returns", "all", "productions", "in", "that", "grammar", "keyed", "by", "the", "non", "-", "terminal", "they", "are", "expanding", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/domain_language.py#L335-L367
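
Editor's note: continuing the toy Arithmetic sketch from the ``execute`` record above, the induced productions follow the pattern the docstring describes.

# Reuses the ``language`` object from the Arithmetic sketch above.
for nonterminal, rules in language.get_nonterminal_productions().items():
    print(nonterminal, rules)
# Expected shape (assuming constants 2 and 3 and the ``add`` predicate):
#   @start@ ['@start@ -> int']
#   int ['int -> 2', 'int -> 3', 'int -> [<int,int:int>, int, int]']
#   <int,int:int> ['<int,int:int> -> add']
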
23,058
allenai/allennlp
allennlp/semparse/domain_languages/domain_language.py
DomainLanguage.logical_form_to_action_sequence
def logical_form_to_action_sequence(self, logical_form: str) -> List[str]: """ Converts a logical form into a linearization of the production rules from its abstract syntax tree. The linearization is top-down, depth-first. Each production rule is formatted as "LHS -> RHS", where "LHS" is a single non-terminal type, and RHS is either a terminal or a list of non-terminals (other possible values for RHS in a more general context-free grammar are not produced by our grammar induction logic). Non-terminals are `types` in the grammar, either basic types (like ``int``, ``str``, or some class that you define), or functional types, represented with angle brackets with a colon separating arguments from the return type. Multi-argument functions have commas separating their argument types. For example, ``<int:int>`` is a function that takes an integer and returns an integer, and ``<int,int:int>`` is a function that takes two integer arguments and returns an integer. As an example translation from logical form to complete action sequence, the logical form ``(add 2 3)`` would be translated to ``['@start@ -> int', 'int -> [<int,int:int>, int, int]', '<int,int:int> -> add', 'int -> 2', 'int -> 3']``. """ expression = util.lisp_to_nested_expression(logical_form) try: transitions, start_type = self._get_transitions(expression, expected_type=None) if self._start_types and start_type not in self._start_types: raise ParsingError(f"Expression had unallowed start type of {start_type}: {expression}") except ParsingError: logger.error(f'Error parsing logical form: {logical_form}') raise transitions.insert(0, f'@start@ -> {start_type}') return transitions
python
def logical_form_to_action_sequence(self, logical_form: str) -> List[str]: """ Converts a logical form into a linearization of the production rules from its abstract syntax tree. The linearization is top-down, depth-first. Each production rule is formatted as "LHS -> RHS", where "LHS" is a single non-terminal type, and RHS is either a terminal or a list of non-terminals (other possible values for RHS in a more general context-free grammar are not produced by our grammar induction logic). Non-terminals are `types` in the grammar, either basic types (like ``int``, ``str``, or some class that you define), or functional types, represented with angle brackets with a colon separating arguments from the return type. Multi-argument functions have commas separating their argument types. For example, ``<int:int>`` is a function that takes an integer and returns an integer, and ``<int,int:int>`` is a function that takes two integer arguments and returns an integer. As an example translation from logical form to complete action sequence, the logical form ``(add 2 3)`` would be translated to ``['@start@ -> int', 'int -> [<int,int:int>, int, int]', '<int,int:int> -> add', 'int -> 2', 'int -> 3']``. """ expression = util.lisp_to_nested_expression(logical_form) try: transitions, start_type = self._get_transitions(expression, expected_type=None) if self._start_types and start_type not in self._start_types: raise ParsingError(f"Expression had unallowed start type of {start_type}: {expression}") except ParsingError: logger.error(f'Error parsing logical form: {logical_form}') raise transitions.insert(0, f'@start@ -> {start_type}') return transitions
[ "def", "logical_form_to_action_sequence", "(", "self", ",", "logical_form", ":", "str", ")", "->", "List", "[", "str", "]", ":", "expression", "=", "util", ".", "lisp_to_nested_expression", "(", "logical_form", ")", "try", ":", "transitions", ",", "start_type", "=", "self", ".", "_get_transitions", "(", "expression", ",", "expected_type", "=", "None", ")", "if", "self", ".", "_start_types", "and", "start_type", "not", "in", "self", ".", "_start_types", ":", "raise", "ParsingError", "(", "f\"Expression had unallowed start type of {start_type}: {expression}\"", ")", "except", "ParsingError", ":", "logger", ".", "error", "(", "f'Error parsing logical form: {logical_form}'", ")", "raise", "transitions", ".", "insert", "(", "0", ",", "f'@start@ -> {start_type}'", ")", "return", "transitions" ]
Converts a logical form into a linearization of the production rules from its abstract syntax tree. The linearization is top-down, depth-first. Each production rule is formatted as "LHS -> RHS", where "LHS" is a single non-terminal type, and RHS is either a terminal or a list of non-terminals (other possible values for RHS in a more general context-free grammar are not produced by our grammar induction logic). Non-terminals are `types` in the grammar, either basic types (like ``int``, ``str``, or some class that you define), or functional types, represented with angle brackets with a colon separating arguments from the return type. Multi-argument functions have commas separating their argument types. For example, ``<int:int>`` is a function that takes an integer and returns an integer, and ``<int,int:int>`` is a function that takes two integer arguments and returns an integer. As an example translation from logical form to complete action sequence, the logical form ``(add 2 3)`` would be translated to ``['@start@ -> int', 'int -> [<int,int:int>, int, int]', '<int,int:int> -> add', 'int -> 2', 'int -> 3']``.
[ "Converts", "a", "logical", "form", "into", "a", "linearization", "of", "the", "production", "rules", "from", "its", "abstract", "syntax", "tree", ".", "The", "linearization", "is", "top", "-", "down", "depth", "-", "first", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/domain_language.py#L379-L409
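Below is a minimal runnable sketch of the translation the docstring above describes. The `Arithmetic` language, its constants, and the constructor arguments are illustrative assumptions; `DomainLanguage` and the `predicate` decorator are the names this module exports.

from allennlp.semparse.domain_languages import DomainLanguage, predicate

class Arithmetic(DomainLanguage):
    def __init__(self):
        # Allow `int` as a start type and the literals 2 and 3 as terminals.
        super().__init__(start_types={int}, allowed_constants={'2': 2, '3': 3})

    @predicate
    def add(self, num1: int, num2: int) -> int:
        return num1 + num2

language = Arithmetic()
print(language.logical_form_to_action_sequence('(add 2 3)'))
# Per the docstring: ['@start@ -> int', 'int -> [<int,int:int>, int, int]',
#                     '<int,int:int> -> add', 'int -> 2', 'int -> 3']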
23,059
allenai/allennlp
allennlp/semparse/domain_languages/domain_language.py
DomainLanguage.is_nonterminal
def is_nonterminal(self, symbol: str) -> bool: """ Determines whether an input symbol is a valid non-terminal in the grammar. """ nonterminal_productions = self.get_nonterminal_productions() return symbol in nonterminal_productions
python
def is_nonterminal(self, symbol: str) -> bool: """ Determines whether an input symbol is a valid non-terminal in the grammar. """ nonterminal_productions = self.get_nonterminal_productions() return symbol in nonterminal_productions
[ "def", "is_nonterminal", "(", "self", ",", "symbol", ":", "str", ")", "->", "bool", ":", "nonterminal_productions", "=", "self", ".", "get_nonterminal_productions", "(", ")", "return", "symbol", "in", "nonterminal_productions" ]
Determines whether an input symbol is a valid non-terminal in the grammar.
[ "Determines", "whether", "an", "input", "symbol", "is", "a", "valid", "non", "-", "terminal", "in", "the", "grammar", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/domain_language.py#L488-L493
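Reusing the toy `Arithmetic` language sketched above, the non-terminals are exactly the keys of the induced production table:

print(language.is_nonterminal('int'))   # True: 'int' has productions
print(language.is_nonterminal('add'))   # False: 'add' appears only as a terminal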
23,060
allenai/allennlp
allennlp/data/token_indexers/token_indexer.py
TokenIndexer.pad_token_sequence
def pad_token_sequence(self, tokens: Dict[str, List[TokenType]], desired_num_tokens: Dict[str, int], padding_lengths: Dict[str, int]) -> Dict[str, List[TokenType]]: """ This method pads a list of tokens to ``desired_num_tokens`` and returns a padded copy of the input tokens. If the input token list is longer than ``desired_num_tokens`` then it will be truncated. ``padding_lengths`` is used to provide supplemental padding parameters which are needed in some cases. For example, it contains the widths to pad characters to when doing character-level padding. """ raise NotImplementedError
python
def pad_token_sequence(self, tokens: Dict[str, List[TokenType]], desired_num_tokens: Dict[str, int], padding_lengths: Dict[str, int]) -> Dict[str, List[TokenType]]: """ This method pads a list of tokens to ``desired_num_tokens`` and returns a padded copy of the input tokens. If the input token list is longer than ``desired_num_tokens`` then it will be truncated. ``padding_lengths`` is used to provide supplemental padding parameters which are needed in some cases. For example, it contains the widths to pad characters to when doing character-level padding. """ raise NotImplementedError
[ "def", "pad_token_sequence", "(", "self", ",", "tokens", ":", "Dict", "[", "str", ",", "List", "[", "TokenType", "]", "]", ",", "desired_num_tokens", ":", "Dict", "[", "str", ",", "int", "]", ",", "padding_lengths", ":", "Dict", "[", "str", ",", "int", "]", ")", "->", "Dict", "[", "str", ",", "List", "[", "TokenType", "]", "]", ":", "raise", "NotImplementedError" ]
This method pads a list of tokens to ``desired_num_tokens`` and returns a padded copy of the input tokens. If the input token list is longer than ``desired_num_tokens`` then it will be truncated. ``padding_lengths`` is used to provide supplemental padding parameters which are needed in some cases. For example, it contains the widths to pad characters to when doing character-level padding.
[ "This", "method", "pads", "a", "list", "of", "tokens", "to", "desired_num_tokens", "and", "returns", "a", "padded", "copy", "of", "the", "input", "tokens", ".", "If", "the", "input", "token", "list", "is", "longer", "than", "desired_num_tokens", "then", "it", "will", "be", "truncated", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/token_indexers/token_indexer.py#L62-L75
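A hedged toy implementation of the contract this abstract method states; the zero-padding, the 'tokens' key, and the function name are illustrative, not the library's:

from typing import Dict, List

def toy_pad_token_sequence(tokens: Dict[str, List[int]],
                           desired_num_tokens: Dict[str, int],
                           padding_lengths: Dict[str, int]) -> Dict[str, List[int]]:
    # Pad each keyed list with zeros up to the desired length, truncating any overflow.
    return {key: (ids + [0] * desired_num_tokens[key])[:desired_num_tokens[key]]
            for key, ids in tokens.items()}

print(toy_pad_token_sequence({'tokens': [4, 8, 15]}, {'tokens': 5}, {}))
# {'tokens': [4, 8, 15, 0, 0]}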
23,061
allenai/allennlp
allennlp/data/dataset_readers/coreference_resolution/conll.py
canonicalize_clusters
def canonicalize_clusters(clusters: DefaultDict[int, List[Tuple[int, int]]]) -> List[List[Tuple[int, int]]]: """ The CONLL 2012 data includes 2 annotated spans which are identical, but have different ids. This checks all clusters for spans which are identical, and if it finds any, merges the clusters containing the identical spans. """ merged_clusters: List[Set[Tuple[int, int]]] = [] for cluster in clusters.values(): cluster_with_overlapping_mention = None for mention in cluster: # Look at clusters we have already processed to # see if they contain a mention in the current # cluster for comparison. for cluster2 in merged_clusters: if mention in cluster2: # first cluster in merged clusters # which contains this mention. cluster_with_overlapping_mention = cluster2 break # Already encountered overlap - no need to keep looking. if cluster_with_overlapping_mention is not None: break if cluster_with_overlapping_mention is not None: # Merge cluster we are currently processing into # the cluster in the processed list. cluster_with_overlapping_mention.update(cluster) else: merged_clusters.append(set(cluster)) return [list(c) for c in merged_clusters]
python
def canonicalize_clusters(clusters: DefaultDict[int, List[Tuple[int, int]]]) -> List[List[Tuple[int, int]]]: """ The CONLL 2012 data includes 2 annotated spans which are identical, but have different ids. This checks all clusters for spans which are identical, and if it finds any, merges the clusters containing the identical spans. """ merged_clusters: List[Set[Tuple[int, int]]] = [] for cluster in clusters.values(): cluster_with_overlapping_mention = None for mention in cluster: # Look at clusters we have already processed to # see if they contain a mention in the current # cluster for comparison. for cluster2 in merged_clusters: if mention in cluster2: # first cluster in merged clusters # which contains this mention. cluster_with_overlapping_mention = cluster2 break # Already encountered overlap - no need to keep looking. if cluster_with_overlapping_mention is not None: break if cluster_with_overlapping_mention is not None: # Merge cluster we are currently processing into # the cluster in the processed list. cluster_with_overlapping_mention.update(cluster) else: merged_clusters.append(set(cluster)) return [list(c) for c in merged_clusters]
[ "def", "canonicalize_clusters", "(", "clusters", ":", "DefaultDict", "[", "int", ",", "List", "[", "Tuple", "[", "int", ",", "int", "]", "]", "]", ")", "->", "List", "[", "List", "[", "Tuple", "[", "int", ",", "int", "]", "]", "]", ":", "merged_clusters", ":", "List", "[", "Set", "[", "Tuple", "[", "int", ",", "int", "]", "]", "]", "=", "[", "]", "for", "cluster", "in", "clusters", ".", "values", "(", ")", ":", "cluster_with_overlapping_mention", "=", "None", "for", "mention", "in", "cluster", ":", "# Look at clusters we have already processed to", "# see if they contain a mention in the current", "# cluster for comparison.", "for", "cluster2", "in", "merged_clusters", ":", "if", "mention", "in", "cluster2", ":", "# first cluster in merged clusters", "# which contains this mention.", "cluster_with_overlapping_mention", "=", "cluster2", "break", "# Already encountered overlap - no need to keep looking.", "if", "cluster_with_overlapping_mention", "is", "not", "None", ":", "break", "if", "cluster_with_overlapping_mention", "is", "not", "None", ":", "# Merge cluster we are currently processing into", "# the cluster in the processed list.", "cluster_with_overlapping_mention", ".", "update", "(", "cluster", ")", "else", ":", "merged_clusters", ".", "append", "(", "set", "(", "cluster", ")", ")", "return", "[", "list", "(", "c", ")", "for", "c", "in", "merged_clusters", "]" ]
The CONLL 2012 data sometimes includes two annotated spans which are identical but have different ids. This checks all clusters for identical spans and, if it finds any, merges the clusters containing the identical spans.
[ "The", "CONLL", "2012", "data", "includes", "2", "annotated", "spans", "which", "are", "identical", "but", "have", "different", "ids", ".", "This", "checks", "all", "clusters", "for", "spans", "which", "are", "identical", "and", "if", "it", "finds", "any", "merges", "the", "clusters", "containing", "the", "identical", "spans", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/coreference_resolution/conll.py#L18-L47
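A small worked example of the merge, following the code above: two clusters that share the mention (0, 1) collapse into one.

from collections import defaultdict
from allennlp.data.dataset_readers.coreference_resolution.conll import canonicalize_clusters

clusters = defaultdict(list)
clusters[0] = [(0, 1), (5, 7)]
clusters[1] = [(0, 1), (10, 12)]   # duplicates the (0, 1) mention of cluster 0
print(canonicalize_clusters(clusters))
# A single merged cluster containing (0, 1), (5, 7) and (10, 12)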
23,062
allenai/allennlp
allennlp/predictors/open_information_extraction.py
get_predicate_indices
def get_predicate_indices(tags: List[str]) -> List[int]: """ Return the word indices of a predicate in BIO tags. """ return [ind for ind, tag in enumerate(tags) if 'V' in tag]
python
def get_predicate_indices(tags: List[str]) -> List[int]: """ Return the word indices of a predicate in BIO tags. """ return [ind for ind, tag in enumerate(tags) if 'V' in tag]
[ "def", "get_predicate_indices", "(", "tags", ":", "List", "[", "str", "]", ")", "->", "List", "[", "int", "]", ":", "return", "[", "ind", "for", "ind", ",", "tag", "in", "enumerate", "(", "tags", ")", "if", "'V'", "in", "tag", "]" ]
Return the word indices of a predicate in BIO tags.
[ "Return", "the", "word", "indices", "of", "a", "predicate", "in", "BIO", "tags", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/predictors/open_information_extraction.py#L63-L67
23,063
allenai/allennlp
allennlp/predictors/open_information_extraction.py
get_predicate_text
def get_predicate_text(sent_tokens: List[Token], tags: List[str]) -> str: """ Get the predicate in this prediction. """ return " ".join([sent_tokens[pred_id].text for pred_id in get_predicate_indices(tags)])
python
def get_predicate_text(sent_tokens: List[Token], tags: List[str]) -> str: """ Get the predicate in this prediction. """ return " ".join([sent_tokens[pred_id].text for pred_id in get_predicate_indices(tags)])
[ "def", "get_predicate_text", "(", "sent_tokens", ":", "List", "[", "Token", "]", ",", "tags", ":", "List", "[", "str", "]", ")", "->", "str", ":", "return", "\" \"", ".", "join", "(", "[", "sent_tokens", "[", "pred_id", "]", ".", "text", "for", "pred_id", "in", "get_predicate_indices", "(", "tags", ")", "]", ")" ]
Get the predicate in this prediction.
[ "Get", "the", "predicate", "in", "this", "prediction", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/predictors/open_information_extraction.py#L69-L74
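A quick example covering both predicate helpers above; `Token` is the allennlp tokenizer class. Note the index test is a substring check for 'V' in the full tag.

from allennlp.data.tokenizers import Token
from allennlp.predictors.open_information_extraction import (
        get_predicate_indices, get_predicate_text)

tags = ['B-ARG0', 'B-V', 'I-V', 'O']
print(get_predicate_indices(tags))   # [1, 2]
sent_tokens = [Token(word) for word in ['John', 'has', 'left', '.']]
print(get_predicate_text(sent_tokens, tags))   # 'has left'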
23,064
allenai/allennlp
allennlp/predictors/open_information_extraction.py
predicates_overlap
def predicates_overlap(tags1: List[str], tags2: List[str]) -> bool: """ Tests whether the predicate in BIO tags1 overlap with those of tags2. """ # Get predicate word indices from both predictions pred_ind1 = get_predicate_indices(tags1) pred_ind2 = get_predicate_indices(tags2) # Return if pred_ind1 pred_ind2 overlap return any(set.intersection(set(pred_ind1), set(pred_ind2)))
python
def predicates_overlap(tags1: List[str], tags2: List[str]) -> bool: """ Tests whether the predicate in BIO tags1 overlap with those of tags2. """ # Get predicate word indices from both predictions pred_ind1 = get_predicate_indices(tags1) pred_ind2 = get_predicate_indices(tags2) # Return if pred_ind1 pred_ind2 overlap return any(set.intersection(set(pred_ind1), set(pred_ind2)))
[ "def", "predicates_overlap", "(", "tags1", ":", "List", "[", "str", "]", ",", "tags2", ":", "List", "[", "str", "]", ")", "->", "bool", ":", "# Get predicate word indices from both predictions", "pred_ind1", "=", "get_predicate_indices", "(", "tags1", ")", "pred_ind2", "=", "get_predicate_indices", "(", "tags2", ")", "# Return if pred_ind1 pred_ind2 overlap", "return", "any", "(", "set", ".", "intersection", "(", "set", "(", "pred_ind1", ")", ",", "set", "(", "pred_ind2", ")", ")", ")" ]
Tests whether the predicates in BIO tags1 overlap with those of tags2.
[ "Tests", "whether", "the", "predicate", "in", "BIO", "tags1", "overlap", "with", "those", "of", "tags2", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/predictors/open_information_extraction.py#L76-L86
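One caveat in the code above: `any(...)` is applied directly to the set of shared word indices, so an overlap consisting only of word index 0 yields `any({0}) == False`. A hedged, index-safe variant (the name is ours):

from typing import List
from allennlp.predictors.open_information_extraction import get_predicate_indices

def predicates_overlap_safe(tags1: List[str], tags2: List[str]) -> bool:
    # A non-empty intersection of predicate indices means the predicates overlap.
    return not set(get_predicate_indices(tags1)).isdisjoint(get_predicate_indices(tags2))

print(predicates_overlap_safe(['B-V', 'O'], ['B-V', 'B-ARG1']))   # True, even at index 0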
23,065
allenai/allennlp
allennlp/predictors/open_information_extraction.py
get_coherent_next_tag
def get_coherent_next_tag(prev_label: str, cur_label: str) -> str: """ Generate a coherent tag, given previous tag and current label. """ if cur_label == "O": # Don't need to add prefix to an "O" label return "O" if prev_label == cur_label: return f"I-{cur_label}" else: return f"B-{cur_label}"
python
def get_coherent_next_tag(prev_label: str, cur_label: str) -> str: """ Generate a coherent tag, given previous tag and current label. """ if cur_label == "O": # Don't need to add prefix to an "O" label return "O" if prev_label == cur_label: return f"I-{cur_label}" else: return f"B-{cur_label}"
[ "def", "get_coherent_next_tag", "(", "prev_label", ":", "str", ",", "cur_label", ":", "str", ")", "->", "str", ":", "if", "cur_label", "==", "\"O\"", ":", "# Don't need to add prefix to an \"O\" label", "return", "\"O\"", "if", "prev_label", "==", "cur_label", ":", "return", "f\"I-{cur_label}\"", "else", ":", "return", "f\"B-{cur_label}\"" ]
Generate a coherent tag, given previous tag and current label.
[ "Generate", "a", "coherent", "tag", "given", "previous", "tag", "and", "current", "label", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/predictors/open_information_extraction.py#L88-L99
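A few illustrative calls; the function expects bare labels (no B-/I- prefix) in both arguments:

from allennlp.predictors.open_information_extraction import get_coherent_next_tag

print(get_coherent_next_tag('O', 'ARG0'))      # 'B-ARG0' -- opens a new span
print(get_coherent_next_tag('ARG0', 'ARG0'))   # 'I-ARG0' -- continues the span
print(get_coherent_next_tag('ARG0', 'O'))      # 'O'      -- no prefix needed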
23,066
allenai/allennlp
allennlp/predictors/open_information_extraction.py
merge_overlapping_predictions
def merge_overlapping_predictions(tags1: List[str], tags2: List[str]) -> List[str]: """ Merge two predictions into one. Assumes the predicate in tags1 overlap with the predicate of tags2. """ ret_sequence = [] prev_label = "O" # Build a coherent sequence out of two # spans which predicates' overlap for tag1, tag2 in zip(tags1, tags2): label1 = tag1.split("-")[-1] label2 = tag2.split("-")[-1] if (label1 == "V") or (label2 == "V"): # Construct maximal predicate length - # add predicate tag if any of the sequence predict it cur_label = "V" # Else - prefer an argument over 'O' label elif label1 != "O": cur_label = label1 else: cur_label = label2 # Append cur tag to the returned sequence cur_tag = get_coherent_next_tag(prev_label, cur_label) prev_label = cur_label ret_sequence.append(cur_tag) return ret_sequence
python
def merge_overlapping_predictions(tags1: List[str], tags2: List[str]) -> List[str]: """ Merge two predictions into one. Assumes the predicate in tags1 overlap with the predicate of tags2. """ ret_sequence = [] prev_label = "O" # Build a coherent sequence out of two # spans which predicates' overlap for tag1, tag2 in zip(tags1, tags2): label1 = tag1.split("-")[-1] label2 = tag2.split("-")[-1] if (label1 == "V") or (label2 == "V"): # Construct maximal predicate length - # add predicate tag if any of the sequence predict it cur_label = "V" # Else - prefer an argument over 'O' label elif label1 != "O": cur_label = label1 else: cur_label = label2 # Append cur tag to the returned sequence cur_tag = get_coherent_next_tag(prev_label, cur_label) prev_label = cur_label ret_sequence.append(cur_tag) return ret_sequence
[ "def", "merge_overlapping_predictions", "(", "tags1", ":", "List", "[", "str", "]", ",", "tags2", ":", "List", "[", "str", "]", ")", "->", "List", "[", "str", "]", ":", "ret_sequence", "=", "[", "]", "prev_label", "=", "\"O\"", "# Build a coherent sequence out of two", "# spans which predicates' overlap", "for", "tag1", ",", "tag2", "in", "zip", "(", "tags1", ",", "tags2", ")", ":", "label1", "=", "tag1", ".", "split", "(", "\"-\"", ")", "[", "-", "1", "]", "label2", "=", "tag2", ".", "split", "(", "\"-\"", ")", "[", "-", "1", "]", "if", "(", "label1", "==", "\"V\"", ")", "or", "(", "label2", "==", "\"V\"", ")", ":", "# Construct maximal predicate length -", "# add predicate tag if any of the sequence predict it", "cur_label", "=", "\"V\"", "# Else - prefer an argument over 'O' label", "elif", "label1", "!=", "\"O\"", ":", "cur_label", "=", "label1", "else", ":", "cur_label", "=", "label2", "# Append cur tag to the returned sequence", "cur_tag", "=", "get_coherent_next_tag", "(", "prev_label", ",", "cur_label", ")", "prev_label", "=", "cur_label", "ret_sequence", ".", "append", "(", "cur_tag", ")", "return", "ret_sequence" ]
Merge two predictions into one. Assumes the predicate in tags1 overlaps with the predicate of tags2.
[ "Merge", "two", "predictions", "into", "one", ".", "Assumes", "the", "predicate", "in", "tags1", "overlap", "with", "the", "predicate", "of", "tags2", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/predictors/open_information_extraction.py#L101-L130
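A worked example of the merge: the predicate position wins, arguments beat 'O', and prefixes are rebuilt with `get_coherent_next_tag`.

from allennlp.predictors.open_information_extraction import merge_overlapping_predictions

tags1 = ['B-ARG0', 'B-V', 'O', 'O']
tags2 = ['O', 'B-V', 'B-ARG1', 'I-ARG1']
print(merge_overlapping_predictions(tags1, tags2))
# ['B-ARG0', 'B-V', 'B-ARG1', 'I-ARG1']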
23,067
allenai/allennlp
allennlp/predictors/open_information_extraction.py
sanitize_label
def sanitize_label(label: str) -> str: """ Sanitize a BIO label - this deals with OIE labels sometimes having some noise, as parentheses. """ if "-" in label: prefix, suffix = label.split("-") suffix = suffix.split("(")[-1] return f"{prefix}-{suffix}" else: return label
python
def sanitize_label(label: str) -> str: """ Sanitize a BIO label - this deals with OIE labels sometimes having some noise, as parentheses. """ if "-" in label: prefix, suffix = label.split("-") suffix = suffix.split("(")[-1] return f"{prefix}-{suffix}" else: return label
[ "def", "sanitize_label", "(", "label", ":", "str", ")", "->", "str", ":", "if", "\"-\"", "in", "label", ":", "prefix", ",", "suffix", "=", "label", ".", "split", "(", "\"-\"", ")", "suffix", "=", "suffix", ".", "split", "(", "\"(\"", ")", "[", "-", "1", "]", "return", "f\"{prefix}-{suffix}\"", "else", ":", "return", "label" ]
Sanitize a BIO label - this deals with OIE labels sometimes having some noise, such as parentheses.
[ "Sanitize", "a", "BIO", "label", "-", "this", "deals", "with", "OIE", "labels", "sometimes", "having", "some", "noise", "as", "parentheses", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/predictors/open_information_extraction.py#L161-L171
23,068
allenai/allennlp
allennlp/modules/elmo.py
_ElmoBiLm.create_cached_cnn_embeddings
def create_cached_cnn_embeddings(self, tokens: List[str]) -> None: """ Given a list of tokens, this method precomputes word representations by running just the character convolutions and highway layers of elmo, essentially creating uncontextual word vectors. On subsequent forward passes, the word ids are looked up from an embedding, rather than being computed on the fly via the CNN encoder. This function sets 3 attributes: _word_embedding : ``torch.Tensor`` The word embedding for each word in the tokens passed to this method. _bos_embedding : ``torch.Tensor`` The embedding for the BOS token. _eos_embedding : ``torch.Tensor`` The embedding for the EOS token. Parameters ---------- tokens : ``List[str]``, required. A list of tokens to precompute character convolutions for. """ tokens = [ELMoCharacterMapper.bos_token, ELMoCharacterMapper.eos_token] + tokens timesteps = 32 batch_size = 32 chunked_tokens = lazy_groups_of(iter(tokens), timesteps) all_embeddings = [] device = get_device_of(next(self.parameters())) for batch in lazy_groups_of(chunked_tokens, batch_size): # Shape (batch_size, timesteps, 50) batched_tensor = batch_to_ids(batch) # NOTE: This device check is for when a user calls this method having # already placed the model on a device. If this is called in the # constructor, it will probably happen on the CPU. This isn't too bad, # because it's only a few convolutions and will likely be very fast. if device >= 0: batched_tensor = batched_tensor.cuda(device) output = self._token_embedder(batched_tensor) token_embedding = output["token_embedding"] mask = output["mask"] token_embedding, _ = remove_sentence_boundaries(token_embedding, mask) all_embeddings.append(token_embedding.view(-1, token_embedding.size(-1))) full_embedding = torch.cat(all_embeddings, 0) # We might have some trailing embeddings from padding in the batch, so # we clip the embedding and lookup to the right size. full_embedding = full_embedding[:len(tokens), :] embedding = full_embedding[2:len(tokens), :] vocab_size, embedding_dim = list(embedding.size()) from allennlp.modules.token_embedders import Embedding # type: ignore self._bos_embedding = full_embedding[0, :] self._eos_embedding = full_embedding[1, :] self._word_embedding = Embedding(vocab_size, # type: ignore embedding_dim, weight=embedding.data, trainable=self._requires_grad, padding_index=0)
python
def create_cached_cnn_embeddings(self, tokens: List[str]) -> None: """ Given a list of tokens, this method precomputes word representations by running just the character convolutions and highway layers of elmo, essentially creating uncontextual word vectors. On subsequent forward passes, the word ids are looked up from an embedding, rather than being computed on the fly via the CNN encoder. This function sets 3 attributes: _word_embedding : ``torch.Tensor`` The word embedding for each word in the tokens passed to this method. _bos_embedding : ``torch.Tensor`` The embedding for the BOS token. _eos_embedding : ``torch.Tensor`` The embedding for the EOS token. Parameters ---------- tokens : ``List[str]``, required. A list of tokens to precompute character convolutions for. """ tokens = [ELMoCharacterMapper.bos_token, ELMoCharacterMapper.eos_token] + tokens timesteps = 32 batch_size = 32 chunked_tokens = lazy_groups_of(iter(tokens), timesteps) all_embeddings = [] device = get_device_of(next(self.parameters())) for batch in lazy_groups_of(chunked_tokens, batch_size): # Shape (batch_size, timesteps, 50) batched_tensor = batch_to_ids(batch) # NOTE: This device check is for when a user calls this method having # already placed the model on a device. If this is called in the # constructor, it will probably happen on the CPU. This isn't too bad, # because it's only a few convolutions and will likely be very fast. if device >= 0: batched_tensor = batched_tensor.cuda(device) output = self._token_embedder(batched_tensor) token_embedding = output["token_embedding"] mask = output["mask"] token_embedding, _ = remove_sentence_boundaries(token_embedding, mask) all_embeddings.append(token_embedding.view(-1, token_embedding.size(-1))) full_embedding = torch.cat(all_embeddings, 0) # We might have some trailing embeddings from padding in the batch, so # we clip the embedding and lookup to the right size. full_embedding = full_embedding[:len(tokens), :] embedding = full_embedding[2:len(tokens), :] vocab_size, embedding_dim = list(embedding.size()) from allennlp.modules.token_embedders import Embedding # type: ignore self._bos_embedding = full_embedding[0, :] self._eos_embedding = full_embedding[1, :] self._word_embedding = Embedding(vocab_size, # type: ignore embedding_dim, weight=embedding.data, trainable=self._requires_grad, padding_index=0)
[ "def", "create_cached_cnn_embeddings", "(", "self", ",", "tokens", ":", "List", "[", "str", "]", ")", "->", "None", ":", "tokens", "=", "[", "ELMoCharacterMapper", ".", "bos_token", ",", "ELMoCharacterMapper", ".", "eos_token", "]", "+", "tokens", "timesteps", "=", "32", "batch_size", "=", "32", "chunked_tokens", "=", "lazy_groups_of", "(", "iter", "(", "tokens", ")", ",", "timesteps", ")", "all_embeddings", "=", "[", "]", "device", "=", "get_device_of", "(", "next", "(", "self", ".", "parameters", "(", ")", ")", ")", "for", "batch", "in", "lazy_groups_of", "(", "chunked_tokens", ",", "batch_size", ")", ":", "# Shape (batch_size, timesteps, 50)", "batched_tensor", "=", "batch_to_ids", "(", "batch", ")", "# NOTE: This device check is for when a user calls this method having", "# already placed the model on a device. If this is called in the", "# constructor, it will probably happen on the CPU. This isn't too bad,", "# because it's only a few convolutions and will likely be very fast.", "if", "device", ">=", "0", ":", "batched_tensor", "=", "batched_tensor", ".", "cuda", "(", "device", ")", "output", "=", "self", ".", "_token_embedder", "(", "batched_tensor", ")", "token_embedding", "=", "output", "[", "\"token_embedding\"", "]", "mask", "=", "output", "[", "\"mask\"", "]", "token_embedding", ",", "_", "=", "remove_sentence_boundaries", "(", "token_embedding", ",", "mask", ")", "all_embeddings", ".", "append", "(", "token_embedding", ".", "view", "(", "-", "1", ",", "token_embedding", ".", "size", "(", "-", "1", ")", ")", ")", "full_embedding", "=", "torch", ".", "cat", "(", "all_embeddings", ",", "0", ")", "# We might have some trailing embeddings from padding in the batch, so", "# we clip the embedding and lookup to the right size.", "full_embedding", "=", "full_embedding", "[", ":", "len", "(", "tokens", ")", ",", ":", "]", "embedding", "=", "full_embedding", "[", "2", ":", "len", "(", "tokens", ")", ",", ":", "]", "vocab_size", ",", "embedding_dim", "=", "list", "(", "embedding", ".", "size", "(", ")", ")", "from", "allennlp", ".", "modules", ".", "token_embedders", "import", "Embedding", "# type: ignore", "self", ".", "_bos_embedding", "=", "full_embedding", "[", "0", ",", ":", "]", "self", ".", "_eos_embedding", "=", "full_embedding", "[", "1", ",", ":", "]", "self", ".", "_word_embedding", "=", "Embedding", "(", "vocab_size", ",", "# type: ignore", "embedding_dim", ",", "weight", "=", "embedding", ".", "data", ",", "trainable", "=", "self", ".", "_requires_grad", ",", "padding_index", "=", "0", ")" ]
Given a list of tokens, this method precomputes word representations by running just the character convolutions and highway layers of elmo, essentially creating uncontextual word vectors. On subsequent forward passes, the word ids are looked up from an embedding, rather than being computed on the fly via the CNN encoder. This function sets 3 attributes: _word_embedding : ``torch.Tensor`` The word embedding for each word in the tokens passed to this method. _bos_embedding : ``torch.Tensor`` The embedding for the BOS token. _eos_embedding : ``torch.Tensor`` The embedding for the EOS token. Parameters ---------- tokens : ``List[str]``, required. A list of tokens to precompute character convolutions for.
[ "Given", "a", "list", "of", "tokens", "this", "method", "precomputes", "word", "representations", "by", "running", "just", "the", "character", "convolutions", "and", "highway", "layers", "of", "elmo", "essentially", "creating", "uncontextual", "word", "vectors", ".", "On", "subsequent", "forward", "passes", "the", "word", "ids", "are", "looked", "up", "from", "an", "embedding", "rather", "than", "being", "computed", "on", "the", "fly", "via", "the", "CNN", "encoder", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/modules/elmo.py#L627-L685
23,069
allenai/allennlp
allennlp/data/dataset_readers/reading_comprehension/util.py
normalize_text
def normalize_text(text: str) -> str: """ Performs a normalization that is very similar to that done by the normalization functions in SQuAD and TriviaQA. This involves splitting and rejoining the text, and could be a somewhat expensive operation. """ return ' '.join([token for token in text.lower().strip(STRIPPED_CHARACTERS).split() if token not in IGNORED_TOKENS])
python
def normalize_text(text: str) -> str: """ Performs a normalization that is very similar to that done by the normalization functions in SQuAD and TriviaQA. This involves splitting and rejoining the text, and could be a somewhat expensive operation. """ return ' '.join([token for token in text.lower().strip(STRIPPED_CHARACTERS).split() if token not in IGNORED_TOKENS])
[ "def", "normalize_text", "(", "text", ":", "str", ")", "->", "str", ":", "return", "' '", ".", "join", "(", "[", "token", "for", "token", "in", "text", ".", "lower", "(", ")", ".", "strip", "(", "STRIPPED_CHARACTERS", ")", ".", "split", "(", ")", "if", "token", "not", "in", "IGNORED_TOKENS", "]", ")" ]
Performs a normalization that is very similar to that done by the normalization functions in SQuAD and TriviaQA. This involves splitting and rejoining the text, and could be a somewhat expensive operation.
[ "Performs", "a", "normalization", "that", "is", "very", "similar", "to", "that", "done", "by", "the", "normalization", "functions", "in", "SQuAD", "and", "TriviaQA", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/reading_comprehension/util.py#L24-L33
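An illustrative call, assuming `IGNORED_TOKENS` covers the English articles and `STRIPPED_CHARACTERS` covers punctuation, as in the SQuAD evaluation script this mirrors:

from allennlp.data.dataset_readers.reading_comprehension.util import normalize_text

print(normalize_text('The Walt Disney Company!'))
# e.g. 'walt disney company' -- lowercased, leading article dropped, edge punctuation stripped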
23,070
allenai/allennlp
allennlp/data/dataset_readers/reading_comprehension/util.py
find_valid_answer_spans
def find_valid_answer_spans(passage_tokens: List[Token], answer_texts: List[str]) -> List[Tuple[int, int]]: """ Finds a list of token spans in ``passage_tokens`` that match the given ``answer_texts``. This tries to find all spans that would evaluate to correct given the SQuAD and TriviaQA official evaluation scripts, which do some normalization of the input text. Note that this could return duplicate spans! The caller is expected to be able to handle possible duplicates (as already happens in the SQuAD dev set, for instance). """ normalized_tokens = [token.text.lower().strip(STRIPPED_CHARACTERS) for token in passage_tokens] # Because there could be many `answer_texts`, we'll do the most expensive pre-processing # step once. This gives us a map from tokens to the position in the passage they appear. word_positions: Dict[str, List[int]] = defaultdict(list) for i, token in enumerate(normalized_tokens): word_positions[token].append(i) spans = [] for answer_text in answer_texts: # For each answer, we'll first find all valid start positions in the passage. Then # we'll grow each span to the same length as the number of answer tokens, and see if we # have a match. We're a little tricky as we grow the span, skipping words that are # already pruned from the normalized answer text, and stopping early if we don't match. answer_tokens = answer_text.lower().strip(STRIPPED_CHARACTERS).split() num_answer_tokens = len(answer_tokens) for span_start in word_positions[answer_tokens[0]]: span_end = span_start # span_end is _inclusive_ answer_index = 1 while answer_index < num_answer_tokens and span_end + 1 < len(normalized_tokens): token = normalized_tokens[span_end + 1] if answer_tokens[answer_index] == token: answer_index += 1 span_end += 1 elif token in IGNORED_TOKENS: span_end += 1 else: break if num_answer_tokens == answer_index: spans.append((span_start, span_end)) return spans
python
def find_valid_answer_spans(passage_tokens: List[Token], answer_texts: List[str]) -> List[Tuple[int, int]]: """ Finds a list of token spans in ``passage_tokens`` that match the given ``answer_texts``. This tries to find all spans that would evaluate to correct given the SQuAD and TriviaQA official evaluation scripts, which do some normalization of the input text. Note that this could return duplicate spans! The caller is expected to be able to handle possible duplicates (as already happens in the SQuAD dev set, for instance). """ normalized_tokens = [token.text.lower().strip(STRIPPED_CHARACTERS) for token in passage_tokens] # Because there could be many `answer_texts`, we'll do the most expensive pre-processing # step once. This gives us a map from tokens to the position in the passage they appear. word_positions: Dict[str, List[int]] = defaultdict(list) for i, token in enumerate(normalized_tokens): word_positions[token].append(i) spans = [] for answer_text in answer_texts: # For each answer, we'll first find all valid start positions in the passage. Then # we'll grow each span to the same length as the number of answer tokens, and see if we # have a match. We're a little tricky as we grow the span, skipping words that are # already pruned from the normalized answer text, and stopping early if we don't match. answer_tokens = answer_text.lower().strip(STRIPPED_CHARACTERS).split() num_answer_tokens = len(answer_tokens) for span_start in word_positions[answer_tokens[0]]: span_end = span_start # span_end is _inclusive_ answer_index = 1 while answer_index < num_answer_tokens and span_end + 1 < len(normalized_tokens): token = normalized_tokens[span_end + 1] if answer_tokens[answer_index] == token: answer_index += 1 span_end += 1 elif token in IGNORED_TOKENS: span_end += 1 else: break if num_answer_tokens == answer_index: spans.append((span_start, span_end)) return spans
[ "def", "find_valid_answer_spans", "(", "passage_tokens", ":", "List", "[", "Token", "]", ",", "answer_texts", ":", "List", "[", "str", "]", ")", "->", "List", "[", "Tuple", "[", "int", ",", "int", "]", "]", ":", "normalized_tokens", "=", "[", "token", ".", "text", ".", "lower", "(", ")", ".", "strip", "(", "STRIPPED_CHARACTERS", ")", "for", "token", "in", "passage_tokens", "]", "# Because there could be many `answer_texts`, we'll do the most expensive pre-processing", "# step once. This gives us a map from tokens to the position in the passage they appear.", "word_positions", ":", "Dict", "[", "str", ",", "List", "[", "int", "]", "]", "=", "defaultdict", "(", "list", ")", "for", "i", ",", "token", "in", "enumerate", "(", "normalized_tokens", ")", ":", "word_positions", "[", "token", "]", ".", "append", "(", "i", ")", "spans", "=", "[", "]", "for", "answer_text", "in", "answer_texts", ":", "# For each answer, we'll first find all valid start positions in the passage. Then", "# we'll grow each span to the same length as the number of answer tokens, and see if we", "# have a match. We're a little tricky as we grow the span, skipping words that are", "# already pruned from the normalized answer text, and stopping early if we don't match.", "answer_tokens", "=", "answer_text", ".", "lower", "(", ")", ".", "strip", "(", "STRIPPED_CHARACTERS", ")", ".", "split", "(", ")", "num_answer_tokens", "=", "len", "(", "answer_tokens", ")", "for", "span_start", "in", "word_positions", "[", "answer_tokens", "[", "0", "]", "]", ":", "span_end", "=", "span_start", "# span_end is _inclusive_", "answer_index", "=", "1", "while", "answer_index", "<", "num_answer_tokens", "and", "span_end", "+", "1", "<", "len", "(", "normalized_tokens", ")", ":", "token", "=", "normalized_tokens", "[", "span_end", "+", "1", "]", "if", "answer_tokens", "[", "answer_index", "]", "==", "token", ":", "answer_index", "+=", "1", "span_end", "+=", "1", "elif", "token", "in", "IGNORED_TOKENS", ":", "span_end", "+=", "1", "else", ":", "break", "if", "num_answer_tokens", "==", "answer_index", ":", "spans", ".", "append", "(", "(", "span_start", ",", "span_end", ")", ")", "return", "spans" ]
Finds a list of token spans in ``passage_tokens`` that match the given ``answer_texts``. This tries to find all spans that would evaluate to correct given the SQuAD and TriviaQA official evaluation scripts, which do some normalization of the input text. Note that this could return duplicate spans! The caller is expected to be able to handle possible duplicates (as already happens in the SQuAD dev set, for instance).
[ "Finds", "a", "list", "of", "token", "spans", "in", "passage_tokens", "that", "match", "the", "given", "answer_texts", ".", "This", "tries", "to", "find", "all", "spans", "that", "would", "evaluate", "to", "correct", "given", "the", "SQuAD", "and", "TriviaQA", "official", "evaluation", "scripts", "which", "do", "some", "normalization", "of", "the", "input", "text", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/reading_comprehension/util.py#L97-L135
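A small example; note the returned end index is inclusive:

from allennlp.data.tokenizers import Token
from allennlp.data.dataset_readers.reading_comprehension.util import find_valid_answer_spans

passage = [Token(word) for word in 'the quick brown fox jumps'.split()]
print(find_valid_answer_spans(passage, ['quick brown fox']))   # [(1, 3)]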
23,071
allenai/allennlp
allennlp/data/dataset_readers/reading_comprehension/util.py
handle_cannot
def handle_cannot(reference_answers: List[str]): """ Process a list of reference answers. If equal or more than half of the reference answers are "CANNOTANSWER", take it as gold. Otherwise, return answers that are not "CANNOTANSWER". """ num_cannot = 0 num_spans = 0 for ref in reference_answers: if ref == 'CANNOTANSWER': num_cannot += 1 else: num_spans += 1 if num_cannot >= num_spans: reference_answers = ['CANNOTANSWER'] else: reference_answers = [x for x in reference_answers if x != 'CANNOTANSWER'] return reference_answers
python
def handle_cannot(reference_answers: List[str]): """ Process a list of reference answers. If equal or more than half of the reference answers are "CANNOTANSWER", take it as gold. Otherwise, return answers that are not "CANNOTANSWER". """ num_cannot = 0 num_spans = 0 for ref in reference_answers: if ref == 'CANNOTANSWER': num_cannot += 1 else: num_spans += 1 if num_cannot >= num_spans: reference_answers = ['CANNOTANSWER'] else: reference_answers = [x for x in reference_answers if x != 'CANNOTANSWER'] return reference_answers
[ "def", "handle_cannot", "(", "reference_answers", ":", "List", "[", "str", "]", ")", ":", "num_cannot", "=", "0", "num_spans", "=", "0", "for", "ref", "in", "reference_answers", ":", "if", "ref", "==", "'CANNOTANSWER'", ":", "num_cannot", "+=", "1", "else", ":", "num_spans", "+=", "1", "if", "num_cannot", ">=", "num_spans", ":", "reference_answers", "=", "[", "'CANNOTANSWER'", "]", "else", ":", "reference_answers", "=", "[", "x", "for", "x", "in", "reference_answers", "if", "x", "!=", "'CANNOTANSWER'", "]", "return", "reference_answers" ]
Process a list of reference answers. If at least half of the reference answers are "CANNOTANSWER", take that as the gold answer. Otherwise, return the answers that are not "CANNOTANSWER".
[ "Process", "a", "list", "of", "reference", "answers", ".", "If", "equal", "or", "more", "than", "half", "of", "the", "reference", "answers", "are", "CANNOTANSWER", "take", "it", "as", "gold", ".", "Otherwise", "return", "answers", "that", "are", "not", "CANNOTANSWER", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/reading_comprehension/util.py#L354-L371
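Two illustrative calls; since the test is `>=`, a tie also resolves to 'CANNOTANSWER':

from allennlp.data.dataset_readers.reading_comprehension.util import handle_cannot

print(handle_cannot(['CANNOTANSWER', 'CANNOTANSWER', 'a span']))
# ['CANNOTANSWER'] -- the "cannot answer" votes are in the majority
print(handle_cannot(['CANNOTANSWER', 'a span', 'another span']))
# ['a span', 'another span'] -- the minority 'CANNOTANSWER' vote is dropped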
23,072
allenai/allennlp
allennlp/data/tokenizers/word_splitter.py
WordSplitter.batch_split_words
def batch_split_words(self, sentences: List[str]) -> List[List[Token]]: """ Spacy needs to do batch processing, or it can be really slow. This method lets you take advantage of that if you want. Default implementation is to just iterate of the sentences and call ``split_words``, but the ``SpacyWordSplitter`` will actually do batched processing. """ return [self.split_words(sentence) for sentence in sentences]
python
def batch_split_words(self, sentences: List[str]) -> List[List[Token]]: """ Spacy needs to do batch processing, or it can be really slow. This method lets you take advantage of that if you want. Default implementation is to just iterate of the sentences and call ``split_words``, but the ``SpacyWordSplitter`` will actually do batched processing. """ return [self.split_words(sentence) for sentence in sentences]
[ "def", "batch_split_words", "(", "self", ",", "sentences", ":", "List", "[", "str", "]", ")", "->", "List", "[", "List", "[", "Token", "]", "]", ":", "return", "[", "self", ".", "split_words", "(", "sentence", ")", "for", "sentence", "in", "sentences", "]" ]
Spacy needs to do batch processing, or it can be really slow. This method lets you take advantage of that if you want. The default implementation just iterates over the sentences and calls ``split_words``, but the ``SpacyWordSplitter`` will actually do batched processing.
[ "Spacy", "needs", "to", "do", "batch", "processing", "or", "it", "can", "be", "really", "slow", ".", "This", "method", "lets", "you", "take", "advantage", "of", "that", "if", "you", "want", ".", "Default", "implementation", "is", "to", "just", "iterate", "of", "the", "sentences", "and", "call", "split_words", "but", "the", "SpacyWordSplitter", "will", "actually", "do", "batched", "processing", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/tokenizers/word_splitter.py#L25-L32
23,073
allenai/allennlp
allennlp/state_machines/beam_search.py
BeamSearch.constrained_to
def constrained_to(self, initial_sequence: torch.Tensor, keep_beam_details: bool = True) -> 'BeamSearch': """ Return a new BeamSearch instance that's like this one but with the specified constraint. """ return BeamSearch(self._beam_size, self._per_node_beam_size, initial_sequence, keep_beam_details)
python
def constrained_to(self, initial_sequence: torch.Tensor, keep_beam_details: bool = True) -> 'BeamSearch': """ Return a new BeamSearch instance that's like this one but with the specified constraint. """ return BeamSearch(self._beam_size, self._per_node_beam_size, initial_sequence, keep_beam_details)
[ "def", "constrained_to", "(", "self", ",", "initial_sequence", ":", "torch", ".", "Tensor", ",", "keep_beam_details", ":", "bool", "=", "True", ")", "->", "'BeamSearch'", ":", "return", "BeamSearch", "(", "self", ".", "_beam_size", ",", "self", ".", "_per_node_beam_size", ",", "initial_sequence", ",", "keep_beam_details", ")" ]
Return a new BeamSearch instance that's like this one but with the specified constraint.
[ "Return", "a", "new", "BeamSearch", "instance", "that", "s", "like", "this", "one", "but", "with", "the", "specified", "constraint", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/state_machines/beam_search.py#L70-L74
23,074
allenai/allennlp
allennlp/tools/drop_eval.py
_normalize_answer
def _normalize_answer(text: str) -> str: """Lower text and remove punctuation, articles and extra whitespace.""" parts = [_white_space_fix(_remove_articles(_normalize_number(_remove_punc(_lower(token))))) for token in _tokenize(text)] parts = [part for part in parts if part.strip()] normalized = ' '.join(parts).strip() return normalized
python
def _normalize_answer(text: str) -> str: """Lower text and remove punctuation, articles and extra whitespace.""" parts = [_white_space_fix(_remove_articles(_normalize_number(_remove_punc(_lower(token))))) for token in _tokenize(text)] parts = [part for part in parts if part.strip()] normalized = ' '.join(parts).strip() return normalized
[ "def", "_normalize_answer", "(", "text", ":", "str", ")", "->", "str", ":", "parts", "=", "[", "_white_space_fix", "(", "_remove_articles", "(", "_normalize_number", "(", "_remove_punc", "(", "_lower", "(", "token", ")", ")", ")", ")", ")", "for", "token", "in", "_tokenize", "(", "text", ")", "]", "parts", "=", "[", "part", "for", "part", "in", "parts", "if", "part", ".", "strip", "(", ")", "]", "normalized", "=", "' '", ".", "join", "(", "parts", ")", ".", "strip", "(", ")", "return", "normalized" ]
Lower text and remove punctuation, articles and extra whitespace.
[ "Lower", "text", "and", "remove", "punctuation", "articles", "and", "extra", "whitespace", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/tools/drop_eval.py#L36-L43
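An illustrative call; the helpers (`_tokenize`, `_remove_articles`, and so on) are defined earlier in drop_eval.py, so the output shown is a best guess at their combined effect:

from allennlp.tools.drop_eval import _normalize_answer

print(_normalize_answer('The Eiffel Tower, in 1889.'))
# e.g. 'eiffel tower in 1889'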
23,075
allenai/allennlp
allennlp/tools/drop_eval.py
_align_bags
def _align_bags(predicted: List[Set[str]], gold: List[Set[str]]) -> List[float]: """ Takes gold and predicted answer sets and first finds a greedy 1-1 alignment between them and gets maximum metric values over all the answers """ f1_scores = [] for gold_index, gold_item in enumerate(gold): max_f1 = 0.0 max_index = None best_alignment: Tuple[Set[str], Set[str]] = (set(), set()) if predicted: for pred_index, pred_item in enumerate(predicted): current_f1 = _compute_f1(pred_item, gold_item) if current_f1 >= max_f1: best_alignment = (gold_item, pred_item) max_f1 = current_f1 max_index = pred_index match_flag = _match_numbers_if_present(*best_alignment) gold[gold_index] = set() predicted[max_index] = set() else: match_flag = False if match_flag: f1_scores.append(max_f1) else: f1_scores.append(0.0) return f1_scores
python
def _align_bags(predicted: List[Set[str]], gold: List[Set[str]]) -> List[float]: """ Takes gold and predicted answer sets and first finds a greedy 1-1 alignment between them and gets maximum metric values over all the answers """ f1_scores = [] for gold_index, gold_item in enumerate(gold): max_f1 = 0.0 max_index = None best_alignment: Tuple[Set[str], Set[str]] = (set(), set()) if predicted: for pred_index, pred_item in enumerate(predicted): current_f1 = _compute_f1(pred_item, gold_item) if current_f1 >= max_f1: best_alignment = (gold_item, pred_item) max_f1 = current_f1 max_index = pred_index match_flag = _match_numbers_if_present(*best_alignment) gold[gold_index] = set() predicted[max_index] = set() else: match_flag = False if match_flag: f1_scores.append(max_f1) else: f1_scores.append(0.0) return f1_scores
[ "def", "_align_bags", "(", "predicted", ":", "List", "[", "Set", "[", "str", "]", "]", ",", "gold", ":", "List", "[", "Set", "[", "str", "]", "]", ")", "->", "List", "[", "float", "]", ":", "f1_scores", "=", "[", "]", "for", "gold_index", ",", "gold_item", "in", "enumerate", "(", "gold", ")", ":", "max_f1", "=", "0.0", "max_index", "=", "None", "best_alignment", ":", "Tuple", "[", "Set", "[", "str", "]", ",", "Set", "[", "str", "]", "]", "=", "(", "set", "(", ")", ",", "set", "(", ")", ")", "if", "predicted", ":", "for", "pred_index", ",", "pred_item", "in", "enumerate", "(", "predicted", ")", ":", "current_f1", "=", "_compute_f1", "(", "pred_item", ",", "gold_item", ")", "if", "current_f1", ">=", "max_f1", ":", "best_alignment", "=", "(", "gold_item", ",", "pred_item", ")", "max_f1", "=", "current_f1", "max_index", "=", "pred_index", "match_flag", "=", "_match_numbers_if_present", "(", "*", "best_alignment", ")", "gold", "[", "gold_index", "]", "=", "set", "(", ")", "predicted", "[", "max_index", "]", "=", "set", "(", ")", "else", ":", "match_flag", "=", "False", "if", "match_flag", ":", "f1_scores", ".", "append", "(", "max_f1", ")", "else", ":", "f1_scores", ".", "append", "(", "0.0", ")", "return", "f1_scores" ]
Takes gold and predicted answer sets, first finds a greedy 1-1 alignment between them, and then takes the maximum metric value over all the answers.
[ "Takes", "gold", "and", "predicted", "answer", "sets", "and", "first", "finds", "a", "greedy", "1", "-", "1", "alignment", "between", "them", "and", "gets", "maximum", "metric", "values", "over", "all", "the", "answers" ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/tools/drop_eval.py#L73-L99
23,076
allenai/allennlp
allennlp/tools/drop_eval.py
answer_json_to_strings
def answer_json_to_strings(answer: Dict[str, Any]) -> Tuple[Tuple[str, ...], str]: """ Takes an answer JSON blob from the DROP data release and converts it into strings used for evaluation. """ if "number" in answer and answer["number"]: return tuple([str(answer["number"])]), "number" elif "spans" in answer and answer["spans"]: return tuple(answer["spans"]), "span" if len(answer["spans"]) == 1 else "spans" elif "date" in answer: return tuple(["{0} {1} {2}".format(answer["date"]["day"], answer["date"]["month"], answer["date"]["year"])]), "date" else: raise ValueError(f"Answer type not found, should be one of number, spans or date at: {json.dumps(answer)}")
python
def answer_json_to_strings(answer: Dict[str, Any]) -> Tuple[Tuple[str, ...], str]: """ Takes an answer JSON blob from the DROP data release and converts it into strings used for evaluation. """ if "number" in answer and answer["number"]: return tuple([str(answer["number"])]), "number" elif "spans" in answer and answer["spans"]: return tuple(answer["spans"]), "span" if len(answer["spans"]) == 1 else "spans" elif "date" in answer: return tuple(["{0} {1} {2}".format(answer["date"]["day"], answer["date"]["month"], answer["date"]["year"])]), "date" else: raise ValueError(f"Answer type not found, should be one of number, spans or date at: {json.dumps(answer)}")
[ "def", "answer_json_to_strings", "(", "answer", ":", "Dict", "[", "str", ",", "Any", "]", ")", "->", "Tuple", "[", "Tuple", "[", "str", ",", "...", "]", ",", "str", "]", ":", "if", "\"number\"", "in", "answer", "and", "answer", "[", "\"number\"", "]", ":", "return", "tuple", "(", "[", "str", "(", "answer", "[", "\"number\"", "]", ")", "]", ")", ",", "\"number\"", "elif", "\"spans\"", "in", "answer", "and", "answer", "[", "\"spans\"", "]", ":", "return", "tuple", "(", "answer", "[", "\"spans\"", "]", ")", ",", "\"span\"", "if", "len", "(", "answer", "[", "\"spans\"", "]", ")", "==", "1", "else", "\"spans\"", "elif", "\"date\"", "in", "answer", ":", "return", "tuple", "(", "[", "\"{0} {1} {2}\"", ".", "format", "(", "answer", "[", "\"date\"", "]", "[", "\"day\"", "]", ",", "answer", "[", "\"date\"", "]", "[", "\"month\"", "]", ",", "answer", "[", "\"date\"", "]", "[", "\"year\"", "]", ")", "]", ")", ",", "\"date\"", "else", ":", "raise", "ValueError", "(", "f\"Answer type not found, should be one of number, spans or date at: {json.dumps(answer)}\"", ")" ]
Takes an answer JSON blob from the DROP data release and converts it into strings used for evaluation.
[ "Takes", "an", "answer", "JSON", "blob", "from", "the", "DROP", "data", "release", "and", "converts", "it", "into", "strings", "used", "for", "evaluation", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/tools/drop_eval.py#L150-L164
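Examples for the three answer types in a DROP answer blob, following the branches above:

from allennlp.tools.drop_eval import answer_json_to_strings

print(answer_json_to_strings({'number': 3, 'spans': [], 'date': {}}))
# (('3',), 'number')
print(answer_json_to_strings({'number': '', 'spans': ['Denver Broncos'], 'date': {}}))
# (('Denver Broncos',), 'span')
print(answer_json_to_strings({'number': '', 'spans': [],
                              'date': {'day': '17', 'month': 'August', 'year': '1990'}}))
# (('17 August 1990',), 'date')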
23,077
allenai/allennlp
allennlp/data/dataset_readers/dataset_reader.py
DatasetReader.read
def read(self, file_path: str) -> Iterable[Instance]: """ Returns an ``Iterable`` containing all the instances in the specified dataset. If ``self.lazy`` is False, this calls ``self._read()``, ensures that the result is a list, then returns the resulting list. If ``self.lazy`` is True, this returns an object whose ``__iter__`` method calls ``self._read()`` each iteration. In this case your implementation of ``_read()`` must also be lazy (that is, not load all instances into memory at once), otherwise you will get a ``ConfigurationError``. In either case, the returned ``Iterable`` can be iterated over multiple times. It's unlikely you want to override this function, but if you do your result should likewise be repeatedly iterable. """ lazy = getattr(self, 'lazy', None) if lazy is None: logger.warning("DatasetReader.lazy is not set, " "did you forget to call the superclass constructor?") if self._cache_directory: cache_file = self._get_cache_location_for_file_path(file_path) else: cache_file = None if lazy: return _LazyInstances(lambda: self._read(file_path), cache_file, self.deserialize_instance, self.serialize_instance) else: # First we read the instances, either from a cache or from the original file. if cache_file and os.path.exists(cache_file): instances = self._instances_from_cache_file(cache_file) else: instances = self._read(file_path) # Then some validation. if not isinstance(instances, list): instances = [instance for instance in Tqdm.tqdm(instances)] if not instances: raise ConfigurationError("No instances were read from the given filepath {}. " "Is the path correct?".format(file_path)) # And finally we write to the cache if we need to. if cache_file and not os.path.exists(cache_file): logger.info(f"Caching instances to {cache_file}") with open(cache_file, 'w') as cache: for instance in Tqdm.tqdm(instances): cache.write(self.serialize_instance(instance) + '\n') return instances
python
def read(self, file_path: str) -> Iterable[Instance]: """ Returns an ``Iterable`` containing all the instances in the specified dataset. If ``self.lazy`` is False, this calls ``self._read()``, ensures that the result is a list, then returns the resulting list. If ``self.lazy`` is True, this returns an object whose ``__iter__`` method calls ``self._read()`` each iteration. In this case your implementation of ``_read()`` must also be lazy (that is, not load all instances into memory at once), otherwise you will get a ``ConfigurationError``. In either case, the returned ``Iterable`` can be iterated over multiple times. It's unlikely you want to override this function, but if you do your result should likewise be repeatedly iterable. """ lazy = getattr(self, 'lazy', None) if lazy is None: logger.warning("DatasetReader.lazy is not set, " "did you forget to call the superclass constructor?") if self._cache_directory: cache_file = self._get_cache_location_for_file_path(file_path) else: cache_file = None if lazy: return _LazyInstances(lambda: self._read(file_path), cache_file, self.deserialize_instance, self.serialize_instance) else: # First we read the instances, either from a cache or from the original file. if cache_file and os.path.exists(cache_file): instances = self._instances_from_cache_file(cache_file) else: instances = self._read(file_path) # Then some validation. if not isinstance(instances, list): instances = [instance for instance in Tqdm.tqdm(instances)] if not instances: raise ConfigurationError("No instances were read from the given filepath {}. " "Is the path correct?".format(file_path)) # And finally we write to the cache if we need to. if cache_file and not os.path.exists(cache_file): logger.info(f"Caching instances to {cache_file}") with open(cache_file, 'w') as cache: for instance in Tqdm.tqdm(instances): cache.write(self.serialize_instance(instance) + '\n') return instances
[ "def", "read", "(", "self", ",", "file_path", ":", "str", ")", "->", "Iterable", "[", "Instance", "]", ":", "lazy", "=", "getattr", "(", "self", ",", "'lazy'", ",", "None", ")", "if", "lazy", "is", "None", ":", "logger", ".", "warning", "(", "\"DatasetReader.lazy is not set, \"", "\"did you forget to call the superclass constructor?\"", ")", "if", "self", ".", "_cache_directory", ":", "cache_file", "=", "self", ".", "_get_cache_location_for_file_path", "(", "file_path", ")", "else", ":", "cache_file", "=", "None", "if", "lazy", ":", "return", "_LazyInstances", "(", "lambda", ":", "self", ".", "_read", "(", "file_path", ")", ",", "cache_file", ",", "self", ".", "deserialize_instance", ",", "self", ".", "serialize_instance", ")", "else", ":", "# First we read the instances, either from a cache or from the original file.", "if", "cache_file", "and", "os", ".", "path", ".", "exists", "(", "cache_file", ")", ":", "instances", "=", "self", ".", "_instances_from_cache_file", "(", "cache_file", ")", "else", ":", "instances", "=", "self", ".", "_read", "(", "file_path", ")", "# Then some validation.", "if", "not", "isinstance", "(", "instances", ",", "list", ")", ":", "instances", "=", "[", "instance", "for", "instance", "in", "Tqdm", ".", "tqdm", "(", "instances", ")", "]", "if", "not", "instances", ":", "raise", "ConfigurationError", "(", "\"No instances were read from the given filepath {}. \"", "\"Is the path correct?\"", ".", "format", "(", "file_path", ")", ")", "# And finally we write to the cache if we need to.", "if", "cache_file", "and", "not", "os", ".", "path", ".", "exists", "(", "cache_file", ")", ":", "logger", ".", "info", "(", "f\"Caching instances to {cache_file}\"", ")", "with", "open", "(", "cache_file", ",", "'w'", ")", "as", "cache", ":", "for", "instance", "in", "Tqdm", ".", "tqdm", "(", "instances", ")", ":", "cache", ".", "write", "(", "self", ".", "serialize_instance", "(", "instance", ")", "+", "'\\n'", ")", "return", "instances" ]
Returns an ``Iterable`` containing all the instances in the specified dataset. If ``self.lazy`` is False, this calls ``self._read()``, ensures that the result is a list, then returns the resulting list. If ``self.lazy`` is True, this returns an object whose ``__iter__`` method calls ``self._read()`` each iteration. In this case your implementation of ``_read()`` must also be lazy (that is, not load all instances into memory at once), otherwise you will get a ``ConfigurationError``. In either case, the returned ``Iterable`` can be iterated over multiple times. It's unlikely you want to override this function, but if you do your result should likewise be repeatedly iterable.
[ "Returns", "an", "Iterable", "containing", "all", "the", "instances", "in", "the", "specified", "dataset", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/data/dataset_readers/dataset_reader.py#L91-L145
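A hedged usage sketch; `MyDatasetReader` and the file path are placeholders for a concrete subclass implementing `_read`:

reader = MyDatasetReader(lazy=True)            # hypothetical DatasetReader subclass
instances = reader.read('/path/to/train.txt')
for epoch in range(2):
    for instance in instances:                 # with lazy=True, _read() runs again each pass
        pass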
23,078
allenai/allennlp
allennlp/models/semantic_role_labeler.py
write_to_conll_eval_file
def write_to_conll_eval_file(prediction_file: TextIO, gold_file: TextIO, verb_index: Optional[int], sentence: List[str], prediction: List[str], gold_labels: List[str]): """ Prints predicate argument predictions and gold labels for a single verbal predicate in a sentence to two provided file references. Parameters ---------- prediction_file : TextIO, required. A file reference to print predictions to. gold_file : TextIO, required. A file reference to print gold labels to. verb_index : Optional[int], required. The index of the verbal predicate in the sentence which the gold labels are the arguments for, or None if the sentence contains no verbal predicate. sentence : List[str], required. The word tokens. prediction : List[str], required. The predicted BIO labels. gold_labels : List[str], required. The gold BIO labels. """ verb_only_sentence = ["-"] * len(sentence) if verb_index is not None: verb_only_sentence[verb_index] = sentence[verb_index] conll_format_predictions = convert_bio_tags_to_conll_format(prediction) conll_format_gold_labels = convert_bio_tags_to_conll_format(gold_labels) for word, predicted, gold in zip(verb_only_sentence, conll_format_predictions, conll_format_gold_labels): prediction_file.write(word.ljust(15)) prediction_file.write(predicted.rjust(15) + "\n") gold_file.write(word.ljust(15)) gold_file.write(gold.rjust(15) + "\n") prediction_file.write("\n") gold_file.write("\n")
python
def write_to_conll_eval_file(prediction_file: TextIO, gold_file: TextIO, verb_index: Optional[int], sentence: List[str], prediction: List[str], gold_labels: List[str]): """ Prints predicate argument predictions and gold labels for a single verbal predicate in a sentence to two provided file references. Parameters ---------- prediction_file : TextIO, required. A file reference to print predictions to. gold_file : TextIO, required. A file reference to print gold labels to. verb_index : Optional[int], required. The index of the verbal predicate in the sentence which the gold labels are the arguments for, or None if the sentence contains no verbal predicate. sentence : List[str], required. The word tokens. prediction : List[str], required. The predicted BIO labels. gold_labels : List[str], required. The gold BIO labels. """ verb_only_sentence = ["-"] * len(sentence) if verb_index is not None: verb_only_sentence[verb_index] = sentence[verb_index] conll_format_predictions = convert_bio_tags_to_conll_format(prediction) conll_format_gold_labels = convert_bio_tags_to_conll_format(gold_labels) for word, predicted, gold in zip(verb_only_sentence, conll_format_predictions, conll_format_gold_labels): prediction_file.write(word.ljust(15)) prediction_file.write(predicted.rjust(15) + "\n") gold_file.write(word.ljust(15)) gold_file.write(gold.rjust(15) + "\n") prediction_file.write("\n") gold_file.write("\n")
[ "def", "write_to_conll_eval_file", "(", "prediction_file", ":", "TextIO", ",", "gold_file", ":", "TextIO", ",", "verb_index", ":", "Optional", "[", "int", "]", ",", "sentence", ":", "List", "[", "str", "]", ",", "prediction", ":", "List", "[", "str", "]", ",", "gold_labels", ":", "List", "[", "str", "]", ")", ":", "verb_only_sentence", "=", "[", "\"-\"", "]", "*", "len", "(", "sentence", ")", "if", "verb_index", ":", "verb_only_sentence", "[", "verb_index", "]", "=", "sentence", "[", "verb_index", "]", "conll_format_predictions", "=", "convert_bio_tags_to_conll_format", "(", "prediction", ")", "conll_format_gold_labels", "=", "convert_bio_tags_to_conll_format", "(", "gold_labels", ")", "for", "word", ",", "predicted", ",", "gold", "in", "zip", "(", "verb_only_sentence", ",", "conll_format_predictions", ",", "conll_format_gold_labels", ")", ":", "prediction_file", ".", "write", "(", "word", ".", "ljust", "(", "15", ")", ")", "prediction_file", ".", "write", "(", "predicted", ".", "rjust", "(", "15", ")", "+", "\"\\n\"", ")", "gold_file", ".", "write", "(", "word", ".", "ljust", "(", "15", ")", ")", "gold_file", ".", "write", "(", "gold", ".", "rjust", "(", "15", ")", "+", "\"\\n\"", ")", "prediction_file", ".", "write", "(", "\"\\n\"", ")", "gold_file", ".", "write", "(", "\"\\n\"", ")" ]
Prints predicate argument predictions and gold labels for a single verbal predicate in a sentence to two provided file references. Parameters ---------- prediction_file : TextIO, required. A file reference to print predictions to. gold_file : TextIO, required. A file reference to print gold labels to. verb_index : Optional[int], required. The index of the verbal predicate in the sentence which the gold labels are the arguments for, or None if the sentence contains no verbal predicate. sentence : List[str], required. The word tokens. prediction : List[str], required. The predicted BIO labels. gold_labels : List[str], required. The gold BIO labels.
[ "Prints", "predicate", "argument", "predictions", "and", "gold", "labels", "for", "a", "single", "verbal", "predicate", "in", "a", "sentence", "to", "two", "provided", "file", "references", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/models/semantic_role_labeler.py#L226-L268
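A small usage sketch (the sentence and labels are illustrative; convert_bio_tags_to_conll_format lives in the same module, as the code above implies):

import io
from allennlp.models.semantic_role_labeler import write_to_conll_eval_file

prediction_file, gold_file = io.StringIO(), io.StringIO()
write_to_conll_eval_file(prediction_file, gold_file,
                         verb_index=1,
                         sentence=["Marie", "ate", "cake"],
                         prediction=["B-ARG0", "B-V", "B-ARG1"],
                         gold_labels=["B-ARG0", "B-V", "B-ARG1"])
# Each file now holds one "word  label" row per token ("-" for every
# non-predicate word), followed by a blank line separating sentences.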
23,079
allenai/allennlp
allennlp/semparse/domain_languages/nlvr_language.py
NlvrLanguage.get_agenda_for_sentence
def get_agenda_for_sentence(self, sentence: str) -> List[str]: """ Given a ``sentence``, returns a list of actions the sentence triggers as an ``agenda``. The ``agenda`` can be used by a parser to guide the decoder. This is a simplistic mapping at this point, and can be expanded. Parameters ---------- sentence : ``str`` The sentence for which an agenda will be produced. """ agenda = [] sentence = sentence.lower() if sentence.startswith("there is a box") or sentence.startswith("there is a tower "): agenda.append(self.terminal_productions["box_exists"]) elif sentence.startswith("there is a "): agenda.append(self.terminal_productions["object_exists"]) if "<Set[Box]:bool> -> box_exists" not in agenda: # These are object filters and do not apply if we have a box_exists at the top. if "touch" in sentence: if "top" in sentence: agenda.append(self.terminal_productions["touch_top"]) elif "bottom" in sentence or "base" in sentence: agenda.append(self.terminal_productions["touch_bottom"]) elif "corner" in sentence: agenda.append(self.terminal_productions["touch_corner"]) elif "right" in sentence: agenda.append(self.terminal_productions["touch_right"]) elif "left" in sentence: agenda.append(self.terminal_productions["touch_left"]) elif "wall" in sentence or "edge" in sentence: agenda.append(self.terminal_productions["touch_wall"]) else: agenda.append(self.terminal_productions["touch_object"]) else: # The words "top" and "bottom" may be referring to top and bottom blocks in a tower. if "top" in sentence: agenda.append(self.terminal_productions["top"]) elif "bottom" in sentence or "base" in sentence: agenda.append(self.terminal_productions["bottom"]) if " not " in sentence: agenda.append(self.terminal_productions["negate_filter"]) if " contains " in sentence or " has " in sentence: agenda.append(self.terminal_productions["all_boxes"]) # This takes care of shapes, colors, top, bottom, big, small etc. for constant, production in self.terminal_productions.items(): # TODO(pradeep): Deal with constant names with underscores. if "top" in constant or "bottom" in constant: # We already dealt with top, bottom, touch_top and touch_bottom above. continue if constant in sentence: if "<Set[Object]:Set[Object]> ->" in production and "<Set[Box]:bool> -> box_exists" in agenda: if constant in ["square", "circle", "triangle"]: agenda.append(self.terminal_productions[f"shape_{constant}"]) elif constant in ["yellow", "blue", "black"]: agenda.append(self.terminal_productions[f"color_{constant}"]) else: continue else: agenda.append(production) # TODO (pradeep): Rules for "member_*" productions ("tower" or "box" followed by a color, # shape or number...) number_productions = self._get_number_productions(sentence) for production in number_productions: agenda.append(production) if not agenda: # None of the rules above was triggered! if "box" in sentence: agenda.append(self.terminal_productions["all_boxes"]) else: agenda.append(self.terminal_productions["all_objects"]) return agenda
python
def get_agenda_for_sentence(self, sentence: str) -> List[str]: """ Given a ``sentence``, returns a list of actions the sentence triggers as an ``agenda``. The ``agenda`` can be used by a parser to guide the decoder. This is a simplistic mapping at this point, and can be expanded. Parameters ---------- sentence : ``str`` The sentence for which an agenda will be produced. """ agenda = [] sentence = sentence.lower() if sentence.startswith("there is a box") or sentence.startswith("there is a tower "): agenda.append(self.terminal_productions["box_exists"]) elif sentence.startswith("there is a "): agenda.append(self.terminal_productions["object_exists"]) if "<Set[Box]:bool> -> box_exists" not in agenda: # These are object filters and do not apply if we have a box_exists at the top. if "touch" in sentence: if "top" in sentence: agenda.append(self.terminal_productions["touch_top"]) elif "bottom" in sentence or "base" in sentence: agenda.append(self.terminal_productions["touch_bottom"]) elif "corner" in sentence: agenda.append(self.terminal_productions["touch_corner"]) elif "right" in sentence: agenda.append(self.terminal_productions["touch_right"]) elif "left" in sentence: agenda.append(self.terminal_productions["touch_left"]) elif "wall" in sentence or "edge" in sentence: agenda.append(self.terminal_productions["touch_wall"]) else: agenda.append(self.terminal_productions["touch_object"]) else: # The words "top" and "bottom" may be referring to top and bottom blocks in a tower. if "top" in sentence: agenda.append(self.terminal_productions["top"]) elif "bottom" in sentence or "base" in sentence: agenda.append(self.terminal_productions["bottom"]) if " not " in sentence: agenda.append(self.terminal_productions["negate_filter"]) if " contains " in sentence or " has " in sentence: agenda.append(self.terminal_productions["all_boxes"]) # This takes care of shapes, colors, top, bottom, big, small etc. for constant, production in self.terminal_productions.items(): # TODO(pradeep): Deal with constant names with underscores. if "top" in constant or "bottom" in constant: # We already dealt with top, bottom, touch_top and touch_bottom above. continue if constant in sentence: if "<Set[Object]:Set[Object]> ->" in production and "<Set[Box]:bool> -> box_exists" in agenda: if constant in ["square", "circle", "triangle"]: agenda.append(self.terminal_productions[f"shape_{constant}"]) elif constant in ["yellow", "blue", "black"]: agenda.append(self.terminal_productions[f"color_{constant}"]) else: continue else: agenda.append(production) # TODO (pradeep): Rules for "member_*" productions ("tower" or "box" followed by a color, # shape or number...) number_productions = self._get_number_productions(sentence) for production in number_productions: agenda.append(production) if not agenda: # None of the rules above was triggered! if "box" in sentence: agenda.append(self.terminal_productions["all_boxes"]) else: agenda.append(self.terminal_productions["all_objects"]) return agenda
[ "def", "get_agenda_for_sentence", "(", "self", ",", "sentence", ":", "str", ")", "->", "List", "[", "str", "]", ":", "agenda", "=", "[", "]", "sentence", "=", "sentence", ".", "lower", "(", ")", "if", "sentence", ".", "startswith", "(", "\"there is a box\"", ")", "or", "sentence", ".", "startswith", "(", "\"there is a tower \"", ")", ":", "agenda", ".", "append", "(", "self", ".", "terminal_productions", "[", "\"box_exists\"", "]", ")", "elif", "sentence", ".", "startswith", "(", "\"there is a \"", ")", ":", "agenda", ".", "append", "(", "self", ".", "terminal_productions", "[", "\"object_exists\"", "]", ")", "if", "\"<Set[Box]:bool> -> box_exists\"", "not", "in", "agenda", ":", "# These are object filters and do not apply if we have a box_exists at the top.", "if", "\"touch\"", "in", "sentence", ":", "if", "\"top\"", "in", "sentence", ":", "agenda", ".", "append", "(", "self", ".", "terminal_productions", "[", "\"touch_top\"", "]", ")", "elif", "\"bottom\"", "in", "sentence", "or", "\"base\"", "in", "sentence", ":", "agenda", ".", "append", "(", "self", ".", "terminal_productions", "[", "\"touch_bottom\"", "]", ")", "elif", "\"corner\"", "in", "sentence", ":", "agenda", ".", "append", "(", "self", ".", "terminal_productions", "[", "\"touch_corner\"", "]", ")", "elif", "\"right\"", "in", "sentence", ":", "agenda", ".", "append", "(", "self", ".", "terminal_productions", "[", "\"touch_right\"", "]", ")", "elif", "\"left\"", "in", "sentence", ":", "agenda", ".", "append", "(", "self", ".", "terminal_productions", "[", "\"touch_left\"", "]", ")", "elif", "\"wall\"", "in", "sentence", "or", "\"edge\"", "in", "sentence", ":", "agenda", ".", "append", "(", "self", ".", "terminal_productions", "[", "\"touch_wall\"", "]", ")", "else", ":", "agenda", ".", "append", "(", "self", ".", "terminal_productions", "[", "\"touch_object\"", "]", ")", "else", ":", "# The words \"top\" and \"bottom\" may be referring to top and bottom blocks in a tower.", "if", "\"top\"", "in", "sentence", ":", "agenda", ".", "append", "(", "self", ".", "terminal_productions", "[", "\"top\"", "]", ")", "elif", "\"bottom\"", "in", "sentence", "or", "\"base\"", "in", "sentence", ":", "agenda", ".", "append", "(", "self", ".", "terminal_productions", "[", "\"bottom\"", "]", ")", "if", "\" not \"", "in", "sentence", ":", "agenda", ".", "append", "(", "self", ".", "terminal_productions", "[", "\"negate_filter\"", "]", ")", "if", "\" contains \"", "in", "sentence", "or", "\" has \"", "in", "sentence", ":", "agenda", ".", "append", "(", "self", ".", "terminal_productions", "[", "\"all_boxes\"", "]", ")", "# This takes care of shapes, colors, top, bottom, big, small etc.", "for", "constant", ",", "production", "in", "self", ".", "terminal_productions", ".", "items", "(", ")", ":", "# TODO(pradeep): Deal with constant names with underscores.", "if", "\"top\"", "in", "constant", "or", "\"bottom\"", "in", "constant", ":", "# We already dealt with top, bottom, touch_top and touch_bottom above.", "continue", "if", "constant", "in", "sentence", ":", "if", "\"<Set[Object]:Set[Object]> ->\"", "in", "production", "and", "\"<Set[Box]:bool> -> box_exists\"", "in", "agenda", ":", "if", "constant", "in", "[", "\"square\"", ",", "\"circle\"", ",", "\"triangle\"", "]", ":", "agenda", ".", "append", "(", "self", ".", "terminal_productions", "[", "f\"shape_{constant}\"", "]", ")", "elif", "constant", "in", "[", "\"yellow\"", ",", "\"blue\"", ",", "\"black\"", "]", ":", "agenda", ".", "append", "(", "self", ".", "terminal_productions", "[", 
"f\"color_{constant}\"", "]", ")", "else", ":", "continue", "else", ":", "agenda", ".", "append", "(", "production", ")", "# TODO (pradeep): Rules for \"member_*\" productions (\"tower\" or \"box\" followed by a color,", "# shape or number...)", "number_productions", "=", "self", ".", "_get_number_productions", "(", "sentence", ")", "for", "production", "in", "number_productions", ":", "agenda", ".", "append", "(", "production", ")", "if", "not", "agenda", ":", "# None of the rules above was triggered!", "if", "\"box\"", "in", "sentence", ":", "agenda", ".", "append", "(", "self", ".", "terminal_productions", "[", "\"all_boxes\"", "]", ")", "else", ":", "agenda", ".", "append", "(", "self", ".", "terminal_productions", "[", "\"all_objects\"", "]", ")", "return", "agenda" ]
Given a ``sentence``, returns a list of actions the sentence triggers as an ``agenda``. The ``agenda`` can be used by a parser to guide the decoder. This is a simplistic mapping at this point, and can be expanded. Parameters ---------- sentence : ``str`` The sentence for which an agenda will be produced.
[ "Given", "a", "sentence", "returns", "a", "list", "of", "actions", "the", "sentence", "triggers", "as", "an", "agenda", ".", "The", "agenda", "can", "be", "used", "while", "by", "a", "parser", "to", "guide", "the", "decoder", ".", "sequences", "as", "possible", ".", "This", "is", "a", "simplistic", "mapping", "at", "this", "point", "and", "can", "be", "expanded", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/nlvr_language.py#L125-L199
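A self-contained distillation of the sentence-start branching above (plain string logic, independent of the NlvrLanguage class; the production names are the ones the method looks up):

def starting_trigger(sentence: str) -> str:
    sentence = sentence.lower()
    if sentence.startswith("there is a box") or sentence.startswith("there is a tower "):
        return "box_exists"
    if sentence.startswith("there is a "):
        return "object_exists"
    return "<no start trigger>"

print(starting_trigger("There is a box with a yellow square"))  # box_exists
print(starting_trigger("There is a yellow square"))             # object_exists
print(starting_trigger("All boxes contain a square"))           # <no start trigger>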
23,080
allenai/allennlp
allennlp/semparse/domain_languages/nlvr_language.py
NlvrLanguage._get_number_productions
def _get_number_productions(sentence: str) -> List[str]: """ Gathers all the numbers in the sentence, and returns productions that lead to them. """ # The mapping here is very simple and limited, which also shouldn't be a problem # because numbers seem to be represented fairly regularly. number_strings = {"one": "1", "two": "2", "three": "3", "four": "4", "five": "5", "six": "6", "seven": "7", "eight": "8", "nine": "9", "ten": "10"} number_productions = [] tokens = sentence.split() numbers = number_strings.values() for token in tokens: if token in numbers: number_productions.append(f"int -> {token}") elif token in number_strings: number_productions.append(f"int -> {number_strings[token]}") return number_productions
python
def _get_number_productions(sentence: str) -> List[str]: """ Gathers all the numbers in the sentence, and returns productions that lead to them. """ # The mapping here is very simple and limited, which also shouldn't be a problem # because numbers seem to be represented fairly regularly. number_strings = {"one": "1", "two": "2", "three": "3", "four": "4", "five": "5", "six": "6", "seven": "7", "eight": "8", "nine": "9", "ten": "10"} number_productions = [] tokens = sentence.split() numbers = number_strings.values() for token in tokens: if token in numbers: number_productions.append(f"int -> {token}") elif token in number_strings: number_productions.append(f"int -> {number_strings[token]}") return number_productions
[ "def", "_get_number_productions", "(", "sentence", ":", "str", ")", "->", "List", "[", "str", "]", ":", "# The mapping here is very simple and limited, which also shouldn't be a problem", "# because numbers seem to be represented fairly regularly.", "number_strings", "=", "{", "\"one\"", ":", "\"1\"", ",", "\"two\"", ":", "\"2\"", ",", "\"three\"", ":", "\"3\"", ",", "\"four\"", ":", "\"4\"", ",", "\"five\"", ":", "\"5\"", ",", "\"six\"", ":", "\"6\"", ",", "\"seven\"", ":", "\"7\"", ",", "\"eight\"", ":", "\"8\"", ",", "\"nine\"", ":", "\"9\"", ",", "\"ten\"", ":", "\"10\"", "}", "number_productions", "=", "[", "]", "tokens", "=", "sentence", ".", "split", "(", ")", "numbers", "=", "number_strings", ".", "values", "(", ")", "for", "token", "in", "tokens", ":", "if", "token", "in", "numbers", ":", "number_productions", ".", "append", "(", "f\"int -> {token}\"", ")", "elif", "token", "in", "number_strings", ":", "number_productions", ".", "append", "(", "f\"int -> {number_strings[token]}\"", ")", "return", "number_productions" ]
Gathers all the numbers in the sentence, and returns productions that lead to them.
[ "Gathers", "all", "the", "numbers", "in", "the", "sentence", "and", "returns", "productions", "that", "lead", "to", "them", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/nlvr_language.py#L202-L218
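The mapping is easy to sanity-check in isolation; this stand-alone replica of the loop exercises both branches (a digit token and a number word):

number_strings = {"one": "1", "two": "2", "three": "3", "four": "4", "five": "5",
                  "six": "6", "seven": "7", "eight": "8", "nine": "9", "ten": "10"}

def number_productions(sentence: str):
    productions = []
    numbers = number_strings.values()
    for token in sentence.split():
        if token in numbers:                       # already a digit string, e.g. "3"
            productions.append(f"int -> {token}")
        elif token in number_strings:              # a number word, e.g. "two"
            productions.append(f"int -> {number_strings[token]}")
    return productions

print(number_productions("there are two boxes with 3 squares"))  # ['int -> 2', 'int -> 3']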
23,081
allenai/allennlp
allennlp/semparse/domain_languages/nlvr_language.py
NlvrLanguage.touch_object
def touch_object(self, objects: Set[Object]) -> Set[Object]: """ Returns all objects that touch the given set of objects. """ objects_per_box = self._separate_objects_by_boxes(objects) return_set = set() for box, box_objects in objects_per_box.items(): candidate_objects = box.objects for object_ in box_objects: for candidate_object in candidate_objects: if self._objects_touch_each_other(object_, candidate_object): return_set.add(candidate_object) return return_set
python
def touch_object(self, objects: Set[Object]) -> Set[Object]: """ Returns all objects that touch the given set of objects. """ objects_per_box = self._separate_objects_by_boxes(objects) return_set = set() for box, box_objects in objects_per_box.items(): candidate_objects = box.objects for object_ in box_objects: for candidate_object in candidate_objects: if self._objects_touch_each_other(object_, candidate_object): return_set.add(candidate_object) return return_set
[ "def", "touch_object", "(", "self", ",", "objects", ":", "Set", "[", "Object", "]", ")", "->", "Set", "[", "Object", "]", ":", "objects_per_box", "=", "self", ".", "_separate_objects_by_boxes", "(", "objects", ")", "return_set", "=", "set", "(", ")", "for", "box", ",", "box_objects", "in", "objects_per_box", ".", "items", "(", ")", ":", "candidate_objects", "=", "box", ".", "objects", "for", "object_", "in", "box_objects", ":", "for", "candidate_object", "in", "candidate_objects", ":", "if", "self", ".", "_objects_touch_each_other", "(", "object_", ",", "candidate_object", ")", ":", "return_set", ".", "add", "(", "candidate_object", ")", "return", "return_set" ]
Returns all objects that touch the given set of objects.
[ "Returns", "all", "objects", "that", "touch", "the", "given", "set", "of", "objects", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/nlvr_language.py#L329-L341
23,082
allenai/allennlp
allennlp/semparse/domain_languages/nlvr_language.py
NlvrLanguage.above
def above(self, objects: Set[Object]) -> Set[Object]: """ Returns the set of objects in the same boxes that are above the given objects. That is, if the input is a set of two objects, one in each box, we will return a union of the objects above the first object in the first box, and those above the second object in the second box. """ objects_per_box = self._separate_objects_by_boxes(objects) return_set = set() for box in objects_per_box: # min_y_loc corresponds to the top-most object. min_y_loc = min([obj.y_loc for obj in objects_per_box[box]]) for candidate_obj in box.objects: if candidate_obj.y_loc < min_y_loc: return_set.add(candidate_obj) return return_set
python
def above(self, objects: Set[Object]) -> Set[Object]: """ Returns the set of objects in the same boxes that are above the given objects. That is, if the input is a set of two objects, one in each box, we will return a union of the objects above the first object in the first box, and those above the second object in the second box. """ objects_per_box = self._separate_objects_by_boxes(objects) return_set = set() for box in objects_per_box: # min_y_loc corresponds to the top-most object. min_y_loc = min([obj.y_loc for obj in objects_per_box[box]]) for candidate_obj in box.objects: if candidate_obj.y_loc < min_y_loc: return_set.add(candidate_obj) return return_set
[ "def", "above", "(", "self", ",", "objects", ":", "Set", "[", "Object", "]", ")", "->", "Set", "[", "Object", "]", ":", "objects_per_box", "=", "self", ".", "_separate_objects_by_boxes", "(", "objects", ")", "return_set", "=", "set", "(", ")", "for", "box", "in", "objects_per_box", ":", "# min_y_loc corresponds to the top-most object.", "min_y_loc", "=", "min", "(", "[", "obj", ".", "y_loc", "for", "obj", "in", "objects_per_box", "[", "box", "]", "]", ")", "for", "candidate_obj", "in", "box", ".", "objects", ":", "if", "candidate_obj", ".", "y_loc", "<", "min_y_loc", ":", "return_set", ".", "add", "(", "candidate_obj", ")", "return", "return_set" ]
Returns the set of objects in the same boxes that are above the given objects. That is, if the input is a set of two objects, one in each box, we will return a union of the objects above the first object in the first box, and those above the second object in the second box.
[ "Returns", "the", "set", "of", "objects", "in", "the", "same", "boxes", "that", "are", "above", "the", "given", "objects", ".", "That", "is", "if", "the", "input", "is", "a", "set", "of", "two", "objects", "one", "in", "each", "box", "we", "will", "return", "a", "union", "of", "the", "objects", "above", "the", "first", "object", "in", "the", "first", "box", "and", "those", "above", "the", "second", "object", "in", "the", "second", "box", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/nlvr_language.py#L370-L384
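The per-box logic is easiest to see with stand-in objects (the real Object and Box classes live in this module; the sketch below only mirrors the y_loc comparison, where a smaller y_loc means higher up):

from collections import namedtuple

Obj = namedtuple("Obj", ["y_loc"])
box_objects = [Obj(10), Obj(40), Obj(70)]
given = {box_objects[1]}                       # the object at y_loc 40

min_y_loc = min(obj.y_loc for obj in given)
above = {obj for obj in box_objects if obj.y_loc < min_y_loc}
print(above)                                   # {Obj(y_loc=10)} -- only the higher object

The ``below`` method in the next record mirrors this exactly, with ``max`` and ``>`` in place of ``min`` and ``<``.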
23,083
allenai/allennlp
allennlp/semparse/domain_languages/nlvr_language.py
NlvrLanguage.below
def below(self, objects: Set[Object]) -> Set[Object]: """ Returns the set of objects in the same boxes that are below the given objects. That is, if the input is a set of two objects, one in each box, we will return a union of the objects below the first object in the first box, and those below the second object in the second box. """ objects_per_box = self._separate_objects_by_boxes(objects) return_set = set() for box in objects_per_box: # max_y_loc corresponds to the bottom-most object. max_y_loc = max([obj.y_loc for obj in objects_per_box[box]]) for candidate_obj in box.objects: if candidate_obj.y_loc > max_y_loc: return_set.add(candidate_obj) return return_set
python
def below(self, objects: Set[Object]) -> Set[Object]: """ Returns the set of objects in the same boxes that are below the given objects. That is, if the input is a set of two objects, one in each box, we will return a union of the objects below the first object in the first box, and those below the second object in the second box. """ objects_per_box = self._separate_objects_by_boxes(objects) return_set = set() for box in objects_per_box: # max_y_loc corresponds to the bottom-most object. max_y_loc = max([obj.y_loc for obj in objects_per_box[box]]) for candidate_obj in box.objects: if candidate_obj.y_loc > max_y_loc: return_set.add(candidate_obj) return return_set
[ "def", "below", "(", "self", ",", "objects", ":", "Set", "[", "Object", "]", ")", "->", "Set", "[", "Object", "]", ":", "objects_per_box", "=", "self", ".", "_separate_objects_by_boxes", "(", "objects", ")", "return_set", "=", "set", "(", ")", "for", "box", "in", "objects_per_box", ":", "# max_y_loc corresponds to the bottom-most object.", "max_y_loc", "=", "max", "(", "[", "obj", ".", "y_loc", "for", "obj", "in", "objects_per_box", "[", "box", "]", "]", ")", "for", "candidate_obj", "in", "box", ".", "objects", ":", "if", "candidate_obj", ".", "y_loc", ">", "max_y_loc", ":", "return_set", ".", "add", "(", "candidate_obj", ")", "return", "return_set" ]
Returns the set of objects in the same boxes that are below the given objects. That is, if the input is a set of two objects, one in each box, we will return a union of the objects below the first object in the first box, and those below the second object in the second box.
[ "Returns", "the", "set", "of", "objects", "in", "the", "same", "boxes", "that", "are", "below", "the", "given", "objects", ".", "That", "is", "if", "the", "input", "is", "a", "set", "of", "two", "objects", "one", "in", "each", "box", "we", "will", "return", "a", "union", "of", "the", "objects", "below", "the", "first", "object", "in", "the", "first", "box", "and", "those", "below", "the", "second", "object", "in", "the", "second", "box", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/nlvr_language.py#L387-L401
23,084
allenai/allennlp
allennlp/semparse/domain_languages/nlvr_language.py
NlvrLanguage._objects_touch_each_other
def _objects_touch_each_other(self, object1: Object, object2: Object) -> bool: """ Returns true iff the objects touch each other. """ in_vertical_range = object1.y_loc <= object2.y_loc + object2.size and \ object1.y_loc + object1.size >= object2.y_loc in_horizantal_range = object1.x_loc <= object2.x_loc + object2.size and \ object1.x_loc + object1.size >= object2.x_loc touch_side = object1.x_loc + object1.size == object2.x_loc or \ object2.x_loc + object2.size == object1.x_loc touch_top_or_bottom = object1.y_loc + object1.size == object2.y_loc or \ object2.y_loc + object2.size == object1.y_loc return (in_vertical_range and touch_side) or (in_horizantal_range and touch_top_or_bottom)
python
def _objects_touch_each_other(self, object1: Object, object2: Object) -> bool: """ Returns true iff the objects touch each other. """ in_vertical_range = object1.y_loc <= object2.y_loc + object2.size and \ object1.y_loc + object1.size >= object2.y_loc in_horizantal_range = object1.x_loc <= object2.x_loc + object2.size and \ object1.x_loc + object1.size >= object2.x_loc touch_side = object1.x_loc + object1.size == object2.x_loc or \ object2.x_loc + object2.size == object1.x_loc touch_top_or_bottom = object1.y_loc + object1.size == object2.y_loc or \ object2.y_loc + object2.size == object1.y_loc return (in_vertical_range and touch_side) or (in_horizantal_range and touch_top_or_bottom)
[ "def", "_objects_touch_each_other", "(", "self", ",", "object1", ":", "Object", ",", "object2", ":", "Object", ")", "->", "bool", ":", "in_vertical_range", "=", "object1", ".", "y_loc", "<=", "object2", ".", "y_loc", "+", "object2", ".", "size", "and", "object1", ".", "y_loc", "+", "object1", ".", "size", ">=", "object2", ".", "y_loc", "in_horizantal_range", "=", "object1", ".", "x_loc", "<=", "object2", ".", "x_loc", "+", "object2", ".", "size", "and", "object1", ".", "x_loc", "+", "object1", ".", "size", ">=", "object2", ".", "x_loc", "touch_side", "=", "object1", ".", "x_loc", "+", "object1", ".", "size", "==", "object2", ".", "x_loc", "or", "object2", ".", "x_loc", "+", "object2", ".", "size", "==", "object1", ".", "x_loc", "touch_top_or_bottom", "=", "object1", ".", "y_loc", "+", "object1", ".", "size", "==", "object2", ".", "y_loc", "or", "object2", ".", "y_loc", "+", "object2", ".", "size", "==", "object1", ".", "y_loc", "return", "(", "in_vertical_range", "and", "touch_side", ")", "or", "(", "in_horizantal_range", "and", "touch_top_or_bottom", ")" ]
Returns true iff the objects touch each other.
[ "Returns", "true", "iff", "the", "objects", "touch", "each", "other", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/nlvr_language.py#L654-L666
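A worked check of the side-touch case with stand-in objects (coordinates are illustrative; objects are treated as axis-aligned squares of the given size):

from collections import namedtuple

Obj = namedtuple("Obj", ["x_loc", "y_loc", "size"])
a = Obj(x_loc=10, y_loc=10, size=10)
b = Obj(x_loc=20, y_loc=12, size=10)                  # b starts exactly where a ends on x

touch_side = (a.x_loc + a.size == b.x_loc or
              b.x_loc + b.size == a.x_loc)            # 10 + 10 == 20 -> True
in_vertical_range = (a.y_loc <= b.y_loc + b.size and
                     a.y_loc + a.size >= b.y_loc)     # 10 <= 22 and 20 >= 12 -> True
print(touch_side and in_vertical_range)               # True: they touch along a side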
23,085
allenai/allennlp
allennlp/semparse/domain_languages/nlvr_language.py
NlvrLanguage._separate_objects_by_boxes
def _separate_objects_by_boxes(self, objects: Set[Object]) -> Dict[Box, List[Object]]: """ Given a set of objects, separate them by the boxes they belong to and return a dict. """ objects_per_box: Dict[Box, List[Object]] = defaultdict(list) for box in self.boxes: for object_ in objects: if object_ in box.objects: objects_per_box[box].append(object_) return objects_per_box
python
def _separate_objects_by_boxes(self, objects: Set[Object]) -> Dict[Box, List[Object]]: """ Given a set of objects, separate them by the boxes they belong to and return a dict. """ objects_per_box: Dict[Box, List[Object]] = defaultdict(list) for box in self.boxes: for object_ in objects: if object_ in box.objects: objects_per_box[box].append(object_) return objects_per_box
[ "def", "_separate_objects_by_boxes", "(", "self", ",", "objects", ":", "Set", "[", "Object", "]", ")", "->", "Dict", "[", "Box", ",", "List", "[", "Object", "]", "]", ":", "objects_per_box", ":", "Dict", "[", "Box", ",", "List", "[", "Object", "]", "]", "=", "defaultdict", "(", "list", ")", "for", "box", "in", "self", ".", "boxes", ":", "for", "object_", "in", "objects", ":", "if", "object_", "in", "box", ".", "objects", ":", "objects_per_box", "[", "box", "]", ".", "append", "(", "object_", ")", "return", "objects_per_box" ]
Given a set of objects, separate them by the boxes they belong to and return a dict.
[ "Given", "a", "set", "of", "objects", "separate", "them", "by", "the", "boxes", "they", "belong", "to", "and", "return", "a", "dict", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/nlvr_language.py#L668-L677
23,086
allenai/allennlp
allennlp/semparse/domain_languages/nlvr_language.py
NlvrLanguage._get_objects_with_same_attribute
def _get_objects_with_same_attribute(self, objects: Set[Object], attribute_function: Callable[[Object], str]) -> Set[Object]: """ Returns the set of objects for which the attribute function returns an attribute value that is most frequent in the initial set, if the frequency is greater than 1. If not, all objects have different attribute values, and this method returns an empty set. """ objects_of_attribute: Dict[str, Set[Object]] = defaultdict(set) for entity in objects: objects_of_attribute[attribute_function(entity)].add(entity) if not objects_of_attribute: return set() most_frequent_attribute = max(objects_of_attribute, key=lambda x: len(objects_of_attribute[x])) if len(objects_of_attribute[most_frequent_attribute]) <= 1: return set() return objects_of_attribute[most_frequent_attribute]
python
def _get_objects_with_same_attribute(self, objects: Set[Object], attribute_function: Callable[[Object], str]) -> Set[Object]: """ Returns the set of objects for which the attribute function returns an attribute value that is most frequent in the initial set, if the frequency is greater than 1. If not, all objects have different attribute values, and this method returns an empty set. """ objects_of_attribute: Dict[str, Set[Object]] = defaultdict(set) for entity in objects: objects_of_attribute[attribute_function(entity)].add(entity) if not objects_of_attribute: return set() most_frequent_attribute = max(objects_of_attribute, key=lambda x: len(objects_of_attribute[x])) if len(objects_of_attribute[most_frequent_attribute]) <= 1: return set() return objects_of_attribute[most_frequent_attribute]
[ "def", "_get_objects_with_same_attribute", "(", "self", ",", "objects", ":", "Set", "[", "Object", "]", ",", "attribute_function", ":", "Callable", "[", "[", "Object", "]", ",", "str", "]", ")", "->", "Set", "[", "Object", "]", ":", "objects_of_attribute", ":", "Dict", "[", "str", ",", "Set", "[", "Object", "]", "]", "=", "defaultdict", "(", "set", ")", "for", "entity", "in", "objects", ":", "objects_of_attribute", "[", "attribute_function", "(", "entity", ")", "]", ".", "add", "(", "entity", ")", "if", "not", "objects_of_attribute", ":", "return", "set", "(", ")", "most_frequent_attribute", "=", "max", "(", "objects_of_attribute", ",", "key", "=", "lambda", "x", ":", "len", "(", "objects_of_attribute", "[", "x", "]", ")", ")", "if", "len", "(", "objects_of_attribute", "[", "most_frequent_attribute", "]", ")", "<=", "1", ":", "return", "set", "(", ")", "return", "objects_of_attribute", "[", "most_frequent_attribute", "]" ]
Returns the set of objects for which the attribute function returns an attribute value that is most frequent in the initial set, if the frequency is greater than 1. If not, all objects have different attribute values, and this method returns an empty set.
[ "Returns", "the", "set", "of", "objects", "for", "which", "the", "attribute", "function", "returns", "an", "attribute", "value", "that", "is", "most", "frequent", "in", "the", "initial", "set", "if", "the", "frequency", "is", "greater", "than", "1", ".", "If", "not", "all", "objects", "have", "different", "attribute", "values", "and", "this", "method", "returns", "an", "empty", "set", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/domain_languages/nlvr_language.py#L679-L695
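The grouping is straightforward to replicate with plain (id, color) tuples standing in for objects, with the color as the attribute:

from collections import defaultdict

objects = {("a", "blue"), ("b", "blue"), ("c", "yellow")}
groups = defaultdict(set)
for obj in objects:
    groups[obj[1]].add(obj)                       # attribute_function = the color

most_frequent = max(groups, key=lambda attr: len(groups[attr]))
result = groups[most_frequent] if len(groups[most_frequent]) > 1 else set()
print(result)                                     # {('a', 'blue'), ('b', 'blue')}

If every color were distinct, all groups would have size 1 and the result would be the empty set, as the docstring describes.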
23,087
allenai/allennlp
allennlp/nn/util.py
has_tensor
def has_tensor(obj) -> bool: """ Given a possibly complex data structure, check if it has any torch.Tensors in it. """ if isinstance(obj, torch.Tensor): return True elif isinstance(obj, dict): return any(has_tensor(value) for value in obj.values()) elif isinstance(obj, (list, tuple)): return any(has_tensor(item) for item in obj) else: return False
python
def has_tensor(obj) -> bool: """ Given a possibly complex data structure, check if it has any torch.Tensors in it. """ if isinstance(obj, torch.Tensor): return True elif isinstance(obj, dict): return any(has_tensor(value) for value in obj.values()) elif isinstance(obj, (list, tuple)): return any(has_tensor(item) for item in obj) else: return False
[ "def", "has_tensor", "(", "obj", ")", "->", "bool", ":", "if", "isinstance", "(", "obj", ",", "torch", ".", "Tensor", ")", ":", "return", "True", "elif", "isinstance", "(", "obj", ",", "dict", ")", ":", "return", "any", "(", "has_tensor", "(", "value", ")", "for", "value", "in", "obj", ".", "values", "(", ")", ")", "elif", "isinstance", "(", "obj", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "any", "(", "has_tensor", "(", "item", ")", "for", "item", "in", "obj", ")", "else", ":", "return", "False" ]
Given a possibly complex data structure, check if it has any torch.Tensors in it.
[ "Given", "a", "possibly", "complex", "data", "structure", "check", "if", "it", "has", "any", "torch", ".", "Tensors", "in", "it", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/nn/util.py#L20-L32
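Usage is direct, since the check simply recurses through dicts, lists and tuples:

import torch
from allennlp.nn.util import has_tensor

print(has_tensor({"ids": [1, 2, 3], "meta": ("x",)}))           # False
print(has_tensor({"ids": [torch.tensor([1.0])], "meta": "x"}))  # True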
23,088
allenai/allennlp
allennlp/nn/util.py
clamp_tensor
def clamp_tensor(tensor, minimum, maximum): """ Supports sparse and dense tensors. Returns a tensor with values clamped between the provided minimum and maximum, without modifying the original tensor. """ if tensor.is_sparse: coalesced_tensor = tensor.coalesce() # pylint: disable=protected-access coalesced_tensor._values().clamp_(minimum, maximum) return coalesced_tensor else: return tensor.clamp(minimum, maximum)
python
def clamp_tensor(tensor, minimum, maximum): """ Supports sparse and dense tensors. Returns a tensor with values clamped between the provided minimum and maximum, without modifying the original tensor. """ if tensor.is_sparse: coalesced_tensor = tensor.coalesce() # pylint: disable=protected-access coalesced_tensor._values().clamp_(minimum, maximum) return coalesced_tensor else: return tensor.clamp(minimum, maximum)
[ "def", "clamp_tensor", "(", "tensor", ",", "minimum", ",", "maximum", ")", ":", "if", "tensor", ".", "is_sparse", ":", "coalesced_tensor", "=", "tensor", ".", "coalesce", "(", ")", "# pylint: disable=protected-access", "coalesced_tensor", ".", "_values", "(", ")", ".", "clamp_", "(", "minimum", ",", "maximum", ")", "return", "coalesced_tensor", "else", ":", "return", "tensor", ".", "clamp", "(", "minimum", ",", "maximum", ")" ]
Supports sparse and dense tensors. Returns a tensor with values clamped between the provided minimum and maximum, without modifying the original tensor.
[ "Supports", "sparse", "and", "dense", "tensors", ".", "Returns", "a", "tensor", "with", "values", "clamped", "between", "the", "provided", "minimum", "and", "maximum", "without", "modifying", "the", "original", "tensor", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/nn/util.py#L54-L66
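A quick check on a dense and a sparse tensor (values are illustrative; torch.sparse_coo_tensor is assumed available in the PyTorch version this commit targets):

import torch
from allennlp.nn.util import clamp_tensor

dense = torch.tensor([-2.0, 0.5, 3.0])
print(clamp_tensor(dense, minimum=-1.0, maximum=1.0))    # tensor([-1.0000, 0.5000, 1.0000])

sparse = torch.sparse_coo_tensor(torch.tensor([[0, 2]]), torch.tensor([-5.0, 5.0]), (3,))
clamped = clamp_tensor(sparse, minimum=-1.0, maximum=1.0)
print(clamped.to_dense())                                # tensor([-1., 0., 1.])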
23,089
allenai/allennlp
allennlp/nn/util.py
batch_tensor_dicts
def batch_tensor_dicts(tensor_dicts: List[Dict[str, torch.Tensor]], remove_trailing_dimension: bool = False) -> Dict[str, torch.Tensor]: """ Takes a list of tensor dictionaries, where each dictionary is assumed to have matching keys, and returns a single dictionary with all tensors with the same key batched together. Parameters ---------- tensor_dicts : ``List[Dict[str, torch.Tensor]]`` The list of tensor dictionaries to batch. remove_trailing_dimension : ``bool`` If ``True``, we will check for a trailing dimension of size 1 on the tensors that are being batched, and remove it if we find it. """ key_to_tensors: Dict[str, List[torch.Tensor]] = defaultdict(list) for tensor_dict in tensor_dicts: for key, tensor in tensor_dict.items(): key_to_tensors[key].append(tensor) batched_tensors = {} for key, tensor_list in key_to_tensors.items(): batched_tensor = torch.stack(tensor_list) if remove_trailing_dimension and all(tensor.size(-1) == 1 for tensor in tensor_list): batched_tensor = batched_tensor.squeeze(-1) batched_tensors[key] = batched_tensor return batched_tensors
python
def batch_tensor_dicts(tensor_dicts: List[Dict[str, torch.Tensor]], remove_trailing_dimension: bool = False) -> Dict[str, torch.Tensor]: """ Takes a list of tensor dictionaries, where each dictionary is assumed to have matching keys, and returns a single dictionary with all tensors with the same key batched together. Parameters ---------- tensor_dicts : ``List[Dict[str, torch.Tensor]]`` The list of tensor dictionaries to batch. remove_trailing_dimension : ``bool`` If ``True``, we will check for a trailing dimension of size 1 on the tensors that are being batched, and remove it if we find it. """ key_to_tensors: Dict[str, List[torch.Tensor]] = defaultdict(list) for tensor_dict in tensor_dicts: for key, tensor in tensor_dict.items(): key_to_tensors[key].append(tensor) batched_tensors = {} for key, tensor_list in key_to_tensors.items(): batched_tensor = torch.stack(tensor_list) if remove_trailing_dimension and all(tensor.size(-1) == 1 for tensor in tensor_list): batched_tensor = batched_tensor.squeeze(-1) batched_tensors[key] = batched_tensor return batched_tensors
[ "def", "batch_tensor_dicts", "(", "tensor_dicts", ":", "List", "[", "Dict", "[", "str", ",", "torch", ".", "Tensor", "]", "]", ",", "remove_trailing_dimension", ":", "bool", "=", "False", ")", "->", "Dict", "[", "str", ",", "torch", ".", "Tensor", "]", ":", "key_to_tensors", ":", "Dict", "[", "str", ",", "List", "[", "torch", ".", "Tensor", "]", "]", "=", "defaultdict", "(", "list", ")", "for", "tensor_dict", "in", "tensor_dicts", ":", "for", "key", ",", "tensor", "in", "tensor_dict", ".", "items", "(", ")", ":", "key_to_tensors", "[", "key", "]", ".", "append", "(", "tensor", ")", "batched_tensors", "=", "{", "}", "for", "key", ",", "tensor_list", "in", "key_to_tensors", ".", "items", "(", ")", ":", "batched_tensor", "=", "torch", ".", "stack", "(", "tensor_list", ")", "if", "remove_trailing_dimension", "and", "all", "(", "tensor", ".", "size", "(", "-", "1", ")", "==", "1", "for", "tensor", "in", "tensor_list", ")", ":", "batched_tensor", "=", "batched_tensor", ".", "squeeze", "(", "-", "1", ")", "batched_tensors", "[", "key", "]", "=", "batched_tensor", "return", "batched_tensors" ]
Takes a list of tensor dictionaries, where each dictionary is assumed to have matching keys, and returns a single dictionary with all tensors with the same key batched together. Parameters ---------- tensor_dicts : ``List[Dict[str, torch.Tensor]]`` The list of tensor dictionaries to batch. remove_trailing_dimension : ``bool`` If ``True``, we will check for a trailing dimension of size 1 on the tensors that are being batched, and remove it if we find it.
[ "Takes", "a", "list", "of", "tensor", "dictionaries", "where", "each", "dictionary", "is", "assumed", "to", "have", "matching", "keys", "and", "returns", "a", "single", "dictionary", "with", "all", "tensors", "with", "the", "same", "key", "batched", "together", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/nn/util.py#L69-L93
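A short example of both behaviors (the shapes are what matter here):

import torch
from allennlp.nn.util import batch_tensor_dicts

dicts = [{"tokens": torch.tensor([1, 2])}, {"tokens": torch.tensor([3, 4])}]
print(batch_tensor_dicts(dicts)["tokens"].shape)          # torch.Size([2, 2])

dicts = [{"score": torch.tensor([1.0])}, {"score": torch.tensor([2.0])}]
print(batch_tensor_dicts(dicts, remove_trailing_dimension=True)["score"])
# tensor([1., 2.]) -- the trailing size-1 dimension is squeezed away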
23,090
allenai/allennlp
allennlp/nn/util.py
sort_batch_by_length
def sort_batch_by_length(tensor: torch.Tensor, sequence_lengths: torch.Tensor): """ Sort a batch first tensor by some specified lengths. Parameters ---------- tensor : torch.FloatTensor, required. A batch first PyTorch tensor. sequence_lengths : torch.LongTensor, required. A tensor representing the lengths of some dimension of the tensor which we want to sort by. Returns ------- sorted_tensor : torch.FloatTensor The original tensor sorted along the batch dimension with respect to sequence_lengths. sorted_sequence_lengths : torch.LongTensor The original sequence_lengths sorted by decreasing size. restoration_indices : torch.LongTensor Indices into the sorted_tensor such that ``sorted_tensor.index_select(0, restoration_indices) == original_tensor`` permutation_index : torch.LongTensor The indices used to sort the tensor. This is useful if you want to sort many tensors using the same ordering. """ if not isinstance(tensor, torch.Tensor) or not isinstance(sequence_lengths, torch.Tensor): raise ConfigurationError("Both the tensor and sequence lengths must be torch.Tensors.") sorted_sequence_lengths, permutation_index = sequence_lengths.sort(0, descending=True) sorted_tensor = tensor.index_select(0, permutation_index) index_range = torch.arange(0, len(sequence_lengths), device=sequence_lengths.device) # This is the equivalent of zipping with index, sorting by the original # sequence lengths and returning the now sorted indices. _, reverse_mapping = permutation_index.sort(0, descending=False) restoration_indices = index_range.index_select(0, reverse_mapping) return sorted_tensor, sorted_sequence_lengths, restoration_indices, permutation_index
python
def sort_batch_by_length(tensor: torch.Tensor, sequence_lengths: torch.Tensor): """ Sort a batch first tensor by some specified lengths. Parameters ---------- tensor : torch.FloatTensor, required. A batch first PyTorch tensor. sequence_lengths : torch.LongTensor, required. A tensor representing the lengths of some dimension of the tensor which we want to sort by. Returns ------- sorted_tensor : torch.FloatTensor The original tensor sorted along the batch dimension with respect to sequence_lengths. sorted_sequence_lengths : torch.LongTensor The original sequence_lengths sorted by decreasing size. restoration_indices : torch.LongTensor Indices into the sorted_tensor such that ``sorted_tensor.index_select(0, restoration_indices) == original_tensor`` permutation_index : torch.LongTensor The indices used to sort the tensor. This is useful if you want to sort many tensors using the same ordering. """ if not isinstance(tensor, torch.Tensor) or not isinstance(sequence_lengths, torch.Tensor): raise ConfigurationError("Both the tensor and sequence lengths must be torch.Tensors.") sorted_sequence_lengths, permutation_index = sequence_lengths.sort(0, descending=True) sorted_tensor = tensor.index_select(0, permutation_index) index_range = torch.arange(0, len(sequence_lengths), device=sequence_lengths.device) # This is the equivalent of zipping with index, sorting by the original # sequence lengths and returning the now sorted indices. _, reverse_mapping = permutation_index.sort(0, descending=False) restoration_indices = index_range.index_select(0, reverse_mapping) return sorted_tensor, sorted_sequence_lengths, restoration_indices, permutation_index
[ "def", "sort_batch_by_length", "(", "tensor", ":", "torch", ".", "Tensor", ",", "sequence_lengths", ":", "torch", ".", "Tensor", ")", ":", "if", "not", "isinstance", "(", "tensor", ",", "torch", ".", "Tensor", ")", "or", "not", "isinstance", "(", "sequence_lengths", ",", "torch", ".", "Tensor", ")", ":", "raise", "ConfigurationError", "(", "\"Both the tensor and sequence lengths must be torch.Tensors.\"", ")", "sorted_sequence_lengths", ",", "permutation_index", "=", "sequence_lengths", ".", "sort", "(", "0", ",", "descending", "=", "True", ")", "sorted_tensor", "=", "tensor", ".", "index_select", "(", "0", ",", "permutation_index", ")", "index_range", "=", "torch", ".", "arange", "(", "0", ",", "len", "(", "sequence_lengths", ")", ",", "device", "=", "sequence_lengths", ".", "device", ")", "# This is the equivalent of zipping with index, sorting by the original", "# sequence lengths and returning the now sorted indices.", "_", ",", "reverse_mapping", "=", "permutation_index", ".", "sort", "(", "0", ",", "descending", "=", "False", ")", "restoration_indices", "=", "index_range", ".", "index_select", "(", "0", ",", "reverse_mapping", ")", "return", "sorted_tensor", ",", "sorted_sequence_lengths", ",", "restoration_indices", ",", "permutation_index" ]
Sort a batch first tensor by some specified lengths. Parameters ---------- tensor : torch.FloatTensor, required. A batch first PyTorch tensor. sequence_lengths : torch.LongTensor, required. A tensor representing the lengths of some dimension of the tensor which we want to sort by. Returns ------- sorted_tensor : torch.FloatTensor The original tensor sorted along the batch dimension with respect to sequence_lengths. sorted_sequence_lengths : torch.LongTensor The original sequence_lengths sorted by decreasing size. restoration_indices : torch.LongTensor Indices into the sorted_tensor such that ``sorted_tensor.index_select(0, restoration_indices) == original_tensor`` permutation_index : torch.LongTensor The indices used to sort the tensor. This is useful if you want to sort many tensors using the same ordering.
[ "Sort", "a", "batch", "first", "tensor", "by", "some", "specified", "lengths", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/nn/util.py#L132-L169
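A worked example of the four return values (the tensor contents are random; only the index bookkeeping matters):

import torch
from allennlp.nn.util import sort_batch_by_length

tensor = torch.rand(3, 5, 7)                       # batch of 3, max length 5
lengths = torch.tensor([2, 5, 3])
sorted_tensor, sorted_lengths, restoration, permutation = sort_batch_by_length(tensor, lengths)
print(sorted_lengths)                              # tensor([5, 3, 2])
print(permutation)                                 # tensor([1, 2, 0])
print(torch.equal(sorted_tensor.index_select(0, restoration), tensor))  # True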
23,091
allenai/allennlp
allennlp/nn/util.py
get_dropout_mask
def get_dropout_mask(dropout_probability: float, tensor_for_masking: torch.Tensor): """ Computes and returns an element-wise dropout mask for a given tensor, where each element in the mask is dropped out with probability dropout_probability. Note that the mask is NOT applied to the tensor - the tensor is passed to retain the correct CUDA tensor type for the mask. Parameters ---------- dropout_probability : float, required. Probability of dropping a dimension of the input. tensor_for_masking : torch.Tensor, required. Returns ------- A torch.FloatTensor consisting of the binary mask scaled by 1 / (1 - dropout_probability). This scaling ensures that the expected value and variance of the output of applying this mask match those of the original tensor. """ binary_mask = (torch.rand(tensor_for_masking.size()) > dropout_probability).to(tensor_for_masking.device) # Scale mask by 1/keep_prob to preserve output statistics. dropout_mask = binary_mask.float().div(1.0 - dropout_probability) return dropout_mask
python
def get_dropout_mask(dropout_probability: float, tensor_for_masking: torch.Tensor): """ Computes and returns an element-wise dropout mask for a given tensor, where each element in the mask is dropped out with probability dropout_probability. Note that the mask is NOT applied to the tensor - the tensor is passed to retain the correct CUDA tensor type for the mask. Parameters ---------- dropout_probability : float, required. Probability of dropping a dimension of the input. tensor_for_masking : torch.Tensor, required. Returns ------- A torch.FloatTensor consisting of the binary mask scaled by 1 / (1 - dropout_probability). This scaling ensures that the expected value and variance of the output of applying this mask match those of the original tensor. """ binary_mask = (torch.rand(tensor_for_masking.size()) > dropout_probability).to(tensor_for_masking.device) # Scale mask by 1/keep_prob to preserve output statistics. dropout_mask = binary_mask.float().div(1.0 - dropout_probability) return dropout_mask
[ "def", "get_dropout_mask", "(", "dropout_probability", ":", "float", ",", "tensor_for_masking", ":", "torch", ".", "Tensor", ")", ":", "binary_mask", "=", "(", "torch", ".", "rand", "(", "tensor_for_masking", ".", "size", "(", ")", ")", ">", "dropout_probability", ")", ".", "to", "(", "tensor_for_masking", ".", "device", ")", "# Scale mask by 1/keep_prob to preserve output statistics.", "dropout_mask", "=", "binary_mask", ".", "float", "(", ")", ".", "div", "(", "1.0", "-", "dropout_probability", ")", "return", "dropout_mask" ]
Computes and returns an element-wise dropout mask for a given tensor, where each element in the mask is dropped out with probability dropout_probability. Note that the mask is NOT applied to the tensor - the tensor is passed to retain the correct CUDA tensor type for the mask. Parameters ---------- dropout_probability : float, required. Probability of dropping a dimension of the input. tensor_for_masking : torch.Tensor, required. Returns ------- A torch.FloatTensor consisting of the binary mask scaled by 1 / (1 - dropout_probability). This scaling ensures that the expected value and variance of the output of applying this mask match those of the original tensor.
[ "Computes", "and", "returns", "an", "element", "-", "wise", "dropout", "mask", "for", "a", "given", "tensor", "where", "each", "element", "in", "the", "mask", "is", "dropped", "out", "with", "probability", "dropout_probability", ".", "Note", "that", "the", "mask", "is", "NOT", "applied", "to", "the", "tensor", "-", "the", "tensor", "is", "passed", "to", "retain", "the", "correct", "CUDA", "tensor", "type", "for", "the", "mask", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/nn/util.py#L205-L228
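Applying the mask is left to the caller; a sketch (the zero/scale pattern is deterministic, while the dropped positions are random):

import torch
from allennlp.nn.util import get_dropout_mask

hidden = torch.ones(2, 4)
mask = get_dropout_mask(0.5, hidden)
print(hidden * mask)   # each entry is either 0.0 (dropped) or 2.0 (kept, scaled by 1/0.5)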
23,092
allenai/allennlp
allennlp/nn/util.py
masked_max
def masked_max(vector: torch.Tensor, mask: torch.Tensor, dim: int, keepdim: bool = False, min_val: float = -1e7) -> torch.Tensor: """ To calculate max along certain dimensions on masked values Parameters ---------- vector : ``torch.Tensor`` The vector to calculate max, assume unmasked parts are already zeros mask : ``torch.Tensor`` The mask of the vector. It must be broadcastable with vector. dim : ``int`` The dimension to calculate max keepdim : ``bool`` Whether to keep dimension min_val : ``float`` The minimal value for paddings Returns ------- A ``torch.Tensor`` containing the maximum values. """ one_minus_mask = (1.0 - mask).byte() replaced_vector = vector.masked_fill(one_minus_mask, min_val) max_value, _ = replaced_vector.max(dim=dim, keepdim=keepdim) return max_value
python
def masked_max(vector: torch.Tensor, mask: torch.Tensor, dim: int, keepdim: bool = False, min_val: float = -1e7) -> torch.Tensor: """ To calculate max along certain dimensions on masked values Parameters ---------- vector : ``torch.Tensor`` The vector to calculate max, assume unmasked parts are already zeros mask : ``torch.Tensor`` The mask of the vector. It must be broadcastable with vector. dim : ``int`` The dimension to calculate max keepdim : ``bool`` Whether to keep dimension min_val : ``float`` The minimal value for paddings Returns ------- A ``torch.Tensor`` containing the maximum values. """ one_minus_mask = (1.0 - mask).byte() replaced_vector = vector.masked_fill(one_minus_mask, min_val) max_value, _ = replaced_vector.max(dim=dim, keepdim=keepdim) return max_value
[ "def", "masked_max", "(", "vector", ":", "torch", ".", "Tensor", ",", "mask", ":", "torch", ".", "Tensor", ",", "dim", ":", "int", ",", "keepdim", ":", "bool", "=", "False", ",", "min_val", ":", "float", "=", "-", "1e7", ")", "->", "torch", ".", "Tensor", ":", "one_minus_mask", "=", "(", "1.0", "-", "mask", ")", ".", "byte", "(", ")", "replaced_vector", "=", "vector", ".", "masked_fill", "(", "one_minus_mask", ",", "min_val", ")", "max_value", ",", "_", "=", "replaced_vector", ".", "max", "(", "dim", "=", "dim", ",", "keepdim", "=", "keepdim", ")", "return", "max_value" ]
To calculate max along certain dimensions on masked values Parameters ---------- vector : ``torch.Tensor`` The vector to calculate max, assume unmasked parts are already zeros mask : ``torch.Tensor`` The mask of the vector. It must be broadcastable with vector. dim : ``int`` The dimension to calculate max keepdim : ``bool`` Whether to keep dimension min_val : ``float`` The minimal value for paddings Returns ------- A ``torch.Tensor`` containing the maximum values.
[ "To", "calculate", "max", "along", "certain", "dimensions", "on", "masked", "values" ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/nn/util.py#L306-L334
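A small concrete check (mask values are floats here, matching how AllenNLP masks are usually passed around):

import torch
from allennlp.nn.util import masked_max

vector = torch.tensor([[1.0, 5.0, 3.0]])
mask = torch.tensor([[1.0, 0.0, 1.0]])      # the 5.0 entry is padding
print(masked_max(vector, mask, dim=-1))     # tensor([3.]) -- padding is ignored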
23,093
allenai/allennlp
allennlp/nn/util.py
masked_mean
def masked_mean(vector: torch.Tensor, mask: torch.Tensor, dim: int, keepdim: bool = False, eps: float = 1e-8) -> torch.Tensor: """ To calculate mean along certain dimensions on masked values Parameters ---------- vector : ``torch.Tensor`` The vector to calculate mean. mask : ``torch.Tensor`` The mask of the vector. It must be broadcastable with vector. dim : ``int`` The dimension to calculate mean keepdim : ``bool`` Whether to keep dimension eps : ``float`` A small value to avoid zero division problem. Returns ------- A ``torch.Tensor`` containing the mean values. """ one_minus_mask = (1.0 - mask).byte() replaced_vector = vector.masked_fill(one_minus_mask, 0.0) value_sum = torch.sum(replaced_vector, dim=dim, keepdim=keepdim) value_count = torch.sum(mask.float(), dim=dim, keepdim=keepdim) return value_sum / value_count.clamp(min=eps)
python
def masked_mean(vector: torch.Tensor, mask: torch.Tensor, dim: int, keepdim: bool = False, eps: float = 1e-8) -> torch.Tensor: """ Calculates the mean along a given dimension, ignoring masked values. Parameters ---------- vector : ``torch.Tensor`` The vector to calculate the mean over. mask : ``torch.Tensor`` The mask of the vector. It must be broadcastable with vector. dim : ``int`` The dimension to calculate the mean along. keepdim : ``bool`` Whether to keep the reduced dimension. eps : ``float`` A small value to avoid division by zero. Returns ------- A ``torch.Tensor`` containing the mean values. """ one_minus_mask = (1.0 - mask).byte() replaced_vector = vector.masked_fill(one_minus_mask, 0.0) value_sum = torch.sum(replaced_vector, dim=dim, keepdim=keepdim) value_count = torch.sum(mask.float(), dim=dim, keepdim=keepdim) return value_sum / value_count.clamp(min=eps)
[ "def", "masked_mean", "(", "vector", ":", "torch", ".", "Tensor", ",", "mask", ":", "torch", ".", "Tensor", ",", "dim", ":", "int", ",", "keepdim", ":", "bool", "=", "False", ",", "eps", ":", "float", "=", "1e-8", ")", "->", "torch", ".", "Tensor", ":", "one_minus_mask", "=", "(", "1.0", "-", "mask", ")", ".", "byte", "(", ")", "replaced_vector", "=", "vector", ".", "masked_fill", "(", "one_minus_mask", ",", "0.0", ")", "value_sum", "=", "torch", ".", "sum", "(", "replaced_vector", ",", "dim", "=", "dim", ",", "keepdim", "=", "keepdim", ")", "value_count", "=", "torch", ".", "sum", "(", "mask", ".", "float", "(", ")", ",", "dim", "=", "dim", ",", "keepdim", "=", "keepdim", ")", "return", "value_sum", "/", "value_count", ".", "clamp", "(", "min", "=", "eps", ")" ]
Calculates the mean along a given dimension, ignoring masked values. Parameters ---------- vector : ``torch.Tensor`` The vector to calculate the mean over. mask : ``torch.Tensor`` The mask of the vector. It must be broadcastable with vector. dim : ``int`` The dimension to calculate the mean along. keepdim : ``bool`` Whether to keep the reduced dimension. eps : ``float`` A small value to avoid division by zero. Returns ------- A ``torch.Tensor`` containing the mean values.
[ "To", "calculate", "mean", "along", "certain", "dimensions", "on", "masked", "values" ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/nn/util.py#L337-L367
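A companion sketch for masked_mean with hypothetical inputs; the masked 100.0 contributes to neither the sum nor the count:

import torch
from allennlp.nn.util import masked_mean

vector = torch.tensor([[1.0, 3.0, 100.0],
                       [4.0, 6.0, 8.0]])
mask = torch.tensor([[1.0, 1.0, 0.0],
                     [1.0, 1.0, 1.0]])
# Masked values are zeroed and the divisor is the mask sum, so row 0
# averages only its two unmasked entries.
print(masked_mean(vector, mask, dim=-1))  # tensor([2., 6.])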
23,094
allenai/allennlp
allennlp/nn/util.py
masked_flip
def masked_flip(padded_sequence: torch.Tensor, sequence_lengths: List[int]) -> torch.Tensor: """ Flips a padded tensor along the time dimension without affecting masked entries. Parameters ---------- padded_sequence : ``torch.Tensor`` The tensor to flip along the time dimension. Assumed to be of dimensions (batch size, num timesteps, ...) sequence_lengths : ``List[int]`` A list containing the lengths of each unpadded sequence in the batch. Returns ------- A ``torch.Tensor`` of the same shape as padded_sequence. """ assert padded_sequence.size(0) == len(sequence_lengths), \ f'sequence_lengths length {len(sequence_lengths)} does not match batch size {padded_sequence.size(0)}' num_timesteps = padded_sequence.size(1) flipped_padded_sequence = torch.flip(padded_sequence, [1]) sequences = [flipped_padded_sequence[i, num_timesteps - length:] for i, length in enumerate(sequence_lengths)] return torch.nn.utils.rnn.pad_sequence(sequences, batch_first=True)
python
def masked_flip(padded_sequence: torch.Tensor, sequence_lengths: List[int]) -> torch.Tensor: """ Flips a padded tensor along the time dimension without affecting masked entries. Parameters ---------- padded_sequence : ``torch.Tensor`` The tensor to flip along the time dimension. Assumed to be of dimensions (batch size, num timesteps, ...) sequence_lengths : ``List[int]`` A list containing the lengths of each unpadded sequence in the batch. Returns ------- A ``torch.Tensor`` of the same shape as padded_sequence. """ assert padded_sequence.size(0) == len(sequence_lengths), \ f'sequence_lengths length {len(sequence_lengths)} does not match batch size {padded_sequence.size(0)}' num_timesteps = padded_sequence.size(1) flipped_padded_sequence = torch.flip(padded_sequence, [1]) sequences = [flipped_padded_sequence[i, num_timesteps - length:] for i, length in enumerate(sequence_lengths)] return torch.nn.utils.rnn.pad_sequence(sequences, batch_first=True)
[ "def", "masked_flip", "(", "padded_sequence", ":", "torch", ".", "Tensor", ",", "sequence_lengths", ":", "List", "[", "int", "]", ")", "->", "torch", ".", "Tensor", ":", "assert", "padded_sequence", ".", "size", "(", "0", ")", "==", "len", "(", "sequence_lengths", ")", ",", "f'sequence_lengths length ${len(sequence_lengths)} does not match batch size ${padded_sequence.size(0)}'", "num_timesteps", "=", "padded_sequence", ".", "size", "(", "1", ")", "flipped_padded_sequence", "=", "torch", ".", "flip", "(", "padded_sequence", ",", "[", "1", "]", ")", "sequences", "=", "[", "flipped_padded_sequence", "[", "i", ",", "num_timesteps", "-", "length", ":", "]", "for", "i", ",", "length", "in", "enumerate", "(", "sequence_lengths", ")", "]", "return", "torch", ".", "nn", ".", "utils", ".", "rnn", ".", "pad_sequence", "(", "sequences", ",", "batch_first", "=", "True", ")" ]
Flips a padded tensor along the time dimension without affecting masked entries. Parameters ---------- padded_sequence : ``torch.Tensor`` The tensor to flip along the time dimension. Assumed to be of dimensions (batch size, num timesteps, ...) sequence_lengths : ``List[int]`` A list containing the lengths of each unpadded sequence in the batch. Returns ------- A ``torch.Tensor`` of the same shape as padded_sequence.
[ "Flips", "a", "padded", "tensor", "along", "the", "time", "dimension", "without", "affecting", "masked", "entries", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/nn/util.py#L370-L392
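A small sketch of masked_flip on a made-up batch of two sequences with lengths 3 and 2, padded to three timesteps:

import torch
from allennlp.nn.util import masked_flip

padded = torch.tensor([[[1.0], [2.0], [3.0]],
                       [[4.0], [5.0], [0.0]]])  # second row has one pad step
flipped = masked_flip(padded, [3, 2])
# Each sequence is reversed independently and re-padded at the end:
# flipped[0] -> [[3.], [2.], [1.]], flipped[1] -> [[5.], [4.], [0.]]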
23,095
allenai/allennlp
allennlp/nn/util.py
get_text_field_mask
def get_text_field_mask(text_field_tensors: Dict[str, torch.Tensor], num_wrapping_dims: int = 0) -> torch.LongTensor: """ Takes the dictionary of tensors produced by a ``TextField`` and returns a mask with 0 where the tokens are padding, and 1 otherwise. We also handle ``TextFields`` wrapped by an arbitrary number of ``ListFields``, where the number of wrapping ``ListFields`` is given by ``num_wrapping_dims``. If ``num_wrapping_dims == 0``, the returned mask has shape ``(batch_size, num_tokens)``. If ``num_wrapping_dims > 0`` then the returned mask has ``num_wrapping_dims`` extra dimensions, so the shape will be ``(batch_size, ..., num_tokens)``. There could be several entries in the tensor dictionary with different shapes (e.g., one for word ids, one for character ids). In order to get a token mask, we use the tensor in the dictionary with the lowest number of dimensions. After subtracting ``num_wrapping_dims``, if this tensor has two dimensions we assume it has shape ``(batch_size, ..., num_tokens)``, and use it for the mask. If instead it has three dimensions, we assume it has shape ``(batch_size, ..., num_tokens, num_features)``, and sum over the last dimension to produce the mask. Most frequently this will be a character id tensor, but it could also be a featurized representation of each token, etc. If the input ``text_field_tensors`` contains the "mask" key, this is returned instead of inferring the mask. TODO(joelgrus): can we change this? NOTE: Our functions for generating masks create torch.LongTensors, because using torch.ByteTensors makes it easy to run into overflow errors when doing mask manipulation, such as summing to get the lengths of sequences - see below. >>> mask = torch.ones([260]).byte() >>> mask.sum() # equals 260. >>> var_mask = torch.autograd.Variable(mask) >>> var_mask.sum() # equals 4, due to 8 bit precision - the sum overflows. """ if "mask" in text_field_tensors: return text_field_tensors["mask"] tensor_dims = [(tensor.dim(), tensor) for tensor in text_field_tensors.values()] tensor_dims.sort(key=lambda x: x[0]) smallest_dim = tensor_dims[0][0] - num_wrapping_dims if smallest_dim == 2: token_tensor = tensor_dims[0][1] return (token_tensor != 0).long() elif smallest_dim == 3: character_tensor = tensor_dims[0][1] return ((character_tensor > 0).long().sum(dim=-1) > 0).long() else: raise ValueError("Expected a tensor with dimension 2 or 3, found {}".format(smallest_dim))
python
def get_text_field_mask(text_field_tensors: Dict[str, torch.Tensor], num_wrapping_dims: int = 0) -> torch.LongTensor: """ Takes the dictionary of tensors produced by a ``TextField`` and returns a mask with 0 where the tokens are padding, and 1 otherwise. We also handle ``TextFields`` wrapped by an arbitrary number of ``ListFields``, where the number of wrapping ``ListFields`` is given by ``num_wrapping_dims``. If ``num_wrapping_dims == 0``, the returned mask has shape ``(batch_size, num_tokens)``. If ``num_wrapping_dims > 0`` then the returned mask has ``num_wrapping_dims`` extra dimensions, so the shape will be ``(batch_size, ..., num_tokens)``. There could be several entries in the tensor dictionary with different shapes (e.g., one for word ids, one for character ids). In order to get a token mask, we use the tensor in the dictionary with the lowest number of dimensions. After subtracting ``num_wrapping_dims``, if this tensor has two dimensions we assume it has shape ``(batch_size, ..., num_tokens)``, and use it for the mask. If instead it has three dimensions, we assume it has shape ``(batch_size, ..., num_tokens, num_features)``, and sum over the last dimension to produce the mask. Most frequently this will be a character id tensor, but it could also be a featurized representation of each token, etc. If the input ``text_field_tensors`` contains the "mask" key, this is returned instead of inferring the mask. TODO(joelgrus): can we change this? NOTE: Our functions for generating masks create torch.LongTensors, because using torch.ByteTensors makes it easy to run into overflow errors when doing mask manipulation, such as summing to get the lengths of sequences - see below. >>> mask = torch.ones([260]).byte() >>> mask.sum() # equals 260. >>> var_mask = torch.autograd.Variable(mask) >>> var_mask.sum() # equals 4, due to 8 bit precision - the sum overflows. """ if "mask" in text_field_tensors: return text_field_tensors["mask"] tensor_dims = [(tensor.dim(), tensor) for tensor in text_field_tensors.values()] tensor_dims.sort(key=lambda x: x[0]) smallest_dim = tensor_dims[0][0] - num_wrapping_dims if smallest_dim == 2: token_tensor = tensor_dims[0][1] return (token_tensor != 0).long() elif smallest_dim == 3: character_tensor = tensor_dims[0][1] return ((character_tensor > 0).long().sum(dim=-1) > 0).long() else: raise ValueError("Expected a tensor with dimension 2 or 3, found {}".format(smallest_dim))
[ "def", "get_text_field_mask", "(", "text_field_tensors", ":", "Dict", "[", "str", ",", "torch", ".", "Tensor", "]", ",", "num_wrapping_dims", ":", "int", "=", "0", ")", "->", "torch", ".", "LongTensor", ":", "if", "\"mask\"", "in", "text_field_tensors", ":", "return", "text_field_tensors", "[", "\"mask\"", "]", "tensor_dims", "=", "[", "(", "tensor", ".", "dim", "(", ")", ",", "tensor", ")", "for", "tensor", "in", "text_field_tensors", ".", "values", "(", ")", "]", "tensor_dims", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", "smallest_dim", "=", "tensor_dims", "[", "0", "]", "[", "0", "]", "-", "num_wrapping_dims", "if", "smallest_dim", "==", "2", ":", "token_tensor", "=", "tensor_dims", "[", "0", "]", "[", "1", "]", "return", "(", "token_tensor", "!=", "0", ")", ".", "long", "(", ")", "elif", "smallest_dim", "==", "3", ":", "character_tensor", "=", "tensor_dims", "[", "0", "]", "[", "1", "]", "return", "(", "(", "character_tensor", ">", "0", ")", ".", "long", "(", ")", ".", "sum", "(", "dim", "=", "-", "1", ")", ">", "0", ")", ".", "long", "(", ")", "else", ":", "raise", "ValueError", "(", "\"Expected a tensor with dimension 2 or 3, found {}\"", ".", "format", "(", "smallest_dim", ")", ")" ]
Takes the dictionary of tensors produced by a ``TextField`` and returns a mask with 0 where the tokens are padding, and 1 otherwise. We also handle ``TextFields`` wrapped by an arbitrary number of ``ListFields``, where the number of wrapping ``ListFields`` is given by ``num_wrapping_dims``. If ``num_wrapping_dims == 0``, the returned mask has shape ``(batch_size, num_tokens)``. If ``num_wrapping_dims > 0`` then the returned mask has ``num_wrapping_dims`` extra dimensions, so the shape will be ``(batch_size, ..., num_tokens)``. There could be several entries in the tensor dictionary with different shapes (e.g., one for word ids, one for character ids). In order to get a token mask, we use the tensor in the dictionary with the lowest number of dimensions. After subtracting ``num_wrapping_dims``, if this tensor has two dimensions we assume it has shape ``(batch_size, ..., num_tokens)``, and use it for the mask. If instead it has three dimensions, we assume it has shape ``(batch_size, ..., num_tokens, num_features)``, and sum over the last dimension to produce the mask. Most frequently this will be a character id tensor, but it could also be a featurized representation of each token, etc. If the input ``text_field_tensors`` contains the "mask" key, this is returned instead of inferring the mask. TODO(joelgrus): can we change this? NOTE: Our functions for generating masks create torch.LongTensors, because using torch.ByteTensors makes it easy to run into overflow errors when doing mask manipulation, such as summing to get the lengths of sequences - see below. >>> mask = torch.ones([260]).byte() >>> mask.sum() # equals 260. >>> var_mask = torch.autograd.Variable(mask) >>> var_mask.sum() # equals 4, due to 8 bit precision - the sum overflows.
[ "Takes", "the", "dictionary", "of", "tensors", "produced", "by", "a", "TextField", "and", "returns", "a", "mask", "with", "0", "where", "the", "tokens", "are", "padding", "and", "1", "otherwise", ".", "We", "also", "handle", "TextFields", "wrapped", "by", "an", "arbitrary", "number", "of", "ListFields", "where", "the", "number", "of", "wrapping", "ListFields", "is", "given", "by", "num_wrapping_dims", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/nn/util.py#L481-L527
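A sketch of get_text_field_mask on a hand-built dictionary standing in for real TextField output; the key name "tokens" is illustrative:

import torch
from allennlp.nn.util import get_text_field_mask

text_field_tensors = {
    "tokens": torch.tensor([[2, 5, 0],
                            [7, 0, 0]]),  # 0 marks padded token ids
}
# The lowest-dimensional tensor (here 2D) is chosen, and nonzero ids become 1s.
print(get_text_field_mask(text_field_tensors))
# tensor([[1, 1, 0],
#         [1, 0, 0]])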
23,096
allenai/allennlp
allennlp/nn/util.py
_rindex
def _rindex(sequence: Sequence[T], obj: T) -> int: """ Return zero-based index in the sequence of the last item whose value is equal to obj. Raises a ValueError if there is no such item. Parameters ---------- sequence : ``Sequence[T]`` obj : ``T`` Returns ------- zero-based index associated with the position of the last item equal to obj """ for i in range(len(sequence) - 1, -1, -1): if sequence[i] == obj: return i raise ValueError(f"Unable to find {obj} in sequence {sequence}.")
python
def _rindex(sequence: Sequence[T], obj: T) -> int: """ Return zero-based index in the sequence of the last item whose value is equal to obj. Raises a ValueError if there is no such item. Parameters ---------- sequence : ``Sequence[T]`` obj : ``T`` Returns ------- zero-based index associated with the position of the last item equal to obj """ for i in range(len(sequence) - 1, -1, -1): if sequence[i] == obj: return i raise ValueError(f"Unable to find {obj} in sequence {sequence}.")
[ "def", "_rindex", "(", "sequence", ":", "Sequence", "[", "T", "]", ",", "obj", ":", "T", ")", "->", "int", ":", "for", "i", "in", "range", "(", "len", "(", "sequence", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "if", "sequence", "[", "i", "]", "==", "obj", ":", "return", "i", "raise", "ValueError", "(", "f\"Unable to find {obj} in sequence {sequence}.\"", ")" ]
Return zero-based index in the sequence of the last item whose value is equal to obj. Raises a ValueError if there is no such item. Parameters ---------- sequence : ``Sequence[T]`` obj : ``T`` Returns ------- zero-based index associated with the position of the last item equal to obj
[ "Return", "zero", "-", "based", "index", "in", "the", "sequence", "of", "the", "last", "item", "whose", "value", "is", "equal", "to", "obj", ".", "Raises", "a", "ValueError", "if", "there", "is", "no", "such", "item", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/nn/util.py#L749-L767
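_rindex is a private helper, but its behavior is easy to exercise directly; a quick hypothetical check:

from allennlp.nn.util import _rindex

print(_rindex([1, 2, 3, 2, 1], 2))  # 3 -- index of the *last* occurrence
# _rindex([1, 2, 3], 9) raises ValueError, since 9 never appears.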
23,097
allenai/allennlp
allennlp/nn/util.py
get_range_vector
def get_range_vector(size: int, device: int) -> torch.Tensor: """ Returns a range vector with the desired size, starting at 0. The CUDA implementation is meant to avoid copying data from CPU to GPU. """ if device > -1: return torch.cuda.LongTensor(size, device=device).fill_(1).cumsum(0) - 1 else: return torch.arange(0, size, dtype=torch.long)
python
def get_range_vector(size: int, device: int) -> torch.Tensor: """ Returns a range vector with the desired size, starting at 0. The CUDA implementation is meant to avoid copying data from CPU to GPU. """ if device > -1: return torch.cuda.LongTensor(size, device=device).fill_(1).cumsum(0) - 1 else: return torch.arange(0, size, dtype=torch.long)
[ "def", "get_range_vector", "(", "size", ":", "int", ",", "device", ":", "int", ")", "->", "torch", ".", "Tensor", ":", "if", "device", ">", "-", "1", ":", "return", "torch", ".", "cuda", ".", "LongTensor", "(", "size", ",", "device", "=", "device", ")", ".", "fill_", "(", "1", ")", ".", "cumsum", "(", "0", ")", "-", "1", "else", ":", "return", "torch", ".", "arange", "(", "0", ",", "size", ",", "dtype", "=", "torch", ".", "long", ")" ]
Returns a range vector with the desired size, starting at 0. The CUDA implementation is meant to avoid copying data from CPU to GPU.
[ "Returns", "a", "range", "vector", "with", "the", "desired", "size", "starting", "at", "0", ".", "The", "CUDA", "implementation", "is", "meant", "to", "avoid", "copy", "data", "from", "CPU", "to", "GPU", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/nn/util.py#L1084-L1092
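A quick sketch of get_range_vector; device=-1 takes the CPU branch, while a non-negative device id takes the CUDA branch:

import torch
from allennlp.nn.util import get_range_vector

print(get_range_vector(5, device=-1))  # tensor([0, 1, 2, 3, 4])
# On a machine with a GPU, get_range_vector(5, device=0) builds the same
# values on-device via a cumsum of ones minus one, avoiding a CPU-to-GPU copy.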
23,098
allenai/allennlp
allennlp/nn/util.py
clone
def clone(module: torch.nn.Module, num_copies: int) -> torch.nn.ModuleList: """Produce N identical layers.""" return torch.nn.ModuleList([copy.deepcopy(module) for _ in range(num_copies)])
python
def clone(module: torch.nn.Module, num_copies: int) -> torch.nn.ModuleList: """Produce N identical layers.""" return torch.nn.ModuleList([copy.deepcopy(module) for _ in range(num_copies)])
[ "def", "clone", "(", "module", ":", "torch", ".", "nn", ".", "Module", ",", "num_copies", ":", "int", ")", "->", "torch", ".", "nn", ".", "ModuleList", ":", "return", "torch", ".", "nn", ".", "ModuleList", "(", "[", "copy", ".", "deepcopy", "(", "module", ")", "for", "_", "in", "range", "(", "num_copies", ")", "]", ")" ]
Produce N identical layers.
[ "Produce", "N", "identical", "layers", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/nn/util.py#L1289-L1291
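A usage sketch for clone; the deep copies start identical but are independent modules:

import torch
from allennlp.nn.util import clone

layers = clone(torch.nn.Linear(4, 4), num_copies=3)
assert layers[0] is not layers[1]                       # distinct objects
assert torch.equal(layers[0].weight, layers[1].weight)  # identical initial weights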
23,099
allenai/allennlp
allennlp/semparse/contexts/table_question_context.py
TableQuestionContext._string_in_table
def _string_in_table(self, candidate: str) -> List[str]: """ Checks if the string occurs in the table, and if it does, returns the names of the columns under which it occurs. If it does not, returns an empty list. """ candidate_column_names: List[str] = [] # First check if the entire candidate occurs as a cell. if candidate in self._string_column_mapping: candidate_column_names = self._string_column_mapping[candidate] # If not, check if it is a substring of any cell value. if not candidate_column_names: for cell_value, column_names in self._string_column_mapping.items(): if candidate in cell_value: candidate_column_names.extend(column_names) candidate_column_names = list(set(candidate_column_names)) return candidate_column_names
python
def _string_in_table(self, candidate: str) -> List[str]: """ Checks if the string occurs in the table, and if it does, returns the names of the columns under which it occurs. If it does not, returns an empty list. """ candidate_column_names: List[str] = [] # First check if the entire candidate occurs as a cell. if candidate in self._string_column_mapping: candidate_column_names = self._string_column_mapping[candidate] # If not, check if it is a substring of any cell value. if not candidate_column_names: for cell_value, column_names in self._string_column_mapping.items(): if candidate in cell_value: candidate_column_names.extend(column_names) candidate_column_names = list(set(candidate_column_names)) return candidate_column_names
[ "def", "_string_in_table", "(", "self", ",", "candidate", ":", "str", ")", "->", "List", "[", "str", "]", ":", "candidate_column_names", ":", "List", "[", "str", "]", "=", "[", "]", "# First check if the entire candidate occurs as a cell.", "if", "candidate", "in", "self", ".", "_string_column_mapping", ":", "candidate_column_names", "=", "self", ".", "_string_column_mapping", "[", "candidate", "]", "# If not, check if it is a substring pf any cell value.", "if", "not", "candidate_column_names", ":", "for", "cell_value", ",", "column_names", "in", "self", ".", "_string_column_mapping", ".", "items", "(", ")", ":", "if", "candidate", "in", "cell_value", ":", "candidate_column_names", ".", "extend", "(", "column_names", ")", "candidate_column_names", "=", "list", "(", "set", "(", "candidate_column_names", ")", ")", "return", "candidate_column_names" ]
Checks if the string occurs in the table, and if it does, returns the names of the columns under which it occurs. If it does not, returns an empty list.
[ "Checks", "if", "the", "string", "occurs", "in", "the", "table", "and", "if", "it", "does", "returns", "the", "names", "of", "the", "columns", "under", "which", "it", "occurs", ".", "If", "it", "does", "not", "returns", "an", "empty", "list", "." ]
648a36f77db7e45784c047176074f98534c76636
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/semparse/contexts/table_question_context.py#L342-L357
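_string_in_table only reads self._string_column_mapping, so its lookup logic can be sketched against a stand-in dictionary (the cell values and column names below are hypothetical):

# Hypothetical cell-value-to-columns mapping, as _string_column_mapping would hold.
string_column_mapping = {"new_york": ["city_column"],
                         "york_county": ["county_column"]}

candidate = "york"
# An exact cell match takes priority; otherwise fall back to substring matches.
columns = string_column_mapping.get(candidate, [])
if not columns:
    columns = list({name
                    for cell, names in string_column_mapping.items()
                    if candidate in cell
                    for name in names})
print(sorted(columns))  # ['city_column', 'county_column'] via the substring fallback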