partition
stringclasses
3 values
func_name
stringlengths
1
134
docstring
stringlengths
1
46.9k
path
stringlengths
4
223
original_string
stringlengths
75
104k
code
stringlengths
75
104k
docstring_tokens
listlengths
1
1.97k
repo
stringlengths
7
55
language
stringclasses
1 value
url
stringlengths
87
315
code_tokens
listlengths
19
28.4k
sha
stringlengths
40
40
train
HParams.set_hparam
Set the value of an existing hyperparameter. This function verifies that the type of the value matches the type of the existing hyperparameter. Args: name: Name of the hyperparameter. value: New value of the hyperparameter. Raises: KeyError: If the hyperparameter doesn't exist. ValueError: If there is a type mismatch.
tensor2tensor/utils/hparam.py
def set_hparam(self, name, value):
  """Set the value of an existing hyperparameter.

  This function verifies that the type of the value matches the type of the
  existing hyperparameter.

  Args:
    name: Name of the hyperparameter.
    value: New value of the hyperparameter.

  Raises:
    KeyError: If the hyperparameter doesn't exist.
    ValueError: If there is a type mismatch.
  """
  param_type, is_list = self._hparam_types[name]
  got_list = isinstance(value, list)
  # Reject list/scalar mismatches up front before any casting happens.
  if got_list and not is_list:
    raise ValueError(
        'Must not pass a list for single-valued parameter: %s' % name)
  if is_list and not got_list:
    raise ValueError(
        'Must pass a list for multi-valued parameter: %s.' % name)
  if got_list:
    casted = [_cast_to_type_if_compatible(name, param_type, v) for v in value]
    setattr(self, name, casted)
  else:
    setattr(self, name, _cast_to_type_if_compatible(name, param_type, value))
def set_hparam(self, name, value): """Set the value of an existing hyperparameter. This function verifies that the type of the value matches the type of the existing hyperparameter. Args: name: Name of the hyperparameter. value: New value of the hyperparameter. Raises: KeyError: If the hyperparameter doesn't exist. ValueError: If there is a type mismatch. """ param_type, is_list = self._hparam_types[name] if isinstance(value, list): if not is_list: raise ValueError( 'Must not pass a list for single-valued parameter: %s' % name) setattr(self, name, [ _cast_to_type_if_compatible(name, param_type, v) for v in value]) else: if is_list: raise ValueError( 'Must pass a list for multi-valued parameter: %s.' % name) setattr(self, name, _cast_to_type_if_compatible(name, param_type, value))
[ "Set", "the", "value", "of", "an", "existing", "hyperparameter", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L443-L468
[ "def", "set_hparam", "(", "self", ",", "name", ",", "value", ")", ":", "param_type", ",", "is_list", "=", "self", ".", "_hparam_types", "[", "name", "]", "if", "isinstance", "(", "value", ",", "list", ")", ":", "if", "not", "is_list", ":", "raise", "...
272500b6efe353aeb638d2745ed56e519462ca31
train
HParams.del_hparam
Removes the hyperparameter with key 'name'. Does nothing if it isn't present. Args: name: Name of the hyperparameter.
tensor2tensor/utils/hparam.py
def del_hparam(self, name):
  """Removes the hyperparameter with key 'name'.

  Does nothing if it isn't present.

  Args:
    name: Name of the hyperparameter.
  """
  # Nothing to do when the attribute was never set.
  if not hasattr(self, name):
    return
  delattr(self, name)
  del self._hparam_types[name]
def del_hparam(self, name): """Removes the hyperparameter with key 'name'. Does nothing if it isn't present. Args: name: Name of the hyperparameter. """ if hasattr(self, name): delattr(self, name) del self._hparam_types[name]
[ "Removes", "the", "hyperparameter", "with", "key", "name", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L470-L480
[ "def", "del_hparam", "(", "self", ",", "name", ")", ":", "if", "hasattr", "(", "self", ",", "name", ")", ":", "delattr", "(", "self", ",", "name", ")", "del", "self", ".", "_hparam_types", "[", "name", "]" ]
272500b6efe353aeb638d2745ed56e519462ca31
train
HParams.parse
Override existing hyperparameter values, parsing new values from a string. See parse_values for more detail on the allowed format for values. Args: values: String. Comma separated list of `name=value` pairs where 'value' must follow the syntax described above. Returns: The `HParams` instance. Raises: ValueError: If `values` cannot be parsed or a hyperparameter in `values` doesn't exist.
tensor2tensor/utils/hparam.py
def parse(self, values):
  """Override existing hyperparameter values, parsing new values from a string.

  See parse_values for more detail on the allowed format for values.

  Args:
    values: String. Comma separated list of `name=value` pairs where 'value'
      must follow the syntax described above.

  Returns:
    The `HParams` instance.

  Raises:
    ValueError: If `values` cannot be parsed or a hyperparameter in `values`
    doesn't exist.
  """
  # parse_values only needs the name -> type mapping, not the is_list flag.
  type_map = {
      name: param_type
      for name, (param_type, _) in self._hparam_types.items()
  }
  return self.override_from_dict(parse_values(values, type_map))
def parse(self, values): """Override existing hyperparameter values, parsing new values from a string. See parse_values for more detail on the allowed format for values. Args: values: String. Comma separated list of `name=value` pairs where 'value' must follow the syntax described above. Returns: The `HParams` instance. Raises: ValueError: If `values` cannot be parsed or a hyperparameter in `values` doesn't exist. """ type_map = {} for name, t in self._hparam_types.items(): param_type, _ = t type_map[name] = param_type values_map = parse_values(values, type_map) return self.override_from_dict(values_map)
[ "Override", "existing", "hyperparameter", "values", "parsing", "new", "values", "from", "a", "string", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L482-L504
[ "def", "parse", "(", "self", ",", "values", ")", ":", "type_map", "=", "{", "}", "for", "name", ",", "t", "in", "self", ".", "_hparam_types", ".", "items", "(", ")", ":", "param_type", ",", "_", "=", "t", "type_map", "[", "name", "]", "=", "param...
272500b6efe353aeb638d2745ed56e519462ca31
train
HParams.override_from_dict
Override existing hyperparameter values, parsing new values from a dictionary. Args: values_dict: Dictionary of name:value pairs. Returns: The `HParams` instance. Raises: KeyError: If a hyperparameter in `values_dict` doesn't exist. ValueError: If `values_dict` cannot be parsed.
tensor2tensor/utils/hparam.py
def override_from_dict(self, values_dict):
  """Override existing hyperparameter values, parsing from a dictionary.

  Args:
    values_dict: Dictionary of name:value pairs.

  Returns:
    The `HParams` instance.

  Raises:
    KeyError: If a hyperparameter in `values_dict` doesn't exist.
    ValueError: If `values_dict` cannot be parsed.
  """
  # Delegate per-entry validation and casting to set_hparam.
  for name in values_dict:
    self.set_hparam(name, values_dict[name])
  return self
def override_from_dict(self, values_dict): """Override existing hyperparameter values, parsing new values from a dictionary. Args: values_dict: Dictionary of name:value pairs. Returns: The `HParams` instance. Raises: KeyError: If a hyperparameter in `values_dict` doesn't exist. ValueError: If `values_dict` cannot be parsed. """ for name, value in values_dict.items(): self.set_hparam(name, value) return self
[ "Override", "existing", "hyperparameter", "values", "parsing", "new", "values", "from", "a", "dictionary", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L506-L521
[ "def", "override_from_dict", "(", "self", ",", "values_dict", ")", ":", "for", "name", ",", "value", "in", "values_dict", ".", "items", "(", ")", ":", "self", ".", "set_hparam", "(", "name", ",", "value", ")", "return", "self" ]
272500b6efe353aeb638d2745ed56e519462ca31
train
HParams.to_json
Serializes the hyperparameters into JSON. Args: indent: If a non-negative integer, JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0, or negative, will only insert newlines. `None` (the default) selects the most compact representation. separators: Optional `(item_separator, key_separator)` tuple. Default is `(', ', ': ')`. sort_keys: If `True`, the output dictionaries will be sorted by key. Returns: A JSON string.
tensor2tensor/utils/hparam.py
def to_json(self, indent=None, separators=None, sort_keys=False):
  """Serializes the hyperparameters into JSON.

  Args:
    indent: If a non-negative integer, JSON array elements and object members
      will be pretty-printed with that indent level. An indent level of 0, or
      negative, will only insert newlines. `None` (the default) selects the
      most compact representation.
    separators: Optional `(item_separator, key_separator)` tuple. Default is
      `(', ', ': ')`.
    sort_keys: If `True`, the output dictionaries will be sorted by key.

  Returns:
    A JSON string.
  """

  def remove_callables(x):
    """Omit callable elements from input with arbitrary nesting."""
    if isinstance(x, dict):
      # dict.items() replaces the former six.iteritems: the comprehension
      # materializes a new dict either way, so no behavior change.
      return {k: remove_callables(v)
              for k, v in x.items() if not callable(v)}
    if isinstance(x, list):
      return [remove_callables(i) for i in x if not callable(i)]
    return x

  return json.dumps(
      remove_callables(self.values()),
      indent=indent,
      separators=separators,
      sort_keys=sort_keys)
def to_json(self, indent=None, separators=None, sort_keys=False): """Serializes the hyperparameters into JSON. Args: indent: If a non-negative integer, JSON array elements and object members will be pretty-printed with that indent level. An indent level of 0, or negative, will only insert newlines. `None` (the default) selects the most compact representation. separators: Optional `(item_separator, key_separator)` tuple. Default is `(', ', ': ')`. sort_keys: If `True`, the output dictionaries will be sorted by key. Returns: A JSON string. """ def remove_callables(x): """Omit callable elements from input with arbitrary nesting.""" if isinstance(x, dict): return {k: remove_callables(v) for k, v in six.iteritems(x) if not callable(v)} elif isinstance(x, list): return [remove_callables(i) for i in x if not callable(i)] return x return json.dumps( remove_callables(self.values()), indent=indent, separators=separators, sort_keys=sort_keys)
[ "Serializes", "the", "hyperparameters", "into", "JSON", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L529-L556
[ "def", "to_json", "(", "self", ",", "indent", "=", "None", ",", "separators", "=", "None", ",", "sort_keys", "=", "False", ")", ":", "def", "remove_callables", "(", "x", ")", ":", "\"\"\"Omit callable elements from input with arbitrary nesting.\"\"\"", "if", "isin...
272500b6efe353aeb638d2745ed56e519462ca31
train
HParams.parse_json
Override existing hyperparameter values, parsing new values from a json object. Args: values_json: String containing a json object of name:value pairs. Returns: The `HParams` instance. Raises: KeyError: If a hyperparameter in `values_json` doesn't exist. ValueError: If `values_json` cannot be parsed.
tensor2tensor/utils/hparam.py
def parse_json(self, values_json):
  """Override existing hyperparameter values, parsing from a json object.

  Args:
    values_json: String containing a json object of name:value pairs.

  Returns:
    The `HParams` instance.

  Raises:
    KeyError: If a hyperparameter in `values_json` doesn't exist.
    ValueError: If `values_json` cannot be parsed.
  """
  # Decode the JSON payload and apply it through the dict override path.
  return self.override_from_dict(json.loads(values_json))
def parse_json(self, values_json): """Override existing hyperparameter values, parsing new values from a json object. Args: values_json: String containing a json object of name:value pairs. Returns: The `HParams` instance. Raises: KeyError: If a hyperparameter in `values_json` doesn't exist. ValueError: If `values_json` cannot be parsed. """ values_map = json.loads(values_json) return self.override_from_dict(values_map)
[ "Override", "existing", "hyperparameter", "values", "parsing", "new", "values", "from", "a", "json", "object", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L558-L572
[ "def", "parse_json", "(", "self", ",", "values_json", ")", ":", "values_map", "=", "json", ".", "loads", "(", "values_json", ")", "return", "self", ".", "override_from_dict", "(", "values_map", ")" ]
272500b6efe353aeb638d2745ed56e519462ca31
train
HParams.values
Return the hyperparameter values as a Python dictionary. Returns: A dictionary with hyperparameter names as keys. The values are the hyperparameter values.
tensor2tensor/utils/hparam.py
def values(self):
  """Return the hyperparameter values as a Python dictionary.

  Returns:
    A dictionary with hyperparameter names as keys. The values are the
    hyperparameter values.
  """
  result = {}
  # Every registered hyperparameter is mirrored as an instance attribute.
  for name in self._hparam_types:
    result[name] = getattr(self, name)
  return result
def values(self): """Return the hyperparameter values as a Python dictionary. Returns: A dictionary with hyperparameter names as keys. The values are the hyperparameter values. """ return {n: getattr(self, n) for n in self._hparam_types.keys()}
[ "Return", "the", "hyperparameter", "values", "as", "a", "Python", "dictionary", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L574-L581
[ "def", "values", "(", "self", ")", ":", "return", "{", "n", ":", "getattr", "(", "self", ",", "n", ")", "for", "n", "in", "self", ".", "_hparam_types", ".", "keys", "(", ")", "}" ]
272500b6efe353aeb638d2745ed56e519462ca31
train
HParams.get
Returns the value of `key` if it exists, else `default`.
tensor2tensor/utils/hparam.py
def get(self, key, default=None):
  """Returns the value of `key` if it exists, else `default`."""
  if key not in self._hparam_types:
    return default
  if default is not None:
    # Ensure that default is compatible with the parameter type.
    param_type, is_param_list = self._hparam_types[key]
    if is_param_list:
      type_str = 'list<%s>' % param_type
    else:
      type_str = str(param_type)
    fail_msg = ("Hparam '%s' of type '%s' is incompatible with "
                "default=%s" % (key, type_str, default))
    if isinstance(default, list) != is_param_list:
      raise ValueError(fail_msg)
    # Normalize to a list so scalar and list defaults share one cast loop.
    defaults = default if isinstance(default, list) else [default]
    try:
      for value in defaults:
        _cast_to_type_if_compatible(key, param_type, value)
    except ValueError as e:
      raise ValueError('%s. %s' % (fail_msg, e))
  return getattr(self, key)
def get(self, key, default=None): """Returns the value of `key` if it exists, else `default`.""" if key in self._hparam_types: # Ensure that default is compatible with the parameter type. if default is not None: param_type, is_param_list = self._hparam_types[key] type_str = 'list<%s>' % param_type if is_param_list else str(param_type) fail_msg = ("Hparam '%s' of type '%s' is incompatible with " 'default=%s' % (key, type_str, default)) is_default_list = isinstance(default, list) if is_param_list != is_default_list: raise ValueError(fail_msg) try: if is_default_list: for value in default: _cast_to_type_if_compatible(key, param_type, value) else: _cast_to_type_if_compatible(key, param_type, default) except ValueError as e: raise ValueError('%s. %s' % (fail_msg, e)) return getattr(self, key) return default
[ "Returns", "the", "value", "of", "key", "if", "it", "exists", "else", "default", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L583-L608
[ "def", "get", "(", "self", ",", "key", ",", "default", "=", "None", ")", ":", "if", "key", "in", "self", ".", "_hparam_types", ":", "# Ensure that default is compatible with the parameter type.", "if", "default", "is", "not", "None", ":", "param_type", ",", "i...
272500b6efe353aeb638d2745ed56e519462ca31
train
HParams._get_kind_name
Returns the field name given parameter type and is_list. Args: param_type: Data type of the hparam. is_list: Whether this is a list. Returns: A string representation of the field name. Raises: ValueError: If parameter type is not recognized.
tensor2tensor/utils/hparam.py
def _get_kind_name(param_type, is_list): """Returns the field name given parameter type and is_list. Args: param_type: Data type of the hparam. is_list: Whether this is a list. Returns: A string representation of the field name. Raises: ValueError: If parameter type is not recognized. """ if issubclass(param_type, bool): # This check must happen before issubclass(param_type, six.integer_types), # since Python considers bool to be a subclass of int. typename = 'bool' elif issubclass(param_type, six.integer_types): # Setting 'int' and 'long' types to be 'int64' to ensure the type is # compatible with both Python2 and Python3. typename = 'int64' elif issubclass(param_type, (six.string_types, six.binary_type)): # Setting 'string' and 'bytes' types to be 'bytes' to ensure the type is # compatible with both Python2 and Python3. typename = 'bytes' elif issubclass(param_type, float): typename = 'float' else: raise ValueError('Unsupported parameter type: %s' % str(param_type)) suffix = 'list' if is_list else 'value' return '_'.join([typename, suffix])
def _get_kind_name(param_type, is_list): """Returns the field name given parameter type and is_list. Args: param_type: Data type of the hparam. is_list: Whether this is a list. Returns: A string representation of the field name. Raises: ValueError: If parameter type is not recognized. """ if issubclass(param_type, bool): # This check must happen before issubclass(param_type, six.integer_types), # since Python considers bool to be a subclass of int. typename = 'bool' elif issubclass(param_type, six.integer_types): # Setting 'int' and 'long' types to be 'int64' to ensure the type is # compatible with both Python2 and Python3. typename = 'int64' elif issubclass(param_type, (six.string_types, six.binary_type)): # Setting 'string' and 'bytes' types to be 'bytes' to ensure the type is # compatible with both Python2 and Python3. typename = 'bytes' elif issubclass(param_type, float): typename = 'float' else: raise ValueError('Unsupported parameter type: %s' % str(param_type)) suffix = 'list' if is_list else 'value' return '_'.join([typename, suffix])
[ "Returns", "the", "field", "name", "given", "parameter", "type", "and", "is_list", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L620-L651
[ "def", "_get_kind_name", "(", "param_type", ",", "is_list", ")", ":", "if", "issubclass", "(", "param_type", ",", "bool", ")", ":", "# This check must happen before issubclass(param_type, six.integer_types),", "# since Python considers bool to be a subclass of int.", "typename", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
TransformerModel.process
Returns the visualizations for query. Args: query: The query to process. Returns: A dictionary of results with processing and graph visualizations.
tensor2tensor/insights/transformer_model.py
def process(self, query):
  """Returns the visualizations for query.

  Runs the query through the estimator with a TFDBG dumping hook attached,
  then rebuilds the beam-search graph from the dumped debug tensors.

  Args:
    query: The query to process.

  Returns:
    A dictionary of results with processing and graph visualizations.
  """
  tf.logging.info("Processing new query [%s]" %query)

  # Create the new TFDBG hook directory.
  hook_dir = "/tmp/t2t_server_dump/request_%d" %int(time.time())
  os.makedirs(hook_dir)
  hooks = [tfdbg.DumpingDebugHook(hook_dir, watch_fn=topk_watch_fn)]

  # TODO(kstevens): This is extremely hacky and slow for responding to
  # queries.  Figure out a reasonable way to pre-load the model weights before
  # forking and run queries through the estimator quickly.
  def server_input_fn():
    """Generator that returns just the current query."""
    for _ in range(1):
      input_ids = self.source_vocab.encode(query)
      input_ids.append(text_encoder.EOS_ID)
      # NOTE(review): x is a [1, 100, len] header followed by the ids,
      # zero-padded to self.const_array_size -- presumably the fixed input
      # layout the serving graph expects; confirm against the input pipeline.
      x = [1, 100, len(input_ids)] + input_ids
      x += [0] * (self.const_array_size - len(x))
      d = {
          "inputs": np.array(x).astype(np.int32),
      }
      yield d

  def input_fn():
    """Generator that returns just the current query."""
    gen_fn = decoding.make_input_fn_from_generator(server_input_fn())
    example = gen_fn()
    # TODO(kstevens): Make this method public
    # pylint: disable=protected-access
    return decoding._interactive_input_tensor_to_features_dict(
        example, self.hparams)

  # Make the prediction for the current query.
  result_iter = self.estimator.predict(input_fn, hooks=hooks)
  result = None
  # Only the first prediction is needed, so stop the iterator immediately.
  for result in result_iter:
    break

  # Extract the beam search information by reading the dumped TFDBG event
  # tensors.  We first read and record the per step beam sequences then record
  # the beam scores.  Afterwards we align the two sets of values to create the
  # full graph vertices and edges.
  decoding_graph = graph.Graph()
  run_dirs = sorted(glob.glob(os.path.join(hook_dir, "run_*")))
  for run_dir in run_dirs:
    # Record the different completed and active beam sequence ids.
    alive_sequences = deque()
    finished_sequences = deque()

    # Make the root vertex since it always needs to exist.
    decoding_graph.get_vertex(sequence_key([0]))

    # Create the initial vertices and edges for the active and finished
    # sequences.  We uniquely define each vertex using it's full sequence path
    # as a string to ensure there's no collisions when the same step has two
    # instances of an output id.
    dump_dir = tfdbg.DebugDumpDir(run_dir, validate=False)
    seq_datums = dump_dir.find(predicate=seq_filter)
    for seq_datum in seq_datums:
      sequences = np.array(seq_datum.get_tensor()).astype(int)[0]
      if "alive" in seq_datum.node_name:
        alive_sequences.append(sequences)
      if "finished" in seq_datum.node_name:
        finished_sequences.append(sequences)

      for sequence in sequences:
        pieces = self.targets_vocab.decode_list(sequence)
        index = sequence[-1]
        # NOTE(review): id 0 looks like padding and is skipped here; id 1 is
        # treated as the completion marker below -- confirm against the
        # target vocabulary's special ids.
        if index == 0:
          continue

        parent = decoding_graph.get_vertex(sequence_key(sequence[:-1]))
        current = decoding_graph.get_vertex(sequence_key(sequence))

        edge = decoding_graph.add_edge(parent, current)
        edge.data["label"] = pieces[-1]
        edge.data["label_id"] = index
        # Coerce the type to be a python bool.  Numpy bools can't be easily
        # converted to JSON.
        edge.data["completed"] = bool(index == 1)

    # Examine the score results and store the scores with the associated edges
    # in the graph.  We fetch the vertices (and relevant edges) by looking
    # into the saved beam sequences stored above.
    score_datums = dump_dir.find(predicate=scores_filter)
    for score_datum in score_datums:
      # Scores arrive in the same order the sequences were recorded above,
      # so pair them up FIFO from the matching deque.
      if "alive" in score_datum.node_name:
        sequences = alive_sequences.popleft()

      if "finished" in score_datum.node_name:
        sequences = finished_sequences.popleft()

      scores = np.array(score_datum.get_tensor()).astype(float)[0]
      for i, score in enumerate(scores):
        sequence = sequences[i]
        if sequence[-1] == 0:
          continue

        # Each vertex has exactly one incoming edge per the construction
        # above; attach the score to that edge.
        vertex = decoding_graph.get_vertex(sequence_key(sequence))
        edge = decoding_graph.edges[vertex.in_edges[0]]
        edge.data["score"] = score
        edge.data["log_probability"] = score
        edge.data["total_log_probability"] = score

  # Delete the hook dir to save disk space
  shutil.rmtree(hook_dir)

  # Create the graph visualization data structure.
  graph_vis = {
      "visualization_name": "graph",
      "title": "Graph",
      "name": "graph",
      "search_graph": decoding_graph.to_dict(),
  }

  # Create the processing visualization data structure.
  # TODO(kstevens): Make this method public
  # pylint: disable=protected-access
  output_ids = decoding._save_until_eos(result["outputs"].flatten(), False)
  output_pieces = self.targets_vocab.decode_list(output_ids)
  output_token = [{"text": piece} for piece in output_pieces]
  output = self.targets_vocab.decode(output_ids)

  source_steps = [{
      "step_name": "Initial",
      "segment": [{
          "text": query
      }],
  }]

  target_steps = [{
      "step_name": "Initial",
      "segment": output_token,
  }, {
      "step_name": "Final",
      "segment": [{
          "text": output
      }],
  }]

  processing_vis = {
      "visualization_name": "processing",
      "title": "Processing",
      "name": "processing",
      "query_processing": {
          "source_processing": source_steps,
          "target_processing": target_steps,
      },
  }

  return {
      "result": [processing_vis, graph_vis],
  }
def process(self, query): """Returns the visualizations for query. Args: query: The query to process. Returns: A dictionary of results with processing and graph visualizations. """ tf.logging.info("Processing new query [%s]" %query) # Create the new TFDBG hook directory. hook_dir = "/tmp/t2t_server_dump/request_%d" %int(time.time()) os.makedirs(hook_dir) hooks = [tfdbg.DumpingDebugHook(hook_dir, watch_fn=topk_watch_fn)] # TODO(kstevens): This is extremely hacky and slow for responding to # queries. Figure out a reasonable way to pre-load the model weights before # forking and run queries through the estimator quickly. def server_input_fn(): """Generator that returns just the current query.""" for _ in range(1): input_ids = self.source_vocab.encode(query) input_ids.append(text_encoder.EOS_ID) x = [1, 100, len(input_ids)] + input_ids x += [0] * (self.const_array_size - len(x)) d = { "inputs": np.array(x).astype(np.int32), } yield d def input_fn(): """Generator that returns just the current query.""" gen_fn = decoding.make_input_fn_from_generator(server_input_fn()) example = gen_fn() # TODO(kstevens): Make this method public # pylint: disable=protected-access return decoding._interactive_input_tensor_to_features_dict( example, self.hparams) # Make the prediction for the current query. result_iter = self.estimator.predict(input_fn, hooks=hooks) result = None for result in result_iter: break # Extract the beam search information by reading the dumped TFDBG event # tensors. We first read and record the per step beam sequences then record # the beam scores. Afterwards we align the two sets of values to create the # full graph vertices and edges. decoding_graph = graph.Graph() run_dirs = sorted(glob.glob(os.path.join(hook_dir, "run_*"))) for run_dir in run_dirs: # Record the different completed and active beam sequence ids. alive_sequences = deque() finished_sequences = deque() # Make the root vertex since it always needs to exist. 
decoding_graph.get_vertex(sequence_key([0])) # Create the initial vertices and edges for the active and finished # sequences. We uniquely define each vertex using it's full sequence path # as a string to ensure there's no collisions when the same step has two # instances of an output id. dump_dir = tfdbg.DebugDumpDir(run_dir, validate=False) seq_datums = dump_dir.find(predicate=seq_filter) for seq_datum in seq_datums: sequences = np.array(seq_datum.get_tensor()).astype(int)[0] if "alive" in seq_datum.node_name: alive_sequences.append(sequences) if "finished" in seq_datum.node_name: finished_sequences.append(sequences) for sequence in sequences: pieces = self.targets_vocab.decode_list(sequence) index = sequence[-1] if index == 0: continue parent = decoding_graph.get_vertex(sequence_key(sequence[:-1])) current = decoding_graph.get_vertex(sequence_key(sequence)) edge = decoding_graph.add_edge(parent, current) edge.data["label"] = pieces[-1] edge.data["label_id"] = index # Coerce the type to be a python bool. Numpy bools can't be easily # converted to JSON. edge.data["completed"] = bool(index == 1) # Examine the score results and store the scores with the associated edges # in the graph. We fetch the vertices (and relevant edges) by looking # into the saved beam sequences stored above. 
score_datums = dump_dir.find(predicate=scores_filter) for score_datum in score_datums: if "alive" in score_datum.node_name: sequences = alive_sequences.popleft() if "finished" in score_datum.node_name: sequences = finished_sequences.popleft() scores = np.array(score_datum.get_tensor()).astype(float)[0] for i, score in enumerate(scores): sequence = sequences[i] if sequence[-1] == 0: continue vertex = decoding_graph.get_vertex(sequence_key(sequence)) edge = decoding_graph.edges[vertex.in_edges[0]] edge.data["score"] = score edge.data["log_probability"] = score edge.data["total_log_probability"] = score # Delete the hook dir to save disk space shutil.rmtree(hook_dir) # Create the graph visualization data structure. graph_vis = { "visualization_name": "graph", "title": "Graph", "name": "graph", "search_graph": decoding_graph.to_dict(), } # Create the processing visualization data structure. # TODO(kstevens): Make this method public # pylint: disable=protected-access output_ids = decoding._save_until_eos(result["outputs"].flatten(), False) output_pieces = self.targets_vocab.decode_list(output_ids) output_token = [{"text": piece} for piece in output_pieces] output = self.targets_vocab.decode(output_ids) source_steps = [{ "step_name": "Initial", "segment": [{ "text": query }], }] target_steps = [{ "step_name": "Initial", "segment": output_token, }, { "step_name": "Final", "segment": [{ "text": output }], }] processing_vis = { "visualization_name": "processing", "title": "Processing", "name": "processing", "query_processing": { "source_processing": source_steps, "target_processing": target_steps, }, } return { "result": [processing_vis, graph_vis], }
[ "Returns", "the", "visualizations", "for", "query", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/insights/transformer_model.py#L141-L301
[ "def", "process", "(", "self", ",", "query", ")", ":", "tf", ".", "logging", ".", "info", "(", "\"Processing new query [%s]\"", "%", "query", ")", "# Create the new TFDBG hook directory.", "hook_dir", "=", "\"/tmp/t2t_server_dump/request_%d\"", "%", "int", "(", "tim...
272500b6efe353aeb638d2745ed56e519462ca31
train
_default_output_dir
Default output directory.
tensor2tensor/trax/trainer.py
def _default_output_dir():
  """Default output directory."""
  try:
    dataset_name = gin.query_parameter("inputs.dataset_name")
  except ValueError:
    # No dataset bound in gin; label the run "random".
    dataset_name = "random"
  model_name = gin.query_parameter("train.model").configurable.name
  timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M")
  dir_name = "{model_name}_{dataset_name}_{timestamp}".format(
      model_name=model_name,
      dataset_name=dataset_name,
      timestamp=timestamp,
  )
  dir_path = os.path.join("~", "trax", dir_name)
  print()
  trax.log("No --output_dir specified")
  return dir_path
def _default_output_dir(): """Default output directory.""" try: dataset_name = gin.query_parameter("inputs.dataset_name") except ValueError: dataset_name = "random" dir_name = "{model_name}_{dataset_name}_{timestamp}".format( model_name=gin.query_parameter("train.model").configurable.name, dataset_name=dataset_name, timestamp=datetime.datetime.now().strftime("%Y%m%d_%H%M"), ) dir_path = os.path.join("~", "trax", dir_name) print() trax.log("No --output_dir specified") return dir_path
[ "Default", "output", "directory", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trainer.py#L48-L62
[ "def", "_default_output_dir", "(", ")", ":", "try", ":", "dataset_name", "=", "gin", ".", "query_parameter", "(", "\"inputs.dataset_name\"", ")", "except", "ValueError", ":", "dataset_name", "=", "\"random\"", "dir_name", "=", "\"{model_name}_{dataset_name}_{timestamp}\...
272500b6efe353aeb638d2745ed56e519462ca31
train
_setup_gin
Setup gin configuration.
tensor2tensor/trax/trainer.py
def _setup_gin():
  """Setup gin configuration."""
  # Imports for configurables
  # pylint: disable=g-import-not-at-top,unused-import,g-bad-import-order,reimported,unused-variable
  from tensor2tensor.trax import models as _trax_models
  from tensor2tensor.trax import optimizers as _trax_opt
  # pylint: enable=g-import-not-at-top,unused-import,g-bad-import-order,reimported,unused-variable

  configs = FLAGS.config or []
  # Command-line --dataset/--data_dir/--model flags are appended last so
  # they override any bindings from the config files.
  overrides = [
      (FLAGS.dataset, "inputs.dataset_name='%s'"),
      (FLAGS.data_dir, "inputs.data_dir='%s'"),
      (FLAGS.model, "train.model=@trax.models.%s"),
  ]
  for flag_value, template in overrides:
    if flag_value:
      configs.append(template % flag_value)
  gin.parse_config_files_and_bindings(FLAGS.config_file, configs)
def _setup_gin(): """Setup gin configuration.""" # Imports for configurables # pylint: disable=g-import-not-at-top,unused-import,g-bad-import-order,reimported,unused-variable from tensor2tensor.trax import models as _trax_models from tensor2tensor.trax import optimizers as _trax_opt # pylint: disable=g-import-not-at-top,unused-import,g-bad-import-order,reimported,unused-variable configs = FLAGS.config or [] # Override with --dataset and --model if FLAGS.dataset: configs.append("inputs.dataset_name='%s'" % FLAGS.dataset) if FLAGS.data_dir: configs.append("inputs.data_dir='%s'" % FLAGS.data_dir) if FLAGS.model: configs.append("train.model=@trax.models.%s" % FLAGS.model) gin.parse_config_files_and_bindings(FLAGS.config_file, configs)
[ "Setup", "gin", "configuration", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/trainer.py#L65-L81
[ "def", "_setup_gin", "(", ")", ":", "# Imports for configurables", "# pylint: disable=g-import-not-at-top,unused-import,g-bad-import-order,reimported,unused-variable", "from", "tensor2tensor", ".", "trax", "import", "models", "as", "_trax_models", "from", "tensor2tensor", ".", "t...
272500b6efe353aeb638d2745ed56e519462ca31
train
train_and_eval_dataset
Return train and evaluation datasets, feature info and supervised keys. Args: dataset_name: a string, the name of the dataset; if it starts with "v1_" then we'll search T2T Problem registry for it, otherwise we assume it is a dataset from TFDS and load it from there. data_dir: directory where the data is located. Returns: a 4-tuple consisting of: * the train tf.data.Dataset * the eval tf.data.Dataset * information about features: a python dictionary with feature names as keys and an object as value that provides .shape and .num_classes. * supervised_keys: information what's the input and what's the target, ie., a pair of lists with input and target feature names.
tensor2tensor/v2/t2t.py
def train_and_eval_dataset(dataset_name, data_dir): """Return train and evaluation datasets, feature info and supervised keys. Args: dataset_name: a string, the name of the dataset; if it starts with "v1_" then we'll search T2T Problem registry for it, otherwise we assume it is a dataset from TFDS and load it from there. data_dir: directory where the data is located. Returns: a 4-tuple consisting of: * the train tf.data.Dataset * the eval tf.data.Dataset * information about features: a python dictionary with feature names as keys and an object as value that provides .shape and .num_classes. * supervised_keys: information what's the input and what's the target, ie., a pair of lists with input and target feature names. """ if dataset_name.startswith("v1_"): return _train_and_eval_dataset_v1(dataset_name[3:], data_dir) dataset_builder = tfds.builder(dataset_name, data_dir=data_dir) info = dataset_builder.info splits = dataset_builder.info.splits if tfds.Split.TRAIN not in splits: raise ValueError("To train we require a train split in the dataset.") if tfds.Split.VALIDATION not in splits and "test" not in splits: raise ValueError("We require a validation or test split in the dataset.") eval_split = tfds.Split.VALIDATION if tfds.Split.VALIDATION not in splits: eval_split = tfds.Split.TEST train, valid = tfds.load( name=dataset_name, split=[tfds.Split.TRAIN, eval_split]) keys = None if info.supervised_keys: keys = ([info.supervised_keys[0]], [info.supervised_keys[1]]) return train, valid, info.features, keys
def train_and_eval_dataset(dataset_name, data_dir): """Return train and evaluation datasets, feature info and supervised keys. Args: dataset_name: a string, the name of the dataset; if it starts with "v1_" then we'll search T2T Problem registry for it, otherwise we assume it is a dataset from TFDS and load it from there. data_dir: directory where the data is located. Returns: a 4-tuple consisting of: * the train tf.data.Dataset * the eval tf.data.Dataset * information about features: a python dictionary with feature names as keys and an object as value that provides .shape and .num_classes. * supervised_keys: information what's the input and what's the target, ie., a pair of lists with input and target feature names. """ if dataset_name.startswith("v1_"): return _train_and_eval_dataset_v1(dataset_name[3:], data_dir) dataset_builder = tfds.builder(dataset_name, data_dir=data_dir) info = dataset_builder.info splits = dataset_builder.info.splits if tfds.Split.TRAIN not in splits: raise ValueError("To train we require a train split in the dataset.") if tfds.Split.VALIDATION not in splits and "test" not in splits: raise ValueError("We require a validation or test split in the dataset.") eval_split = tfds.Split.VALIDATION if tfds.Split.VALIDATION not in splits: eval_split = tfds.Split.TEST train, valid = tfds.load( name=dataset_name, split=[tfds.Split.TRAIN, eval_split]) keys = None if info.supervised_keys: keys = ([info.supervised_keys[0]], [info.supervised_keys[1]]) return train, valid, info.features, keys
[ "Return", "train", "and", "evaluation", "datasets", "feature", "info", "and", "supervised", "keys", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/v2/t2t.py#L48-L83
[ "def", "train_and_eval_dataset", "(", "dataset_name", ",", "data_dir", ")", ":", "if", "dataset_name", ".", "startswith", "(", "\"v1_\"", ")", ":", "return", "_train_and_eval_dataset_v1", "(", "dataset_name", "[", "3", ":", "]", ",", "data_dir", ")", "dataset_bu...
272500b6efe353aeb638d2745ed56e519462ca31
train
_make_info
Create an info-like tuple for feature given some shapes and vocab size.
tensor2tensor/v2/t2t.py
def _make_info(shape_list, num_classes): """Create an info-like tuple for feature given some shapes and vocab size.""" feature_info = collections.namedtuple("FeatureInfo", ["shape", "num_classes"]) cur_shape = list(shape_list[0]) # We need to merge the provided shapes, put None where they disagree. for shape in shape_list: if len(shape) != len(cur_shape): raise ValueError("Shapes need to have the same number of dimensions.") for i in range(len(shape)): if cur_shape[i] is not None: if shape[i] != cur_shape[i]: cur_shape[i] = None return feature_info(cur_shape, num_classes)
def _make_info(shape_list, num_classes): """Create an info-like tuple for feature given some shapes and vocab size.""" feature_info = collections.namedtuple("FeatureInfo", ["shape", "num_classes"]) cur_shape = list(shape_list[0]) # We need to merge the provided shapes, put None where they disagree. for shape in shape_list: if len(shape) != len(cur_shape): raise ValueError("Shapes need to have the same number of dimensions.") for i in range(len(shape)): if cur_shape[i] is not None: if shape[i] != cur_shape[i]: cur_shape[i] = None return feature_info(cur_shape, num_classes)
[ "Create", "an", "info", "-", "like", "tuple", "for", "feature", "given", "some", "shapes", "and", "vocab", "size", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/v2/t2t.py#L86-L98
[ "def", "_make_info", "(", "shape_list", ",", "num_classes", ")", ":", "feature_info", "=", "collections", ".", "namedtuple", "(", "\"FeatureInfo\"", ",", "[", "\"shape\"", ",", "\"num_classes\"", "]", ")", "cur_shape", "=", "list", "(", "shape_list", "[", "0",...
272500b6efe353aeb638d2745ed56e519462ca31
train
_select_features
Select a subset of features from the example dict.
tensor2tensor/v2/t2t.py
def _select_features(example, feature_list=None): """Select a subset of features from the example dict.""" feature_list = feature_list or ["inputs", "targets"] return {f: example[f] for f in feature_list}
def _select_features(example, feature_list=None): """Select a subset of features from the example dict.""" feature_list = feature_list or ["inputs", "targets"] return {f: example[f] for f in feature_list}
[ "Select", "a", "subset", "of", "features", "from", "the", "example", "dict", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/v2/t2t.py#L101-L104
[ "def", "_select_features", "(", "example", ",", "feature_list", "=", "None", ")", ":", "feature_list", "=", "feature_list", "or", "[", "\"inputs\"", ",", "\"targets\"", "]", "return", "{", "f", ":", "example", "[", "f", "]", "for", "f", "in", "feature_list...
272500b6efe353aeb638d2745ed56e519462ca31
train
_train_and_eval_dataset_v1
Return train and evaluation datasets, feature info and supervised keys.
tensor2tensor/v2/t2t.py
def _train_and_eval_dataset_v1(problem_name, data_dir): """Return train and evaluation datasets, feature info and supervised keys.""" problem = problems.problem(problem_name) train_dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, data_dir) train_dataset = train_dataset.map(_select_features) eval_dataset = problem.dataset(tf.estimator.ModeKeys.EVAL, data_dir) eval_dataset = eval_dataset.map(_select_features) supervised_keys = (["inputs"], ["targets"]) hparams = problem.get_hparams() # We take a few training examples to guess the shapes. input_shapes, target_shapes = [], [] for example in train_dataset.take(3): input_shapes.append(example["inputs"].shape.as_list()) target_shapes.append(example["targets"].shape.as_list()) input_vocab_size = hparams.vocab_size["inputs"] target_vocab_size = hparams.vocab_size["targets"] input_info = _make_info(input_shapes, input_vocab_size) target_info = _make_info(target_shapes, target_vocab_size) info = {"inputs": input_info, "targets": target_info} return train_dataset, eval_dataset, info, supervised_keys
def _train_and_eval_dataset_v1(problem_name, data_dir): """Return train and evaluation datasets, feature info and supervised keys.""" problem = problems.problem(problem_name) train_dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, data_dir) train_dataset = train_dataset.map(_select_features) eval_dataset = problem.dataset(tf.estimator.ModeKeys.EVAL, data_dir) eval_dataset = eval_dataset.map(_select_features) supervised_keys = (["inputs"], ["targets"]) hparams = problem.get_hparams() # We take a few training examples to guess the shapes. input_shapes, target_shapes = [], [] for example in train_dataset.take(3): input_shapes.append(example["inputs"].shape.as_list()) target_shapes.append(example["targets"].shape.as_list()) input_vocab_size = hparams.vocab_size["inputs"] target_vocab_size = hparams.vocab_size["targets"] input_info = _make_info(input_shapes, input_vocab_size) target_info = _make_info(target_shapes, target_vocab_size) info = {"inputs": input_info, "targets": target_info} return train_dataset, eval_dataset, info, supervised_keys
[ "Return", "train", "and", "evaluation", "datasets", "feature", "info", "and", "supervised", "keys", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/v2/t2t.py#L107-L126
[ "def", "_train_and_eval_dataset_v1", "(", "problem_name", ",", "data_dir", ")", ":", "problem", "=", "problems", ".", "problem", "(", "problem_name", ")", "train_dataset", "=", "problem", ".", "dataset", "(", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAI...
272500b6efe353aeb638d2745ed56e519462ca31
train
batch_fn
Batching function.
tensor2tensor/v2/t2t.py
def batch_fn(dataset, training, shapes, target_names, batch_size=32, eval_batch_size=32, bucket_batch_length=32, bucket_max_length=256, bucket_min_length=8, bucket_length_step=1.1, buckets=None): """Batching function.""" del target_names # If bucketing is not specified, check if target shapes are variable. cur_batch_size = batch_size if training else eval_batch_size if buckets is None: variable_target_shapes = False target_shape = shapes[1] for dim in target_shape: if dim is None: variable_target_shapes = True tf.logging.info("Heuristically setting bucketing to %s based on shapes " "of target tensors." % variable_target_shapes) if variable_target_shapes: batch_size_per_token = cur_batch_size * bucket_batch_length scheme = data_reader.batching_scheme(batch_size_per_token, bucket_max_length, bucket_min_length, bucket_length_step, drop_long_sequences=training) buckets = (scheme["boundaries"], scheme["batch_sizes"]) if buckets: tf.logging.info("Bucketing with buckets %s." % str(buckets)) def example_length(_, target): return tf.shape(target)[0] boundaries, batch_sizes = buckets dataset = dataset.apply(tf.data.experimental.bucket_by_sequence_length( example_length, boundaries, batch_sizes)) else: dataset = dataset.padded_batch(cur_batch_size, shapes) return dataset
def batch_fn(dataset, training, shapes, target_names, batch_size=32, eval_batch_size=32, bucket_batch_length=32, bucket_max_length=256, bucket_min_length=8, bucket_length_step=1.1, buckets=None): """Batching function.""" del target_names # If bucketing is not specified, check if target shapes are variable. cur_batch_size = batch_size if training else eval_batch_size if buckets is None: variable_target_shapes = False target_shape = shapes[1] for dim in target_shape: if dim is None: variable_target_shapes = True tf.logging.info("Heuristically setting bucketing to %s based on shapes " "of target tensors." % variable_target_shapes) if variable_target_shapes: batch_size_per_token = cur_batch_size * bucket_batch_length scheme = data_reader.batching_scheme(batch_size_per_token, bucket_max_length, bucket_min_length, bucket_length_step, drop_long_sequences=training) buckets = (scheme["boundaries"], scheme["batch_sizes"]) if buckets: tf.logging.info("Bucketing with buckets %s." % str(buckets)) def example_length(_, target): return tf.shape(target)[0] boundaries, batch_sizes = buckets dataset = dataset.apply(tf.data.experimental.bucket_by_sequence_length( example_length, boundaries, batch_sizes)) else: dataset = dataset.padded_batch(cur_batch_size, shapes) return dataset
[ "Batching", "function", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/v2/t2t.py#L140-L174
[ "def", "batch_fn", "(", "dataset", ",", "training", ",", "shapes", ",", "target_names", ",", "batch_size", "=", "32", ",", "eval_batch_size", "=", "32", ",", "bucket_batch_length", "=", "32", ",", "bucket_max_length", "=", "256", ",", "bucket_min_length", "=",...
272500b6efe353aeb638d2745ed56e519462ca31
train
shuffle_and_batch_data
Shuffle and batch the given dataset.
tensor2tensor/v2/t2t.py
def shuffle_and_batch_data(dataset, target_names, features_info, training): """Shuffle and batch the given dataset.""" def append_targets(example): """Append targets to the example dictionary. Needed for Keras.""" if len(target_names) == 1: return (example, example[target_names[0]]) targets = {} for name in target_names: targets[name] = example[name] return (example, targets) dataset = dataset.map(append_targets) if training: dataset = dataset.repeat() shapes = {k: features_info[k].shape for k in features_info} shapes = (shapes, shapes[target_names[0]]) dataset = dataset.shuffle(128) dataset = preprocess_fn(dataset, training) dataset = batch_fn(dataset, training, shapes, target_names) return dataset.prefetch(8)
def shuffle_and_batch_data(dataset, target_names, features_info, training): """Shuffle and batch the given dataset.""" def append_targets(example): """Append targets to the example dictionary. Needed for Keras.""" if len(target_names) == 1: return (example, example[target_names[0]]) targets = {} for name in target_names: targets[name] = example[name] return (example, targets) dataset = dataset.map(append_targets) if training: dataset = dataset.repeat() shapes = {k: features_info[k].shape for k in features_info} shapes = (shapes, shapes[target_names[0]]) dataset = dataset.shuffle(128) dataset = preprocess_fn(dataset, training) dataset = batch_fn(dataset, training, shapes, target_names) return dataset.prefetch(8)
[ "Shuffle", "and", "batch", "the", "given", "dataset", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/v2/t2t.py#L177-L195
[ "def", "shuffle_and_batch_data", "(", "dataset", ",", "target_names", ",", "features_info", ",", "training", ")", ":", "def", "append_targets", "(", "example", ")", ":", "\"\"\"Append targets to the example dictionary. Needed for Keras.\"\"\"", "if", "len", "(", "target_n...
272500b6efe353aeb638d2745ed56e519462ca31
train
optimize_fn
Compile the model in Keras.
tensor2tensor/v2/t2t.py
def optimize_fn(model, optimizer=None, learning_rate_schedule=None, loss=None, metrics=None): """Compile the model in Keras.""" learning_rate_schedule = learning_rate_schedule or T2TLearningRateSchedule() if optimizer: optimizer = optimizer(learning_rate=learning_rate_schedule) else: # We use Adam by default with adjusted parameters. optimizer = tf.keras.optimizers.Adam( learning_rate=learning_rate_schedule, beta_1=0.9, beta_2=0.997, epsilon=1e-9) metrics = metrics or [tf.keras.metrics.sparse_categorical_accuracy] def xent_loss(y, x): return tf.keras.backend.sparse_categorical_crossentropy( y, x, from_logits=True) loss = loss or xent_loss return model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
def optimize_fn(model, optimizer=None, learning_rate_schedule=None, loss=None, metrics=None): """Compile the model in Keras.""" learning_rate_schedule = learning_rate_schedule or T2TLearningRateSchedule() if optimizer: optimizer = optimizer(learning_rate=learning_rate_schedule) else: # We use Adam by default with adjusted parameters. optimizer = tf.keras.optimizers.Adam( learning_rate=learning_rate_schedule, beta_1=0.9, beta_2=0.997, epsilon=1e-9) metrics = metrics or [tf.keras.metrics.sparse_categorical_accuracy] def xent_loss(y, x): return tf.keras.backend.sparse_categorical_crossentropy( y, x, from_logits=True) loss = loss or xent_loss return model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
[ "Compile", "the", "model", "in", "Keras", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/v2/t2t.py#L233-L253
[ "def", "optimize_fn", "(", "model", ",", "optimizer", "=", "None", ",", "learning_rate_schedule", "=", "None", ",", "loss", "=", "None", ",", "metrics", "=", "None", ")", ":", "learning_rate_schedule", "=", "learning_rate_schedule", "or", "T2TLearningRateSchedule"...
272500b6efe353aeb638d2745ed56e519462ca31
train
train_fn
Train the given model on the given dataset. Args: data_dir: Directory where the data is located. output_dir: Directory where to put the logs and checkpoints. model_class: The model class to train. dataset: The name of the dataset to train on. input_names: List of strings with the names of the features on input. target_names: List of strings with the names of the target features. train_steps: for how many steps to train. eval_steps: for how many steps to do evaluation. eval_frequency: how often (every this many steps) to run evaluation.
tensor2tensor/v2/t2t.py
def train_fn(data_dir=None, output_dir=None, model_class=gin.REQUIRED, dataset=gin.REQUIRED, input_names=None, target_names=None, train_steps=1000, eval_steps=1, eval_frequency=100): """Train the given model on the given dataset. Args: data_dir: Directory where the data is located. output_dir: Directory where to put the logs and checkpoints. model_class: The model class to train. dataset: The name of the dataset to train on. input_names: List of strings with the names of the features on input. target_names: List of strings with the names of the target features. train_steps: for how many steps to train. eval_steps: for how many steps to do evaluation. eval_frequency: how often (every this many steps) to run evaluation. """ train_data, eval_data, features_info, keys = train_and_eval_dataset( dataset, data_dir) if input_names is None: input_names = keys[0] if target_names is None: target_names = keys[1] # TODO(lukaszkaiser): The use of distribution strategy below fails like this: # .../keras/models.py", line 93, in _clone_functional_model # for layer in model._input_layers: # AttributeError: 'BasicFcRelu' object has no attribute '_input_layers' # strategy = tf.distribute.MirroredStrategy() # with strategy.scope(): model = model_class(features_info=features_info, input_names=input_names, target_names=target_names) optimize_fn(model) train_batches = shuffle_and_batch_data( train_data, target_names, features_info, training=True) eval_batches = shuffle_and_batch_data( eval_data, target_names, features_info, training=False) # Need to run one training step just to get optimizer variables to load. model.fit(train_batches, epochs=1, steps_per_epoch=1) # Training loop. 
callbacks = [] callbacks.append(tf.keras.callbacks.History()) callbacks.append(tf.keras.callbacks.BaseLogger()) last_epoch = 0 if output_dir is not None: callbacks.append(tf.keras.callbacks.TensorBoard(log_dir=output_dir)) output_format = os.path.join(output_dir, "model-{epoch:05d}") callbacks.append(tf.keras.callbacks.ModelCheckpoint( filepath=output_format, save_weights_only=True)) checkpoints = tf.gfile.Glob(os.path.join(output_dir, "model-*")) # Take basenames and strip the "model-" prefix. checkpoints = [os.path.basename(ckpt)[6:] for ckpt in checkpoints] # Get epoch numbers from the filenames and sort to obtain last epoch. epoch_numbers = [int(ckpt[:5]) for ckpt in checkpoints if len(ckpt) > 4] epoch_numbers.sort() if epoch_numbers: last_epoch = epoch_numbers[-1] saved_path = os.path.join(output_dir, "model-%05d" % last_epoch) model.load_weights(saved_path) model.fit(train_batches, epochs=train_steps // eval_frequency, steps_per_epoch=eval_frequency, validation_data=eval_batches, validation_steps=eval_steps, initial_epoch=last_epoch, callbacks=callbacks)
def train_fn(data_dir=None, output_dir=None, model_class=gin.REQUIRED, dataset=gin.REQUIRED, input_names=None, target_names=None, train_steps=1000, eval_steps=1, eval_frequency=100): """Train the given model on the given dataset. Args: data_dir: Directory where the data is located. output_dir: Directory where to put the logs and checkpoints. model_class: The model class to train. dataset: The name of the dataset to train on. input_names: List of strings with the names of the features on input. target_names: List of strings with the names of the target features. train_steps: for how many steps to train. eval_steps: for how many steps to do evaluation. eval_frequency: how often (every this many steps) to run evaluation. """ train_data, eval_data, features_info, keys = train_and_eval_dataset( dataset, data_dir) if input_names is None: input_names = keys[0] if target_names is None: target_names = keys[1] # TODO(lukaszkaiser): The use of distribution strategy below fails like this: # .../keras/models.py", line 93, in _clone_functional_model # for layer in model._input_layers: # AttributeError: 'BasicFcRelu' object has no attribute '_input_layers' # strategy = tf.distribute.MirroredStrategy() # with strategy.scope(): model = model_class(features_info=features_info, input_names=input_names, target_names=target_names) optimize_fn(model) train_batches = shuffle_and_batch_data( train_data, target_names, features_info, training=True) eval_batches = shuffle_and_batch_data( eval_data, target_names, features_info, training=False) # Need to run one training step just to get optimizer variables to load. model.fit(train_batches, epochs=1, steps_per_epoch=1) # Training loop. 
callbacks = [] callbacks.append(tf.keras.callbacks.History()) callbacks.append(tf.keras.callbacks.BaseLogger()) last_epoch = 0 if output_dir is not None: callbacks.append(tf.keras.callbacks.TensorBoard(log_dir=output_dir)) output_format = os.path.join(output_dir, "model-{epoch:05d}") callbacks.append(tf.keras.callbacks.ModelCheckpoint( filepath=output_format, save_weights_only=True)) checkpoints = tf.gfile.Glob(os.path.join(output_dir, "model-*")) # Take basenames and strip the "model-" prefix. checkpoints = [os.path.basename(ckpt)[6:] for ckpt in checkpoints] # Get epoch numbers from the filenames and sort to obtain last epoch. epoch_numbers = [int(ckpt[:5]) for ckpt in checkpoints if len(ckpt) > 4] epoch_numbers.sort() if epoch_numbers: last_epoch = epoch_numbers[-1] saved_path = os.path.join(output_dir, "model-%05d" % last_epoch) model.load_weights(saved_path) model.fit(train_batches, epochs=train_steps // eval_frequency, steps_per_epoch=eval_frequency, validation_data=eval_batches, validation_steps=eval_steps, initial_epoch=last_epoch, callbacks=callbacks)
[ "Train", "the", "given", "model", "on", "the", "given", "dataset", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/v2/t2t.py#L259-L324
[ "def", "train_fn", "(", "data_dir", "=", "None", ",", "output_dir", "=", "None", ",", "model_class", "=", "gin", ".", "REQUIRED", ",", "dataset", "=", "gin", ".", "REQUIRED", ",", "input_names", "=", "None", ",", "target_names", "=", "None", ",", "train_...
272500b6efe353aeb638d2745ed56e519462ca31
train
t2t_train
Main function to train the given model on the given dataset. Args: model_name: The name of the model to train. dataset_name: The name of the dataset to train on. data_dir: Directory where the data is located. output_dir: Directory where to put the logs and checkpoints. config_file: the gin configuration file to use. config: string (in gin format) to override gin parameters.
tensor2tensor/v2/t2t.py
def t2t_train(model_name, dataset_name, data_dir=None, output_dir=None, config_file=None, config=None): """Main function to train the given model on the given dataset. Args: model_name: The name of the model to train. dataset_name: The name of the dataset to train on. data_dir: Directory where the data is located. output_dir: Directory where to put the logs and checkpoints. config_file: the gin configuration file to use. config: string (in gin format) to override gin parameters. """ if model_name not in _MODEL_REGISTRY: raise ValueError("Model %s not in registry. Available models:\n * %s." % (model_name, "\n * ".join(_MODEL_REGISTRY.keys()))) model_class = _MODEL_REGISTRY[model_name]() gin.bind_parameter("train_fn.model_class", model_class) gin.bind_parameter("train_fn.dataset", dataset_name) gin.parse_config_files_and_bindings(config_file, config) # TODO(lukaszkaiser): save gin config in output_dir if provided? train_fn(data_dir, output_dir=output_dir)
def t2t_train(model_name, dataset_name, data_dir=None, output_dir=None, config_file=None, config=None): """Main function to train the given model on the given dataset. Args: model_name: The name of the model to train. dataset_name: The name of the dataset to train on. data_dir: Directory where the data is located. output_dir: Directory where to put the logs and checkpoints. config_file: the gin configuration file to use. config: string (in gin format) to override gin parameters. """ if model_name not in _MODEL_REGISTRY: raise ValueError("Model %s not in registry. Available models:\n * %s." % (model_name, "\n * ".join(_MODEL_REGISTRY.keys()))) model_class = _MODEL_REGISTRY[model_name]() gin.bind_parameter("train_fn.model_class", model_class) gin.bind_parameter("train_fn.dataset", dataset_name) gin.parse_config_files_and_bindings(config_file, config) # TODO(lukaszkaiser): save gin config in output_dir if provided? train_fn(data_dir, output_dir=output_dir)
[ "Main", "function", "to", "train", "the", "given", "model", "on", "the", "given", "dataset", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/v2/t2t.py#L327-L347
[ "def", "t2t_train", "(", "model_name", ",", "dataset_name", ",", "data_dir", "=", "None", ",", "output_dir", "=", "None", ",", "config_file", "=", "None", ",", "config", "=", "None", ")", ":", "if", "model_name", "not", "in", "_MODEL_REGISTRY", ":", "raise...
272500b6efe353aeb638d2745ed56e519462ca31
train
decode
Decode from estimator. Interactive, from file, or from dataset.
tensor2tensor/bin/t2t_decoder.py
def decode(estimator, hparams, decode_hp): """Decode from estimator. Interactive, from file, or from dataset.""" if FLAGS.decode_interactive: if estimator.config.use_tpu: raise ValueError("TPU can only decode from dataset.") decoding.decode_interactively(estimator, hparams, decode_hp, checkpoint_path=FLAGS.checkpoint_path) elif FLAGS.decode_from_file: decoding.decode_from_file(estimator, FLAGS.decode_from_file, hparams, decode_hp, FLAGS.decode_to_file, checkpoint_path=FLAGS.checkpoint_path) if FLAGS.checkpoint_path and FLAGS.keep_timestamp: ckpt_time = os.path.getmtime(FLAGS.checkpoint_path + ".index") os.utime(FLAGS.decode_to_file, (ckpt_time, ckpt_time)) else: decoding.decode_from_dataset( estimator, FLAGS.problem, hparams, decode_hp, decode_to_file=FLAGS.decode_to_file, dataset_split="test" if FLAGS.eval_use_test_set else None, checkpoint_path=FLAGS.checkpoint_path)
def decode(estimator, hparams, decode_hp): """Decode from estimator. Interactive, from file, or from dataset.""" if FLAGS.decode_interactive: if estimator.config.use_tpu: raise ValueError("TPU can only decode from dataset.") decoding.decode_interactively(estimator, hparams, decode_hp, checkpoint_path=FLAGS.checkpoint_path) elif FLAGS.decode_from_file: decoding.decode_from_file(estimator, FLAGS.decode_from_file, hparams, decode_hp, FLAGS.decode_to_file, checkpoint_path=FLAGS.checkpoint_path) if FLAGS.checkpoint_path and FLAGS.keep_timestamp: ckpt_time = os.path.getmtime(FLAGS.checkpoint_path + ".index") os.utime(FLAGS.decode_to_file, (ckpt_time, ckpt_time)) else: decoding.decode_from_dataset( estimator, FLAGS.problem, hparams, decode_hp, decode_to_file=FLAGS.decode_to_file, dataset_split="test" if FLAGS.eval_use_test_set else None, checkpoint_path=FLAGS.checkpoint_path)
[ "Decode", "from", "estimator", ".", "Interactive", "from", "file", "or", "from", "dataset", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/bin/t2t_decoder.py#L82-L104
[ "def", "decode", "(", "estimator", ",", "hparams", ",", "decode_hp", ")", ":", "if", "FLAGS", ".", "decode_interactive", ":", "if", "estimator", ".", "config", ".", "use_tpu", ":", "raise", "ValueError", "(", "\"TPU can only decode from dataset.\"", ")", "decodi...
272500b6efe353aeb638d2745ed56e519462ca31
train
score_file
Score each line in a file and return the scores.
tensor2tensor/bin/t2t_decoder.py
def score_file(filename): """Score each line in a file and return the scores.""" # Prepare model. hparams = create_hparams() encoders = registry.problem(FLAGS.problem).feature_encoders(FLAGS.data_dir) has_inputs = "inputs" in encoders # Prepare features for feeding into the model. if has_inputs: inputs_ph = tf.placeholder(dtype=tf.int32) # Just length dimension. batch_inputs = tf.reshape(inputs_ph, [1, -1, 1, 1]) # Make it 4D. targets_ph = tf.placeholder(dtype=tf.int32) # Just length dimension. batch_targets = tf.reshape(targets_ph, [1, -1, 1, 1]) # Make it 4D. if has_inputs: features = {"inputs": batch_inputs, "targets": batch_targets} else: features = {"targets": batch_targets} # Prepare the model and the graph when model runs on features. model = registry.model(FLAGS.model)(hparams, tf.estimator.ModeKeys.EVAL) _, losses = model(features) saver = tf.train.Saver() with tf.Session() as sess: # Load weights from checkpoint. if FLAGS.checkpoint_path is None: ckpts = tf.train.get_checkpoint_state(FLAGS.output_dir) ckpt = ckpts.model_checkpoint_path else: ckpt = FLAGS.checkpoint_path saver.restore(sess, ckpt) # Run on each line. with tf.gfile.Open(filename) as f: lines = f.readlines() results = [] for line in lines: tab_split = line.split("\t") if len(tab_split) > 2: raise ValueError("Each line must have at most one tab separator.") if len(tab_split) == 1: targets = tab_split[0].strip() else: targets = tab_split[1].strip() inputs = tab_split[0].strip() # Run encoders and append EOS symbol. targets_numpy = encoders["targets"].encode( targets) + [text_encoder.EOS_ID] if has_inputs: inputs_numpy = encoders["inputs"].encode(inputs) + [text_encoder.EOS_ID] # Prepare the feed. if has_inputs: feed = {inputs_ph: inputs_numpy, targets_ph: targets_numpy} else: feed = {targets_ph: targets_numpy} # Get the score. np_loss = sess.run(losses["training"], feed) results.append(np_loss) return results
def score_file(filename): """Score each line in a file and return the scores.""" # Prepare model. hparams = create_hparams() encoders = registry.problem(FLAGS.problem).feature_encoders(FLAGS.data_dir) has_inputs = "inputs" in encoders # Prepare features for feeding into the model. if has_inputs: inputs_ph = tf.placeholder(dtype=tf.int32) # Just length dimension. batch_inputs = tf.reshape(inputs_ph, [1, -1, 1, 1]) # Make it 4D. targets_ph = tf.placeholder(dtype=tf.int32) # Just length dimension. batch_targets = tf.reshape(targets_ph, [1, -1, 1, 1]) # Make it 4D. if has_inputs: features = {"inputs": batch_inputs, "targets": batch_targets} else: features = {"targets": batch_targets} # Prepare the model and the graph when model runs on features. model = registry.model(FLAGS.model)(hparams, tf.estimator.ModeKeys.EVAL) _, losses = model(features) saver = tf.train.Saver() with tf.Session() as sess: # Load weights from checkpoint. if FLAGS.checkpoint_path is None: ckpts = tf.train.get_checkpoint_state(FLAGS.output_dir) ckpt = ckpts.model_checkpoint_path else: ckpt = FLAGS.checkpoint_path saver.restore(sess, ckpt) # Run on each line. with tf.gfile.Open(filename) as f: lines = f.readlines() results = [] for line in lines: tab_split = line.split("\t") if len(tab_split) > 2: raise ValueError("Each line must have at most one tab separator.") if len(tab_split) == 1: targets = tab_split[0].strip() else: targets = tab_split[1].strip() inputs = tab_split[0].strip() # Run encoders and append EOS symbol. targets_numpy = encoders["targets"].encode( targets) + [text_encoder.EOS_ID] if has_inputs: inputs_numpy = encoders["inputs"].encode(inputs) + [text_encoder.EOS_ID] # Prepare the feed. if has_inputs: feed = {inputs_ph: inputs_numpy, targets_ph: targets_numpy} else: feed = {targets_ph: targets_numpy} # Get the score. np_loss = sess.run(losses["training"], feed) results.append(np_loss) return results
[ "Score", "each", "line", "in", "a", "file", "and", "return", "the", "scores", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/bin/t2t_decoder.py#L107-L164
[ "def", "score_file", "(", "filename", ")", ":", "# Prepare model.", "hparams", "=", "create_hparams", "(", ")", "encoders", "=", "registry", ".", "problem", "(", "FLAGS", ".", "problem", ")", ".", "feature_encoders", "(", "FLAGS", ".", "data_dir", ")", "has_...
272500b6efe353aeb638d2745ed56e519462ca31
train
time_to_channels
Put time dimension on channels in an embedded video.
tensor2tensor/models/research/autoencoders.py
def time_to_channels(embedded_video): """Put time dimension on channels in an embedded video.""" video_shape = common_layers.shape_list(embedded_video) if len(video_shape) != 5: raise ValueError("Assuming videos given as tensors in the format " "[batch, time, height, width, channels] but got one " "of shape: %s" % str(video_shape)) transposed = tf.transpose(embedded_video, [0, 2, 3, 1, 4]) return tf.reshape(transposed, [ video_shape[0], video_shape[2], video_shape[3], video_shape[1] * video_shape[4] ])
def time_to_channels(embedded_video): """Put time dimension on channels in an embedded video.""" video_shape = common_layers.shape_list(embedded_video) if len(video_shape) != 5: raise ValueError("Assuming videos given as tensors in the format " "[batch, time, height, width, channels] but got one " "of shape: %s" % str(video_shape)) transposed = tf.transpose(embedded_video, [0, 2, 3, 1, 4]) return tf.reshape(transposed, [ video_shape[0], video_shape[2], video_shape[3], video_shape[1] * video_shape[4] ])
[ "Put", "time", "dimension", "on", "channels", "in", "an", "embedded", "video", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L38-L49
[ "def", "time_to_channels", "(", "embedded_video", ")", ":", "video_shape", "=", "common_layers", ".", "shape_list", "(", "embedded_video", ")", "if", "len", "(", "video_shape", ")", "!=", "5", ":", "raise", "ValueError", "(", "\"Assuming videos given as tensors in t...
272500b6efe353aeb638d2745ed56e519462ca31
train
autoencoder_basic
Basic autoencoder model.
tensor2tensor/models/research/autoencoders.py
def autoencoder_basic(): """Basic autoencoder model.""" hparams = common_hparams.basic_params1() hparams.optimizer = "adam" hparams.learning_rate_constant = 0.0002 hparams.learning_rate_warmup_steps = 500 hparams.learning_rate_schedule = "constant * linear_warmup" hparams.label_smoothing = 0.0 hparams.batch_size = 128 hparams.hidden_size = 64 hparams.num_hidden_layers = 5 hparams.initializer = "uniform_unit_scaling" hparams.initializer_gain = 1.0 hparams.weight_decay = 0.0 hparams.kernel_height = 4 hparams.kernel_width = 4 hparams.dropout = 0.05 hparams.add_hparam("max_hidden_size", 1024) hparams.add_hparam("bottleneck_bits", 128) hparams.add_hparam("bottleneck_shared_bits", 0) hparams.add_hparam("bottleneck_shared_bits_start_warmup", 0) hparams.add_hparam("bottleneck_shared_bits_stop_warmup", 0) hparams.add_hparam("bottleneck_noise", 0.1) hparams.add_hparam("bottleneck_warmup_steps", 2000) hparams.add_hparam("sample_height", 32) hparams.add_hparam("sample_width", 32) hparams.add_hparam("discriminator_batchnorm", True) hparams.add_hparam("num_sliced_vecs", 20000) hparams.add_hparam("sliced_do_tanh", int(True)) hparams.add_hparam("discriminator_size", 256) hparams.add_hparam("discriminator_kernel_size", 6) hparams.add_hparam("discriminator_strides", 4) hparams.add_hparam("discriminator_pure_mean", int(False)) hparams.add_hparam("code_loss_factor", 1.0) hparams.add_hparam("gan_codes_warmup_steps", 16000) hparams.add_hparam("gan_loss_factor", 0.0) hparams.add_hparam("bottleneck_l2_factor", 0.05) hparams.add_hparam("gumbel_temperature", 0.5) hparams.add_hparam("gumbel_noise_factor", 0.5) hparams.add_hparam("vq_temperature", 0.001) hparams.add_hparam("use_vq_loss", int(False)) hparams.add_hparam("discriminator", "double") return hparams
def autoencoder_basic(): """Basic autoencoder model.""" hparams = common_hparams.basic_params1() hparams.optimizer = "adam" hparams.learning_rate_constant = 0.0002 hparams.learning_rate_warmup_steps = 500 hparams.learning_rate_schedule = "constant * linear_warmup" hparams.label_smoothing = 0.0 hparams.batch_size = 128 hparams.hidden_size = 64 hparams.num_hidden_layers = 5 hparams.initializer = "uniform_unit_scaling" hparams.initializer_gain = 1.0 hparams.weight_decay = 0.0 hparams.kernel_height = 4 hparams.kernel_width = 4 hparams.dropout = 0.05 hparams.add_hparam("max_hidden_size", 1024) hparams.add_hparam("bottleneck_bits", 128) hparams.add_hparam("bottleneck_shared_bits", 0) hparams.add_hparam("bottleneck_shared_bits_start_warmup", 0) hparams.add_hparam("bottleneck_shared_bits_stop_warmup", 0) hparams.add_hparam("bottleneck_noise", 0.1) hparams.add_hparam("bottleneck_warmup_steps", 2000) hparams.add_hparam("sample_height", 32) hparams.add_hparam("sample_width", 32) hparams.add_hparam("discriminator_batchnorm", True) hparams.add_hparam("num_sliced_vecs", 20000) hparams.add_hparam("sliced_do_tanh", int(True)) hparams.add_hparam("discriminator_size", 256) hparams.add_hparam("discriminator_kernel_size", 6) hparams.add_hparam("discriminator_strides", 4) hparams.add_hparam("discriminator_pure_mean", int(False)) hparams.add_hparam("code_loss_factor", 1.0) hparams.add_hparam("gan_codes_warmup_steps", 16000) hparams.add_hparam("gan_loss_factor", 0.0) hparams.add_hparam("bottleneck_l2_factor", 0.05) hparams.add_hparam("gumbel_temperature", 0.5) hparams.add_hparam("gumbel_noise_factor", 0.5) hparams.add_hparam("vq_temperature", 0.001) hparams.add_hparam("use_vq_loss", int(False)) hparams.add_hparam("discriminator", "double") return hparams
[ "Basic", "autoencoder", "model", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1027-L1069
[ "def", "autoencoder_basic", "(", ")", ":", "hparams", "=", "common_hparams", ".", "basic_params1", "(", ")", "hparams", ".", "optimizer", "=", "\"adam\"", "hparams", ".", "learning_rate_constant", "=", "0.0002", "hparams", ".", "learning_rate_warmup_steps", "=", "...
272500b6efe353aeb638d2745ed56e519462ca31
train
autoencoder_autoregressive
Autoregressive autoencoder model.
tensor2tensor/models/research/autoencoders.py
def autoencoder_autoregressive(): """Autoregressive autoencoder model.""" hparams = autoencoder_basic() hparams.add_hparam("autoregressive_forget_base", False) hparams.add_hparam("autoregressive_mode", "none") hparams.add_hparam("autoregressive_decode_steps", 0) hparams.add_hparam("autoregressive_eval_pure_autoencoder", False) hparams.add_hparam("autoregressive_gumbel_sample", False) return hparams
def autoencoder_autoregressive(): """Autoregressive autoencoder model.""" hparams = autoencoder_basic() hparams.add_hparam("autoregressive_forget_base", False) hparams.add_hparam("autoregressive_mode", "none") hparams.add_hparam("autoregressive_decode_steps", 0) hparams.add_hparam("autoregressive_eval_pure_autoencoder", False) hparams.add_hparam("autoregressive_gumbel_sample", False) return hparams
[ "Autoregressive", "autoencoder", "model", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1073-L1081
[ "def", "autoencoder_autoregressive", "(", ")", ":", "hparams", "=", "autoencoder_basic", "(", ")", "hparams", ".", "add_hparam", "(", "\"autoregressive_forget_base\"", ",", "False", ")", "hparams", ".", "add_hparam", "(", "\"autoregressive_mode\"", ",", "\"none\"", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
autoencoder_residual
Residual autoencoder model.
tensor2tensor/models/research/autoencoders.py
def autoencoder_residual(): """Residual autoencoder model.""" hparams = autoencoder_autoregressive() hparams.optimizer = "Adafactor" hparams.clip_grad_norm = 1.0 hparams.learning_rate_constant = 0.5 hparams.learning_rate_warmup_steps = 500 hparams.learning_rate_schedule = "constant * linear_warmup * rsqrt_decay" hparams.num_hidden_layers = 5 hparams.hidden_size = 64 hparams.max_hidden_size = 1024 hparams.add_hparam("num_residual_layers", 2) hparams.add_hparam("residual_kernel_height", 3) hparams.add_hparam("residual_kernel_width", 3) hparams.add_hparam("residual_filter_multiplier", 2.0) hparams.add_hparam("residual_dropout", 0.2) hparams.add_hparam("residual_use_separable_conv", int(True)) hparams.add_hparam("kl_beta", 1.0) return hparams
def autoencoder_residual(): """Residual autoencoder model.""" hparams = autoencoder_autoregressive() hparams.optimizer = "Adafactor" hparams.clip_grad_norm = 1.0 hparams.learning_rate_constant = 0.5 hparams.learning_rate_warmup_steps = 500 hparams.learning_rate_schedule = "constant * linear_warmup * rsqrt_decay" hparams.num_hidden_layers = 5 hparams.hidden_size = 64 hparams.max_hidden_size = 1024 hparams.add_hparam("num_residual_layers", 2) hparams.add_hparam("residual_kernel_height", 3) hparams.add_hparam("residual_kernel_width", 3) hparams.add_hparam("residual_filter_multiplier", 2.0) hparams.add_hparam("residual_dropout", 0.2) hparams.add_hparam("residual_use_separable_conv", int(True)) hparams.add_hparam("kl_beta", 1.0) return hparams
[ "Residual", "autoencoder", "model", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1085-L1103
[ "def", "autoencoder_residual", "(", ")", ":", "hparams", "=", "autoencoder_autoregressive", "(", ")", "hparams", ".", "optimizer", "=", "\"Adafactor\"", "hparams", ".", "clip_grad_norm", "=", "1.0", "hparams", ".", "learning_rate_constant", "=", "0.5", "hparams", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
autoencoder_residual_text
Residual autoencoder model for text.
tensor2tensor/models/research/autoencoders.py
def autoencoder_residual_text(): """Residual autoencoder model for text.""" hparams = autoencoder_residual() hparams.bottleneck_bits = 32 hparams.batch_size = 1024 hparams.hidden_size = 64 hparams.max_hidden_size = 512 hparams.bottleneck_noise = 0.0 hparams.bottom = { "inputs": modalities.identity_bottom, "targets": modalities.identity_bottom, } hparams.top = { "targets": modalities.identity_top, } hparams.autoregressive_mode = "none" hparams.sample_width = 1 return hparams
def autoencoder_residual_text(): """Residual autoencoder model for text.""" hparams = autoencoder_residual() hparams.bottleneck_bits = 32 hparams.batch_size = 1024 hparams.hidden_size = 64 hparams.max_hidden_size = 512 hparams.bottleneck_noise = 0.0 hparams.bottom = { "inputs": modalities.identity_bottom, "targets": modalities.identity_bottom, } hparams.top = { "targets": modalities.identity_top, } hparams.autoregressive_mode = "none" hparams.sample_width = 1 return hparams
[ "Residual", "autoencoder", "model", "for", "text", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1107-L1124
[ "def", "autoencoder_residual_text", "(", ")", ":", "hparams", "=", "autoencoder_residual", "(", ")", "hparams", ".", "bottleneck_bits", "=", "32", "hparams", ".", "batch_size", "=", "1024", "hparams", ".", "hidden_size", "=", "64", "hparams", ".", "max_hidden_si...
272500b6efe353aeb638d2745ed56e519462ca31
train
autoencoder_basic_discrete
Basic autoencoder model.
tensor2tensor/models/research/autoencoders.py
def autoencoder_basic_discrete(): """Basic autoencoder model.""" hparams = autoencoder_autoregressive() hparams.num_hidden_layers = 5 hparams.hidden_size = 64 hparams.bottleneck_bits = 1024 hparams.bottleneck_noise = 0.1 hparams.add_hparam("discretize_warmup_steps", 16000) return hparams
def autoencoder_basic_discrete(): """Basic autoencoder model.""" hparams = autoencoder_autoregressive() hparams.num_hidden_layers = 5 hparams.hidden_size = 64 hparams.bottleneck_bits = 1024 hparams.bottleneck_noise = 0.1 hparams.add_hparam("discretize_warmup_steps", 16000) return hparams
[ "Basic", "autoencoder", "model", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1128-L1136
[ "def", "autoencoder_basic_discrete", "(", ")", ":", "hparams", "=", "autoencoder_autoregressive", "(", ")", "hparams", ".", "num_hidden_layers", "=", "5", "hparams", ".", "hidden_size", "=", "64", "hparams", ".", "bottleneck_bits", "=", "1024", "hparams", ".", "...
272500b6efe353aeb638d2745ed56e519462ca31
train
autoencoder_residual_discrete
Residual discrete autoencoder model.
tensor2tensor/models/research/autoencoders.py
def autoencoder_residual_discrete(): """Residual discrete autoencoder model.""" hparams = autoencoder_residual() hparams.bottleneck_bits = 1024 hparams.bottleneck_noise = 0.05 hparams.add_hparam("discretize_warmup_steps", 16000) hparams.add_hparam("bottleneck_kind", "tanh_discrete") hparams.add_hparam("isemhash_noise_dev", 0.5) hparams.add_hparam("isemhash_mix_prob", 0.5) hparams.add_hparam("isemhash_filter_size_multiplier", 2.0) hparams.add_hparam("vq_beta", 0.25) hparams.add_hparam("vq_decay", 0.999) hparams.add_hparam("vq_epsilon", 1e-5) return hparams
def autoencoder_residual_discrete(): """Residual discrete autoencoder model.""" hparams = autoencoder_residual() hparams.bottleneck_bits = 1024 hparams.bottleneck_noise = 0.05 hparams.add_hparam("discretize_warmup_steps", 16000) hparams.add_hparam("bottleneck_kind", "tanh_discrete") hparams.add_hparam("isemhash_noise_dev", 0.5) hparams.add_hparam("isemhash_mix_prob", 0.5) hparams.add_hparam("isemhash_filter_size_multiplier", 2.0) hparams.add_hparam("vq_beta", 0.25) hparams.add_hparam("vq_decay", 0.999) hparams.add_hparam("vq_epsilon", 1e-5) return hparams
[ "Residual", "discrete", "autoencoder", "model", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1140-L1153
[ "def", "autoencoder_residual_discrete", "(", ")", ":", "hparams", "=", "autoencoder_residual", "(", ")", "hparams", ".", "bottleneck_bits", "=", "1024", "hparams", ".", "bottleneck_noise", "=", "0.05", "hparams", ".", "add_hparam", "(", "\"discretize_warmup_steps\"", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
autoencoder_residual_discrete_big
Residual discrete autoencoder model, big version.
tensor2tensor/models/research/autoencoders.py
def autoencoder_residual_discrete_big(): """Residual discrete autoencoder model, big version.""" hparams = autoencoder_residual_discrete() hparams.hidden_size = 128 hparams.max_hidden_size = 4096 hparams.bottleneck_noise = 0.1 hparams.residual_dropout = 0.4 return hparams
def autoencoder_residual_discrete_big(): """Residual discrete autoencoder model, big version.""" hparams = autoencoder_residual_discrete() hparams.hidden_size = 128 hparams.max_hidden_size = 4096 hparams.bottleneck_noise = 0.1 hparams.residual_dropout = 0.4 return hparams
[ "Residual", "discrete", "autoencoder", "model", "big", "version", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1157-L1164
[ "def", "autoencoder_residual_discrete_big", "(", ")", ":", "hparams", "=", "autoencoder_residual_discrete", "(", ")", "hparams", ".", "hidden_size", "=", "128", "hparams", ".", "max_hidden_size", "=", "4096", "hparams", ".", "bottleneck_noise", "=", "0.1", "hparams"...
272500b6efe353aeb638d2745ed56e519462ca31
train
autoencoder_ordered_discrete
Ordered discrete autoencoder model.
tensor2tensor/models/research/autoencoders.py
def autoencoder_ordered_discrete(): """Ordered discrete autoencoder model.""" hparams = autoencoder_residual_discrete() hparams.bottleneck_noise = 0.05 # Use 0.8 for ordered. hparams.gan_loss_factor = 0.05 hparams.add_hparam("unordered", True) return hparams
def autoencoder_ordered_discrete(): """Ordered discrete autoencoder model.""" hparams = autoencoder_residual_discrete() hparams.bottleneck_noise = 0.05 # Use 0.8 for ordered. hparams.gan_loss_factor = 0.05 hparams.add_hparam("unordered", True) return hparams
[ "Ordered", "discrete", "autoencoder", "model", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1168-L1174
[ "def", "autoencoder_ordered_discrete", "(", ")", ":", "hparams", "=", "autoencoder_residual_discrete", "(", ")", "hparams", ".", "bottleneck_noise", "=", "0.05", "# Use 0.8 for ordered.", "hparams", ".", "gan_loss_factor", "=", "0.05", "hparams", ".", "add_hparam", "(...
272500b6efe353aeb638d2745ed56e519462ca31
train
autoencoder_ordered_discrete_image64
Ordered discrete autoencoder model.
tensor2tensor/models/research/autoencoders.py
def autoencoder_ordered_discrete_image64(): """Ordered discrete autoencoder model.""" hparams = autoencoder_ordered_discrete() hparams.batch_size = 32 hparams.num_hidden_layers = 6 hparams.bottleneck_warmup_steps *= 2 hparams.gan_codes_warmup_steps *= 2 return hparams
def autoencoder_ordered_discrete_image64(): """Ordered discrete autoencoder model.""" hparams = autoencoder_ordered_discrete() hparams.batch_size = 32 hparams.num_hidden_layers = 6 hparams.bottleneck_warmup_steps *= 2 hparams.gan_codes_warmup_steps *= 2 return hparams
[ "Ordered", "discrete", "autoencoder", "model", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1178-L1186
[ "def", "autoencoder_ordered_discrete_image64", "(", ")", ":", "hparams", "=", "autoencoder_ordered_discrete", "(", ")", "hparams", ".", "batch_size", "=", "32", "hparams", ".", "num_hidden_layers", "=", "6", "hparams", ".", "bottleneck_warmup_steps", "*=", "2", "hpa...
272500b6efe353aeb638d2745ed56e519462ca31
train
autoencoder_ordered_text
Ordered discrete autoencoder model for text.
tensor2tensor/models/research/autoencoders.py
def autoencoder_ordered_text(): """Ordered discrete autoencoder model for text.""" hparams = autoencoder_ordered_discrete() hparams.bottleneck_bits = 1024 hparams.bottleneck_shared_bits = 1024-64 hparams.bottleneck_shared_bits_start_warmup = 75000 hparams.bottleneck_shared_bits_stop_warmup = 275000 hparams.num_hidden_layers = 7 hparams.batch_size = 1024 hparams.autoregressive_mode = "conv5" hparams.max_hidden_size = 1024 hparams.bottom = { "inputs": modalities.identity_bottom, "targets": modalities.identity_bottom, } hparams.top = { "targets": modalities.identity_top, } hparams.sample_height = 128 hparams.sample_width = 1 return hparams
def autoencoder_ordered_text(): """Ordered discrete autoencoder model for text.""" hparams = autoencoder_ordered_discrete() hparams.bottleneck_bits = 1024 hparams.bottleneck_shared_bits = 1024-64 hparams.bottleneck_shared_bits_start_warmup = 75000 hparams.bottleneck_shared_bits_stop_warmup = 275000 hparams.num_hidden_layers = 7 hparams.batch_size = 1024 hparams.autoregressive_mode = "conv5" hparams.max_hidden_size = 1024 hparams.bottom = { "inputs": modalities.identity_bottom, "targets": modalities.identity_bottom, } hparams.top = { "targets": modalities.identity_top, } hparams.sample_height = 128 hparams.sample_width = 1 return hparams
[ "Ordered", "discrete", "autoencoder", "model", "for", "text", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1214-L1234
[ "def", "autoencoder_ordered_text", "(", ")", ":", "hparams", "=", "autoencoder_ordered_discrete", "(", ")", "hparams", ".", "bottleneck_bits", "=", "1024", "hparams", ".", "bottleneck_shared_bits", "=", "1024", "-", "64", "hparams", ".", "bottleneck_shared_bits_start_...
272500b6efe353aeb638d2745ed56e519462ca31
train
autoencoder_ordered_text_small
Ordered discrete autoencoder model for text, small version.
tensor2tensor/models/research/autoencoders.py
def autoencoder_ordered_text_small(): """Ordered discrete autoencoder model for text, small version.""" hparams = autoencoder_ordered_text() hparams.bottleneck_bits = 32 hparams.num_hidden_layers = 3 hparams.hidden_size = 64 hparams.max_hidden_size = 512 hparams.bottleneck_noise = 0.0 hparams.autoregressive_mode = "conv5" hparams.sample_height = 4 return hparams
def autoencoder_ordered_text_small(): """Ordered discrete autoencoder model for text, small version.""" hparams = autoencoder_ordered_text() hparams.bottleneck_bits = 32 hparams.num_hidden_layers = 3 hparams.hidden_size = 64 hparams.max_hidden_size = 512 hparams.bottleneck_noise = 0.0 hparams.autoregressive_mode = "conv5" hparams.sample_height = 4 return hparams
[ "Ordered", "discrete", "autoencoder", "model", "for", "text", "small", "version", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1238-L1248
[ "def", "autoencoder_ordered_text_small", "(", ")", ":", "hparams", "=", "autoencoder_ordered_text", "(", ")", "hparams", ".", "bottleneck_bits", "=", "32", "hparams", ".", "num_hidden_layers", "=", "3", "hparams", ".", "hidden_size", "=", "64", "hparams", ".", "...
272500b6efe353aeb638d2745ed56e519462ca31
train
autoencoder_discrete_pong
Discrete autoencoder model for compressing pong frames.
tensor2tensor/models/research/autoencoders.py
def autoencoder_discrete_pong(): """Discrete autoencoder model for compressing pong frames.""" hparams = autoencoder_ordered_discrete() hparams.num_hidden_layers = 3 hparams.bottleneck_bits = 24 hparams.batch_size = 2 hparams.gan_loss_factor = 0.01 hparams.bottleneck_l2_factor = 0.001 hparams.add_hparam("video_modality_loss_cutoff", 0.02) return hparams
def autoencoder_discrete_pong(): """Discrete autoencoder model for compressing pong frames.""" hparams = autoencoder_ordered_discrete() hparams.num_hidden_layers = 3 hparams.bottleneck_bits = 24 hparams.batch_size = 2 hparams.gan_loss_factor = 0.01 hparams.bottleneck_l2_factor = 0.001 hparams.add_hparam("video_modality_loss_cutoff", 0.02) return hparams
[ "Discrete", "autoencoder", "model", "for", "compressing", "pong", "frames", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1261-L1270
[ "def", "autoencoder_discrete_pong", "(", ")", ":", "hparams", "=", "autoencoder_ordered_discrete", "(", ")", "hparams", ".", "num_hidden_layers", "=", "3", "hparams", ".", "bottleneck_bits", "=", "24", "hparams", ".", "batch_size", "=", "2", "hparams", ".", "gan...
272500b6efe353aeb638d2745ed56e519462ca31
train
autoencoder_discrete_tiny
Discrete autoencoder model for compressing pong frames for testing.
tensor2tensor/models/research/autoencoders.py
def autoencoder_discrete_tiny(): """Discrete autoencoder model for compressing pong frames for testing.""" hparams = autoencoder_ordered_discrete() hparams.num_hidden_layers = 2 hparams.bottleneck_bits = 24 hparams.batch_size = 2 hparams.gan_loss_factor = 0. hparams.bottleneck_l2_factor = 0.001 hparams.add_hparam("video_modality_loss_cutoff", 0.02) hparams.num_residual_layers = 1 hparams.hidden_size = 32 hparams.max_hidden_size = 64 return hparams
def autoencoder_discrete_tiny(): """Discrete autoencoder model for compressing pong frames for testing.""" hparams = autoencoder_ordered_discrete() hparams.num_hidden_layers = 2 hparams.bottleneck_bits = 24 hparams.batch_size = 2 hparams.gan_loss_factor = 0. hparams.bottleneck_l2_factor = 0.001 hparams.add_hparam("video_modality_loss_cutoff", 0.02) hparams.num_residual_layers = 1 hparams.hidden_size = 32 hparams.max_hidden_size = 64 return hparams
[ "Discrete", "autoencoder", "model", "for", "compressing", "pong", "frames", "for", "testing", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1274-L1286
[ "def", "autoencoder_discrete_tiny", "(", ")", ":", "hparams", "=", "autoencoder_ordered_discrete", "(", ")", "hparams", ".", "num_hidden_layers", "=", "2", "hparams", ".", "bottleneck_bits", "=", "24", "hparams", ".", "batch_size", "=", "2", "hparams", ".", "gan...
272500b6efe353aeb638d2745ed56e519462ca31
train
autoencoder_discrete_cifar
Discrete autoencoder model for compressing cifar.
tensor2tensor/models/research/autoencoders.py
def autoencoder_discrete_cifar(): """Discrete autoencoder model for compressing cifar.""" hparams = autoencoder_ordered_discrete() hparams.bottleneck_noise = 0.0 hparams.bottleneck_bits = 90 hparams.num_hidden_layers = 2 hparams.hidden_size = 256 hparams.num_residual_layers = 4 hparams.batch_size = 32 hparams.learning_rate_constant = 1.0 return hparams
def autoencoder_discrete_cifar(): """Discrete autoencoder model for compressing cifar.""" hparams = autoencoder_ordered_discrete() hparams.bottleneck_noise = 0.0 hparams.bottleneck_bits = 90 hparams.num_hidden_layers = 2 hparams.hidden_size = 256 hparams.num_residual_layers = 4 hparams.batch_size = 32 hparams.learning_rate_constant = 1.0 return hparams
[ "Discrete", "autoencoder", "model", "for", "compressing", "cifar", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1290-L1300
[ "def", "autoencoder_discrete_cifar", "(", ")", ":", "hparams", "=", "autoencoder_ordered_discrete", "(", ")", "hparams", ".", "bottleneck_noise", "=", "0.0", "hparams", ".", "bottleneck_bits", "=", "90", "hparams", ".", "num_hidden_layers", "=", "2", "hparams", "....
272500b6efe353aeb638d2745ed56e519462ca31
train
autoencoder_range
Tuning grid of the main autoencoder params.
tensor2tensor/models/research/autoencoders.py
def autoencoder_range(rhp): """Tuning grid of the main autoencoder params.""" rhp.set_float("dropout", 0.01, 0.3) rhp.set_float("gan_loss_factor", 0.01, 0.1) rhp.set_float("bottleneck_l2_factor", 0.001, 0.1, scale=rhp.LOG_SCALE) rhp.set_discrete("bottleneck_warmup_steps", [200, 2000]) rhp.set_float("gumbel_temperature", 0, 1) rhp.set_float("gumbel_noise_factor", 0, 0.5)
def autoencoder_range(rhp): """Tuning grid of the main autoencoder params.""" rhp.set_float("dropout", 0.01, 0.3) rhp.set_float("gan_loss_factor", 0.01, 0.1) rhp.set_float("bottleneck_l2_factor", 0.001, 0.1, scale=rhp.LOG_SCALE) rhp.set_discrete("bottleneck_warmup_steps", [200, 2000]) rhp.set_float("gumbel_temperature", 0, 1) rhp.set_float("gumbel_noise_factor", 0, 0.5)
[ "Tuning", "grid", "of", "the", "main", "autoencoder", "params", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/autoencoders.py#L1304-L1311
[ "def", "autoencoder_range", "(", "rhp", ")", ":", "rhp", ".", "set_float", "(", "\"dropout\"", ",", "0.01", ",", "0.3", ")", "rhp", ".", "set_float", "(", "\"gan_loss_factor\"", ",", "0.01", ",", "0.1", ")", "rhp", ".", "set_float", "(", "\"bottleneck_l2_f...
272500b6efe353aeb638d2745ed56e519462ca31
train
image_encoder
A stack of self attention layers.
tensor2tensor/models/research/vqa_attention.py
def image_encoder(image_feat, hparams, name="image_encoder", save_weights_to=None, make_image_summary=True): """A stack of self attention layers.""" x = image_feat with tf.variable_scope(name): for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers): with tf.variable_scope("layer_%d" % layer): with tf.variable_scope("self_attention"): y = vqa_layers.multihead_attention( common_layers.layer_preprocess(x, hparams), None, None, hparams.attention_key_channels or hparams.image_hidden_size, hparams.attention_value_channels or hparams.image_hidden_size, hparams.image_hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.self_attention_type, save_weights_to=save_weights_to, max_relative_position=None, make_image_summary=make_image_summary, dropout_broadcast_dims=None, max_length=None, vars_3d=False, scale_otproduct=hparams.scale_dotproduct) utils.collect_named_outputs("norms", "image_feat_self_attention", tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "image_feat_self_attention_zero_add", tf.norm(x, axis=-1)) with tf.variable_scope("ffn"): y = common_layers.dense_relu_dense( common_layers.layer_preprocess(x, hparams), hparams.image_filter_size, hparams.image_hidden_size, dropout=hparams.relu_dropout, dropout_broadcast_dims=None) utils.collect_named_outputs("norms", "image_feat_ffn", tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs("norms", "image_feat_ffn_zero_add", tf.norm(x, axis=-1)) # if normalization is done in layer_preprocess, then it should also be done # on the output, since the output can grow very large, being the sum of # a whole stack of unnormalized layer outputs. return common_layers.layer_preprocess(x, hparams)
def image_encoder(image_feat, hparams, name="image_encoder", save_weights_to=None, make_image_summary=True): """A stack of self attention layers.""" x = image_feat with tf.variable_scope(name): for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers): with tf.variable_scope("layer_%d" % layer): with tf.variable_scope("self_attention"): y = vqa_layers.multihead_attention( common_layers.layer_preprocess(x, hparams), None, None, hparams.attention_key_channels or hparams.image_hidden_size, hparams.attention_value_channels or hparams.image_hidden_size, hparams.image_hidden_size, hparams.num_heads, hparams.attention_dropout, attention_type=hparams.self_attention_type, save_weights_to=save_weights_to, max_relative_position=None, make_image_summary=make_image_summary, dropout_broadcast_dims=None, max_length=None, vars_3d=False, scale_otproduct=hparams.scale_dotproduct) utils.collect_named_outputs("norms", "image_feat_self_attention", tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs( "norms", "image_feat_self_attention_zero_add", tf.norm(x, axis=-1)) with tf.variable_scope("ffn"): y = common_layers.dense_relu_dense( common_layers.layer_preprocess(x, hparams), hparams.image_filter_size, hparams.image_hidden_size, dropout=hparams.relu_dropout, dropout_broadcast_dims=None) utils.collect_named_outputs("norms", "image_feat_ffn", tf.norm(y, axis=-1)) x = common_layers.layer_postprocess(x, y, hparams) utils.collect_named_outputs("norms", "image_feat_ffn_zero_add", tf.norm(x, axis=-1)) # if normalization is done in layer_preprocess, then it should also be done # on the output, since the output can grow very large, being the sum of # a whole stack of unnormalized layer outputs. return common_layers.layer_preprocess(x, hparams)
[ "A", "stack", "of", "self", "attention", "layers", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/vqa_attention.py#L182-L232
[ "def", "image_encoder", "(", "image_feat", ",", "hparams", ",", "name", "=", "\"image_encoder\"", ",", "save_weights_to", "=", "None", ",", "make_image_summary", "=", "True", ")", ":", "x", "=", "image_feat", "with", "tf", ".", "variable_scope", "(", "name", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
question_encoder
Question encoder, run LSTM encoder and get the last output as encoding.
tensor2tensor/models/research/vqa_attention.py
def question_encoder(question, hparams, name="encoder"): """Question encoder, run LSTM encoder and get the last output as encoding.""" with tf.variable_scope(name, "encoder", values=[question]): question = common_layers.flatten4d3d(question) padding = common_attention.embedding_to_padding(question) length = common_attention.padding_to_length(padding) max_question_length = hparams.max_question_length question = question[:, :max_question_length, :] actual_question_length = common_layers.shape_list(question)[1] length = tf.minimum(length, max_question_length) padding = [[0, 0], [0, max_question_length-actual_question_length], [0, 0]] question = tf.pad(question, padding) question_shape = question.get_shape().as_list() question_shape[1] = max_question_length question.set_shape(question_shape) # apply tanh dropout on question embedding question = tf.tanh(question) question = tf.nn.dropout(question, keep_prob=1.-hparams.dropout) question = [question[:, i, :] for i in range(max_question_length)] # rnn_layers = [_get_rnn_cell(hparams) # for _ in range(hparams.num_rnn_layers)] # rnn_multi_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers) rnn_cell = _get_rnn_cell(hparams) # outputs, _ = tf.nn.dynamic_rnn( # rnn_cell, question, length, dtype=tf.float32) _, state = tf.nn.static_rnn(rnn_cell, question, sequence_length=length, dtype=tf.float32) # outputs = [tf.expand_dims(output, axis=1) for output in outputs] # outputs = tf.concat(outputs, axis=1) # utils.collect_named_outputs("vqa_attention_debug", "question_output", # outputs) # utils.collect_named_outputs("vqa_attention_debug", "question_state", # state.h) # batch_size = common_layers.shape_list(outputs)[0] # row_indices = tf.range(batch_size) # # length - 1 as index # indices = tf.transpose([row_indices, tf.maximum(length-1, 0)]) # last_output = tf.gather_nd(outputs, indices) # utils.collect_named_outputs("vqa_attention_debug", # "question_final_output", last_output) return state.h
def question_encoder(question, hparams, name="encoder"): """Question encoder, run LSTM encoder and get the last output as encoding.""" with tf.variable_scope(name, "encoder", values=[question]): question = common_layers.flatten4d3d(question) padding = common_attention.embedding_to_padding(question) length = common_attention.padding_to_length(padding) max_question_length = hparams.max_question_length question = question[:, :max_question_length, :] actual_question_length = common_layers.shape_list(question)[1] length = tf.minimum(length, max_question_length) padding = [[0, 0], [0, max_question_length-actual_question_length], [0, 0]] question = tf.pad(question, padding) question_shape = question.get_shape().as_list() question_shape[1] = max_question_length question.set_shape(question_shape) # apply tanh dropout on question embedding question = tf.tanh(question) question = tf.nn.dropout(question, keep_prob=1.-hparams.dropout) question = [question[:, i, :] for i in range(max_question_length)] # rnn_layers = [_get_rnn_cell(hparams) # for _ in range(hparams.num_rnn_layers)] # rnn_multi_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers) rnn_cell = _get_rnn_cell(hparams) # outputs, _ = tf.nn.dynamic_rnn( # rnn_cell, question, length, dtype=tf.float32) _, state = tf.nn.static_rnn(rnn_cell, question, sequence_length=length, dtype=tf.float32) # outputs = [tf.expand_dims(output, axis=1) for output in outputs] # outputs = tf.concat(outputs, axis=1) # utils.collect_named_outputs("vqa_attention_debug", "question_output", # outputs) # utils.collect_named_outputs("vqa_attention_debug", "question_state", # state.h) # batch_size = common_layers.shape_list(outputs)[0] # row_indices = tf.range(batch_size) # # length - 1 as index # indices = tf.transpose([row_indices, tf.maximum(length-1, 0)]) # last_output = tf.gather_nd(outputs, indices) # utils.collect_named_outputs("vqa_attention_debug", # "question_final_output", last_output) return state.h
[ "Question", "encoder", "run", "LSTM", "encoder", "and", "get", "the", "last", "output", "as", "encoding", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/vqa_attention.py#L245-L295
[ "def", "question_encoder", "(", "question", ",", "hparams", ",", "name", "=", "\"encoder\"", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "\"encoder\"", ",", "values", "=", "[", "question", "]", ")", ":", "question", "=", "common_layers...
272500b6efe353aeb638d2745ed56e519462ca31
train
attn
Attention on image feature with question as query.
tensor2tensor/models/research/vqa_attention.py
def attn(image_feat, query, hparams, name="attn"): """Attention on image feature with question as query.""" with tf.variable_scope(name, "attn", values=[image_feat, query]): attn_dim = hparams.attn_dim num_glimps = hparams.num_glimps num_channels = common_layers.shape_list(image_feat)[-1] if len(common_layers.shape_list(image_feat)) == 4: image_feat = common_layers.flatten4d3d(image_feat) query = tf.expand_dims(query, 1) image_proj = common_attention.compute_attention_component( image_feat, attn_dim, name="image_proj") query_proj = common_attention.compute_attention_component( query, attn_dim, name="query_proj") h = tf.nn.relu(image_proj + query_proj) h_proj = common_attention.compute_attention_component( h, num_glimps, name="h_proj") p = tf.nn.softmax(h_proj, axis=1) image_ave = tf.matmul(image_feat, p, transpose_a=True) image_ave = tf.reshape(image_ave, [-1, num_channels*num_glimps]) return image_ave
def attn(image_feat, query, hparams, name="attn"): """Attention on image feature with question as query.""" with tf.variable_scope(name, "attn", values=[image_feat, query]): attn_dim = hparams.attn_dim num_glimps = hparams.num_glimps num_channels = common_layers.shape_list(image_feat)[-1] if len(common_layers.shape_list(image_feat)) == 4: image_feat = common_layers.flatten4d3d(image_feat) query = tf.expand_dims(query, 1) image_proj = common_attention.compute_attention_component( image_feat, attn_dim, name="image_proj") query_proj = common_attention.compute_attention_component( query, attn_dim, name="query_proj") h = tf.nn.relu(image_proj + query_proj) h_proj = common_attention.compute_attention_component( h, num_glimps, name="h_proj") p = tf.nn.softmax(h_proj, axis=1) image_ave = tf.matmul(image_feat, p, transpose_a=True) image_ave = tf.reshape(image_ave, [-1, num_channels*num_glimps]) return image_ave
[ "Attention", "on", "image", "feature", "with", "question", "as", "query", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/vqa_attention.py#L298-L318
[ "def", "attn", "(", "image_feat", ",", "query", ",", "hparams", ",", "name", "=", "\"attn\"", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "\"attn\"", ",", "values", "=", "[", "image_feat", ",", "query", "]", ")", ":", "attn_dim", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
mlp
Multi layer perceptron with dropout and relu activation.
tensor2tensor/models/research/vqa_attention.py
def mlp(feature, hparams, name="mlp"): """Multi layer perceptron with dropout and relu activation.""" with tf.variable_scope(name, "mlp", values=[feature]): num_mlp_layers = hparams.num_mlp_layers mlp_dim = hparams.mlp_dim for _ in range(num_mlp_layers): feature = common_layers.dense(feature, mlp_dim, activation=tf.nn.relu) feature = tf.nn.dropout(feature, keep_prob=1.-hparams.dropout) return feature
def mlp(feature, hparams, name="mlp"): """Multi layer perceptron with dropout and relu activation.""" with tf.variable_scope(name, "mlp", values=[feature]): num_mlp_layers = hparams.num_mlp_layers mlp_dim = hparams.mlp_dim for _ in range(num_mlp_layers): feature = common_layers.dense(feature, mlp_dim, activation=tf.nn.relu) feature = tf.nn.dropout(feature, keep_prob=1.-hparams.dropout) return feature
[ "Multi", "layer", "perceptron", "with", "dropout", "and", "relu", "activation", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/vqa_attention.py#L321-L329
[ "def", "mlp", "(", "feature", ",", "hparams", ",", "name", "=", "\"mlp\"", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "\"mlp\"", ",", "values", "=", "[", "feature", "]", ")", ":", "num_mlp_layers", "=", "hparams", ".", "num_mlp_la...
272500b6efe353aeb638d2745ed56e519462ca31
train
vqa_attention_base
VQA attention baseline hparams.
tensor2tensor/models/research/vqa_attention.py
def vqa_attention_base(): """VQA attention baseline hparams.""" hparams = common_hparams.basic_params1() hparams.batch_size = 128 hparams.use_fixed_batch_size = True, hparams.optimizer = "adam" hparams.optimizer_adam_beta1 = 0.9 hparams.optimizer_adam_beta2 = 0.999 hparams.optimizer_adam_epsilon = 1e-8 hparams.weight_decay = 0. hparams.clip_grad_norm = 0. hparams.initializer = "xavier" hparams.learning_rate = 0.5 hparams.learning_rate_schedule = "legacy" hparams.learning_rate_warmup_steps = 0 hparams.learning_rate_decay_scheme = "exp" hparams.learning_rate_decay_rate = 0.5 hparams.learning_rate_decay_steps = 50000 hparams.dropout = 0.5 hparams.summarize_grads = True hparams.summarize_vars = True # not used hparams hparams.label_smoothing = 0. hparams.multiply_embedding_mode = "" # add new hparams # preprocess hparams.add_hparam("resize_side", 512) hparams.add_hparam("height", 448) hparams.add_hparam("width", 448) hparams.add_hparam("distort", True) hparams.add_hparam("train_resnet", False) hparams.add_hparam("rnn_type", "lstm") hparams.add_hparam("num_rnn_layers", 1) hparams.add_hparam("max_question_length", 15) # lstm hidden size hparams.hidden_size = 512 hparams.add_hparam("attn_dim", 512) hparams.add_hparam("num_glimps", 2) hparams.add_hparam("num_mlp_layers", 1) hparams.add_hparam("mlp_dim", 1024) hparams.add_hparam("image_input_type", "image") hparams.add_hparam("image_model_fn", "resnet_v1_152") hparams.add_hparam("image_feat_size", 0) # self attention parts hparams.norm_type = "layer" hparams.layer_preprocess_sequence = "n" hparams.layer_postprocess_sequence = "da" hparams.layer_prepostprocess_dropout = 0.3 hparams.attention_dropout = 0.1 hparams.relu_dropout = 0.1 hparams.image_hidden_size = 2048 hparams.add_hparam("num_encoder_layers", 1) # Attention-related flags. 
hparams.add_hparam("num_heads", 8) hparams.add_hparam("attention_key_channels", 0) hparams.add_hparam("attention_value_channels", 0) hparams.add_hparam("image_filter_size", 1024) hparams.add_hparam("self_attention_type", "dot_product") hparams.add_hparam("scale_dotproduct", True) return hparams
def vqa_attention_base(): """VQA attention baseline hparams.""" hparams = common_hparams.basic_params1() hparams.batch_size = 128 hparams.use_fixed_batch_size = True, hparams.optimizer = "adam" hparams.optimizer_adam_beta1 = 0.9 hparams.optimizer_adam_beta2 = 0.999 hparams.optimizer_adam_epsilon = 1e-8 hparams.weight_decay = 0. hparams.clip_grad_norm = 0. hparams.initializer = "xavier" hparams.learning_rate = 0.5 hparams.learning_rate_schedule = "legacy" hparams.learning_rate_warmup_steps = 0 hparams.learning_rate_decay_scheme = "exp" hparams.learning_rate_decay_rate = 0.5 hparams.learning_rate_decay_steps = 50000 hparams.dropout = 0.5 hparams.summarize_grads = True hparams.summarize_vars = True # not used hparams hparams.label_smoothing = 0. hparams.multiply_embedding_mode = "" # add new hparams # preprocess hparams.add_hparam("resize_side", 512) hparams.add_hparam("height", 448) hparams.add_hparam("width", 448) hparams.add_hparam("distort", True) hparams.add_hparam("train_resnet", False) hparams.add_hparam("rnn_type", "lstm") hparams.add_hparam("num_rnn_layers", 1) hparams.add_hparam("max_question_length", 15) # lstm hidden size hparams.hidden_size = 512 hparams.add_hparam("attn_dim", 512) hparams.add_hparam("num_glimps", 2) hparams.add_hparam("num_mlp_layers", 1) hparams.add_hparam("mlp_dim", 1024) hparams.add_hparam("image_input_type", "image") hparams.add_hparam("image_model_fn", "resnet_v1_152") hparams.add_hparam("image_feat_size", 0) # self attention parts hparams.norm_type = "layer" hparams.layer_preprocess_sequence = "n" hparams.layer_postprocess_sequence = "da" hparams.layer_prepostprocess_dropout = 0.3 hparams.attention_dropout = 0.1 hparams.relu_dropout = 0.1 hparams.image_hidden_size = 2048 hparams.add_hparam("num_encoder_layers", 1) # Attention-related flags. 
hparams.add_hparam("num_heads", 8) hparams.add_hparam("attention_key_channels", 0) hparams.add_hparam("attention_value_channels", 0) hparams.add_hparam("image_filter_size", 1024) hparams.add_hparam("self_attention_type", "dot_product") hparams.add_hparam("scale_dotproduct", True) return hparams
[ "VQA", "attention", "baseline", "hparams", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/vqa_attention.py#L333-L400
[ "def", "vqa_attention_base", "(", ")", ":", "hparams", "=", "common_hparams", ".", "basic_params1", "(", ")", "hparams", ".", "batch_size", "=", "128", "hparams", ".", "use_fixed_batch_size", "=", "True", ",", "hparams", ".", "optimizer", "=", "\"adam\"", "hpa...
272500b6efe353aeb638d2745ed56e519462ca31
train
vqa_attention_base_range
Small range of hyperparameters.
tensor2tensor/models/research/vqa_attention.py
def vqa_attention_base_range(rhp): """Small range of hyperparameters.""" # After starting from base, set intervals for some parameters. rhp.set_float("learning_rate", 0.1, 1.0, scale=rhp.LOG_SCALE) rhp.set_float("clip_grad_norm", 0.1, 10, scale=rhp.LOG_SCALE) rhp.set_discrete("batch_size", [128, 256, 512, 1024]) rhp.set_float("weight_decay", 0.0, 1e-4) rhp.set_categorical("rnn_type", ["lstm", "lstm_layernorm"])
def vqa_attention_base_range(rhp): """Small range of hyperparameters.""" # After starting from base, set intervals for some parameters. rhp.set_float("learning_rate", 0.1, 1.0, scale=rhp.LOG_SCALE) rhp.set_float("clip_grad_norm", 0.1, 10, scale=rhp.LOG_SCALE) rhp.set_discrete("batch_size", [128, 256, 512, 1024]) rhp.set_float("weight_decay", 0.0, 1e-4) rhp.set_categorical("rnn_type", ["lstm", "lstm_layernorm"])
[ "Small", "range", "of", "hyperparameters", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/vqa_attention.py#L580-L587
[ "def", "vqa_attention_base_range", "(", "rhp", ")", ":", "# After starting from base, set intervals for some parameters.", "rhp", ".", "set_float", "(", "\"learning_rate\"", ",", "0.1", ",", "1.0", ",", "scale", "=", "rhp", ".", "LOG_SCALE", ")", "rhp", ".", "set_fl...
272500b6efe353aeb638d2745ed56e519462ca31
train
History.append
Append (step, value) pair to history for the given mode and metric.
tensor2tensor/trax/history.py
def append(self, mode, metric, step, value): """Append (step, value) pair to history for the given mode and metric.""" if mode not in self._values: self._values[mode] = collections.defaultdict(list) self._values[mode][metric].append((step, value))
def append(self, mode, metric, step, value): """Append (step, value) pair to history for the given mode and metric.""" if mode not in self._values: self._values[mode] = collections.defaultdict(list) self._values[mode][metric].append((step, value))
[ "Append", "(", "step", "value", ")", "pair", "to", "history", "for", "the", "given", "mode", "and", "metric", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/history.py#L52-L56
[ "def", "append", "(", "self", ",", "mode", ",", "metric", ",", "step", ",", "value", ")", ":", "if", "mode", "not", "in", "self", ".", "_values", ":", "self", ".", "_values", "[", "mode", "]", "=", "collections", ".", "defaultdict", "(", "list", ")...
272500b6efe353aeb638d2745ed56e519462ca31
train
History.get
Get the history for the given metric and mode.
tensor2tensor/trax/history.py
def get(self, mode, metric): """Get the history for the given metric and mode.""" if mode not in self._values: logging.info("Metric %s not found for mode %s", metric, mode) return [] return list(self._values[mode][metric])
def get(self, mode, metric): """Get the history for the given metric and mode.""" if mode not in self._values: logging.info("Metric %s not found for mode %s", metric, mode) return [] return list(self._values[mode][metric])
[ "Get", "the", "history", "for", "the", "given", "metric", "and", "mode", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/history.py#L58-L63
[ "def", "get", "(", "self", ",", "mode", ",", "metric", ")", ":", "if", "mode", "not", "in", "self", ".", "_values", ":", "logging", ".", "info", "(", "\"Metric %s not found for mode %s\"", ",", "metric", ",", "mode", ")", "return", "[", "]", "return", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
History.metrics_for_mode
Metrics available for a given mode.
tensor2tensor/trax/history.py
def metrics_for_mode(self, mode): """Metrics available for a given mode.""" if mode not in self._values: logging.info("Mode %s not found", mode) return [] return sorted(list(self._values[mode].keys()))
def metrics_for_mode(self, mode): """Metrics available for a given mode.""" if mode not in self._values: logging.info("Mode %s not found", mode) return [] return sorted(list(self._values[mode].keys()))
[ "Metrics", "available", "for", "a", "given", "mode", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/history.py#L70-L75
[ "def", "metrics_for_mode", "(", "self", ",", "mode", ")", ":", "if", "mode", "not", "in", "self", ".", "_values", ":", "logging", ".", "info", "(", "\"Mode %s not found\"", ",", "mode", ")", "return", "[", "]", "return", "sorted", "(", "list", "(", "se...
272500b6efe353aeb638d2745ed56e519462ca31
train
batch_norm_relu
Performs a batch normalization followed by a ReLU. Args: inputs: `Tensor` of shape `[batch, channels, ...]`. is_training: `bool` for whether the model is training. relu: `bool` if False, omits the ReLU operation. init_zero: `bool` if True, initializes scale parameter of batch normalization with 0 instead of 1 (default). data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. Returns: A normalized `Tensor` with the same `data_format`.
tensor2tensor/models/resnet.py
def batch_norm_relu(inputs, is_training, relu=True, init_zero=False, data_format="channels_first"): """Performs a batch normalization followed by a ReLU. Args: inputs: `Tensor` of shape `[batch, channels, ...]`. is_training: `bool` for whether the model is training. relu: `bool` if False, omits the ReLU operation. init_zero: `bool` if True, initializes scale parameter of batch normalization with 0 instead of 1 (default). data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. Returns: A normalized `Tensor` with the same `data_format`. """ if init_zero: gamma_initializer = tf.zeros_initializer() else: gamma_initializer = tf.ones_initializer() if data_format == "channels_first": axis = 1 else: axis = 3 inputs = layers().BatchNormalization( axis=axis, momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, center=True, scale=True, fused=True, gamma_initializer=gamma_initializer)(inputs, training=is_training) if relu: inputs = tf.nn.relu(inputs) return inputs
def batch_norm_relu(inputs, is_training, relu=True, init_zero=False, data_format="channels_first"): """Performs a batch normalization followed by a ReLU. Args: inputs: `Tensor` of shape `[batch, channels, ...]`. is_training: `bool` for whether the model is training. relu: `bool` if False, omits the ReLU operation. init_zero: `bool` if True, initializes scale parameter of batch normalization with 0 instead of 1 (default). data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. Returns: A normalized `Tensor` with the same `data_format`. """ if init_zero: gamma_initializer = tf.zeros_initializer() else: gamma_initializer = tf.ones_initializer() if data_format == "channels_first": axis = 1 else: axis = 3 inputs = layers().BatchNormalization( axis=axis, momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, center=True, scale=True, fused=True, gamma_initializer=gamma_initializer)(inputs, training=is_training) if relu: inputs = tf.nn.relu(inputs) return inputs
[ "Performs", "a", "batch", "normalization", "followed", "by", "a", "ReLU", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/resnet.py#L41-L81
[ "def", "batch_norm_relu", "(", "inputs", ",", "is_training", ",", "relu", "=", "True", ",", "init_zero", "=", "False", ",", "data_format", "=", "\"channels_first\"", ")", ":", "if", "init_zero", ":", "gamma_initializer", "=", "tf", ".", "zeros_initializer", "(...
272500b6efe353aeb638d2745ed56e519462ca31
train
conv2d_fixed_padding
Strided 2-D convolution with explicit padding. The padding is consistent and is based only on `kernel_size`, not on the dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone). Args: inputs: `Tensor` of size `[batch, channels, height_in, width_in]`. filters: `int` number of filters in the convolution. kernel_size: `int` size of the kernel to be used in the convolution. strides: `int` strides of the convolution. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. is_training: `bool` for whether the model is in training. Returns: A `Tensor` of shape `[batch, filters, height_out, width_out]`. Raises: Exception: if use_td is not valid.
tensor2tensor/models/resnet.py
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format="channels_first", use_td=False, targeting_rate=None, keep_prob=None, is_training=None): """Strided 2-D convolution with explicit padding. The padding is consistent and is based only on `kernel_size`, not on the dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone). Args: inputs: `Tensor` of size `[batch, channels, height_in, width_in]`. filters: `int` number of filters in the convolution. kernel_size: `int` size of the kernel to be used in the convolution. strides: `int` strides of the convolution. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. is_training: `bool` for whether the model is in training. Returns: A `Tensor` of shape `[batch, filters, height_out, width_out]`. Raises: Exception: if use_td is not valid. 
""" if strides > 1: inputs = fixed_padding(inputs, kernel_size, data_format=data_format) if use_td: inputs_shape = common_layers.shape_list(inputs) if use_td == "weight": if data_format == "channels_last": size = kernel_size * kernel_size * inputs_shape[-1] else: size = kernel_size * kernel_size * inputs_shape[1] targeting_count = targeting_rate * tf.to_float(size) targeting_fn = common_layers.weight_targeting elif use_td == "unit": targeting_count = targeting_rate * filters targeting_fn = common_layers.unit_targeting else: raise Exception("Unrecognized targeted dropout type: %s" % use_td) y = common_layers.td_conv( inputs, filters, kernel_size, targeting_count, targeting_fn, keep_prob, is_training, do_prune=True, strides=strides, padding=("SAME" if strides == 1 else "VALID"), data_format=data_format, use_bias=False, kernel_initializer=tf.variance_scaling_initializer()) else: y = layers().Conv2D( filters=filters, kernel_size=kernel_size, strides=strides, padding=("SAME" if strides == 1 else "VALID"), use_bias=False, kernel_initializer=tf.variance_scaling_initializer(), data_format=data_format)(inputs) return y
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format="channels_first", use_td=False, targeting_rate=None, keep_prob=None, is_training=None): """Strided 2-D convolution with explicit padding. The padding is consistent and is based only on `kernel_size`, not on the dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone). Args: inputs: `Tensor` of size `[batch, channels, height_in, width_in]`. filters: `int` number of filters in the convolution. kernel_size: `int` size of the kernel to be used in the convolution. strides: `int` strides of the convolution. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. is_training: `bool` for whether the model is in training. Returns: A `Tensor` of shape `[batch, filters, height_out, width_out]`. Raises: Exception: if use_td is not valid. 
""" if strides > 1: inputs = fixed_padding(inputs, kernel_size, data_format=data_format) if use_td: inputs_shape = common_layers.shape_list(inputs) if use_td == "weight": if data_format == "channels_last": size = kernel_size * kernel_size * inputs_shape[-1] else: size = kernel_size * kernel_size * inputs_shape[1] targeting_count = targeting_rate * tf.to_float(size) targeting_fn = common_layers.weight_targeting elif use_td == "unit": targeting_count = targeting_rate * filters targeting_fn = common_layers.unit_targeting else: raise Exception("Unrecognized targeted dropout type: %s" % use_td) y = common_layers.td_conv( inputs, filters, kernel_size, targeting_count, targeting_fn, keep_prob, is_training, do_prune=True, strides=strides, padding=("SAME" if strides == 1 else "VALID"), data_format=data_format, use_bias=False, kernel_initializer=tf.variance_scaling_initializer()) else: y = layers().Conv2D( filters=filters, kernel_size=kernel_size, strides=strides, padding=("SAME" if strides == 1 else "VALID"), use_bias=False, kernel_initializer=tf.variance_scaling_initializer(), data_format=data_format)(inputs) return y
[ "Strided", "2", "-", "D", "convolution", "with", "explicit", "padding", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/resnet.py#L112-L188
[ "def", "conv2d_fixed_padding", "(", "inputs", ",", "filters", ",", "kernel_size", ",", "strides", ",", "data_format", "=", "\"channels_first\"", ",", "use_td", "=", "False", ",", "targeting_rate", "=", "None", ",", "keep_prob", "=", "None", ",", "is_training", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
residual_block
Standard building block for residual networks with BN before convolutions. Args: inputs: `Tensor` of size `[batch, channels, height, width]`. filters: `int` number of filters for the first two convolutions. Note that the third and final convolution will use 4 times as many filters. is_training: `bool` for whether the model is in training. projection_shortcut: `function` to use for projection shortcuts (typically a 1x1 convolution to match the filter dimensions). If None, no projection is used and the input is passed as unchanged through the shortcut connection. strides: `int` block stride. If greater than 1, this block will ultimately downsample the input. final_block: unused parameter to keep the same function signature as `bottleneck_block`. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: The output `Tensor` of the block.
tensor2tensor/models/resnet.py
def residual_block(inputs, filters, is_training, projection_shortcut, strides, final_block, data_format="channels_first", use_td=False, targeting_rate=None, keep_prob=None): """Standard building block for residual networks with BN before convolutions. Args: inputs: `Tensor` of size `[batch, channels, height, width]`. filters: `int` number of filters for the first two convolutions. Note that the third and final convolution will use 4 times as many filters. is_training: `bool` for whether the model is in training. projection_shortcut: `function` to use for projection shortcuts (typically a 1x1 convolution to match the filter dimensions). If None, no projection is used and the input is passed as unchanged through the shortcut connection. strides: `int` block stride. If greater than 1, this block will ultimately downsample the input. final_block: unused parameter to keep the same function signature as `bottleneck_block`. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: The output `Tensor` of the block. 
""" del final_block shortcut = inputs inputs = batch_norm_relu(inputs, is_training, data_format=data_format) if projection_shortcut is not None: shortcut = projection_shortcut(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=strides, data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob, is_training=is_training) inputs = batch_norm_relu(inputs, is_training, data_format=data_format) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=1, data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob, is_training=is_training) return inputs + shortcut
def residual_block(inputs, filters, is_training, projection_shortcut, strides, final_block, data_format="channels_first", use_td=False, targeting_rate=None, keep_prob=None): """Standard building block for residual networks with BN before convolutions. Args: inputs: `Tensor` of size `[batch, channels, height, width]`. filters: `int` number of filters for the first two convolutions. Note that the third and final convolution will use 4 times as many filters. is_training: `bool` for whether the model is in training. projection_shortcut: `function` to use for projection shortcuts (typically a 1x1 convolution to match the filter dimensions). If None, no projection is used and the input is passed as unchanged through the shortcut connection. strides: `int` block stride. If greater than 1, this block will ultimately downsample the input. final_block: unused parameter to keep the same function signature as `bottleneck_block`. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: The output `Tensor` of the block. 
""" del final_block shortcut = inputs inputs = batch_norm_relu(inputs, is_training, data_format=data_format) if projection_shortcut is not None: shortcut = projection_shortcut(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=strides, data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob, is_training=is_training) inputs = batch_norm_relu(inputs, is_training, data_format=data_format) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=1, data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob, is_training=is_training) return inputs + shortcut
[ "Standard", "building", "block", "for", "residual", "networks", "with", "BN", "before", "convolutions", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/resnet.py#L191-L257
[ "def", "residual_block", "(", "inputs", ",", "filters", ",", "is_training", ",", "projection_shortcut", ",", "strides", ",", "final_block", ",", "data_format", "=", "\"channels_first\"", ",", "use_td", "=", "False", ",", "targeting_rate", "=", "None", ",", "keep...
272500b6efe353aeb638d2745ed56e519462ca31
train
bottleneck_block
Bottleneck block variant for residual networks with BN after convolutions. Args: inputs: `Tensor` of size `[batch, channels, height, width]`. filters: `int` number of filters for the first two convolutions. Note that the third and final convolution will use 4 times as many filters. is_training: `bool` for whether the model is in training. projection_shortcut: `function` to use for projection shortcuts (typically a 1x1 convolution to match the filter dimensions). If None, no projection is used and the input is passed as unchanged through the shortcut connection. strides: `int` block stride. If greater than 1, this block will ultimately downsample the input. final_block: `bool` set to True if it is this the final block in the group. This is changes the behavior of batch normalization initialization for the final batch norm in a block. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: The output `Tensor` of the block.
tensor2tensor/models/resnet.py
def bottleneck_block(inputs, filters, is_training, projection_shortcut, strides, final_block, data_format="channels_first", use_td=False, targeting_rate=None, keep_prob=None): """Bottleneck block variant for residual networks with BN after convolutions. Args: inputs: `Tensor` of size `[batch, channels, height, width]`. filters: `int` number of filters for the first two convolutions. Note that the third and final convolution will use 4 times as many filters. is_training: `bool` for whether the model is in training. projection_shortcut: `function` to use for projection shortcuts (typically a 1x1 convolution to match the filter dimensions). If None, no projection is used and the input is passed as unchanged through the shortcut connection. strides: `int` block stride. If greater than 1, this block will ultimately downsample the input. final_block: `bool` set to True if it is this the final block in the group. This is changes the behavior of batch normalization initialization for the final batch norm in a block. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: The output `Tensor` of the block. """ # TODO(chrisying): this block is technically the post-activation resnet-v1 # bottleneck unit. Test with v2 (pre-activation) and replace if there is no # difference for consistency. 
shortcut = inputs if projection_shortcut is not None: shortcut = projection_shortcut(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=1, strides=1, data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob, is_training=is_training) inputs = batch_norm_relu(inputs, is_training, data_format=data_format) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=strides, data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob, is_training=is_training) inputs = batch_norm_relu(inputs, is_training, data_format=data_format) inputs = conv2d_fixed_padding( inputs=inputs, filters=4 * filters, kernel_size=1, strides=1, data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob, is_training=is_training) inputs = batch_norm_relu( inputs, is_training, relu=False, init_zero=final_block, data_format=data_format) return tf.nn.relu(inputs + shortcut)
def bottleneck_block(inputs, filters, is_training, projection_shortcut, strides, final_block, data_format="channels_first", use_td=False, targeting_rate=None, keep_prob=None): """Bottleneck block variant for residual networks with BN after convolutions. Args: inputs: `Tensor` of size `[batch, channels, height, width]`. filters: `int` number of filters for the first two convolutions. Note that the third and final convolution will use 4 times as many filters. is_training: `bool` for whether the model is in training. projection_shortcut: `function` to use for projection shortcuts (typically a 1x1 convolution to match the filter dimensions). If None, no projection is used and the input is passed as unchanged through the shortcut connection. strides: `int` block stride. If greater than 1, this block will ultimately downsample the input. final_block: `bool` set to True if it is this the final block in the group. This is changes the behavior of batch normalization initialization for the final batch norm in a block. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: The output `Tensor` of the block. """ # TODO(chrisying): this block is technically the post-activation resnet-v1 # bottleneck unit. Test with v2 (pre-activation) and replace if there is no # difference for consistency. 
shortcut = inputs if projection_shortcut is not None: shortcut = projection_shortcut(inputs) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=1, strides=1, data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob, is_training=is_training) inputs = batch_norm_relu(inputs, is_training, data_format=data_format) inputs = conv2d_fixed_padding( inputs=inputs, filters=filters, kernel_size=3, strides=strides, data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob, is_training=is_training) inputs = batch_norm_relu(inputs, is_training, data_format=data_format) inputs = conv2d_fixed_padding( inputs=inputs, filters=4 * filters, kernel_size=1, strides=1, data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob, is_training=is_training) inputs = batch_norm_relu( inputs, is_training, relu=False, init_zero=final_block, data_format=data_format) return tf.nn.relu(inputs + shortcut)
[ "Bottleneck", "block", "variant", "for", "residual", "networks", "with", "BN", "after", "convolutions", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/resnet.py#L260-L345
[ "def", "bottleneck_block", "(", "inputs", ",", "filters", ",", "is_training", ",", "projection_shortcut", ",", "strides", ",", "final_block", ",", "data_format", "=", "\"channels_first\"", ",", "use_td", "=", "False", ",", "targeting_rate", "=", "None", ",", "ke...
272500b6efe353aeb638d2745ed56e519462ca31
train
block_layer
Creates one layer of blocks for the ResNet model. Args: inputs: `Tensor` of size `[batch, channels, height, width]`. filters: `int` number of filters for the first convolution of the layer. block_fn: `function` for the block to use within the model blocks: `int` number of blocks contained in the layer. strides: `int` stride to use for the first convolution of the layer. If greater than 1, this layer will downsample the input. is_training: `bool` for whether the model is training. name: `str`name for the Tensor output of the block layer. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: The output `Tensor` of the block layer.
tensor2tensor/models/resnet.py
def block_layer(inputs, filters, block_fn, blocks, strides, is_training, name, data_format="channels_first", use_td=False, targeting_rate=None, keep_prob=None): """Creates one layer of blocks for the ResNet model. Args: inputs: `Tensor` of size `[batch, channels, height, width]`. filters: `int` number of filters for the first convolution of the layer. block_fn: `function` for the block to use within the model blocks: `int` number of blocks contained in the layer. strides: `int` stride to use for the first convolution of the layer. If greater than 1, this layer will downsample the input. is_training: `bool` for whether the model is training. name: `str`name for the Tensor output of the block layer. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: The output `Tensor` of the block layer. 
""" # Bottleneck blocks end with 4x the number of filters as they start with filters_out = 4 * filters if block_fn is bottleneck_block else filters def projection_shortcut(inputs): """Project identity branch.""" inputs = conv2d_fixed_padding( inputs=inputs, filters=filters_out, kernel_size=1, strides=strides, data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob, is_training=is_training) return batch_norm_relu( inputs, is_training, relu=False, data_format=data_format) # Only the first block per block_layer uses projection_shortcut and strides inputs = block_fn( inputs, filters, is_training, projection_shortcut, strides, False, data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) for i in range(1, blocks): inputs = block_fn( inputs, filters, is_training, None, 1, (i + 1 == blocks), data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) return tf.identity(inputs, name)
def block_layer(inputs, filters, block_fn, blocks, strides, is_training, name, data_format="channels_first", use_td=False, targeting_rate=None, keep_prob=None): """Creates one layer of blocks for the ResNet model. Args: inputs: `Tensor` of size `[batch, channels, height, width]`. filters: `int` number of filters for the first convolution of the layer. block_fn: `function` for the block to use within the model blocks: `int` number of blocks contained in the layer. strides: `int` stride to use for the first convolution of the layer. If greater than 1, this layer will downsample the input. is_training: `bool` for whether the model is training. name: `str`name for the Tensor output of the block layer. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: The output `Tensor` of the block layer. 
""" # Bottleneck blocks end with 4x the number of filters as they start with filters_out = 4 * filters if block_fn is bottleneck_block else filters def projection_shortcut(inputs): """Project identity branch.""" inputs = conv2d_fixed_padding( inputs=inputs, filters=filters_out, kernel_size=1, strides=strides, data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob, is_training=is_training) return batch_norm_relu( inputs, is_training, relu=False, data_format=data_format) # Only the first block per block_layer uses projection_shortcut and strides inputs = block_fn( inputs, filters, is_training, projection_shortcut, strides, False, data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) for i in range(1, blocks): inputs = block_fn( inputs, filters, is_training, None, 1, (i + 1 == blocks), data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) return tf.identity(inputs, name)
[ "Creates", "one", "layer", "of", "blocks", "for", "the", "ResNet", "model", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/resnet.py#L348-L424
[ "def", "block_layer", "(", "inputs", ",", "filters", ",", "block_fn", ",", "blocks", ",", "strides", ",", "is_training", ",", "name", ",", "data_format", "=", "\"channels_first\"", ",", "use_td", "=", "False", ",", "targeting_rate", "=", "None", ",", "keep_p...
272500b6efe353aeb638d2745ed56e519462ca31
train
resnet_v2
Resnet model. Args: inputs: `Tensor` images. block_fn: `function` for the block to use within the model. Either `residual_block` or `bottleneck_block`. layer_blocks: list of 3 or 4 `int`s denoting the number of blocks to include in each of the 3 or 4 block groups. Each group consists of blocks that take inputs of the same resolution. filters: list of 4 or 5 `int`s denoting the number of filter to include in block. data_format: `str`, "channels_first" `[batch, channels, height, width]` or "channels_last" `[batch, height, width, channels]`. is_training: bool, build in training mode or not. is_cifar: bool, whether the data is CIFAR or not. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: Pre-logit activations.
tensor2tensor/models/resnet.py
def resnet_v2(inputs, block_fn, layer_blocks, filters, data_format="channels_first", is_training=False, is_cifar=False, use_td=False, targeting_rate=None, keep_prob=None): """Resnet model. Args: inputs: `Tensor` images. block_fn: `function` for the block to use within the model. Either `residual_block` or `bottleneck_block`. layer_blocks: list of 3 or 4 `int`s denoting the number of blocks to include in each of the 3 or 4 block groups. Each group consists of blocks that take inputs of the same resolution. filters: list of 4 or 5 `int`s denoting the number of filter to include in block. data_format: `str`, "channels_first" `[batch, channels, height, width]` or "channels_last" `[batch, height, width, channels]`. is_training: bool, build in training mode or not. is_cifar: bool, whether the data is CIFAR or not. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: Pre-logit activations. 
""" inputs = block_layer( inputs=inputs, filters=filters[1], block_fn=block_fn, blocks=layer_blocks[0], strides=1, is_training=is_training, name="block_layer1", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) inputs = block_layer( inputs=inputs, filters=filters[2], block_fn=block_fn, blocks=layer_blocks[1], strides=2, is_training=is_training, name="block_layer2", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) inputs = block_layer( inputs=inputs, filters=filters[3], block_fn=block_fn, blocks=layer_blocks[2], strides=2, is_training=is_training, name="block_layer3", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) if not is_cifar: inputs = block_layer( inputs=inputs, filters=filters[4], block_fn=block_fn, blocks=layer_blocks[3], strides=2, is_training=is_training, name="block_layer4", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) return inputs
def resnet_v2(inputs, block_fn, layer_blocks, filters, data_format="channels_first", is_training=False, is_cifar=False, use_td=False, targeting_rate=None, keep_prob=None): """Resnet model. Args: inputs: `Tensor` images. block_fn: `function` for the block to use within the model. Either `residual_block` or `bottleneck_block`. layer_blocks: list of 3 or 4 `int`s denoting the number of blocks to include in each of the 3 or 4 block groups. Each group consists of blocks that take inputs of the same resolution. filters: list of 4 or 5 `int`s denoting the number of filter to include in block. data_format: `str`, "channels_first" `[batch, channels, height, width]` or "channels_last" `[batch, height, width, channels]`. is_training: bool, build in training mode or not. is_cifar: bool, whether the data is CIFAR or not. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. Returns: Pre-logit activations. 
""" inputs = block_layer( inputs=inputs, filters=filters[1], block_fn=block_fn, blocks=layer_blocks[0], strides=1, is_training=is_training, name="block_layer1", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) inputs = block_layer( inputs=inputs, filters=filters[2], block_fn=block_fn, blocks=layer_blocks[1], strides=2, is_training=is_training, name="block_layer2", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) inputs = block_layer( inputs=inputs, filters=filters[3], block_fn=block_fn, blocks=layer_blocks[2], strides=2, is_training=is_training, name="block_layer3", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) if not is_cifar: inputs = block_layer( inputs=inputs, filters=filters[4], block_fn=block_fn, blocks=layer_blocks[3], strides=2, is_training=is_training, name="block_layer4", data_format=data_format, use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob) return inputs
[ "Resnet", "model", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/resnet.py#L427-L511
[ "def", "resnet_v2", "(", "inputs", ",", "block_fn", ",", "layer_blocks", ",", "filters", ",", "data_format", "=", "\"channels_first\"", ",", "is_training", "=", "False", ",", "is_cifar", "=", "False", ",", "use_td", "=", "False", ",", "targeting_rate", "=", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
resnet_imagenet_34_td_weight_05_05
Set of hyperparameters.
tensor2tensor/models/resnet.py
def resnet_imagenet_34_td_weight_05_05(): """Set of hyperparameters.""" hp = resnet_imagenet_34() hp.use_td = "weight" hp.targeting_rate = 0.5 hp.keep_prob = 0.5 return hp
def resnet_imagenet_34_td_weight_05_05(): """Set of hyperparameters.""" hp = resnet_imagenet_34() hp.use_td = "weight" hp.targeting_rate = 0.5 hp.keep_prob = 0.5 return hp
[ "Set", "of", "hyperparameters", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/resnet.py#L679-L686
[ "def", "resnet_imagenet_34_td_weight_05_05", "(", ")", ":", "hp", "=", "resnet_imagenet_34", "(", ")", "hp", ".", "use_td", "=", "\"weight\"", "hp", ".", "targeting_rate", "=", "0.5", "hp", ".", "keep_prob", "=", "0.5", "return", "hp" ]
272500b6efe353aeb638d2745ed56e519462ca31
train
resnet_imagenet_34_td_unit_05_05
Set of hyperparameters.
tensor2tensor/models/resnet.py
def resnet_imagenet_34_td_unit_05_05(): """Set of hyperparameters.""" hp = resnet_imagenet_34() hp.use_td = "unit" hp.targeting_rate = 0.5 hp.keep_prob = 0.5 return hp
def resnet_imagenet_34_td_unit_05_05(): """Set of hyperparameters.""" hp = resnet_imagenet_34() hp.use_td = "unit" hp.targeting_rate = 0.5 hp.keep_prob = 0.5 return hp
[ "Set", "of", "hyperparameters", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/resnet.py#L690-L697
[ "def", "resnet_imagenet_34_td_unit_05_05", "(", ")", ":", "hp", "=", "resnet_imagenet_34", "(", ")", "hp", ".", "use_td", "=", "\"unit\"", "hp", ".", "targeting_rate", "=", "0.5", "hp", ".", "keep_prob", "=", "0.5", "return", "hp" ]
272500b6efe353aeb638d2745ed56e519462ca31
train
resnet_imagenet_34_td_unit_no_drop
Set of hyperparameters.
tensor2tensor/models/resnet.py
def resnet_imagenet_34_td_unit_no_drop(): """Set of hyperparameters.""" hp = resnet_imagenet_34() hp.use_td = "unit" hp.targeting_rate = 0.0 hp.keep_prob = 1.0 return hp
def resnet_imagenet_34_td_unit_no_drop(): """Set of hyperparameters.""" hp = resnet_imagenet_34() hp.use_td = "unit" hp.targeting_rate = 0.0 hp.keep_prob = 1.0 return hp
[ "Set", "of", "hyperparameters", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/resnet.py#L701-L708
[ "def", "resnet_imagenet_34_td_unit_no_drop", "(", ")", ":", "hp", "=", "resnet_imagenet_34", "(", ")", "hp", ".", "use_td", "=", "\"unit\"", "hp", ".", "targeting_rate", "=", "0.0", "hp", ".", "keep_prob", "=", "1.0", "return", "hp" ]
272500b6efe353aeb638d2745ed56e519462ca31
train
resnet_cifar_15
Set of hyperparameters.
tensor2tensor/models/resnet.py
def resnet_cifar_15(): """Set of hyperparameters.""" hp = resnet_base() hp.block_fn = "residual" hp.is_cifar = True hp.layer_sizes = [2, 2, 2] hp.filter_sizes = [16, 32, 64, 128] return hp
def resnet_cifar_15(): """Set of hyperparameters.""" hp = resnet_base() hp.block_fn = "residual" hp.is_cifar = True hp.layer_sizes = [2, 2, 2] hp.filter_sizes = [16, 32, 64, 128] return hp
[ "Set", "of", "hyperparameters", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/resnet.py#L719-L727
[ "def", "resnet_cifar_15", "(", ")", ":", "hp", "=", "resnet_base", "(", ")", "hp", ".", "block_fn", "=", "\"residual\"", "hp", ".", "is_cifar", "=", "True", "hp", ".", "layer_sizes", "=", "[", "2", ",", "2", ",", "2", "]", "hp", ".", "filter_sizes", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
_len_lcs
Returns the length of the Longest Common Subsequence between two seqs. Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence Args: x: sequence of words y: sequence of words Returns integer: Length of LCS between x and y
tensor2tensor/utils/rouge.py
def _len_lcs(x, y): """Returns the length of the Longest Common Subsequence between two seqs. Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence Args: x: sequence of words y: sequence of words Returns integer: Length of LCS between x and y """ table = _lcs(x, y) n, m = len(x), len(y) return table[n, m]
def _len_lcs(x, y): """Returns the length of the Longest Common Subsequence between two seqs. Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence Args: x: sequence of words y: sequence of words Returns integer: Length of LCS between x and y """ table = _lcs(x, y) n, m = len(x), len(y) return table[n, m]
[ "Returns", "the", "length", "of", "the", "Longest", "Common", "Subsequence", "between", "two", "seqs", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/rouge.py#L33-L47
[ "def", "_len_lcs", "(", "x", ",", "y", ")", ":", "table", "=", "_lcs", "(", "x", ",", "y", ")", "n", ",", "m", "=", "len", "(", "x", ")", ",", "len", "(", "y", ")", "return", "table", "[", "n", ",", "m", "]" ]
272500b6efe353aeb638d2745ed56e519462ca31
train
_lcs
Computes the length of the LCS between two seqs. The implementation below uses a DP programming algorithm and runs in O(nm) time where n = len(x) and m = len(y). Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence Args: x: collection of words y: collection of words Returns: Table of dictionary of coord and len lcs
tensor2tensor/utils/rouge.py
def _lcs(x, y): """Computes the length of the LCS between two seqs. The implementation below uses a DP programming algorithm and runs in O(nm) time where n = len(x) and m = len(y). Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence Args: x: collection of words y: collection of words Returns: Table of dictionary of coord and len lcs """ n, m = len(x), len(y) table = {} for i in range(n + 1): for j in range(m + 1): if i == 0 or j == 0: table[i, j] = 0 elif x[i - 1] == y[j - 1]: table[i, j] = table[i - 1, j - 1] + 1 else: table[i, j] = max(table[i - 1, j], table[i, j - 1]) return table
def _lcs(x, y): """Computes the length of the LCS between two seqs. The implementation below uses a DP programming algorithm and runs in O(nm) time where n = len(x) and m = len(y). Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence Args: x: collection of words y: collection of words Returns: Table of dictionary of coord and len lcs """ n, m = len(x), len(y) table = {} for i in range(n + 1): for j in range(m + 1): if i == 0 or j == 0: table[i, j] = 0 elif x[i - 1] == y[j - 1]: table[i, j] = table[i - 1, j - 1] + 1 else: table[i, j] = max(table[i - 1, j], table[i, j - 1]) return table
[ "Computes", "the", "length", "of", "the", "LCS", "between", "two", "seqs", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/rouge.py#L50-L74
[ "def", "_lcs", "(", "x", ",", "y", ")", ":", "n", ",", "m", "=", "len", "(", "x", ")", ",", "len", "(", "y", ")", "table", "=", "{", "}", "for", "i", "in", "range", "(", "n", "+", "1", ")", ":", "for", "j", "in", "range", "(", "m", "+...
272500b6efe353aeb638d2745ed56e519462ca31
train
rouge_l_sentence_level
Computes ROUGE-L (sentence level) of two collections of sentences. Source: https://www.microsoft.com/en-us/research/publication/ rouge-a-package-for-automatic-evaluation-of-summaries/ Calculated according to: R_lcs = LCS(X,Y)/m P_lcs = LCS(X,Y)/n F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs) where: X = reference summary Y = Candidate summary m = length of reference summary n = length of candidate summary Args: eval_sentences: The sentences that have been picked by the summarizer ref_sentences: The sentences from the reference set Returns: A float: F_lcs
tensor2tensor/utils/rouge.py
def rouge_l_sentence_level(eval_sentences, ref_sentences): """Computes ROUGE-L (sentence level) of two collections of sentences. Source: https://www.microsoft.com/en-us/research/publication/ rouge-a-package-for-automatic-evaluation-of-summaries/ Calculated according to: R_lcs = LCS(X,Y)/m P_lcs = LCS(X,Y)/n F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs) where: X = reference summary Y = Candidate summary m = length of reference summary n = length of candidate summary Args: eval_sentences: The sentences that have been picked by the summarizer ref_sentences: The sentences from the reference set Returns: A float: F_lcs """ f1_scores = [] for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences): m = len(ref_sentence) n = len(eval_sentence) lcs = _len_lcs(eval_sentence, ref_sentence) f1_scores.append(_f_lcs(lcs, m, n)) return np.mean(f1_scores, dtype=np.float32)
def rouge_l_sentence_level(eval_sentences, ref_sentences): """Computes ROUGE-L (sentence level) of two collections of sentences. Source: https://www.microsoft.com/en-us/research/publication/ rouge-a-package-for-automatic-evaluation-of-summaries/ Calculated according to: R_lcs = LCS(X,Y)/m P_lcs = LCS(X,Y)/n F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs) where: X = reference summary Y = Candidate summary m = length of reference summary n = length of candidate summary Args: eval_sentences: The sentences that have been picked by the summarizer ref_sentences: The sentences from the reference set Returns: A float: F_lcs """ f1_scores = [] for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences): m = len(ref_sentence) n = len(eval_sentence) lcs = _len_lcs(eval_sentence, ref_sentence) f1_scores.append(_f_lcs(lcs, m, n)) return np.mean(f1_scores, dtype=np.float32)
[ "Computes", "ROUGE", "-", "L", "(", "sentence", "level", ")", "of", "two", "collections", "of", "sentences", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/rouge.py#L100-L131
[ "def", "rouge_l_sentence_level", "(", "eval_sentences", ",", "ref_sentences", ")", ":", "f1_scores", "=", "[", "]", "for", "eval_sentence", ",", "ref_sentence", "in", "zip", "(", "eval_sentences", ",", "ref_sentences", ")", ":", "m", "=", "len", "(", "ref_sent...
272500b6efe353aeb638d2745ed56e519462ca31
train
rouge_l_fscore
ROUGE scores computation between labels and predictions. This is an approximate ROUGE scoring method since we do not glue word pieces or decode the ids and tokenize the output. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: rouge_l_fscore: approx rouge-l f1 score.
tensor2tensor/utils/rouge.py
def rouge_l_fscore(predictions, labels, **unused_kwargs): """ROUGE scores computation between labels and predictions. This is an approximate ROUGE scoring method since we do not glue word pieces or decode the ids and tokenize the output. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: rouge_l_fscore: approx rouge-l f1 score. """ outputs = tf.to_int32(tf.argmax(predictions, axis=-1)) # Convert the outputs and labels to a [batch_size, input_length] tensor. outputs = tf.squeeze(outputs, axis=[-1, -2]) labels = tf.squeeze(labels, axis=[-1, -2]) rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels), tf.float32) return rouge_l_f_score, tf.constant(1.0)
def rouge_l_fscore(predictions, labels, **unused_kwargs): """ROUGE scores computation between labels and predictions. This is an approximate ROUGE scoring method since we do not glue word pieces or decode the ids and tokenize the output. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: rouge_l_fscore: approx rouge-l f1 score. """ outputs = tf.to_int32(tf.argmax(predictions, axis=-1)) # Convert the outputs and labels to a [batch_size, input_length] tensor. outputs = tf.squeeze(outputs, axis=[-1, -2]) labels = tf.squeeze(labels, axis=[-1, -2]) rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels), tf.float32) return rouge_l_f_score, tf.constant(1.0)
[ "ROUGE", "scores", "computation", "between", "labels", "and", "predictions", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/rouge.py#L134-L153
[ "def", "rouge_l_fscore", "(", "predictions", ",", "labels", ",", "*", "*", "unused_kwargs", ")", ":", "outputs", "=", "tf", ".", "to_int32", "(", "tf", ".", "argmax", "(", "predictions", ",", "axis", "=", "-", "1", ")", ")", "# Convert the outputs and labe...
272500b6efe353aeb638d2745ed56e519462ca31
train
_get_ngrams
Calculates n-grams. Args: n: which n-grams to calculate text: An array of tokens Returns: A set of n-grams
tensor2tensor/utils/rouge.py
def _get_ngrams(n, text): """Calculates n-grams. Args: n: which n-grams to calculate text: An array of tokens Returns: A set of n-grams """ ngram_set = set() text_length = len(text) max_index_ngram_start = text_length - n for i in range(max_index_ngram_start + 1): ngram_set.add(tuple(text[i:i + n])) return ngram_set
def _get_ngrams(n, text): """Calculates n-grams. Args: n: which n-grams to calculate text: An array of tokens Returns: A set of n-grams """ ngram_set = set() text_length = len(text) max_index_ngram_start = text_length - n for i in range(max_index_ngram_start + 1): ngram_set.add(tuple(text[i:i + n])) return ngram_set
[ "Calculates", "n", "-", "grams", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/rouge.py#L156-L171
[ "def", "_get_ngrams", "(", "n", ",", "text", ")", ":", "ngram_set", "=", "set", "(", ")", "text_length", "=", "len", "(", "text", ")", "max_index_ngram_start", "=", "text_length", "-", "n", "for", "i", "in", "range", "(", "max_index_ngram_start", "+", "1...
272500b6efe353aeb638d2745ed56e519462ca31
train
rouge_2_fscore
ROUGE-2 F1 score computation between labels and predictions. This is an approximate ROUGE scoring method since we do not glue word pieces or decode the ids and tokenize the output. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: rouge2_fscore: approx rouge-2 f1 score.
tensor2tensor/utils/rouge.py
def rouge_2_fscore(predictions, labels, **unused_kwargs): """ROUGE-2 F1 score computation between labels and predictions. This is an approximate ROUGE scoring method since we do not glue word pieces or decode the ids and tokenize the output. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: rouge2_fscore: approx rouge-2 f1 score. """ outputs = tf.to_int32(tf.argmax(predictions, axis=-1)) # Convert the outputs and labels to a [batch_size, input_length] tensor. outputs = tf.squeeze(outputs, axis=[-1, -2]) labels = tf.squeeze(labels, axis=[-1, -2]) rouge_2_f_score = tf.py_func(rouge_n, (outputs, labels), tf.float32) return rouge_2_f_score, tf.constant(1.0)
def rouge_2_fscore(predictions, labels, **unused_kwargs): """ROUGE-2 F1 score computation between labels and predictions. This is an approximate ROUGE scoring method since we do not glue word pieces or decode the ids and tokenize the output. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: rouge2_fscore: approx rouge-2 f1 score. """ outputs = tf.to_int32(tf.argmax(predictions, axis=-1)) # Convert the outputs and labels to a [batch_size, input_length] tensor. outputs = tf.squeeze(outputs, axis=[-1, -2]) labels = tf.squeeze(labels, axis=[-1, -2]) rouge_2_f_score = tf.py_func(rouge_n, (outputs, labels), tf.float32) return rouge_2_f_score, tf.constant(1.0)
[ "ROUGE", "-", "2", "F1", "score", "computation", "between", "labels", "and", "predictions", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/rouge.py#L217-L236
[ "def", "rouge_2_fscore", "(", "predictions", ",", "labels", ",", "*", "*", "unused_kwargs", ")", ":", "outputs", "=", "tf", ".", "to_int32", "(", "tf", ".", "argmax", "(", "predictions", ",", "axis", "=", "-", "1", ")", ")", "# Convert the outputs and labe...
272500b6efe353aeb638d2745ed56e519462ca31
train
normalize_example_nlp
Normalize the examples from different tasks so they can be merged. This function is specific to NLP tasks and normalizes them so that in the end the example only has "targets" and "task_id". For tasks that originally have inputs, this is done by appending task_id to the inputs and prepending targets, so normalized_targets = inputs task_id targets. For classification tasks, targets are constructed by spelling out the class. Args: task: the Problem class of the task we are normalizing. example: a dictionary of tensors, the example to normalize. is_infer: bool, whether we are performing inference or not. vocab_type: the type of vocabulary in use. vocab_offset: integer, offset index for subword vocabularies. max_input_length: maximum length to cut inputs to. max_target_length: maximum length to cut targets to. fixed_train_length: set length to this size if > 0. Returns: a dictionary of tensors, like example, after normalizing, which in this case means that it only has "targets" and "task_id" as feature.
tensor2tensor/data_generators/multi_problem.py
def normalize_example_nlp(task, example, is_infer, vocab_type, vocab_offset, max_input_length, max_target_length, fixed_train_length): """Normalize the examples from different tasks so they can be merged. This function is specific to NLP tasks and normalizes them so that in the end the example only has "targets" and "task_id". For tasks that originally have inputs, this is done by appending task_id to the inputs and prepending targets, so normalized_targets = inputs task_id targets. For classification tasks, targets are constructed by spelling out the class. Args: task: the Problem class of the task we are normalizing. example: a dictionary of tensors, the example to normalize. is_infer: bool, whether we are performing inference or not. vocab_type: the type of vocabulary in use. vocab_offset: integer, offset index for subword vocabularies. max_input_length: maximum length to cut inputs to. max_target_length: maximum length to cut targets to. fixed_train_length: set length to this size if > 0. Returns: a dictionary of tensors, like example, after normalizing, which in this case means that it only has "targets" and "task_id" as feature. """ if task.has_inputs: example["inputs"] = example["inputs"][:-1] # remove EOS token if hasattr(task, "class_labels"): if vocab_type == text_problems.VocabType.CHARACTER: # TODO(urvashik): handle the case where num_labels > 9 example["targets"] = tf.cast(discretization.int_to_bit( example["targets"], 1, base=10) + 50, tf.int64) example["targets"] = tf.squeeze(example["targets"], axis=[-1]) elif vocab_type == text_problems.VocabType.SUBWORD: example["targets"] = vocab_offset + example["targets"] else: # sequence with inputs and targets eg: summarization if task.has_inputs: if max_input_length > 0: example["inputs"] = example["inputs"][:max_input_length] # Do not truncate targets during inference with beam decoding. 
if max_target_length > 0 and not is_infer: example["targets"] = example["targets"][:max_target_length] def make_constant_shape(x, size): x = x[:size] xlen = tf.shape(x)[0] x = tf.pad(x, [[0, size - xlen]]) return tf.reshape(x, [size]) if task.has_inputs: if is_infer: concat_list = [example["inputs"], [task.task_id]] example["inputs"] = tf.concat(concat_list, axis=0) else: inputs = example.pop("inputs") concat_list = [inputs, [task.task_id], example["targets"]] example["targets"] = tf.concat(concat_list, axis=0) if fixed_train_length > 0: example["targets"] = make_constant_shape( example["targets"], fixed_train_length) else: concat_list = [[task.task_id], example["targets"]] example["targets"] = tf.concat(concat_list, axis=0) if not is_infer and fixed_train_length > 0: example["targets"] = make_constant_shape( example["targets"], fixed_train_length) example["task_id"] = tf.constant([task.task_id], dtype=tf.int64) return example
def normalize_example_nlp(task, example, is_infer, vocab_type, vocab_offset, max_input_length, max_target_length, fixed_train_length): """Normalize the examples from different tasks so they can be merged. This function is specific to NLP tasks and normalizes them so that in the end the example only has "targets" and "task_id". For tasks that originally have inputs, this is done by appending task_id to the inputs and prepending targets, so normalized_targets = inputs task_id targets. For classification tasks, targets are constructed by spelling out the class. Args: task: the Problem class of the task we are normalizing. example: a dictionary of tensors, the example to normalize. is_infer: bool, whether we are performing inference or not. vocab_type: the type of vocabulary in use. vocab_offset: integer, offset index for subword vocabularies. max_input_length: maximum length to cut inputs to. max_target_length: maximum length to cut targets to. fixed_train_length: set length to this size if > 0. Returns: a dictionary of tensors, like example, after normalizing, which in this case means that it only has "targets" and "task_id" as feature. """ if task.has_inputs: example["inputs"] = example["inputs"][:-1] # remove EOS token if hasattr(task, "class_labels"): if vocab_type == text_problems.VocabType.CHARACTER: # TODO(urvashik): handle the case where num_labels > 9 example["targets"] = tf.cast(discretization.int_to_bit( example["targets"], 1, base=10) + 50, tf.int64) example["targets"] = tf.squeeze(example["targets"], axis=[-1]) elif vocab_type == text_problems.VocabType.SUBWORD: example["targets"] = vocab_offset + example["targets"] else: # sequence with inputs and targets eg: summarization if task.has_inputs: if max_input_length > 0: example["inputs"] = example["inputs"][:max_input_length] # Do not truncate targets during inference with beam decoding. 
if max_target_length > 0 and not is_infer: example["targets"] = example["targets"][:max_target_length] def make_constant_shape(x, size): x = x[:size] xlen = tf.shape(x)[0] x = tf.pad(x, [[0, size - xlen]]) return tf.reshape(x, [size]) if task.has_inputs: if is_infer: concat_list = [example["inputs"], [task.task_id]] example["inputs"] = tf.concat(concat_list, axis=0) else: inputs = example.pop("inputs") concat_list = [inputs, [task.task_id], example["targets"]] example["targets"] = tf.concat(concat_list, axis=0) if fixed_train_length > 0: example["targets"] = make_constant_shape( example["targets"], fixed_train_length) else: concat_list = [[task.task_id], example["targets"]] example["targets"] = tf.concat(concat_list, axis=0) if not is_infer and fixed_train_length > 0: example["targets"] = make_constant_shape( example["targets"], fixed_train_length) example["task_id"] = tf.constant([task.task_id], dtype=tf.int64) return example
[ "Normalize", "the", "examples", "from", "different", "tasks", "so", "they", "can", "be", "merged", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem.py#L38-L108
[ "def", "normalize_example_nlp", "(", "task", ",", "example", ",", "is_infer", ",", "vocab_type", ",", "vocab_offset", ",", "max_input_length", ",", "max_target_length", ",", "fixed_train_length", ")", ":", "if", "task", ".", "has_inputs", ":", "example", "[", "\...
272500b6efe353aeb638d2745ed56e519462ca31
train
flatten_zip_dataset
A list of examples to a dataset containing mixed examples. Given a list of `n` dataset examples, flatten them by converting each element into a dataset and concatenating them to convert into a single dataset. Args: *args: A list containing one example each from `n` different datasets. Returns: flattened: A new dataset containing the examples from the list as part of a single dataset.
tensor2tensor/data_generators/multi_problem.py
def flatten_zip_dataset(*args): """A list of examples to a dataset containing mixed examples. Given a list of `n` dataset examples, flatten them by converting each element into a dataset and concatenating them to convert into a single dataset. Args: *args: A list containing one example each from `n` different datasets. Returns: flattened: A new dataset containing the examples from the list as part of a single dataset. """ flattened = tf.data.Dataset.from_tensors(args[0]) for ex in args[1:]: flattened = flattened.concatenate(tf.data.Dataset.from_tensors(ex)) return flattened
def flatten_zip_dataset(*args): """A list of examples to a dataset containing mixed examples. Given a list of `n` dataset examples, flatten them by converting each element into a dataset and concatenating them to convert into a single dataset. Args: *args: A list containing one example each from `n` different datasets. Returns: flattened: A new dataset containing the examples from the list as part of a single dataset. """ flattened = tf.data.Dataset.from_tensors(args[0]) for ex in args[1:]: flattened = flattened.concatenate(tf.data.Dataset.from_tensors(ex)) return flattened
[ "A", "list", "of", "examples", "to", "a", "dataset", "containing", "mixed", "examples", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem.py#L111-L128
[ "def", "flatten_zip_dataset", "(", "*", "args", ")", ":", "flattened", "=", "tf", ".", "data", ".", "Dataset", ".", "from_tensors", "(", "args", "[", "0", "]", ")", "for", "ex", "in", "args", "[", "1", ":", "]", ":", "flattened", "=", "flattened", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
aggregate_task_losses
Multiproblem loss function.
tensor2tensor/data_generators/multi_problem.py
def aggregate_task_losses(hparams, problem_hparams, logits, feature_name, feature): """Multiproblem loss function.""" # If no reweighting, we want the default loss to mimic the LM loss. if not hparams.multiproblem_reweight_label_loss: return aggregate_task_lm_losses(hparams=hparams, problem_hparams=problem_hparams, logits=logits, feature_name=feature_name, feature=feature) summaries = [] main_task_id = hparams.problem.task_list[0].task_id vocab_size = problem_hparams.vocab_size[feature_name] if vocab_size is not None and hasattr(hparams, "vocab_divisor"): vocab_size += (-vocab_size) % hparams.vocab_divisor modality = problem_hparams.modality[feature_name] loss = hparams.loss.get(feature_name, modalities.get_loss(modality)) weights_fn = hparams.weights_fn.get( feature_name, modalities.get_weights_fn(modality)) # Primary task loss loss_num, loss_den = loss( logits, feature, lambda x: common_layers.weights_multi_problem_all(x, main_task_id), hparams, vocab_size, weights_fn) loss_val = loss_num / tf.maximum(1.0, loss_den) summaries.append([hparams.problem.task_list[0].name+"_loss", loss_val]) # Since the losses may undergo rescaling, they cannot exist as separate # numerators and denominators. Set the denominators to 1 in order to faciliate # loss averaging. loss_num = loss_val loss_den = tf.minimum(tf.convert_to_tensor(1, dtype=tf.float32), loss_den) for task in hparams.problem.task_list[1:]: # Loss only from the input sequence -- the auxiliary LM loss. seq_loss_num, seq_loss_den = loss( logits, feature, lambda x: common_layers.weights_multi_problem_input(x, task.task_id), # pylint: disable=cell-var-from-loop hparams, vocab_size) seq_loss_num *= problem_hparams.loss_multiplier # Unscaled sequence loss. seq_loss = seq_loss_num / tf.maximum(1.0, seq_loss_den) summaries.append([task.name+"_seq_loss", seq_loss]) if hasattr(task, "num_classes"): # Loss only from the classification label. 
label_loss_num, label_loss_den = loss( logits, feature, lambda x: common_layers.weights_multi_problem(x, task.task_id), # pylint: disable=cell-var-from-loop hparams, vocab_size) label_loss_num *= problem_hparams.loss_multiplier # Unscaled classification label loss. label_loss = label_loss_num / tf.maximum(1.0, label_loss_den) summaries.append([task.name+"_label_loss", label_loss]) # Scaling. if hparams.multiproblem_reweight_label_loss: label_loss *= hparams.multiproblem_label_weight seq_loss *= (1 - hparams.multiproblem_label_weight) # This is the training loss for the optimizer after scaling. task_loss_val = seq_loss + label_loss loss_den_ = label_loss_den else: # Loss only from the target sequence. target_loss_num, target_loss_den = loss( logits, feature, lambda x: common_layers.weights_multi_problem(x, task.task_id), # pylint: disable=cell-var-from-loop hparams, vocab_size) target_loss_num *= problem_hparams.loss_multiplier # Unscaled target sequence loss. target_loss = target_loss_num / tf.maximum(1.0, target_loss_den) summaries.append([task.name+"_target_loss", target_loss]) # Scaling. if hparams.multiproblem_reweight_label_loss: target_loss *= hparams.multiproblem_label_weight seq_loss *= (1 - hparams.multiproblem_label_weight) # This is the training loss for the optimizer after all the scaling. task_loss_val = seq_loss + target_loss loss_den_ = target_loss_den summaries.append([task.name+"_loss", task_loss_val]) # Adding 1 to the loss den for each task leads to averaging task losses. # TODO(urvashik): Fix combination with other task losses - weighted # average based on the number of examples from that task. loss_num += task_loss_val loss_den += tf.minimum(tf.convert_to_tensor(1, dtype=tf.float32), loss_den_) return loss_num, loss_den, summaries
def aggregate_task_losses(hparams, problem_hparams, logits, feature_name, feature): """Multiproblem loss function.""" # If no reweighting, we want the default loss to mimic the LM loss. if not hparams.multiproblem_reweight_label_loss: return aggregate_task_lm_losses(hparams=hparams, problem_hparams=problem_hparams, logits=logits, feature_name=feature_name, feature=feature) summaries = [] main_task_id = hparams.problem.task_list[0].task_id vocab_size = problem_hparams.vocab_size[feature_name] if vocab_size is not None and hasattr(hparams, "vocab_divisor"): vocab_size += (-vocab_size) % hparams.vocab_divisor modality = problem_hparams.modality[feature_name] loss = hparams.loss.get(feature_name, modalities.get_loss(modality)) weights_fn = hparams.weights_fn.get( feature_name, modalities.get_weights_fn(modality)) # Primary task loss loss_num, loss_den = loss( logits, feature, lambda x: common_layers.weights_multi_problem_all(x, main_task_id), hparams, vocab_size, weights_fn) loss_val = loss_num / tf.maximum(1.0, loss_den) summaries.append([hparams.problem.task_list[0].name+"_loss", loss_val]) # Since the losses may undergo rescaling, they cannot exist as separate # numerators and denominators. Set the denominators to 1 in order to faciliate # loss averaging. loss_num = loss_val loss_den = tf.minimum(tf.convert_to_tensor(1, dtype=tf.float32), loss_den) for task in hparams.problem.task_list[1:]: # Loss only from the input sequence -- the auxiliary LM loss. seq_loss_num, seq_loss_den = loss( logits, feature, lambda x: common_layers.weights_multi_problem_input(x, task.task_id), # pylint: disable=cell-var-from-loop hparams, vocab_size) seq_loss_num *= problem_hparams.loss_multiplier # Unscaled sequence loss. seq_loss = seq_loss_num / tf.maximum(1.0, seq_loss_den) summaries.append([task.name+"_seq_loss", seq_loss]) if hasattr(task, "num_classes"): # Loss only from the classification label. 
label_loss_num, label_loss_den = loss( logits, feature, lambda x: common_layers.weights_multi_problem(x, task.task_id), # pylint: disable=cell-var-from-loop hparams, vocab_size) label_loss_num *= problem_hparams.loss_multiplier # Unscaled classification label loss. label_loss = label_loss_num / tf.maximum(1.0, label_loss_den) summaries.append([task.name+"_label_loss", label_loss]) # Scaling. if hparams.multiproblem_reweight_label_loss: label_loss *= hparams.multiproblem_label_weight seq_loss *= (1 - hparams.multiproblem_label_weight) # This is the training loss for the optimizer after scaling. task_loss_val = seq_loss + label_loss loss_den_ = label_loss_den else: # Loss only from the target sequence. target_loss_num, target_loss_den = loss( logits, feature, lambda x: common_layers.weights_multi_problem(x, task.task_id), # pylint: disable=cell-var-from-loop hparams, vocab_size) target_loss_num *= problem_hparams.loss_multiplier # Unscaled target sequence loss. target_loss = target_loss_num / tf.maximum(1.0, target_loss_den) summaries.append([task.name+"_target_loss", target_loss]) # Scaling. if hparams.multiproblem_reweight_label_loss: target_loss *= hparams.multiproblem_label_weight seq_loss *= (1 - hparams.multiproblem_label_weight) # This is the training loss for the optimizer after all the scaling. task_loss_val = seq_loss + target_loss loss_den_ = target_loss_den summaries.append([task.name+"_loss", task_loss_val]) # Adding 1 to the loss den for each task leads to averaging task losses. # TODO(urvashik): Fix combination with other task losses - weighted # average based on the number of examples from that task. loss_num += task_loss_val loss_den += tf.minimum(tf.convert_to_tensor(1, dtype=tf.float32), loss_den_) return loss_num, loss_den, summaries
[ "Multiproblem", "loss", "function", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem.py#L419-L522
[ "def", "aggregate_task_losses", "(", "hparams", ",", "problem_hparams", ",", "logits", ",", "feature_name", ",", "feature", ")", ":", "# If no reweighting, we want the default loss to mimic the LM loss.", "if", "not", "hparams", ".", "multiproblem_reweight_label_loss", ":", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
aggregate_task_lm_losses
LM loss for multiproblems.
tensor2tensor/data_generators/multi_problem.py
def aggregate_task_lm_losses(hparams, problem_hparams, logits, feature_name, feature): """LM loss for multiproblems.""" summaries = [] vocab_size = problem_hparams.vocab_size[feature_name] if vocab_size is not None and hasattr(hparams, "vocab_divisor"): vocab_size += (-vocab_size) % hparams.vocab_divisor modality = problem_hparams.modality[feature_name] loss = hparams.loss.get(feature_name, modalities.get_loss(modality)) weights_fn = hparams.weights_fn.get( feature_name, modalities.get_weights_fn(modality)) loss_num = 0. loss_den = 0. for task in hparams.problem.task_list: loss_num_, loss_den_ = loss( logits, feature, lambda x: common_layers.weights_multi_problem_all(x, task.task_id), # pylint: disable=cell-var-from-loop hparams, vocab_size, weights_fn) loss_num += loss_num_ loss_den += loss_den_ loss_val = loss_num_ / tf.maximum(1.0, loss_den_) summaries.append([task.name+"_loss", loss_val]) return loss_num, loss_den, summaries
def aggregate_task_lm_losses(hparams, problem_hparams, logits, feature_name, feature): """LM loss for multiproblems.""" summaries = [] vocab_size = problem_hparams.vocab_size[feature_name] if vocab_size is not None and hasattr(hparams, "vocab_divisor"): vocab_size += (-vocab_size) % hparams.vocab_divisor modality = problem_hparams.modality[feature_name] loss = hparams.loss.get(feature_name, modalities.get_loss(modality)) weights_fn = hparams.weights_fn.get( feature_name, modalities.get_weights_fn(modality)) loss_num = 0. loss_den = 0. for task in hparams.problem.task_list: loss_num_, loss_den_ = loss( logits, feature, lambda x: common_layers.weights_multi_problem_all(x, task.task_id), # pylint: disable=cell-var-from-loop hparams, vocab_size, weights_fn) loss_num += loss_num_ loss_den += loss_den_ loss_val = loss_num_ / tf.maximum(1.0, loss_den_) summaries.append([task.name+"_loss", loss_val]) return loss_num, loss_den, summaries
[ "LM", "loss", "for", "multiproblems", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem.py#L525-L553
[ "def", "aggregate_task_lm_losses", "(", "hparams", ",", "problem_hparams", ",", "logits", ",", "feature_name", ",", "feature", ")", ":", "summaries", "=", "[", "]", "vocab_size", "=", "problem_hparams", ".", "vocab_size", "[", "feature_name", "]", "if", "vocab_s...
272500b6efe353aeb638d2745ed56e519462ca31
train
MultiProblem.normalize_example
Normalize the examples from different tasks so they can be merged.
tensor2tensor/data_generators/multi_problem.py
def normalize_example(self, task, example, encoder, hparams, is_infer): """Normalize the examples from different tasks so they can be merged.""" # Here we use the default function for NLP tasks that makes everything # a part of "targets" feature. Override in your subclasses for other uses. vocab_offset = encoder.vocab_size + len(self.task_list) return normalize_example_nlp( task, example, is_infer, self.vocab_type, vocab_offset, hparams.multiproblem_max_input_length, hparams.multiproblem_max_target_length, hparams.multiproblem_fixed_train_length)
def normalize_example(self, task, example, encoder, hparams, is_infer): """Normalize the examples from different tasks so they can be merged.""" # Here we use the default function for NLP tasks that makes everything # a part of "targets" feature. Override in your subclasses for other uses. vocab_offset = encoder.vocab_size + len(self.task_list) return normalize_example_nlp( task, example, is_infer, self.vocab_type, vocab_offset, hparams.multiproblem_max_input_length, hparams.multiproblem_max_target_length, hparams.multiproblem_fixed_train_length)
[ "Normalize", "the", "examples", "from", "different", "tasks", "so", "they", "can", "be", "merged", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem.py#L145-L154
[ "def", "normalize_example", "(", "self", ",", "task", ",", "example", ",", "encoder", ",", "hparams", ",", "is_infer", ")", ":", "# Here we use the default function for NLP tasks that makes everything", "# a part of \"targets\" feature. Override in your subclasses for other uses.",...
272500b6efe353aeb638d2745ed56e519462ca31
train
MultiProblem.update_task_ids
Generate task_ids for each problem. These ids correspond to the index of the task in the task_list. Args: encoder_vocab_size: the size of the vocab which is used to compute the index offset.
tensor2tensor/data_generators/multi_problem.py
def update_task_ids(self, encoder_vocab_size): """Generate task_ids for each problem. These ids correspond to the index of the task in the task_list. Args: encoder_vocab_size: the size of the vocab which is used to compute the index offset. """ for idx, task in enumerate(self.task_list): task.set_task_id(idx + encoder_vocab_size) tf.logging.info("Task %d (%s) has id %d." % (idx, task.name, task.task_id))
def update_task_ids(self, encoder_vocab_size): """Generate task_ids for each problem. These ids correspond to the index of the task in the task_list. Args: encoder_vocab_size: the size of the vocab which is used to compute the index offset. """ for idx, task in enumerate(self.task_list): task.set_task_id(idx + encoder_vocab_size) tf.logging.info("Task %d (%s) has id %d." % (idx, task.name, task.task_id))
[ "Generate", "task_ids", "for", "each", "problem", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem.py#L385-L397
[ "def", "update_task_ids", "(", "self", ",", "encoder_vocab_size", ")", ":", "for", "idx", ",", "task", "in", "enumerate", "(", "self", ".", "task_list", ")", ":", "task", ".", "set_task_id", "(", "idx", "+", "encoder_vocab_size", ")", "tf", ".", "logging",...
272500b6efe353aeb638d2745ed56e519462ca31
train
MultiProblem.get_max_num_classes
Compute the maximum number of classes any subtask has. This is useful for modifying the size of the softmax to include the output labels for the classification tasks. Currently, labels from different tasks are overloaded. Returns: num: Highest number of output classes in any text classification sub-task within this MultiProblem.
tensor2tensor/data_generators/multi_problem.py
def get_max_num_classes(self): """Compute the maximum number of classes any subtask has. This is useful for modifying the size of the softmax to include the output labels for the classification tasks. Currently, labels from different tasks are overloaded. Returns: num: Highest number of output classes in any text classification sub-task within this MultiProblem. """ num = 0 for task in self.task_list: if hasattr(task, "num_classes"): if num < task.num_classes: num = task.num_classes return num
def get_max_num_classes(self): """Compute the maximum number of classes any subtask has. This is useful for modifying the size of the softmax to include the output labels for the classification tasks. Currently, labels from different tasks are overloaded. Returns: num: Highest number of output classes in any text classification sub-task within this MultiProblem. """ num = 0 for task in self.task_list: if hasattr(task, "num_classes"): if num < task.num_classes: num = task.num_classes return num
[ "Compute", "the", "maximum", "number", "of", "classes", "any", "subtask", "has", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem.py#L399-L416
[ "def", "get_max_num_classes", "(", "self", ")", ":", "num", "=", "0", "for", "task", "in", "self", ".", "task_list", ":", "if", "hasattr", "(", "task", ",", "\"num_classes\"", ")", ":", "if", "num", "<", "task", ".", "num_classes", ":", "num", "=", "...
272500b6efe353aeb638d2745ed56e519462ca31
train
RecurrentMemory.pre_attention
Called prior to self-attention, to incorporate memory items. Args: segment: an integer Tensor with shape [batch] query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: must be None. Attention normally allows this to be a Tensor with shape [batch, length_m, channels], but we currently only support memory for decoder-side self-attention. bias: bias Tensor (see attention_bias()) Returns: (data, new_query_antecedent, new_memory_antecedent, new_bias)
tensor2tensor/layers/transformer_memory.py
def pre_attention(self, segment, query_antecedent, memory_antecedent, bias): """Called prior to self-attention, to incorporate memory items. Args: segment: an integer Tensor with shape [batch] query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: must be None. Attention normally allows this to be a Tensor with shape [batch, length_m, channels], but we currently only support memory for decoder-side self-attention. bias: bias Tensor (see attention_bias()) Returns: (data, new_query_antecedent, new_memory_antecedent, new_bias) """ del segment return None, query_antecedent, memory_antecedent, bias
def pre_attention(self, segment, query_antecedent, memory_antecedent, bias): """Called prior to self-attention, to incorporate memory items. Args: segment: an integer Tensor with shape [batch] query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: must be None. Attention normally allows this to be a Tensor with shape [batch, length_m, channels], but we currently only support memory for decoder-side self-attention. bias: bias Tensor (see attention_bias()) Returns: (data, new_query_antecedent, new_memory_antecedent, new_bias) """ del segment return None, query_antecedent, memory_antecedent, bias
[ "Called", "prior", "to", "self", "-", "attention", "to", "incorporate", "memory", "items", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/transformer_memory.py#L31-L45
[ "def", "pre_attention", "(", "self", ",", "segment", ",", "query_antecedent", ",", "memory_antecedent", ",", "bias", ")", ":", "del", "segment", "return", "None", ",", "query_antecedent", ",", "memory_antecedent", ",", "bias" ]
272500b6efe353aeb638d2745ed56e519462ca31
train
RecentTokensMemory.pre_attention
Called prior to self-attention, to incorporate memory items. Args: segment: an integer Tensor with shape [batch] query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: must be None. Attention normally allows this to be a Tensor with shape [batch, length_m, channels], but we currently only support memory for decoder-side self-attention. bias: bias Tensor (see attention_bias()) Returns: (data, new_query_antecedent, new_memory_antecedent, new_bias)
tensor2tensor/layers/transformer_memory.py
def pre_attention(self, segment, query_antecedent, memory_antecedent, bias): """Called prior to self-attention, to incorporate memory items. Args: segment: an integer Tensor with shape [batch] query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: must be None. Attention normally allows this to be a Tensor with shape [batch, length_m, channels], but we currently only support memory for decoder-side self-attention. bias: bias Tensor (see attention_bias()) Returns: (data, new_query_antecedent, new_memory_antecedent, new_bias) """ assert memory_antecedent is None, "We only support language modeling" # In eval mode, batch size may be variable memory_batch_size = tf.shape(self.previous_vals)[0] current_batch_size = tf.shape(query_antecedent)[0] amount_to_pad = memory_batch_size - current_batch_size # If segment id is zero, don't attend back to the memory previous_bias = self.previous_bias[:current_batch_size, :, :, :] + tf.cast( tf.equal(segment[:, None, None, None], 0), tf.float32) * -1e9 sliced_previous_vals = self.previous_vals[:current_batch_size, :, :] new_memory_antecedent = tf.concat( [tf.stop_gradient(sliced_previous_vals), query_antecedent], 1) new_bias = tf.concat([ tf.tile(tf.stop_gradient(previous_bias), [1, 1, self.chunk_length, 1]), tf.tile(bias, [current_batch_size, 1, 1, 1]), ], -1) remember_segment = tf.pad(segment, [[0, amount_to_pad]]) # TODO(kitaev): The code assumes that we always either increment the chunk # number or reset it to zero. This assumption will not hold if we re-run the # model for each token, e.g. for autoregressive greedy/beam/sampling decode. remember_vals = tf.pad(query_antecedent, [[0, amount_to_pad], [0, 0], [0, 0]]) # Query position is on axis -2 for bias: as long as a token can be attended # to from at least one query position (i.e. it's not padding), memorize it. 
remember_bias = tf.tile( tf.reduce_max(bias, -2, keepdims=True), [memory_batch_size, 1, 1, 1]) # Assume that query_antecedent is always a full chunk (i.e. not truncated) if self.chunk_length < self.tokens_to_cache: remember_vals = tf.concat([self.previous_vals, remember_vals], 1) remember_bias = tf.concat([ self.previous_bias - 1e9 * tf.cast( tf.equal( tf.pad(segment, [[0, amount_to_pad]])[:, None, None, None], 0), tf.float32), remember_bias ], -1) if self.chunk_length != self.tokens_to_cache: remember_vals = remember_vals[:, -self.tokens_to_cache:, :] remember_bias = remember_bias[:, :, :, -self.tokens_to_cache:] token = (remember_segment, remember_vals, remember_bias) return token, query_antecedent, new_memory_antecedent, new_bias
def pre_attention(self, segment, query_antecedent, memory_antecedent, bias): """Called prior to self-attention, to incorporate memory items. Args: segment: an integer Tensor with shape [batch] query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: must be None. Attention normally allows this to be a Tensor with shape [batch, length_m, channels], but we currently only support memory for decoder-side self-attention. bias: bias Tensor (see attention_bias()) Returns: (data, new_query_antecedent, new_memory_antecedent, new_bias) """ assert memory_antecedent is None, "We only support language modeling" # In eval mode, batch size may be variable memory_batch_size = tf.shape(self.previous_vals)[0] current_batch_size = tf.shape(query_antecedent)[0] amount_to_pad = memory_batch_size - current_batch_size # If segment id is zero, don't attend back to the memory previous_bias = self.previous_bias[:current_batch_size, :, :, :] + tf.cast( tf.equal(segment[:, None, None, None], 0), tf.float32) * -1e9 sliced_previous_vals = self.previous_vals[:current_batch_size, :, :] new_memory_antecedent = tf.concat( [tf.stop_gradient(sliced_previous_vals), query_antecedent], 1) new_bias = tf.concat([ tf.tile(tf.stop_gradient(previous_bias), [1, 1, self.chunk_length, 1]), tf.tile(bias, [current_batch_size, 1, 1, 1]), ], -1) remember_segment = tf.pad(segment, [[0, amount_to_pad]]) # TODO(kitaev): The code assumes that we always either increment the chunk # number or reset it to zero. This assumption will not hold if we re-run the # model for each token, e.g. for autoregressive greedy/beam/sampling decode. remember_vals = tf.pad(query_antecedent, [[0, amount_to_pad], [0, 0], [0, 0]]) # Query position is on axis -2 for bias: as long as a token can be attended # to from at least one query position (i.e. it's not padding), memorize it. 
remember_bias = tf.tile( tf.reduce_max(bias, -2, keepdims=True), [memory_batch_size, 1, 1, 1]) # Assume that query_antecedent is always a full chunk (i.e. not truncated) if self.chunk_length < self.tokens_to_cache: remember_vals = tf.concat([self.previous_vals, remember_vals], 1) remember_bias = tf.concat([ self.previous_bias - 1e9 * tf.cast( tf.equal( tf.pad(segment, [[0, amount_to_pad]])[:, None, None, None], 0), tf.float32), remember_bias ], -1) if self.chunk_length != self.tokens_to_cache: remember_vals = remember_vals[:, -self.tokens_to_cache:, :] remember_bias = remember_bias[:, :, :, -self.tokens_to_cache:] token = (remember_segment, remember_vals, remember_bias) return token, query_antecedent, new_memory_antecedent, new_bias
[ "Called", "prior", "to", "self", "-", "attention", "to", "incorporate", "memory", "items", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/transformer_memory.py#L110-L168
[ "def", "pre_attention", "(", "self", ",", "segment", ",", "query_antecedent", ",", "memory_antecedent", ",", "bias", ")", ":", "assert", "memory_antecedent", "is", "None", ",", "\"We only support language modeling\"", "# In eval mode, batch size may be variable", "memory_ba...
272500b6efe353aeb638d2745ed56e519462ca31
train
RecentTokensMemory.post_attention
Called after self-attention. The memory can be updated here. Args: token: Data returned by pre_attention, which can be used to carry over state related to the current memory operation. x: a Tensor of data after self-attention and feed-forward Returns: a (possibly modified) version of the input x
tensor2tensor/layers/transformer_memory.py
def post_attention(self, token, x): """Called after self-attention. The memory can be updated here. Args: token: Data returned by pre_attention, which can be used to carry over state related to the current memory operation. x: a Tensor of data after self-attention and feed-forward Returns: a (possibly modified) version of the input x """ with tf.control_dependencies([ self.previous_segment.assign(token[0]), self.previous_vals.assign(token[1]), self.previous_bias.assign(token[2]), ]): return tf.identity(x)
def post_attention(self, token, x): """Called after self-attention. The memory can be updated here. Args: token: Data returned by pre_attention, which can be used to carry over state related to the current memory operation. x: a Tensor of data after self-attention and feed-forward Returns: a (possibly modified) version of the input x """ with tf.control_dependencies([ self.previous_segment.assign(token[0]), self.previous_vals.assign(token[1]), self.previous_bias.assign(token[2]), ]): return tf.identity(x)
[ "Called", "after", "self", "-", "attention", ".", "The", "memory", "can", "be", "updated", "here", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/transformer_memory.py#L170-L185
[ "def", "post_attention", "(", "self", ",", "token", ",", "x", ")", ":", "with", "tf", ".", "control_dependencies", "(", "[", "self", ".", "previous_segment", ".", "assign", "(", "token", "[", "0", "]", ")", ",", "self", ".", "previous_vals", ".", "assi...
272500b6efe353aeb638d2745ed56e519462ca31
train
TransformerMemory._norm
Compute the safe norm.
tensor2tensor/layers/transformer_memory.py
def _norm(self, x): """Compute the safe norm.""" return tf.sqrt(tf.reduce_sum(tf.square(x), keepdims=True, axis=-1) + 1e-7)
def _norm(self, x): """Compute the safe norm.""" return tf.sqrt(tf.reduce_sum(tf.square(x), keepdims=True, axis=-1) + 1e-7)
[ "Compute", "the", "safe", "norm", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/transformer_memory.py#L226-L228
[ "def", "_norm", "(", "self", ",", "x", ")", ":", "return", "tf", ".", "sqrt", "(", "tf", ".", "reduce_sum", "(", "tf", ".", "square", "(", "x", ")", ",", "keepdims", "=", "True", ",", "axis", "=", "-", "1", ")", "+", "1e-7", ")" ]
272500b6efe353aeb638d2745ed56e519462ca31
train
TransformerMemory._address_content
Address the memory based on content similarity. Args: x: a tensor in the shape of [batch_size, length, depth]. Returns: the logits for each memory entry [batch_size, length, memory_size].
tensor2tensor/layers/transformer_memory.py
def _address_content(self, x): """Address the memory based on content similarity. Args: x: a tensor in the shape of [batch_size, length, depth]. Returns: the logits for each memory entry [batch_size, length, memory_size]. """ mem_keys = tf.layers.dense(self.mem_vals, self.key_depth, bias_initializer=tf.constant_initializer(1.0), name="mem_key") mem_query = tf.layers.dense(x, self.key_depth, bias_initializer=tf.constant_initializer(1.0), name="mem_query") norm = tf.matmul(self._norm(mem_query), self._norm(mem_keys), transpose_b=True) dot_product = tf.matmul(mem_query, mem_keys, transpose_b=True) cos_dist = tf.div(dot_product, norm + 1e-7, name="cos_dist") access_logits = self.sharpen_factor * cos_dist return access_logits
def _address_content(self, x): """Address the memory based on content similarity. Args: x: a tensor in the shape of [batch_size, length, depth]. Returns: the logits for each memory entry [batch_size, length, memory_size]. """ mem_keys = tf.layers.dense(self.mem_vals, self.key_depth, bias_initializer=tf.constant_initializer(1.0), name="mem_key") mem_query = tf.layers.dense(x, self.key_depth, bias_initializer=tf.constant_initializer(1.0), name="mem_query") norm = tf.matmul(self._norm(mem_query), self._norm(mem_keys), transpose_b=True) dot_product = tf.matmul(mem_query, mem_keys, transpose_b=True) cos_dist = tf.div(dot_product, norm + 1e-7, name="cos_dist") access_logits = self.sharpen_factor * cos_dist return access_logits
[ "Address", "the", "memory", "based", "on", "content", "similarity", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/transformer_memory.py#L230-L249
[ "def", "_address_content", "(", "self", ",", "x", ")", ":", "mem_keys", "=", "tf", ".", "layers", ".", "dense", "(", "self", ".", "mem_vals", ",", "self", ".", "key_depth", ",", "bias_initializer", "=", "tf", ".", "constant_initializer", "(", "1.0", ")",...
272500b6efe353aeb638d2745ed56e519462ca31
train
TransformerMemory.read
Read from the memory. An external component can use the results via a simple MLP, e.g., fn(x W_x + retrieved_mem W_m). Args: x: a tensor in the shape of [batch_size, length, depth]. Returns: access_logits: the logits for accessing the memory in shape of [batch_size, length, memory_size]. retrieved_mem: the retrieved results in the shape of [batch_size, length, val_depth].
tensor2tensor/layers/transformer_memory.py
def read(self, x): """Read from the memory. An external component can use the results via a simple MLP, e.g., fn(x W_x + retrieved_mem W_m). Args: x: a tensor in the shape of [batch_size, length, depth]. Returns: access_logits: the logits for accessing the memory in shape of [batch_size, length, memory_size]. retrieved_mem: the retrieved results in the shape of [batch_size, length, val_depth]. """ access_logits = self._address_content(x) weights = tf.nn.softmax(access_logits) retrieved_mem = tf.reduce_sum( tf.multiply(tf.expand_dims(weights, 3), tf.expand_dims(self.mem_vals, axis=1)), axis=2) return access_logits, retrieved_mem
def read(self, x): """Read from the memory. An external component can use the results via a simple MLP, e.g., fn(x W_x + retrieved_mem W_m). Args: x: a tensor in the shape of [batch_size, length, depth]. Returns: access_logits: the logits for accessing the memory in shape of [batch_size, length, memory_size]. retrieved_mem: the retrieved results in the shape of [batch_size, length, val_depth]. """ access_logits = self._address_content(x) weights = tf.nn.softmax(access_logits) retrieved_mem = tf.reduce_sum( tf.multiply(tf.expand_dims(weights, 3), tf.expand_dims(self.mem_vals, axis=1)), axis=2) return access_logits, retrieved_mem
[ "Read", "from", "the", "memory", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/transformer_memory.py#L251-L270
[ "def", "read", "(", "self", ",", "x", ")", ":", "access_logits", "=", "self", ".", "_address_content", "(", "x", ")", "weights", "=", "tf", ".", "nn", ".", "softmax", "(", "access_logits", ")", "retrieved_mem", "=", "tf", ".", "reduce_sum", "(", "tf", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
TransformerMemory.write
Write to the memory based on a combination of similarity and least used. Based on arXiv:1607.00036v2 [cs.LG]. Args: x: a tensor in the shape of [batch_size, length, depth]. access_logits: the logits for accessing the memory. Returns: the update op.
tensor2tensor/layers/transformer_memory.py
def write(self, x, access_logits): """Write to the memory based on a combination of similarity and least used. Based on arXiv:1607.00036v2 [cs.LG]. Args: x: a tensor in the shape of [batch_size, length, depth]. access_logits: the logits for accessing the memory. Returns: the update op. """ gamma = tf.layers.dense(x, 1, activation=tf.sigmoid, name="gamma") write_logits = access_logits - gamma * tf.expand_dims(self.mean_logits, 1) candidate_value = tf.layers.dense(x, self.val_depth, activation=tf.nn.relu, name="candidate_value") erase_gates = tf.layers.dense(x, self.memory_size, activation=tf.nn.sigmoid, name="erase") write_weights = tf.nn.softmax(write_logits) erase_weights = tf.expand_dims(1 - erase_gates * write_weights, 3) erase = tf.multiply(erase_weights, tf.expand_dims(self.mem_vals, 1)) addition = tf.multiply( tf.expand_dims(write_weights, 3), tf.expand_dims(candidate_value, 2)) update_value_op = self.mem_vals.assign( tf.reduce_mean(erase + addition, axis=1)) with tf.control_dependencies([update_value_op]): write_op = self.mean_logits.assign( self.mean_logits * 0.1 + tf.reduce_mean(write_logits * 0.9, axis=1)) return write_op
def write(self, x, access_logits): """Write to the memory based on a combination of similarity and least used. Based on arXiv:1607.00036v2 [cs.LG]. Args: x: a tensor in the shape of [batch_size, length, depth]. access_logits: the logits for accessing the memory. Returns: the update op. """ gamma = tf.layers.dense(x, 1, activation=tf.sigmoid, name="gamma") write_logits = access_logits - gamma * tf.expand_dims(self.mean_logits, 1) candidate_value = tf.layers.dense(x, self.val_depth, activation=tf.nn.relu, name="candidate_value") erase_gates = tf.layers.dense(x, self.memory_size, activation=tf.nn.sigmoid, name="erase") write_weights = tf.nn.softmax(write_logits) erase_weights = tf.expand_dims(1 - erase_gates * write_weights, 3) erase = tf.multiply(erase_weights, tf.expand_dims(self.mem_vals, 1)) addition = tf.multiply( tf.expand_dims(write_weights, 3), tf.expand_dims(candidate_value, 2)) update_value_op = self.mem_vals.assign( tf.reduce_mean(erase + addition, axis=1)) with tf.control_dependencies([update_value_op]): write_op = self.mean_logits.assign( self.mean_logits * 0.1 + tf.reduce_mean(write_logits * 0.9, axis=1)) return write_op
[ "Write", "to", "the", "memory", "based", "on", "a", "combination", "of", "similarity", "and", "least", "used", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/transformer_memory.py#L272-L303
[ "def", "write", "(", "self", ",", "x", ",", "access_logits", ")", ":", "gamma", "=", "tf", ".", "layers", ".", "dense", "(", "x", ",", "1", ",", "activation", "=", "tf", ".", "sigmoid", ",", "name", "=", "\"gamma\"", ")", "write_logits", "=", "acce...
272500b6efe353aeb638d2745ed56e519462ca31
train
TransformerMemory.reset
Reset the entries in the memory. Args: entries_to_reset: a 1D tensor. Returns: the reset op.
tensor2tensor/layers/transformer_memory.py
def reset(self, entries_to_reset): """Reset the entries in the memory. Args: entries_to_reset: a 1D tensor. Returns: the reset op. """ num_updates = tf.size(entries_to_reset) update_vals = tf.scatter_update( self.mem_vals, entries_to_reset, tf.tile(tf.expand_dims( tf.fill([self.memory_size, self.val_depth], .0), 0), [num_updates, 1, 1])) update_logits = tf.scatter_update( self.mean_logits, entries_to_reset, tf.tile(tf.expand_dims( tf.fill([self.memory_size], .0), 0), [num_updates, 1])) reset_op = tf.group([update_vals, update_logits]) return reset_op
def reset(self, entries_to_reset): """Reset the entries in the memory. Args: entries_to_reset: a 1D tensor. Returns: the reset op. """ num_updates = tf.size(entries_to_reset) update_vals = tf.scatter_update( self.mem_vals, entries_to_reset, tf.tile(tf.expand_dims( tf.fill([self.memory_size, self.val_depth], .0), 0), [num_updates, 1, 1])) update_logits = tf.scatter_update( self.mean_logits, entries_to_reset, tf.tile(tf.expand_dims( tf.fill([self.memory_size], .0), 0), [num_updates, 1])) reset_op = tf.group([update_vals, update_logits]) return reset_op
[ "Reset", "the", "entries", "in", "the", "memory", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/transformer_memory.py#L317-L337
[ "def", "reset", "(", "self", ",", "entries_to_reset", ")", ":", "num_updates", "=", "tf", ".", "size", "(", "entries_to_reset", ")", "update_vals", "=", "tf", ".", "scatter_update", "(", "self", ".", "mem_vals", ",", "entries_to_reset", ",", "tf", ".", "ti...
272500b6efe353aeb638d2745ed56e519462ca31
train
TransformerMemory.pre_attention
Called prior to self-attention, to incorporate memory items. Args: segment_number: an integer Tensor with shape [batch] query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: must be None. Attention normally allows this to be a Tensor with shape [batch, length_m, channels], but we currently only support memory for decoder-side self-attention. bias: bias Tensor (see attention_bias()) Returns: (data, new_query_antecedent, new_memory_antecedent, new_bias)
tensor2tensor/layers/transformer_memory.py
def pre_attention(self, segment_number, query_antecedent, memory_antecedent, bias): """Called prior to self-attention, to incorporate memory items. Args: segment_number: an integer Tensor with shape [batch] query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: must be None. Attention normally allows this to be a Tensor with shape [batch, length_m, channels], but we currently only support memory for decoder-side self-attention. bias: bias Tensor (see attention_bias()) Returns: (data, new_query_antecedent, new_memory_antecedent, new_bias) """ with tf.variable_scope(self.name + "/pre_attention", reuse=tf.AUTO_REUSE): assert memory_antecedent is None, "We only support language modeling" with tf.control_dependencies([ tf.assert_greater_equal(self.batch_size, tf.size(segment_number))]): difference = self.batch_size - tf.size(segment_number) segment_number = tf.pad(segment_number, [[0, difference]]) reset_op = self.reset(tf.reshape(tf.where( tf.less(segment_number, self.segment_number)), [-1])) memory_results = {} with tf.control_dependencies([reset_op]): with tf.control_dependencies([ self.update_segment_number(segment_number)]): x = tf.pad(query_antecedent, [ [0, difference], [0, 0], [0, 0]]) access_logits, retrieved_mem = self.read(x) memory_results["x"] = x memory_results["access_logits"] = access_logits memory_results["retrieved_mem"] = retrieved_mem return memory_results, query_antecedent, memory_antecedent, bias
def pre_attention(self, segment_number, query_antecedent, memory_antecedent, bias): """Called prior to self-attention, to incorporate memory items. Args: segment_number: an integer Tensor with shape [batch] query_antecedent: a Tensor with shape [batch, length_q, channels] memory_antecedent: must be None. Attention normally allows this to be a Tensor with shape [batch, length_m, channels], but we currently only support memory for decoder-side self-attention. bias: bias Tensor (see attention_bias()) Returns: (data, new_query_antecedent, new_memory_antecedent, new_bias) """ with tf.variable_scope(self.name + "/pre_attention", reuse=tf.AUTO_REUSE): assert memory_antecedent is None, "We only support language modeling" with tf.control_dependencies([ tf.assert_greater_equal(self.batch_size, tf.size(segment_number))]): difference = self.batch_size - tf.size(segment_number) segment_number = tf.pad(segment_number, [[0, difference]]) reset_op = self.reset(tf.reshape(tf.where( tf.less(segment_number, self.segment_number)), [-1])) memory_results = {} with tf.control_dependencies([reset_op]): with tf.control_dependencies([ self.update_segment_number(segment_number)]): x = tf.pad(query_antecedent, [ [0, difference], [0, 0], [0, 0]]) access_logits, retrieved_mem = self.read(x) memory_results["x"] = x memory_results["access_logits"] = access_logits memory_results["retrieved_mem"] = retrieved_mem return memory_results, query_antecedent, memory_antecedent, bias
[ "Called", "prior", "to", "self", "-", "attention", "to", "incorporate", "memory", "items", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/transformer_memory.py#L339-L371
[ "def", "pre_attention", "(", "self", ",", "segment_number", ",", "query_antecedent", ",", "memory_antecedent", ",", "bias", ")", ":", "with", "tf", ".", "variable_scope", "(", "self", ".", "name", "+", "\"/pre_attention\"", ",", "reuse", "=", "tf", ".", "AUT...
272500b6efe353aeb638d2745ed56e519462ca31
train
TransformerMemory.post_attention
Called after self-attention. The memory can be updated here. Args: token: Data returned by pre_attention, which can be used to carry over state related to the current memory operation. x: a Tensor of data after self-attention and feed-forward Returns: a (possibly modified) version of the input x
tensor2tensor/layers/transformer_memory.py
def post_attention(self, token, x): """Called after self-attention. The memory can be updated here. Args: token: Data returned by pre_attention, which can be used to carry over state related to the current memory operation. x: a Tensor of data after self-attention and feed-forward Returns: a (possibly modified) version of the input x """ with tf.variable_scope(self.name + "/post_attention", reuse=tf.AUTO_REUSE): depth = common_layers.shape_list(x)[-1] actual_batch_size = common_layers.shape_list(x)[0] memory_output = tf.gather(token["retrieved_mem"], tf.range(actual_batch_size)) output = tf.add(tf.layers.dense(x, depth, use_bias=False), tf.layers.dense(memory_output, depth)) with tf.control_dependencies([output]): with tf.control_dependencies([ self.write(token["x"], token["access_logits"])]): return tf.identity(output)
def post_attention(self, token, x): """Called after self-attention. The memory can be updated here. Args: token: Data returned by pre_attention, which can be used to carry over state related to the current memory operation. x: a Tensor of data after self-attention and feed-forward Returns: a (possibly modified) version of the input x """ with tf.variable_scope(self.name + "/post_attention", reuse=tf.AUTO_REUSE): depth = common_layers.shape_list(x)[-1] actual_batch_size = common_layers.shape_list(x)[0] memory_output = tf.gather(token["retrieved_mem"], tf.range(actual_batch_size)) output = tf.add(tf.layers.dense(x, depth, use_bias=False), tf.layers.dense(memory_output, depth)) with tf.control_dependencies([output]): with tf.control_dependencies([ self.write(token["x"], token["access_logits"])]): return tf.identity(output)
[ "Called", "after", "self", "-", "attention", ".", "The", "memory", "can", "be", "updated", "here", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/transformer_memory.py#L373-L393
[ "def", "post_attention", "(", "self", ",", "token", ",", "x", ")", ":", "with", "tf", ".", "variable_scope", "(", "self", ".", "name", "+", "\"/post_attention\"", ",", "reuse", "=", "tf", ".", "AUTO_REUSE", ")", ":", "depth", "=", "common_layers", ".", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
_define_train
Define the training setup.
tensor2tensor/rl/ppo_learner.py
def _define_train( train_env, ppo_hparams, eval_env_fn=None, sampling_temp=1.0, **collect_kwargs ): """Define the training setup.""" memory, collect_summary, train_initialization = ( _define_collect( train_env, ppo_hparams, "ppo_train", eval_phase=False, sampling_temp=sampling_temp, **collect_kwargs)) ppo_summary = ppo.define_ppo_epoch( memory, ppo_hparams, train_env.action_space, train_env.batch_size) train_summary = tf.summary.merge([collect_summary, ppo_summary]) if ppo_hparams.eval_every_epochs: # TODO(koz4k): Do we need this at all? assert eval_env_fn is not None eval_env = eval_env_fn(in_graph=True) (_, eval_collect_summary, eval_initialization) = ( _define_collect( eval_env, ppo_hparams, "ppo_eval", eval_phase=True, sampling_temp=0.0, **collect_kwargs)) return (train_summary, eval_collect_summary, (train_initialization, eval_initialization)) else: return (train_summary, None, (train_initialization,))
def _define_train( train_env, ppo_hparams, eval_env_fn=None, sampling_temp=1.0, **collect_kwargs ): """Define the training setup.""" memory, collect_summary, train_initialization = ( _define_collect( train_env, ppo_hparams, "ppo_train", eval_phase=False, sampling_temp=sampling_temp, **collect_kwargs)) ppo_summary = ppo.define_ppo_epoch( memory, ppo_hparams, train_env.action_space, train_env.batch_size) train_summary = tf.summary.merge([collect_summary, ppo_summary]) if ppo_hparams.eval_every_epochs: # TODO(koz4k): Do we need this at all? assert eval_env_fn is not None eval_env = eval_env_fn(in_graph=True) (_, eval_collect_summary, eval_initialization) = ( _define_collect( eval_env, ppo_hparams, "ppo_eval", eval_phase=True, sampling_temp=0.0, **collect_kwargs)) return (train_summary, eval_collect_summary, (train_initialization, eval_initialization)) else: return (train_summary, None, (train_initialization,))
[ "Define", "the", "training", "setup", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/ppo_learner.py#L151-L186
[ "def", "_define_train", "(", "train_env", ",", "ppo_hparams", ",", "eval_env_fn", "=", "None", ",", "sampling_temp", "=", "1.0", ",", "*", "*", "collect_kwargs", ")", ":", "memory", ",", "collect_summary", ",", "train_initialization", "=", "(", "_define_collect"...
272500b6efe353aeb638d2745ed56e519462ca31
train
_run_train
Train.
tensor2tensor/rl/ppo_learner.py
def _run_train(ppo_hparams, event_dir, model_dir, restarter, train_summary_op, eval_summary_op, initializers, report_fn=None, model_save_fn=None): """Train.""" summary_writer = tf.summary.FileWriter( event_dir, graph=tf.get_default_graph(), flush_secs=60) model_saver = tf.train.Saver( tf.global_variables(ppo_hparams.policy_network + "/.*") + tf.global_variables("training/" + ppo_hparams.policy_network + "/.*") + # tf.global_variables("clean_scope.*") + # Needed for sharing params. tf.global_variables("global_step") + tf.global_variables("losses_avg.*") + tf.global_variables("train_stats.*") ) global_step = tf.train.get_or_create_global_step() with tf.control_dependencies([tf.assign_add(global_step, 1)]): train_summary_op = tf.identity(train_summary_op) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for initializer in initializers: initializer(sess) trainer_lib.restore_checkpoint(model_dir, model_saver, sess) num_target_iterations = restarter.target_local_step num_completed_iterations = num_target_iterations - restarter.steps_to_go with restarter.training_loop(): for epoch_index in range(num_completed_iterations, num_target_iterations): summary = sess.run(train_summary_op) if summary_writer: summary_writer.add_summary(summary, epoch_index) if (ppo_hparams.eval_every_epochs and epoch_index % ppo_hparams.eval_every_epochs == 0): eval_summary = sess.run(eval_summary_op) if summary_writer: summary_writer.add_summary(eval_summary, epoch_index) if report_fn: summary_proto = tf.Summary() summary_proto.ParseFromString(eval_summary) for elem in summary_proto.value: if "mean_score" in elem.tag: report_fn(elem.simple_value, epoch_index) break if (model_saver and ppo_hparams.save_models_every_epochs and (epoch_index % ppo_hparams.save_models_every_epochs == 0 or (epoch_index + 1) == num_target_iterations)): ckpt_path = os.path.join( model_dir, "model.ckpt-{}".format(tf.train.global_step(sess, global_step)) ) model_saver.save(sess, ckpt_path) if 
model_save_fn: model_save_fn(model_dir)
def _run_train(ppo_hparams, event_dir, model_dir, restarter, train_summary_op, eval_summary_op, initializers, report_fn=None, model_save_fn=None): """Train.""" summary_writer = tf.summary.FileWriter( event_dir, graph=tf.get_default_graph(), flush_secs=60) model_saver = tf.train.Saver( tf.global_variables(ppo_hparams.policy_network + "/.*") + tf.global_variables("training/" + ppo_hparams.policy_network + "/.*") + # tf.global_variables("clean_scope.*") + # Needed for sharing params. tf.global_variables("global_step") + tf.global_variables("losses_avg.*") + tf.global_variables("train_stats.*") ) global_step = tf.train.get_or_create_global_step() with tf.control_dependencies([tf.assign_add(global_step, 1)]): train_summary_op = tf.identity(train_summary_op) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for initializer in initializers: initializer(sess) trainer_lib.restore_checkpoint(model_dir, model_saver, sess) num_target_iterations = restarter.target_local_step num_completed_iterations = num_target_iterations - restarter.steps_to_go with restarter.training_loop(): for epoch_index in range(num_completed_iterations, num_target_iterations): summary = sess.run(train_summary_op) if summary_writer: summary_writer.add_summary(summary, epoch_index) if (ppo_hparams.eval_every_epochs and epoch_index % ppo_hparams.eval_every_epochs == 0): eval_summary = sess.run(eval_summary_op) if summary_writer: summary_writer.add_summary(eval_summary, epoch_index) if report_fn: summary_proto = tf.Summary() summary_proto.ParseFromString(eval_summary) for elem in summary_proto.value: if "mean_score" in elem.tag: report_fn(elem.simple_value, epoch_index) break if (model_saver and ppo_hparams.save_models_every_epochs and (epoch_index % ppo_hparams.save_models_every_epochs == 0 or (epoch_index + 1) == num_target_iterations)): ckpt_path = os.path.join( model_dir, "model.ckpt-{}".format(tf.train.global_step(sess, global_step)) ) model_saver.save(sess, ckpt_path) if 
model_save_fn: model_save_fn(model_dir)
[ "Train", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/ppo_learner.py#L189-L251
[ "def", "_run_train", "(", "ppo_hparams", ",", "event_dir", ",", "model_dir", ",", "restarter", ",", "train_summary_op", ",", "eval_summary_op", ",", "initializers", ",", "report_fn", "=", "None", ",", "model_save_fn", "=", "None", ")", ":", "summary_writer", "="...
272500b6efe353aeb638d2745ed56e519462ca31
train
_rollout_metadata
Metadata for rollouts.
tensor2tensor/rl/ppo_learner.py
def _rollout_metadata(batch_env): """Metadata for rollouts.""" batch_env_shape = batch_env.observ.get_shape().as_list() batch_size = [batch_env_shape[0]] shapes_types_names = [ # TODO(piotrmilos): possibly retrieve the observation type for batch_env (batch_size + batch_env_shape[1:], batch_env.observ_dtype, "observation"), (batch_size, tf.float32, "reward"), (batch_size, tf.bool, "done"), (batch_size + list(batch_env.action_shape), batch_env.action_dtype, "action"), (batch_size, tf.float32, "pdf"), (batch_size, tf.float32, "value_function"), ] return shapes_types_names
def _rollout_metadata(batch_env): """Metadata for rollouts.""" batch_env_shape = batch_env.observ.get_shape().as_list() batch_size = [batch_env_shape[0]] shapes_types_names = [ # TODO(piotrmilos): possibly retrieve the observation type for batch_env (batch_size + batch_env_shape[1:], batch_env.observ_dtype, "observation"), (batch_size, tf.float32, "reward"), (batch_size, tf.bool, "done"), (batch_size + list(batch_env.action_shape), batch_env.action_dtype, "action"), (batch_size, tf.float32, "pdf"), (batch_size, tf.float32, "value_function"), ] return shapes_types_names
[ "Metadata", "for", "rollouts", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/ppo_learner.py#L254-L268
[ "def", "_rollout_metadata", "(", "batch_env", ")", ":", "batch_env_shape", "=", "batch_env", ".", "observ", ".", "get_shape", "(", ")", ".", "as_list", "(", ")", "batch_size", "=", "[", "batch_env_shape", "[", "0", "]", "]", "shapes_types_names", "=", "[", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
_define_collect
Collect trajectories. Args: batch_env: Batch environment. ppo_hparams: PPO hparams, defined in tensor2tensor.models.research.rl. scope: var scope. frame_stack_size: Number of last observations to feed into the policy. eval_phase: TODO(koz4k): Write docstring. sampling_temp: Sampling temperature for the policy. force_beginning_resets: Whether to reset at the beginning of each episode. Returns: Returns memory (observations, rewards, dones, actions, pdfs, values_functions) containing a rollout of environment from nested wrapped structure.
tensor2tensor/rl/ppo_learner.py
def _define_collect(batch_env, ppo_hparams, scope, frame_stack_size, eval_phase, sampling_temp, force_beginning_resets): """Collect trajectories. Args: batch_env: Batch environment. ppo_hparams: PPO hparams, defined in tensor2tensor.models.research.rl. scope: var scope. frame_stack_size: Number of last observations to feed into the policy. eval_phase: TODO(koz4k): Write docstring. sampling_temp: Sampling temperature for the policy. force_beginning_resets: Whether to reset at the beginning of each episode. Returns: Returns memory (observations, rewards, dones, actions, pdfs, values_functions) containing a rollout of environment from nested wrapped structure. """ epoch_length = ppo_hparams.epoch_length to_initialize = [] with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): num_agents = batch_env.batch_size to_initialize.append(batch_env) wrappers = [(StackWrapper, { "history": frame_stack_size }), (_MemoryWrapper, {})] rollout_metadata = None speculum = None for w in wrappers: tf.logging.info("Applying wrapper %s(%s) to env %s." 
% (str( w[0]), str(w[1]), str(batch_env))) batch_env = w[0](batch_env, **w[1]) to_initialize.append(batch_env) rollout_metadata = _rollout_metadata(batch_env) speculum = batch_env.speculum def initialization_lambda(sess): for batch_env in to_initialize: batch_env.initialize(sess) memory = [ tf.get_variable( # pylint: disable=g-complex-comprehension "collect_memory_%d_%s" % (epoch_length, name), shape=[epoch_length] + shape, dtype=dtype, initializer=tf.zeros_initializer(), trainable=False) for (shape, dtype, name) in rollout_metadata ] cumulative_rewards = tf.get_variable( "cumulative_rewards", len(batch_env), trainable=False) eval_phase_t = tf.convert_to_tensor(eval_phase) should_reset_var = tf.Variable(True, trainable=False) zeros_tensor = tf.zeros(len(batch_env)) force_beginning_resets = tf.convert_to_tensor(force_beginning_resets) def reset_ops_group(): return tf.group( batch_env.reset(tf.range(len(batch_env))), tf.assign(cumulative_rewards, zeros_tensor)) reset_op = tf.cond( tf.logical_or(should_reset_var.read_value(), force_beginning_resets), reset_ops_group, tf.no_op) with tf.control_dependencies([reset_op]): reset_once_op = tf.assign(should_reset_var, False) with tf.control_dependencies([reset_once_op]): def step(index, scores_sum, scores_num): """Single step.""" index %= epoch_length # Only needed in eval runs. # Note - the only way to ensure making a copy of tensor is to run simple # operation. 
We are waiting for tf.copy: # https://github.com/tensorflow/tensorflow/issues/11186 obs_copy = batch_env.observ + 0 def env_step(arg1, arg2, arg3): # pylint: disable=unused-argument """Step of the environment.""" (logits, value_function) = get_policy( obs_copy, ppo_hparams, batch_env.action_space ) action = common_layers.sample_with_temperature(logits, sampling_temp) action = tf.cast(action, tf.int32) action = tf.reshape(action, shape=(num_agents,)) reward, done = batch_env.simulate(action) pdf = tfp.distributions.Categorical(logits=logits).prob(action) pdf = tf.reshape(pdf, shape=(num_agents,)) value_function = tf.reshape(value_function, shape=(num_agents,)) done = tf.reshape(done, shape=(num_agents,)) with tf.control_dependencies([reward, done]): return tf.identity(pdf), tf.identity(value_function), \ tf.identity(done) # TODO(piotrmilos): while_body is executed at most once, # thus should be replaced with tf.cond pdf, value_function, top_level_done = tf.while_loop( lambda _1, _2, _3: tf.equal(speculum.size(), 0), env_step, [ tf.constant(0.0, shape=(num_agents,)), tf.constant(0.0, shape=(num_agents,)), tf.constant(False, shape=(num_agents,)) ], parallel_iterations=1, back_prop=False, ) with tf.control_dependencies([pdf, value_function]): obs, reward, done, action = speculum.dequeue() to_save = [obs, reward, done, action, pdf, value_function] save_ops = [ tf.scatter_update(memory_slot, index, value) for memory_slot, value in zip(memory, to_save) ] cumulate_rewards_op = cumulative_rewards.assign_add(reward) agent_indices_to_reset = tf.where(top_level_done)[:, 0] with tf.control_dependencies([cumulate_rewards_op]): # TODO(piotrmilos): possibly we need cumulative_rewards.read_value() scores_sum_delta = tf.reduce_sum( tf.gather(cumulative_rewards.read_value(), agent_indices_to_reset)) scores_num_delta = tf.count_nonzero(done, dtype=tf.int32) with tf.control_dependencies(save_ops + [scores_sum_delta, scores_num_delta]): reset_env_op = 
batch_env.reset(agent_indices_to_reset) reset_cumulative_rewards_op = tf.scatter_update( cumulative_rewards, agent_indices_to_reset, tf.gather(zeros_tensor, agent_indices_to_reset)) with tf.control_dependencies([reset_env_op, reset_cumulative_rewards_op]): return [ index + 1, scores_sum + scores_sum_delta, scores_num + scores_num_delta ] def stop_condition(i, _, resets): return tf.cond(eval_phase_t, lambda: resets < num_agents, lambda: i < epoch_length) init = [tf.constant(0), tf.constant(0.0), tf.constant(0)] index, scores_sum, scores_num = tf.while_loop( stop_condition, step, init, parallel_iterations=1, back_prop=False) # We handle force_beginning_resets differently. We assume that all envs are # reseted at the end of episod (though it happens at the beginning of the # next one scores_num = tf.cond(force_beginning_resets, lambda: scores_num + len(batch_env), lambda: scores_num) with tf.control_dependencies([scores_sum]): scores_sum = tf.cond( force_beginning_resets, lambda: scores_sum + tf.reduce_sum(cumulative_rewards.read_value()), lambda: scores_sum) mean_score = tf.cond( tf.greater(scores_num, 0), lambda: scores_sum / tf.cast(scores_num, tf.float32), lambda: 0.) printing = tf.Print(0, [mean_score, scores_sum, scores_num], "mean_score: ") with tf.control_dependencies([index, printing]): memory = [mem.read_value() for mem in memory] # When generating real data together with PPO training we must use single # agent. For PPO to work we reshape the history, as if it was generated # by real_ppo_effective_num_agents. 
if ppo_hparams.effective_num_agents is not None and not eval_phase: new_memory = [] effective_num_agents = ppo_hparams.effective_num_agents assert epoch_length % ppo_hparams.effective_num_agents == 0, ( "The rollout of ppo_hparams.epoch_length will be distributed amongst" "effective_num_agents of agents") new_epoch_length = int(epoch_length / effective_num_agents) for mem, info in zip(memory, rollout_metadata): shape, _, name = info new_shape = [effective_num_agents, new_epoch_length] + shape[1:] perm = list(range(len(shape) + 1)) perm[0] = 1 perm[1] = 0 mem = tf.transpose(mem, perm=perm) mem = tf.reshape(mem, shape=new_shape) mem = tf.transpose( mem, perm=perm, name="collect_memory_%d_%s" % (new_epoch_length, name)) new_memory.append(mem) memory = new_memory with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): mean_score_summary = tf.cond( tf.greater(scores_num, 0), lambda: tf.summary.scalar("mean_score_this_iter", mean_score), str) summaries = tf.summary.merge([ mean_score_summary, tf.summary.scalar("episodes_finished_this_iter", scores_num) ]) return memory, summaries, initialization_lambda
def _define_collect(batch_env, ppo_hparams, scope, frame_stack_size, eval_phase, sampling_temp, force_beginning_resets): """Collect trajectories. Args: batch_env: Batch environment. ppo_hparams: PPO hparams, defined in tensor2tensor.models.research.rl. scope: var scope. frame_stack_size: Number of last observations to feed into the policy. eval_phase: TODO(koz4k): Write docstring. sampling_temp: Sampling temperature for the policy. force_beginning_resets: Whether to reset at the beginning of each episode. Returns: Returns memory (observations, rewards, dones, actions, pdfs, values_functions) containing a rollout of environment from nested wrapped structure. """ epoch_length = ppo_hparams.epoch_length to_initialize = [] with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): num_agents = batch_env.batch_size to_initialize.append(batch_env) wrappers = [(StackWrapper, { "history": frame_stack_size }), (_MemoryWrapper, {})] rollout_metadata = None speculum = None for w in wrappers: tf.logging.info("Applying wrapper %s(%s) to env %s." 
% (str( w[0]), str(w[1]), str(batch_env))) batch_env = w[0](batch_env, **w[1]) to_initialize.append(batch_env) rollout_metadata = _rollout_metadata(batch_env) speculum = batch_env.speculum def initialization_lambda(sess): for batch_env in to_initialize: batch_env.initialize(sess) memory = [ tf.get_variable( # pylint: disable=g-complex-comprehension "collect_memory_%d_%s" % (epoch_length, name), shape=[epoch_length] + shape, dtype=dtype, initializer=tf.zeros_initializer(), trainable=False) for (shape, dtype, name) in rollout_metadata ] cumulative_rewards = tf.get_variable( "cumulative_rewards", len(batch_env), trainable=False) eval_phase_t = tf.convert_to_tensor(eval_phase) should_reset_var = tf.Variable(True, trainable=False) zeros_tensor = tf.zeros(len(batch_env)) force_beginning_resets = tf.convert_to_tensor(force_beginning_resets) def reset_ops_group(): return tf.group( batch_env.reset(tf.range(len(batch_env))), tf.assign(cumulative_rewards, zeros_tensor)) reset_op = tf.cond( tf.logical_or(should_reset_var.read_value(), force_beginning_resets), reset_ops_group, tf.no_op) with tf.control_dependencies([reset_op]): reset_once_op = tf.assign(should_reset_var, False) with tf.control_dependencies([reset_once_op]): def step(index, scores_sum, scores_num): """Single step.""" index %= epoch_length # Only needed in eval runs. # Note - the only way to ensure making a copy of tensor is to run simple # operation. 
We are waiting for tf.copy: # https://github.com/tensorflow/tensorflow/issues/11186 obs_copy = batch_env.observ + 0 def env_step(arg1, arg2, arg3): # pylint: disable=unused-argument """Step of the environment.""" (logits, value_function) = get_policy( obs_copy, ppo_hparams, batch_env.action_space ) action = common_layers.sample_with_temperature(logits, sampling_temp) action = tf.cast(action, tf.int32) action = tf.reshape(action, shape=(num_agents,)) reward, done = batch_env.simulate(action) pdf = tfp.distributions.Categorical(logits=logits).prob(action) pdf = tf.reshape(pdf, shape=(num_agents,)) value_function = tf.reshape(value_function, shape=(num_agents,)) done = tf.reshape(done, shape=(num_agents,)) with tf.control_dependencies([reward, done]): return tf.identity(pdf), tf.identity(value_function), \ tf.identity(done) # TODO(piotrmilos): while_body is executed at most once, # thus should be replaced with tf.cond pdf, value_function, top_level_done = tf.while_loop( lambda _1, _2, _3: tf.equal(speculum.size(), 0), env_step, [ tf.constant(0.0, shape=(num_agents,)), tf.constant(0.0, shape=(num_agents,)), tf.constant(False, shape=(num_agents,)) ], parallel_iterations=1, back_prop=False, ) with tf.control_dependencies([pdf, value_function]): obs, reward, done, action = speculum.dequeue() to_save = [obs, reward, done, action, pdf, value_function] save_ops = [ tf.scatter_update(memory_slot, index, value) for memory_slot, value in zip(memory, to_save) ] cumulate_rewards_op = cumulative_rewards.assign_add(reward) agent_indices_to_reset = tf.where(top_level_done)[:, 0] with tf.control_dependencies([cumulate_rewards_op]): # TODO(piotrmilos): possibly we need cumulative_rewards.read_value() scores_sum_delta = tf.reduce_sum( tf.gather(cumulative_rewards.read_value(), agent_indices_to_reset)) scores_num_delta = tf.count_nonzero(done, dtype=tf.int32) with tf.control_dependencies(save_ops + [scores_sum_delta, scores_num_delta]): reset_env_op = 
batch_env.reset(agent_indices_to_reset) reset_cumulative_rewards_op = tf.scatter_update( cumulative_rewards, agent_indices_to_reset, tf.gather(zeros_tensor, agent_indices_to_reset)) with tf.control_dependencies([reset_env_op, reset_cumulative_rewards_op]): return [ index + 1, scores_sum + scores_sum_delta, scores_num + scores_num_delta ] def stop_condition(i, _, resets): return tf.cond(eval_phase_t, lambda: resets < num_agents, lambda: i < epoch_length) init = [tf.constant(0), tf.constant(0.0), tf.constant(0)] index, scores_sum, scores_num = tf.while_loop( stop_condition, step, init, parallel_iterations=1, back_prop=False) # We handle force_beginning_resets differently. We assume that all envs are # reseted at the end of episod (though it happens at the beginning of the # next one scores_num = tf.cond(force_beginning_resets, lambda: scores_num + len(batch_env), lambda: scores_num) with tf.control_dependencies([scores_sum]): scores_sum = tf.cond( force_beginning_resets, lambda: scores_sum + tf.reduce_sum(cumulative_rewards.read_value()), lambda: scores_sum) mean_score = tf.cond( tf.greater(scores_num, 0), lambda: scores_sum / tf.cast(scores_num, tf.float32), lambda: 0.) printing = tf.Print(0, [mean_score, scores_sum, scores_num], "mean_score: ") with tf.control_dependencies([index, printing]): memory = [mem.read_value() for mem in memory] # When generating real data together with PPO training we must use single # agent. For PPO to work we reshape the history, as if it was generated # by real_ppo_effective_num_agents. 
if ppo_hparams.effective_num_agents is not None and not eval_phase: new_memory = [] effective_num_agents = ppo_hparams.effective_num_agents assert epoch_length % ppo_hparams.effective_num_agents == 0, ( "The rollout of ppo_hparams.epoch_length will be distributed amongst" "effective_num_agents of agents") new_epoch_length = int(epoch_length / effective_num_agents) for mem, info in zip(memory, rollout_metadata): shape, _, name = info new_shape = [effective_num_agents, new_epoch_length] + shape[1:] perm = list(range(len(shape) + 1)) perm[0] = 1 perm[1] = 0 mem = tf.transpose(mem, perm=perm) mem = tf.reshape(mem, shape=new_shape) mem = tf.transpose( mem, perm=perm, name="collect_memory_%d_%s" % (new_epoch_length, name)) new_memory.append(mem) memory = new_memory with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): mean_score_summary = tf.cond( tf.greater(scores_num, 0), lambda: tf.summary.scalar("mean_score_this_iter", mean_score), str) summaries = tf.summary.merge([ mean_score_summary, tf.summary.scalar("episodes_finished_this_iter", scores_num) ]) return memory, summaries, initialization_lambda
[ "Collect", "trajectories", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/ppo_learner.py#L310-L515
[ "def", "_define_collect", "(", "batch_env", ",", "ppo_hparams", ",", "scope", ",", "frame_stack_size", ",", "eval_phase", ",", "sampling_temp", ",", "force_beginning_resets", ")", ":", "epoch_length", "=", "ppo_hparams", ".", "epoch_length", "to_initialize", "=", "[...
272500b6efe353aeb638d2745ed56e519462ca31
train
deconv2d
Deconvolution layer.
tensor2tensor/models/vanilla_gan.py
def deconv2d( input_, output_shape, k_h, k_w, d_h, d_w, stddev=0.02, name="deconv2d"): """Deconvolution layer.""" with tf.variable_scope(name): w = tf.get_variable( "w", [k_h, k_w, output_shape[-1], input_.get_shape()[-1]], initializer=tf.random_normal_initializer(stddev=stddev)) deconv = tf.nn.conv2d_transpose( input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1]) biases = tf.get_variable( "biases", [output_shape[-1]], initializer=tf.constant_initializer(0.0)) return tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
def deconv2d( input_, output_shape, k_h, k_w, d_h, d_w, stddev=0.02, name="deconv2d"): """Deconvolution layer.""" with tf.variable_scope(name): w = tf.get_variable( "w", [k_h, k_w, output_shape[-1], input_.get_shape()[-1]], initializer=tf.random_normal_initializer(stddev=stddev)) deconv = tf.nn.conv2d_transpose( input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1]) biases = tf.get_variable( "biases", [output_shape[-1]], initializer=tf.constant_initializer(0.0)) return tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
[ "Deconvolution", "layer", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/vanilla_gan.py#L37-L48
[ "def", "deconv2d", "(", "input_", ",", "output_shape", ",", "k_h", ",", "k_w", ",", "d_h", ",", "d_w", ",", "stddev", "=", "0.02", ",", "name", "=", "\"deconv2d\"", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ")", ":", "w", "=", "tf...
272500b6efe353aeb638d2745ed56e519462ca31
train
sliced_gan
Basic parameters for a vanilla_gan.
tensor2tensor/models/vanilla_gan.py
def sliced_gan(): """Basic parameters for a vanilla_gan.""" hparams = common_hparams.basic_params1() hparams.optimizer = "adam" hparams.learning_rate_constant = 0.0002 hparams.learning_rate_warmup_steps = 500 hparams.learning_rate_schedule = "constant * linear_warmup" hparams.label_smoothing = 0.0 hparams.batch_size = 128 hparams.hidden_size = 128 hparams.initializer = "uniform_unit_scaling" hparams.initializer_gain = 1.0 hparams.weight_decay = 1e-6 hparams.kernel_height = 4 hparams.kernel_width = 4 hparams.bottleneck_bits = 128 hparams.add_hparam("discriminator_batchnorm", True) hparams.add_hparam("num_sliced_vecs", 4096) return hparams
def sliced_gan(): """Basic parameters for a vanilla_gan.""" hparams = common_hparams.basic_params1() hparams.optimizer = "adam" hparams.learning_rate_constant = 0.0002 hparams.learning_rate_warmup_steps = 500 hparams.learning_rate_schedule = "constant * linear_warmup" hparams.label_smoothing = 0.0 hparams.batch_size = 128 hparams.hidden_size = 128 hparams.initializer = "uniform_unit_scaling" hparams.initializer_gain = 1.0 hparams.weight_decay = 1e-6 hparams.kernel_height = 4 hparams.kernel_width = 4 hparams.bottleneck_bits = 128 hparams.add_hparam("discriminator_batchnorm", True) hparams.add_hparam("num_sliced_vecs", 4096) return hparams
[ "Basic", "parameters", "for", "a", "vanilla_gan", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/vanilla_gan.py#L199-L217
[ "def", "sliced_gan", "(", ")", ":", "hparams", "=", "common_hparams", ".", "basic_params1", "(", ")", "hparams", ".", "optimizer", "=", "\"adam\"", "hparams", ".", "learning_rate_constant", "=", "0.0002", "hparams", ".", "learning_rate_warmup_steps", "=", "500", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
AbstractGAN.discriminator
Discriminator architecture based on InfoGAN. Args: x: input images, shape [bs, h, w, channels] is_training: boolean, are we in train or eval model. reuse: boolean, should params be re-used. Returns: out_logit: the output logits (before sigmoid).
tensor2tensor/models/vanilla_gan.py
def discriminator(self, x, is_training, reuse=False): """Discriminator architecture based on InfoGAN. Args: x: input images, shape [bs, h, w, channels] is_training: boolean, are we in train or eval model. reuse: boolean, should params be re-used. Returns: out_logit: the output logits (before sigmoid). """ hparams = self.hparams with tf.variable_scope( "discriminator", reuse=reuse, initializer=tf.random_normal_initializer(stddev=0.02)): batch_size, height, width = common_layers.shape_list(x)[:3] # Mapping x from [bs, h, w, c] to [bs, 1] net = tf.layers.conv2d(x, 64, (4, 4), strides=(2, 2), padding="SAME", name="d_conv1") # [bs, h/2, w/2, 64] net = lrelu(net) net = tf.layers.conv2d(net, 128, (4, 4), strides=(2, 2), padding="SAME", name="d_conv2") # [bs, h/4, w/4, 128] if hparams.discriminator_batchnorm: net = tf.layers.batch_normalization(net, training=is_training, momentum=0.999, name="d_bn2") net = lrelu(net) size = height * width net = tf.reshape(net, [batch_size, size * 8]) # [bs, h * w * 8] net = tf.layers.dense(net, 1024, name="d_fc3") # [bs, 1024] if hparams.discriminator_batchnorm: net = tf.layers.batch_normalization(net, training=is_training, momentum=0.999, name="d_bn3") net = lrelu(net) return net
def discriminator(self, x, is_training, reuse=False): """Discriminator architecture based on InfoGAN. Args: x: input images, shape [bs, h, w, channels] is_training: boolean, are we in train or eval model. reuse: boolean, should params be re-used. Returns: out_logit: the output logits (before sigmoid). """ hparams = self.hparams with tf.variable_scope( "discriminator", reuse=reuse, initializer=tf.random_normal_initializer(stddev=0.02)): batch_size, height, width = common_layers.shape_list(x)[:3] # Mapping x from [bs, h, w, c] to [bs, 1] net = tf.layers.conv2d(x, 64, (4, 4), strides=(2, 2), padding="SAME", name="d_conv1") # [bs, h/2, w/2, 64] net = lrelu(net) net = tf.layers.conv2d(net, 128, (4, 4), strides=(2, 2), padding="SAME", name="d_conv2") # [bs, h/4, w/4, 128] if hparams.discriminator_batchnorm: net = tf.layers.batch_normalization(net, training=is_training, momentum=0.999, name="d_bn2") net = lrelu(net) size = height * width net = tf.reshape(net, [batch_size, size * 8]) # [bs, h * w * 8] net = tf.layers.dense(net, 1024, name="d_fc3") # [bs, 1024] if hparams.discriminator_batchnorm: net = tf.layers.batch_normalization(net, training=is_training, momentum=0.999, name="d_bn3") net = lrelu(net) return net
[ "Discriminator", "architecture", "based", "on", "InfoGAN", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/vanilla_gan.py#L58-L93
[ "def", "discriminator", "(", "self", ",", "x", ",", "is_training", ",", "reuse", "=", "False", ")", ":", "hparams", "=", "self", ".", "hparams", "with", "tf", ".", "variable_scope", "(", "\"discriminator\"", ",", "reuse", "=", "reuse", ",", "initializer", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
AbstractGAN.generator
Generator outputting image in [0, 1].
tensor2tensor/models/vanilla_gan.py
def generator(self, z, is_training, out_shape): """Generator outputting image in [0, 1].""" hparams = self.hparams height, width, c_dim = out_shape batch_size = hparams.batch_size with tf.variable_scope( "generator", initializer=tf.random_normal_initializer(stddev=0.02)): net = tf.layers.dense(z, 1024, name="g_fc1") net = tf.layers.batch_normalization(net, training=is_training, momentum=0.999, name="g_bn1") net = lrelu(net) net = tf.layers.dense(net, 128 * (height // 4) * (width // 4), name="g_fc2") net = tf.layers.batch_normalization(net, training=is_training, momentum=0.999, name="g_bn2") net = lrelu(net) net = tf.reshape(net, [batch_size, height // 4, width // 4, 128]) net = deconv2d(net, [batch_size, height // 2, width // 2, 64], 4, 4, 2, 2, name="g_dc3") net = tf.layers.batch_normalization(net, training=is_training, momentum=0.999, name="g_bn3") net = lrelu(net) net = deconv2d(net, [batch_size, height, width, c_dim], 4, 4, 2, 2, name="g_dc4") out = tf.nn.sigmoid(net) return common_layers.convert_real_to_rgb(out)
def generator(self, z, is_training, out_shape): """Generator outputting image in [0, 1].""" hparams = self.hparams height, width, c_dim = out_shape batch_size = hparams.batch_size with tf.variable_scope( "generator", initializer=tf.random_normal_initializer(stddev=0.02)): net = tf.layers.dense(z, 1024, name="g_fc1") net = tf.layers.batch_normalization(net, training=is_training, momentum=0.999, name="g_bn1") net = lrelu(net) net = tf.layers.dense(net, 128 * (height // 4) * (width // 4), name="g_fc2") net = tf.layers.batch_normalization(net, training=is_training, momentum=0.999, name="g_bn2") net = lrelu(net) net = tf.reshape(net, [batch_size, height // 4, width // 4, 128]) net = deconv2d(net, [batch_size, height // 2, width // 2, 64], 4, 4, 2, 2, name="g_dc3") net = tf.layers.batch_normalization(net, training=is_training, momentum=0.999, name="g_bn3") net = lrelu(net) net = deconv2d(net, [batch_size, height, width, c_dim], 4, 4, 2, 2, name="g_dc4") out = tf.nn.sigmoid(net) return common_layers.convert_real_to_rgb(out)
[ "Generator", "outputting", "image", "in", "[", "0", "1", "]", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/vanilla_gan.py#L95-L121
[ "def", "generator", "(", "self", ",", "z", ",", "is_training", ",", "out_shape", ")", ":", "hparams", "=", "self", ".", "hparams", "height", ",", "width", ",", "c_dim", "=", "out_shape", "batch_size", "=", "hparams", ".", "batch_size", "with", "tf", ".",...
272500b6efe353aeb638d2745ed56e519462ca31
train
AbstractGAN.body
Body of the model. Args: features: a dictionary with the tensors. Returns: A pair (predictions, losses) where predictions is the generated image and losses is a dictionary of losses (that get added for the final loss).
tensor2tensor/models/vanilla_gan.py
def body(self, features): """Body of the model. Args: features: a dictionary with the tensors. Returns: A pair (predictions, losses) where predictions is the generated image and losses is a dictionary of losses (that get added for the final loss). """ features["targets"] = features["inputs"] is_training = self.hparams.mode == tf.estimator.ModeKeys.TRAIN # Input images. inputs = tf.to_float(features["targets_raw"]) # Noise vector. z = tf.random_uniform([self.hparams.batch_size, self.hparams.bottleneck_bits], minval=-1, maxval=1, name="z") # Generator output: fake images. out_shape = common_layers.shape_list(inputs)[1:4] g = self.generator(z, is_training, out_shape) losses = self.losses(inputs, g) # pylint: disable=not-callable summary_g_image = tf.reshape( g[0, :], [1] + common_layers.shape_list(inputs)[1:]) tf.summary.image("generated", summary_g_image, max_outputs=1) if is_training: # Returns an dummy output and the losses dictionary. return tf.zeros_like(inputs), losses return tf.reshape(g, tf.shape(inputs)), losses
def body(self, features): """Body of the model. Args: features: a dictionary with the tensors. Returns: A pair (predictions, losses) where predictions is the generated image and losses is a dictionary of losses (that get added for the final loss). """ features["targets"] = features["inputs"] is_training = self.hparams.mode == tf.estimator.ModeKeys.TRAIN # Input images. inputs = tf.to_float(features["targets_raw"]) # Noise vector. z = tf.random_uniform([self.hparams.batch_size, self.hparams.bottleneck_bits], minval=-1, maxval=1, name="z") # Generator output: fake images. out_shape = common_layers.shape_list(inputs)[1:4] g = self.generator(z, is_training, out_shape) losses = self.losses(inputs, g) # pylint: disable=not-callable summary_g_image = tf.reshape( g[0, :], [1] + common_layers.shape_list(inputs)[1:]) tf.summary.image("generated", summary_g_image, max_outputs=1) if is_training: # Returns an dummy output and the losses dictionary. return tf.zeros_like(inputs), losses return tf.reshape(g, tf.shape(inputs)), losses
[ "Body", "of", "the", "model", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/vanilla_gan.py#L127-L160
[ "def", "body", "(", "self", ",", "features", ")", ":", "features", "[", "\"targets\"", "]", "=", "features", "[", "\"inputs\"", "]", "is_training", "=", "self", ".", "hparams", ".", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", "...
272500b6efe353aeb638d2745ed56e519462ca31
train
inputs
Make Inputs for built-in datasets. Args: num_devices: how many devices to build the inputs for. dataset_name: a TFDS or T2T dataset name. If it's a T2T dataset name, prefix with "t2t_". data_dir: data directory. input_name: optional, name of the inputs from the dictionary. num_chunks: optional, into how many pieces should we chunk (large inputs). append_targets: optional, instead of inputs return a pair (inputs, targets) which is useful for autoregressive models. Returns: trax.inputs.Inputs
tensor2tensor/trax/inputs.py
def inputs(num_devices, dataset_name, data_dir=None, input_name=None, num_chunks=0, append_targets=False): """Make Inputs for built-in datasets. Args: num_devices: how many devices to build the inputs for. dataset_name: a TFDS or T2T dataset name. If it's a T2T dataset name, prefix with "t2t_". data_dir: data directory. input_name: optional, name of the inputs from the dictionary. num_chunks: optional, into how many pieces should we chunk (large inputs). append_targets: optional, instead of inputs return a pair (inputs, targets) which is useful for autoregressive models. Returns: trax.inputs.Inputs """ assert data_dir, "Must provide a data directory" data_dir = os.path.expanduser(data_dir) (train_batches, train_eval_batches, eval_batches, input_name, input_shape) = _train_and_eval_batches( dataset_name, data_dir, input_name, num_devices) def numpy_stream(dataset): return dataset_to_stream( dataset, input_name, num_chunks=num_chunks, append_targets=append_targets) if num_chunks > 0: length = input_shape[0] input_shape = tuple( [tuple([length // num_chunks] + list(input_shape)[1:])] * num_chunks) return Inputs(train_stream=lambda: numpy_stream(train_batches), train_eval_stream=lambda: numpy_stream(train_eval_batches), eval_stream=lambda: numpy_stream(eval_batches), input_shape=input_shape)
def inputs(num_devices, dataset_name, data_dir=None, input_name=None, num_chunks=0, append_targets=False): """Make Inputs for built-in datasets. Args: num_devices: how many devices to build the inputs for. dataset_name: a TFDS or T2T dataset name. If it's a T2T dataset name, prefix with "t2t_". data_dir: data directory. input_name: optional, name of the inputs from the dictionary. num_chunks: optional, into how many pieces should we chunk (large inputs). append_targets: optional, instead of inputs return a pair (inputs, targets) which is useful for autoregressive models. Returns: trax.inputs.Inputs """ assert data_dir, "Must provide a data directory" data_dir = os.path.expanduser(data_dir) (train_batches, train_eval_batches, eval_batches, input_name, input_shape) = _train_and_eval_batches( dataset_name, data_dir, input_name, num_devices) def numpy_stream(dataset): return dataset_to_stream( dataset, input_name, num_chunks=num_chunks, append_targets=append_targets) if num_chunks > 0: length = input_shape[0] input_shape = tuple( [tuple([length // num_chunks] + list(input_shape)[1:])] * num_chunks) return Inputs(train_stream=lambda: numpy_stream(train_batches), train_eval_stream=lambda: numpy_stream(train_eval_batches), eval_stream=lambda: numpy_stream(eval_batches), input_shape=input_shape)
[ "Make", "Inputs", "for", "built", "-", "in", "datasets", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/inputs.py#L58-L95
[ "def", "inputs", "(", "num_devices", ",", "dataset_name", ",", "data_dir", "=", "None", ",", "input_name", "=", "None", ",", "num_chunks", "=", "0", ",", "append_targets", "=", "False", ")", ":", "assert", "data_dir", ",", "\"Must provide a data directory\"", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
random_inputs
Make random Inputs for debugging. Args: num_devices: how many devices to build the inputs for. input_shape: the shape of inputs (including batch dimension). input_dtype: the type of the inputs (int32 by default). input_range: the range of inputs (defaults to (0, 255)). output_shape: the shape of outputs (including batch dimension). output_dtype: the type of the outputs (int32 by default). output_range: the range of outputs (defaults to (0, 9)). Returns: trax.inputs.Inputs
tensor2tensor/trax/inputs.py
def random_inputs( num_devices, input_shape=gin.REQUIRED, input_dtype=np.int32, input_range=(0, 255), output_shape=gin.REQUIRED, output_dtype=np.int32, output_range=(0, 9)): """Make random Inputs for debugging. Args: num_devices: how many devices to build the inputs for. input_shape: the shape of inputs (including batch dimension). input_dtype: the type of the inputs (int32 by default). input_range: the range of inputs (defaults to (0, 255)). output_shape: the shape of outputs (including batch dimension). output_dtype: the type of the outputs (int32 by default). output_range: the range of outputs (defaults to (0, 9)). Returns: trax.inputs.Inputs """ if input_shape[0] % num_devices != 0: tf.logging.fatal( "num_devices[%d] should divide the first dimension of input_shape[%s]", num_devices, input_shape) if output_shape[0] % num_devices != 0: tf.logging.fatal( "num_devices[%d] should divide the first dimension of output_shape[%s]", num_devices, output_shape) def random_minibatches(): """Generate a stream of random mini-batches.""" if input_dtype in [np.float16, np.float32, np.float64]: rand = np.random.uniform else: rand = np.random.random_integers while True: inp = rand(input_range[0], input_range[1], input_shape) inp = inp.astype(input_dtype) out = rand(output_range[0], output_range[1], output_shape) out = out.astype(output_dtype) yield inp, out input_shape_without_batch = list(input_shape)[1:] return Inputs(train_stream=random_minibatches, train_eval_stream=random_minibatches, eval_stream=random_minibatches, input_shape=input_shape_without_batch)
def random_inputs( num_devices, input_shape=gin.REQUIRED, input_dtype=np.int32, input_range=(0, 255), output_shape=gin.REQUIRED, output_dtype=np.int32, output_range=(0, 9)): """Make random Inputs for debugging. Args: num_devices: how many devices to build the inputs for. input_shape: the shape of inputs (including batch dimension). input_dtype: the type of the inputs (int32 by default). input_range: the range of inputs (defaults to (0, 255)). output_shape: the shape of outputs (including batch dimension). output_dtype: the type of the outputs (int32 by default). output_range: the range of outputs (defaults to (0, 9)). Returns: trax.inputs.Inputs """ if input_shape[0] % num_devices != 0: tf.logging.fatal( "num_devices[%d] should divide the first dimension of input_shape[%s]", num_devices, input_shape) if output_shape[0] % num_devices != 0: tf.logging.fatal( "num_devices[%d] should divide the first dimension of output_shape[%s]", num_devices, output_shape) def random_minibatches(): """Generate a stream of random mini-batches.""" if input_dtype in [np.float16, np.float32, np.float64]: rand = np.random.uniform else: rand = np.random.random_integers while True: inp = rand(input_range[0], input_range[1], input_shape) inp = inp.astype(input_dtype) out = rand(output_range[0], output_range[1], output_shape) out = out.astype(output_dtype) yield inp, out input_shape_without_batch = list(input_shape)[1:] return Inputs(train_stream=random_minibatches, train_eval_stream=random_minibatches, eval_stream=random_minibatches, input_shape=input_shape_without_batch)
[ "Make", "random", "Inputs", "for", "debugging", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/inputs.py#L99-L143
[ "def", "random_inputs", "(", "num_devices", ",", "input_shape", "=", "gin", ".", "REQUIRED", ",", "input_dtype", "=", "np", ".", "int32", ",", "input_range", "=", "(", "0", ",", "255", ")", ",", "output_shape", "=", "gin", ".", "REQUIRED", ",", "output_d...
272500b6efe353aeb638d2745ed56e519462ca31
train
dataset_to_stream
Takes a tf.Dataset and creates a numpy stream of ready batches.
tensor2tensor/trax/inputs.py
def dataset_to_stream(dataset, input_name, num_chunks=0, append_targets=False): """Takes a tf.Dataset and creates a numpy stream of ready batches.""" for example in tfds.as_numpy(dataset): inp, out = example[0][input_name], example[1] if len(out.shape) > 1 and out.shape[-1] == 1: out = np.squeeze(out, axis=-1) if num_chunks > 0: inp = np.split(inp, num_chunks, axis=1) out = np.split(out, num_chunks, axis=1) if append_targets: inp = (inp, out) yield inp, out
def dataset_to_stream(dataset, input_name, num_chunks=0, append_targets=False): """Takes a tf.Dataset and creates a numpy stream of ready batches.""" for example in tfds.as_numpy(dataset): inp, out = example[0][input_name], example[1] if len(out.shape) > 1 and out.shape[-1] == 1: out = np.squeeze(out, axis=-1) if num_chunks > 0: inp = np.split(inp, num_chunks, axis=1) out = np.split(out, num_chunks, axis=1) if append_targets: inp = (inp, out) yield inp, out
[ "Takes", "a", "tf", ".", "Dataset", "and", "creates", "a", "numpy", "stream", "of", "ready", "batches", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/inputs.py#L146-L157
[ "def", "dataset_to_stream", "(", "dataset", ",", "input_name", ",", "num_chunks", "=", "0", ",", "append_targets", "=", "False", ")", ":", "for", "example", "in", "tfds", ".", "as_numpy", "(", "dataset", ")", ":", "inp", ",", "out", "=", "example", "[", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
_train_and_eval_dataset_v1
Return train and evaluation datasets, feature info and supervised keys.
tensor2tensor/trax/inputs.py
def _train_and_eval_dataset_v1(problem_name, data_dir): """Return train and evaluation datasets, feature info and supervised keys.""" assert not tf.executing_eagerly(), "tf.eager mode must be turned off." problem = t2t_problems.problem(problem_name) train_dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, data_dir) train_dataset = train_dataset.map(_select_features) eval_dataset = problem.dataset(tf.estimator.ModeKeys.EVAL, data_dir) eval_dataset = eval_dataset.map(_select_features) hparams = problem.get_hparams() # We take a few training examples to guess the shapes. input_shapes, target_shapes = [], [] example_tensor = train_dataset.make_one_shot_iterator().get_next() sess = tf.Session() example1 = sess.run(example_tensor) example2 = sess.run(example_tensor) example3 = sess.run(example_tensor) # We use "inputs" as input except for purely auto-regressive tasks like # language models where "targets" are used as input_key. input_key = "inputs" if "inputs" in example1 else "targets" supervised_keys = ([input_key], ["targets"]) for example in [example1, example2, example3]: input_shapes.append(list(example[input_key].shape)) target_shapes.append(list(example["targets"].shape)) input_vocab_size = hparams.vocab_size[input_key] target_vocab_size = hparams.vocab_size["targets"] input_info = _make_info(input_shapes, input_vocab_size) target_info = _make_info(target_shapes, target_vocab_size) info = {input_key: input_info, "targets": target_info} return train_dataset, eval_dataset, info, supervised_keys
def _train_and_eval_dataset_v1(problem_name, data_dir): """Return train and evaluation datasets, feature info and supervised keys.""" assert not tf.executing_eagerly(), "tf.eager mode must be turned off." problem = t2t_problems.problem(problem_name) train_dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, data_dir) train_dataset = train_dataset.map(_select_features) eval_dataset = problem.dataset(tf.estimator.ModeKeys.EVAL, data_dir) eval_dataset = eval_dataset.map(_select_features) hparams = problem.get_hparams() # We take a few training examples to guess the shapes. input_shapes, target_shapes = [], [] example_tensor = train_dataset.make_one_shot_iterator().get_next() sess = tf.Session() example1 = sess.run(example_tensor) example2 = sess.run(example_tensor) example3 = sess.run(example_tensor) # We use "inputs" as input except for purely auto-regressive tasks like # language models where "targets" are used as input_key. input_key = "inputs" if "inputs" in example1 else "targets" supervised_keys = ([input_key], ["targets"]) for example in [example1, example2, example3]: input_shapes.append(list(example[input_key].shape)) target_shapes.append(list(example["targets"].shape)) input_vocab_size = hparams.vocab_size[input_key] target_vocab_size = hparams.vocab_size["targets"] input_info = _make_info(input_shapes, input_vocab_size) target_info = _make_info(target_shapes, target_vocab_size) info = {input_key: input_info, "targets": target_info} return train_dataset, eval_dataset, info, supervised_keys
[ "Return", "train", "and", "evaluation", "datasets", "feature", "info", "and", "supervised", "keys", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/inputs.py#L229-L257
[ "def", "_train_and_eval_dataset_v1", "(", "problem_name", ",", "data_dir", ")", ":", "assert", "not", "tf", ".", "executing_eagerly", "(", ")", ",", "\"tf.eager mode must be turned off.\"", "problem", "=", "t2t_problems", ".", "problem", "(", "problem_name", ")", "t...
272500b6efe353aeb638d2745ed56e519462ca31
train
batch_fun
Batching function.
tensor2tensor/trax/inputs.py
def batch_fun(dataset, training, shapes, target_names, num_devices, batch_size_per_device=32, batch_size=None, eval_batch_size=32, bucket_length=32, buckets=None, batch_shuffle_size=128, max_eval_length=None): """Batching function.""" del target_names # Batch size is batch_size_per_device * num_devices unless given directly. batch_size = batch_size or batch_size_per_device * num_devices # If bucketing is not specified, check if target shapes are variable. cur_batch_size = batch_size if training else eval_batch_size # Make cur_batch_size divisible by num_devices. cur_batch_size = max(cur_batch_size // num_devices, 1) * num_devices # Create heuristic buckets is none are specified. if buckets is None: variable_target_shapes = False target_shape = shapes[1] for dim in target_shape: if dim is None: variable_target_shapes = True tf.logging.info("Heuristically setting bucketing to %s based on shapes " "of target tensors." % variable_target_shapes) if variable_target_shapes: bucket_boundaries = [bucket_length // 4, bucket_length // 2, bucket_length, bucket_length * 2, bucket_length * 4, bucket_length * 8, bucket_length * 16] # We will pad to boundaries which pads to bucket_boundary - 1: add 1 here. bucket_boundaries = [b + 1 for b in bucket_boundaries] if not training: max_eval_length = max_eval_length or bucket_length * 32 bucket_boundaries[-1] = max_eval_length bucket_batch_sizes = [cur_batch_size * 4, cur_batch_size * 2, cur_batch_size, cur_batch_size // 2, cur_batch_size // 4, cur_batch_size // 8, cur_batch_size // 16, 1] if not training: bucket_batch_sizes[-2] = cur_batch_size // max_eval_length # Make batch sizes divisible by num_devices. bucket_batch_sizes = [max(b // num_devices, 1) * num_devices for b in bucket_batch_sizes] buckets = (bucket_boundaries, bucket_batch_sizes) if buckets: tf.logging.info("Bucketing with buckets %s." 
% str(buckets)) def example_length(_, target): return tf.shape(target)[0] boundaries, batch_sizes = buckets dataset = dataset.apply(tf.data.experimental.bucket_by_sequence_length( example_length, boundaries, batch_sizes, pad_to_bucket_boundary=True)) else: dataset = dataset.padded_batch(cur_batch_size, shapes) if training: return dataset.shuffle(batch_shuffle_size) return dataset
def batch_fun(dataset, training, shapes, target_names, num_devices, batch_size_per_device=32, batch_size=None, eval_batch_size=32, bucket_length=32, buckets=None, batch_shuffle_size=128, max_eval_length=None): """Batching function.""" del target_names # Batch size is batch_size_per_device * num_devices unless given directly. batch_size = batch_size or batch_size_per_device * num_devices # If bucketing is not specified, check if target shapes are variable. cur_batch_size = batch_size if training else eval_batch_size # Make cur_batch_size divisible by num_devices. cur_batch_size = max(cur_batch_size // num_devices, 1) * num_devices # Create heuristic buckets is none are specified. if buckets is None: variable_target_shapes = False target_shape = shapes[1] for dim in target_shape: if dim is None: variable_target_shapes = True tf.logging.info("Heuristically setting bucketing to %s based on shapes " "of target tensors." % variable_target_shapes) if variable_target_shapes: bucket_boundaries = [bucket_length // 4, bucket_length // 2, bucket_length, bucket_length * 2, bucket_length * 4, bucket_length * 8, bucket_length * 16] # We will pad to boundaries which pads to bucket_boundary - 1: add 1 here. bucket_boundaries = [b + 1 for b in bucket_boundaries] if not training: max_eval_length = max_eval_length or bucket_length * 32 bucket_boundaries[-1] = max_eval_length bucket_batch_sizes = [cur_batch_size * 4, cur_batch_size * 2, cur_batch_size, cur_batch_size // 2, cur_batch_size // 4, cur_batch_size // 8, cur_batch_size // 16, 1] if not training: bucket_batch_sizes[-2] = cur_batch_size // max_eval_length # Make batch sizes divisible by num_devices. bucket_batch_sizes = [max(b // num_devices, 1) * num_devices for b in bucket_batch_sizes] buckets = (bucket_boundaries, bucket_batch_sizes) if buckets: tf.logging.info("Bucketing with buckets %s." 
% str(buckets)) def example_length(_, target): return tf.shape(target)[0] boundaries, batch_sizes = buckets dataset = dataset.apply(tf.data.experimental.bucket_by_sequence_length( example_length, boundaries, batch_sizes, pad_to_bucket_boundary=True)) else: dataset = dataset.padded_batch(cur_batch_size, shapes) if training: return dataset.shuffle(batch_shuffle_size) return dataset
[ "Batching", "function", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/inputs.py#L262-L316
[ "def", "batch_fun", "(", "dataset", ",", "training", ",", "shapes", ",", "target_names", ",", "num_devices", ",", "batch_size_per_device", "=", "32", ",", "batch_size", "=", "None", ",", "eval_batch_size", "=", "32", ",", "bucket_length", "=", "32", ",", "bu...
272500b6efe353aeb638d2745ed56e519462ca31
train
lm1b_preprocess
Preprocessing for LM1B: filter out targets exceeding maximum length.
tensor2tensor/trax/inputs.py
def lm1b_preprocess(dataset, training, max_target_length=-1, max_eval_target_length=-1): """Preprocessing for LM1B: filter out targets exceeding maximum length.""" def target_right_length(_, target): return tf.less(tf.shape(target)[0], max_target_length + 1) def eval_target_right_length(_, target): return tf.less(tf.shape(target)[0], max_eval_target_length + 1) if max_target_length > 0 and training: dataset = dataset.filter(target_right_length) if max_eval_target_length > 0 and not training: dataset = dataset.filter(eval_target_right_length) return dataset
def lm1b_preprocess(dataset, training, max_target_length=-1, max_eval_target_length=-1): """Preprocessing for LM1B: filter out targets exceeding maximum length.""" def target_right_length(_, target): return tf.less(tf.shape(target)[0], max_target_length + 1) def eval_target_right_length(_, target): return tf.less(tf.shape(target)[0], max_eval_target_length + 1) if max_target_length > 0 and training: dataset = dataset.filter(target_right_length) if max_eval_target_length > 0 and not training: dataset = dataset.filter(eval_target_right_length) return dataset
[ "Preprocessing", "for", "LM1B", ":", "filter", "out", "targets", "exceeding", "maximum", "length", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/inputs.py#L337-L353
[ "def", "lm1b_preprocess", "(", "dataset", ",", "training", ",", "max_target_length", "=", "-", "1", ",", "max_eval_target_length", "=", "-", "1", ")", ":", "def", "target_right_length", "(", "_", ",", "target", ")", ":", "return", "tf", ".", "less", "(", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
shuffle_and_batch_data
Shuffle and batch the given dataset.
tensor2tensor/trax/inputs.py
def shuffle_and_batch_data(dataset, target_names, features_info, training, num_devices, shuffle_buffer_size=1024, preprocess_fun=no_preprocess): """Shuffle and batch the given dataset.""" def append_targets(example): """Append targets to the example dictionary. Needed for Keras.""" if len(target_names) == 1: return (example, example[target_names[0]]) targets = {} for name in target_names: targets[name] = example[name] return (example, targets) dataset = dataset.map(append_targets) if training: dataset = dataset.repeat() # Skip a random fraction at the beginning of the stream. The skip is # essential for synchronous highly-parallel training to avoid multiple # replicas reading the same data in lock-step. dataset = dataset.skip(random.randint(0, _MAX_SKIP_EXAMPLES)) dataset = preprocess_fun(dataset, training) shapes = {k: features_info[k].shape for k in features_info} shapes = (shapes, shapes[target_names[0]]) dataset = dataset.shuffle(shuffle_buffer_size) dataset = batch_fun(dataset, training, shapes, target_names, num_devices) return dataset.prefetch(2)
def shuffle_and_batch_data(dataset, target_names, features_info, training, num_devices, shuffle_buffer_size=1024, preprocess_fun=no_preprocess): """Shuffle and batch the given dataset.""" def append_targets(example): """Append targets to the example dictionary. Needed for Keras.""" if len(target_names) == 1: return (example, example[target_names[0]]) targets = {} for name in target_names: targets[name] = example[name] return (example, targets) dataset = dataset.map(append_targets) if training: dataset = dataset.repeat() # Skip a random fraction at the beginning of the stream. The skip is # essential for synchronous highly-parallel training to avoid multiple # replicas reading the same data in lock-step. dataset = dataset.skip(random.randint(0, _MAX_SKIP_EXAMPLES)) dataset = preprocess_fun(dataset, training) shapes = {k: features_info[k].shape for k in features_info} shapes = (shapes, shapes[target_names[0]]) dataset = dataset.shuffle(shuffle_buffer_size) dataset = batch_fun(dataset, training, shapes, target_names, num_devices) return dataset.prefetch(2)
[ "Shuffle", "and", "batch", "the", "given", "dataset", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/inputs.py#L357-L385
[ "def", "shuffle_and_batch_data", "(", "dataset", ",", "target_names", ",", "features_info", ",", "training", ",", "num_devices", ",", "shuffle_buffer_size", "=", "1024", ",", "preprocess_fun", "=", "no_preprocess", ")", ":", "def", "append_targets", "(", "example", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
_train_and_eval_batches
Return train and eval batches with input name and shape.
tensor2tensor/trax/inputs.py
def _train_and_eval_batches(dataset, data_dir, input_name, num_devices): """Return train and eval batches with input name and shape.""" (train_data, eval_data, features_info, keys) = train_and_eval_dataset( dataset, data_dir) input_names, target_names = keys[0], keys[1] train_batches = shuffle_and_batch_data( train_data, target_names, features_info, training=True, num_devices=num_devices) train_eval_batches = shuffle_and_batch_data( # Data for eval-on-train. train_data, target_names, features_info, training=False, num_devices=num_devices) eval_batches = shuffle_and_batch_data( eval_data, target_names, features_info, training=False, num_devices=num_devices) input_name = input_name or input_names[0] input_shape = features_info[input_name].shape return (train_batches, train_eval_batches, eval_batches, input_name, list(input_shape))
def _train_and_eval_batches(dataset, data_dir, input_name, num_devices): """Return train and eval batches with input name and shape.""" (train_data, eval_data, features_info, keys) = train_and_eval_dataset( dataset, data_dir) input_names, target_names = keys[0], keys[1] train_batches = shuffle_and_batch_data( train_data, target_names, features_info, training=True, num_devices=num_devices) train_eval_batches = shuffle_and_batch_data( # Data for eval-on-train. train_data, target_names, features_info, training=False, num_devices=num_devices) eval_batches = shuffle_and_batch_data( eval_data, target_names, features_info, training=False, num_devices=num_devices) input_name = input_name or input_names[0] input_shape = features_info[input_name].shape return (train_batches, train_eval_batches, eval_batches, input_name, list(input_shape))
[ "Return", "train", "and", "eval", "batches", "with", "input", "name", "and", "shape", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/inputs.py#L388-L405
[ "def", "_train_and_eval_batches", "(", "dataset", ",", "data_dir", ",", "input_name", ",", "num_devices", ")", ":", "(", "train_data", ",", "eval_data", ",", "features_info", ",", "keys", ")", "=", "train_and_eval_dataset", "(", "dataset", ",", "data_dir", ")", ...
272500b6efe353aeb638d2745ed56e519462ca31
train
get_multi_dataset
Returns a Dataset that samples records from one or more Datasets. Args: datasets: A list of one or more Dataset objects to sample from. pmf: A tensor of shape [len(datasets)], the probabilities to sample each dataset with. This tensor is often constructed with the global_step. If this is None, we sample from the datasets uniformly at random. Returns: A Dataset object containing records from multiple datasets. Note that because this dataset iterates through other datasets it is stateful, thus you will need to call make_initializable_iterator instead of make_one_shot_iterator.
tensor2tensor/data_generators/multi_problem_v2.py
def get_multi_dataset(datasets, pmf=None): """Returns a Dataset that samples records from one or more Datasets. Args: datasets: A list of one or more Dataset objects to sample from. pmf: A tensor of shape [len(datasets)], the probabilities to sample each dataset with. This tensor is often constructed with the global_step. If this is None, we sample from the datasets uniformly at random. Returns: A Dataset object containing records from multiple datasets. Note that because this dataset iterates through other datasets it is stateful, thus you will need to call make_initializable_iterator instead of make_one_shot_iterator. """ pmf = tf.fill([len(datasets)], 1.0 / len(datasets)) if pmf is None else pmf samplers = [d.repeat().make_one_shot_iterator().get_next for d in datasets] sample = lambda _: categorical_case(pmf, samplers) return tf.data.Dataset.from_tensors([]).repeat().map(sample)
def get_multi_dataset(datasets, pmf=None): """Returns a Dataset that samples records from one or more Datasets. Args: datasets: A list of one or more Dataset objects to sample from. pmf: A tensor of shape [len(datasets)], the probabilities to sample each dataset with. This tensor is often constructed with the global_step. If this is None, we sample from the datasets uniformly at random. Returns: A Dataset object containing records from multiple datasets. Note that because this dataset iterates through other datasets it is stateful, thus you will need to call make_initializable_iterator instead of make_one_shot_iterator. """ pmf = tf.fill([len(datasets)], 1.0 / len(datasets)) if pmf is None else pmf samplers = [d.repeat().make_one_shot_iterator().get_next for d in datasets] sample = lambda _: categorical_case(pmf, samplers) return tf.data.Dataset.from_tensors([]).repeat().map(sample)
[ "Returns", "a", "Dataset", "that", "samples", "records", "from", "one", "or", "more", "Datasets", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem_v2.py#L205-L223
[ "def", "get_multi_dataset", "(", "datasets", ",", "pmf", "=", "None", ")", ":", "pmf", "=", "tf", ".", "fill", "(", "[", "len", "(", "datasets", ")", "]", ",", "1.0", "/", "len", "(", "datasets", ")", ")", "if", "pmf", "is", "None", "else", "pmf"...
272500b6efe353aeb638d2745ed56e519462ca31
train
get_schedule_distribution
Computes the pmf of a schedule given the global_step. Args: schedule: A schedule tuple, see encode_schedule for details. global_step: A scalar tensor, the step to query the schedule. Returns: A 1-D tensor of probs, the sampling distribution of the global_step.
tensor2tensor/data_generators/multi_problem_v2.py
def get_schedule_distribution(schedule, global_step=None): """Computes the pmf of a schedule given the global_step. Args: schedule: A schedule tuple, see encode_schedule for details. global_step: A scalar tensor, the step to query the schedule. Returns: A 1-D tensor of probs, the sampling distribution of the global_step. """ interpolation, steps, pmfs = schedule if len(pmfs) == 1: # py_func doesn't seem to work on TPU - at least get the constant case to # run. # TODO(noam): get the general case working. return pmfs[0] if global_step is None: global_step = tf.train.get_or_create_global_step() if interpolation == 'step': interpolation_fn = step_interpolation elif interpolation == 'linear': interpolation_fn = linear_interpolation else: raise ValueError('Invalid interpolation strategy: %s' % interpolation) return tf.reshape( tf.py_func( func=lambda x: interpolation_fn(x, np.array(steps), np.array(pmfs)), inp=[global_step], Tout=tf.float32), [len(pmfs[0])])
def get_schedule_distribution(schedule, global_step=None): """Computes the pmf of a schedule given the global_step. Args: schedule: A schedule tuple, see encode_schedule for details. global_step: A scalar tensor, the step to query the schedule. Returns: A 1-D tensor of probs, the sampling distribution of the global_step. """ interpolation, steps, pmfs = schedule if len(pmfs) == 1: # py_func doesn't seem to work on TPU - at least get the constant case to # run. # TODO(noam): get the general case working. return pmfs[0] if global_step is None: global_step = tf.train.get_or_create_global_step() if interpolation == 'step': interpolation_fn = step_interpolation elif interpolation == 'linear': interpolation_fn = linear_interpolation else: raise ValueError('Invalid interpolation strategy: %s' % interpolation) return tf.reshape( tf.py_func( func=lambda x: interpolation_fn(x, np.array(steps), np.array(pmfs)), inp=[global_step], Tout=tf.float32), [len(pmfs[0])])
[ "Computes", "the", "pmf", "of", "a", "schedule", "given", "the", "global_step", "." ]
tensorflow/tensor2tensor
python
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/multi_problem_v2.py#L226-L253
[ "def", "get_schedule_distribution", "(", "schedule", ",", "global_step", "=", "None", ")", ":", "interpolation", ",", "steps", ",", "pmfs", "=", "schedule", "if", "len", "(", "pmfs", ")", "==", "1", ":", "# py_func doesn't seem to work on TPU - at least get the cons...
272500b6efe353aeb638d2745ed56e519462ca31