# NOTE(review): dataset-viewer column header ("text stringlengths 1 93.6k |")
# was extraction residue, not source code — removed.
self._output_size = num_units
|
self._linear1 = [None] * self._number_of_groups
|
self._linear2 = None
|
@property
def state_size(self):
    """The size(s) of the state(s) tracked by this cell.

    Returns:
      The value stored in ``self._state_size`` (set during construction).
    """
    return self._state_size
|
@property
def output_size(self):
    """Integer size of the outputs produced by this cell.

    Returns:
      The value stored in ``self._output_size`` (set during construction).
    """
    return self._output_size
|
def _get_input_for_group(self, inputs, group_id, group_size):
    """Extract the column slice of `inputs` that belongs to one cell group.

    Args:
      inputs: cell input or its previous state, a Tensor, 2D,
        [batch x num_units].
      group_id: group id, a Scalar, for which to prepare input.
      group_size: size of the group.

    Returns:
      Subset of `inputs` corresponding to group `group_id`, a Tensor, 2D,
      [batch x num_units/number_of_groups].
    """
    # Columns [group_id * group_size, (group_id + 1) * group_size) hold
    # this group's features; rows span the full batch.
    start_col = group_id * group_size
    return array_ops.slice(
        input_=inputs,
        begin=[0, start_col],
        size=[self._batch_size, group_size],
        name="GLSTM_group%d_input_generation" % group_id)
|
def call(self, inputs, state):
|
"""Run one step of G-LSTM.
|
Args:
|
inputs: input Tensor, 2D, [batch x num_units].
|
state: this must be a tuple of state Tensors, both `2-D`,
|
with column sizes `c_state` and `m_state`.
|
Returns:
|
A tuple containing:
|
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
|
G-LSTM after reading `inputs` when previous state was `state`.
|
Here output_dim is:
|
num_proj if num_proj was set,
|
num_units otherwise.
|
- LSTMStateTuple representing the new state of G-LSTM cell
|
after reading `inputs` when the previous state was `state`.
|
Raises:
|
ValueError: If input size cannot be inferred from inputs via
|
static shape inference.
|
"""
|
(c_prev, m_prev) = state
|
self._batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]
|
input_size = inputs.shape[-1].value or array_ops.shape(inputs)[-1]
|
dtype = inputs.dtype
|
scope = vs.get_variable_scope()
|
with vs.variable_scope(scope, initializer=self._initializer):
|
i_parts = []
|
j_parts = []
|
f_parts = []
|
o_parts = []
|
for group_id in range(self._number_of_groups):
|
with vs.variable_scope("group%d" % group_id):
|
x_g_id = array_ops.concat(
|
[self._get_input_for_group(inputs, group_id,
|
int(input_size / self._number_of_groups)),
|
#self._group_shape[0]), # this is only correct if inputs dim = num_units!!!
|
self._get_input_for_group(m_prev, group_id,
|
int(self._output_size / self._number_of_groups))], axis=1)
|
#self._group_shape[0])], axis=1)
|
if self._linear1[group_id] is None:
|
self._linear1[group_id] = _Linear(x_g_id, 4 * self._group_shape[1], False)
|
R_k = self._linear1[group_id](x_g_id) # pylint: disable=invalid-name
|
i_k, j_k, f_k, o_k = array_ops.split(R_k, 4, 1)
|
i_parts.append(i_k)
|
j_parts.append(j_k)
|
f_parts.append(f_k)
|
o_parts.append(o_k)
|
bi = vs.get_variable(name="bias_i",
|
shape=[self._num_units],
|
dtype=dtype,
|
initializer=
|
init_ops.constant_initializer(0.0, dtype=dtype))
|
bj = vs.get_variable(name="bias_j",
|
shape=[self._num_units],
|
dtype=dtype,
|
initializer=
|
init_ops.constant_initializer(0.0, dtype=dtype))
|
bf = vs.get_variable(name="bias_f",
|
shape=[self._num_units],
|
dtype=dtype,
|
initializer=
|
init_ops.constant_initializer(0.0, dtype=dtype))
|
bo = vs.get_variable(name="bias_o",
|
shape=[self._num_units],
|
dtype=dtype,
|
initializer=
|
init_ops.constant_initializer(0.0, dtype=dtype))
|
i = nn_ops.bias_add(array_ops.concat(i_parts, axis=1), bi)
|
# NOTE(review): source truncated here mid-`call` method; trailing
# dataset-viewer footer text ("Subsets and Splits ...") was extraction
# residue and has been removed.