python_code | repo_name | file_path
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for DM21 functionals interface to PySCF."""
from setuptools import setup
REQUIRED_PACKAGES = [
'absl-py',
'attrs',
# Note PySCF 1.7.6 and older do not support h5py 3.3.0:
# https://github.com/pyscf/pyscf/issues/1016
'h5py',
'numpy',
# Note DM21 functionals are compatible with PySCF 1.7.6 if an older version
# of h5py is used.
'pyscf>=2.0',
'tensorflow',
'tensorflow_hub',
]
CHECKPOINT_DATA = ['checkpoints/DM21*/*.pb', 'checkpoints/DM21*/variables/*']
setup(
name='density_functional_approximation_dm21',
version='0.1',
description='An interface to PySCF for the DM21 functionals.',
url='https://github.com/deepmind/deepmind-research/density_functional_approximation_dm21',
author='DeepMind',
author_email='no-reply@google.com',
# Contained modules and scripts.
packages=['density_functional_approximation_dm21'],
package_data={
'density_functional_approximation_dm21': CHECKPOINT_DATA,
},
scripts=['density_functional_approximation_dm21/export_saved_model.py'],
install_requires=REQUIRED_PACKAGES,
platforms=['any'],
license='Apache 2.0',
extras_require={'testing': ['pytest', 'scipy']},
)
deepmind-research-master | density_functional_approximation_dm21/setup.py
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Computation of the Hartree-Fock exchange density.
We consider two types of potential:
1. Coulomb potential v(r,r') = 1/|r-r'|, which results in the full HF exchange
density and energy.
2. Screened (long-range) Coulomb potential v(r,r') = erf(\omega|r-r'|)/|r-r'|,
which results in the screened HF exchange density and energy.
Note that PySCF and libcint treat a value of omega=0 as corresponding to the
Coulomb potential. In the following, HF refers to full HF exchange if the
Coulomb potential is used and to screened HF exchange if the screened Coulomb
potential is used.
The Hartree-Fock (HF) exchange energy can be written as:
-2 HF_x = \sum_{a,b,c,d} D_{ab} D_{cd} \int dr \int dr'
[ \chi_a(r) \chi_c(r) v(r, r') \chi_b(r') \chi_d(r') ]
where D is the density matrix, \chi_a the atomic basis functions and r, r' are
coordinates. For clarity we have dropped the spin-channel label of the density
matrix.
Defining the following intermediates:
\nu_{bd}(r) = \int dr' (\chi_b(r') v(r, r') \chi_d(r'))
E_b(r) = \sum_a D_{ab} \chi_a(r)
we get the following expression for HF:
-2 HF_x = \int dr \sum_{bd} E_b(r) E_d(r) \nu_{bd}(r)
Therefore the quantity
exx(r) = -0.5 \sum_{bd} E_b(r) E_d(r) \nu_{bd}(r)
represents an energy density at location r which integrates to the HF exchange
energy.
The Fock matrix, F, is the derivative of the energy with respect to the density
matrix. If the energy depends upon the set of features {x}, then the Fock matrix
can be evaluated as \sum_x dE/dx dx/dD_{ab}. The derivatives with respect to the
features can be easily evaluated using automatic differentiation. We hence
require the derivative of exx with respect to the density matrix:
dexx(r)/dD_{ab} = -\sum_{cd} D_{cd} \chi_a(r) \chi_c(r) \nu_{bd}(r)
This is too large to store, so we instead compute the following intermediate,
and evaluate the derivative as required on the fly:
fxx_b(r) = \sum_{cd} D_{cd} \chi_c(r) \nu_{bd}(r),
so that dexx(r)/dD_{ab} = -\chi_a(r) fxx_b(r).
Note: we compute exx and fxx for each spin channel for both restricted and
unrestricted calculations.
"""
from typing import Generator, Optional, Tuple, Union
import attr
import numpy as np
from pyscf.dft import numint
from pyscf.gto import mole
from pyscf.lib import logger
from pyscf.lib import numpy_helper
def _evaluate_nu_slow(mol: mole.Mole,
coords: np.ndarray,
omega: float,
hermi: int = 1) -> np.ndarray:
"""Computes nu integrals for given coordinates using a slow loop."""
nu = []
# Use the Gaussian nuclear model in int1e_rinv_sph to evaluate the screened
# integrals.
with mol.with_rinv_zeta(zeta=omega * omega):
# This is going to be slow...
for coord in coords:
with mol.with_rinv_origin(coord):
nu.append(mol.intor('int1e_rinv_sph', hermi=hermi))
return np.asarray(nu)
def _evaluate_nu(mol: mole.Mole,
coords: np.ndarray,
omega: float,
hermi: int = 1) -> np.ndarray:
"""Computes nu integrals for given coordinates."""
try:
with mol.with_range_coulomb(omega=omega):
# grids keyword argument supported in pyscf 2.0.0-alpha.
nu = mol.intor('int1e_grids_sph', hermi=hermi, grids=coords) # pytype: disable=wrong-keyword-args
except TypeError:
logger.info(
mol, 'Support for int1e_grids not found (requires libcint 4.4.1 and '
'pyscf 2.0.0a or later). Falling back to slow loop over individual grid '
'points.')
nu = _evaluate_nu_slow(mol, coords, omega)
return nu
def _nu_chunk(mol: mole.Mole,
coords: np.ndarray,
omega: float,
chunk_size: int = 1000
) -> Generator[Tuple[int, int, np.ndarray], None, None]:
r"""Yields chunks of nu integrals over the grid.
Args:
mol: pyscf Mole object.
coords: coordinates, r', at which to evaluate the nu integrals, shape (N,3).
omega: range separation parameter. A value of 0 disables range-separation
(i.e. uses the kernel v(r,r') = 1/|r-r'| instead of
v(r,r') = erf(\omega |r-r'|) / |r-r'|)
chunk_size: number of coordinates to evaluate the integrals at a time.
Yields:
start_index, end_index, nu_{ab}(r) where
start_index, end_index are indices into coords,
nu is an array of shape (end_index-start_index, nao, nao), where nao is
the number of atomic orbitals and contains
nu_{ab}(r) = <a(r')|v(r,r')| b(r')>, where a,b are atomic
orbitals and r' are the grid coordinates in coords[start_index:end_index].
Raises:
ValueError: if omega is negative.
"""
if omega < 0:
raise ValueError('Range-separated parameter omega must be non-negative!')
ncoords = len(coords)
for chunk_index in range(0, ncoords, chunk_size):
end_index = min(chunk_index + chunk_size, ncoords)
coords_chunk = coords[chunk_index:end_index]
nu_chunk = _evaluate_nu(mol, coords_chunk, omega=omega)
yield chunk_index, end_index, nu_chunk
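# Illustrative sketch (not used by the library) of consuming the chunked
# generator above without holding all nu integrals in memory at once. The
# function name and the traced quantity are arbitrary examples.
def _accumulate_nu_trace(mol: mole.Mole,
                         coords: np.ndarray,
                         omega: float = 0.0) -> np.ndarray:
  """Returns the trace of nu(r) at every grid point, computed chunk by chunk."""
  traces = np.zeros(len(coords))
  for start, end, nu in _nu_chunk(mol, coords, omega, chunk_size=256):
    traces[start:end] = np.einsum('gaa->g', nu)
  return traces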
def _compute_exx_block(nu: np.ndarray,
e: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
r"""Computes exx and fxx.
Args:
nu: batch of <i|v(r,r_k)|j> integrals, in format (k,i,j) where r_k is the
position of the k-th grid point, i and j label atomic orbitals.
e: density matrix in the AO basis at each grid point.
Returns:
exx and fxx, where
fxx_{gb} =\sum_c nu_{gbc} e_{gc} and
exx_{g} = -0.5 \sum_b e_{gb} fxx_{gb}.
"""
fxx = np.einsum('gbc,gc->gb', nu, e)
exx = -0.5 * np.einsum('gb,gb->g', e, fxx)
return exx, fxx
def _compute_jk_block(nu: np.ndarray, fxx: np.ndarray, dm: np.ndarray,
ao_value: np.ndarray,
weights: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Computes J and K contributions from the given block of nu integrals."""
batch_size = nu.shape[0]
vj = numpy_helper.dot(nu.reshape(batch_size, -1), dm.reshape(-1, 1))
vj = np.squeeze(vj)
vj_ao = np.einsum('g,gb->gb', vj * weights, ao_value)
j = numpy_helper.dot(ao_value.T, vj_ao)
w_ao = np.einsum('g,gb->gb', weights, ao_value)
k = numpy_helper.dot(fxx.T, w_ao)
return j, k
@attr.s(auto_attribs=True)
class HFDensityResult:
r"""Container for results returned by get_hf_density.
Note that the kernel used in all integrals is defined by the omega input
argument.
Attributes:
exx: exchange energy density at position r on the grid for the alpha, beta
spin channels. Each array is shape (N), where N is the number of grid
points.
fxx: intermediate for evaluating dexx/dD^{\sigma}_{ab}, where D is the
density matrix and \sigma is the spin coordinate. See top-level docstring
for details. Each array is shape (N, nao), where nao is the number of
atomic orbitals.
coulomb: coulomb matrix (restricted calculations) or matrices (unrestricted
calculations). Each array is shape (nao, nao).
Restricted calculations: \sum_{cd} D_{cd} (ab|cd)
Unrestricted calculations: \sum_{cd} D^{\sigma}_{cd} (ab|cd)
exchange: exchange matrix (restricted calculations) or matrices
(unrestricted calculations). Each array is shape (nao, nao).
Restricted calculations: \sum_{cd} D_{cd} (ac|bd)
Unrestricted calculations: \sum_{cd} D^{\sigma}_{cd} (ac|bd).
"""
exx: Tuple[np.ndarray, np.ndarray]
fxx: Optional[Tuple[np.ndarray, np.ndarray]] = None
coulomb: Optional[Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]] = None
exchange: Optional[Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]] = None
def get_hf_density(
mol: mole.Mole,
dm: Union[Tuple[np.ndarray, np.ndarray], np.ndarray],
coords: np.ndarray,
omega: float = 0.,
deriv: int = 0,
ao: Optional[np.ndarray] = None,
chunk_size: int = 1000,
weights: Optional[np.ndarray] = None,
) -> HFDensityResult:
r"""Computes the (range-separated) HF energy density.
Args:
mol: PySCF molecule.
dm: The density matrix. For restricted calculations, an array of shape
(M, M), where M is the number of atomic orbitals. For unrestricted
calculations, either an array of shape (2, M, M) or a tuple of arrays,
each of shape (M, M), where dm[0] is the density matrix for the alpha
electrons and dm[1] the density matrix for the beta electrons.
coords: The coordinates to compute the HF density at, shape (N, 3), where N
is the number of grid points.
omega: The inverse width of the error function. An omega of 0. means no
range separation, and the 1/|r-R| kernel is used in the nu integrals.
Otherwise the kernel erf(\omega|r-R|)/|r-R| is used. Must be non-negative.
deriv: The derivative order. Only first derivatives (deriv=1) are currently
implemented. deriv=0 indicates no derivatives are required.
ao: The atomic orbitals evaluated on the grid, shape (N, M). These are
computed if not supplied.
chunk_size: The number of coordinates to compute the HF density for at once.
Reducing this saves memory, since fewer nu matrices (each nbasis x nbasis)
need to be held in memory at once.
weights: weight of each grid point, shape (N). If present, the Coulomb and
exchange matrices are also computed semi-numerically, otherwise only the
HF density and (if deriv=1) its first derivative are computed.
Returns:
HFDensityResult object with the HF density (exx), the derivative of the HF
density with respect to the density (fxx) if deriv is 1, and the Coulomb and
exchange matrices if the weights argument is provided.
Raises:
NotImplementedError: if a Cartesian basis set is used or if deriv is greater
than 1.
ValueError: if omega or deriv are negative.
"""
if mol.cart:
raise NotImplementedError('Local HF exchange is not implemented for basis '
'sets with Cartesian functions!')
if deriv < 0:
raise ValueError(f'`deriv` must be non-negative, got {deriv}')
if omega < 0:
raise ValueError(f'`omega` must be non-negative, got {omega}')
if deriv > 1:
raise NotImplementedError('Higher order derivatives are not implemented.')
if isinstance(dm, tuple) or dm.ndim == 3:
dma, dmb = dm
restricted = False
else:
dma = dm / 2
dmb = dm / 2
restricted = True
logger.info(mol, 'Computing contracted density matrix ...')
if ao is None:
ao = numint.eval_ao(mol, coords, deriv=0)
e_a = np.dot(ao, dma)
e_b = np.dot(ao, dmb)
exxa = []
exxb = []
fxxa = []
fxxb = []
ja = np.zeros_like(dma)
jb = np.zeros_like(dmb)
ka = np.zeros_like(dma)
kb = np.zeros_like(dmb)
for start, end, nu in _nu_chunk(mol, coords, omega, chunk_size=chunk_size):
logger.info(mol, 'Computing exx %s / %s ...', end, len(e_a))
exxa_block, fxxa_block = _compute_exx_block(nu, e_a[start:end])
exxa.extend(exxa_block)
if not restricted:
exxb_block, fxxb_block = _compute_exx_block(nu, e_b[start:end])
exxb.extend(exxb_block)
if deriv == 1:
fxxa.extend(fxxa_block)
if not restricted:
fxxb.extend(fxxb_block)
if weights is not None:
ja_block, ka_block = _compute_jk_block(nu, fxxa_block, dma, ao[start:end],
weights[start:end])
ja += ja_block
ka += ka_block
if not restricted:
jb_block, kb_block = _compute_jk_block(nu, fxxb_block, dmb,
ao[start:end],
weights[start:end])
jb += jb_block
kb += kb_block
exxa = np.asarray(exxa)
fxxa = np.asarray(fxxa)
if restricted:
exxb = exxa
fxxb = fxxa
else:
exxb = np.asarray(exxb)
fxxb = np.asarray(fxxb)
result = HFDensityResult(exx=(exxa, exxb))
if deriv == 1:
result.fxx = (fxxa, fxxb)
if weights is not None:
if restricted:
result.coulomb = 2 * ja
result.exchange = 2 * ka
else:
result.coulomb = (ja, jb)
result.exchange = (ka, kb)
return result
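# Usage sketch (mirrors compute_hfx_density_test.py): integrate the exchange
# energy density over the grid and compare it with the conventional HF
# exchange energy. The molecule, basis and grid level are arbitrary
# illustrative choices; a working PySCF installation is assumed.
if __name__ == '__main__':
  from pyscf import dft as _dft
  from pyscf import gto as _gto
  from pyscf import scf as _scf
  example_mol = _gto.M(atom='He 0. 0. 0.', basis='3-21g')
  example_mf = _dft.RKS(example_mol)
  example_mf.grids.level = 2
  example_mf.grids.build()
  example_mf.kernel()
  example_dm = example_mf.make_rdm1()
  example_results = get_hf_density(
      example_mol,
      example_dm,
      coords=example_mf.grids.coords,
      weights=example_mf.grids.weights)
  # exx integrates to the HF exchange energy of the density matrix.
  ex_from_density = np.sum(
      (example_results.exx[0] + example_results.exx[1]) *
      example_mf.grids.weights)
  _, example_k = _scf.hf.get_jk(example_mol, example_dm)
  ex_exact = -0.25 * np.einsum('ij,ji', example_dm, example_k)
  print('HF exchange from exx:', ex_from_density, 'exact:', ex_exact)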
deepmind-research-master | density_functional_approximation_dm21/density_functional_approximation_dm21/compute_hfx_density.py
#!/usr/bin/env python3
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper for exporting a functional and its derivatives to a saved_model."""
from typing import Sequence
from absl import app
from absl import flags
from density_functional_approximation_dm21 import neural_numint
_OUT_DIR = flags.DEFINE_string(
'out_dir', None, 'Output directory.', required=True)
_BATCH_SIZE = flags.DEFINE_integer(
'batch_size',
1000,
'Number of grid points the exported functional will process in a single call.',
lower_bound=0)
_FUNCTIONAL = flags.DEFINE_enum_class('functional',
neural_numint.Functional.DM21,
neural_numint.Functional,
'Functional to export.')
def export(
functional: neural_numint.Functional,
export_path: str,
batch_dim: int,
) -> None:
"""Export a functional and its derivatives to a single saved_model.
Args:
functional: functional to export.
export_path: path to save the model to.
batch_dim: number of grid points to process in a single call.
"""
ni = neural_numint.NeuralNumInt(functional)
ni.export_functional_and_derivatives(
export_path=export_path, batch_dim=batch_dim)
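# Illustrative sketch (not part of the original script) of loading the
# exported model, following the pattern used in neural_numint_test.py. The
# 'default' signature and the input names/shapes come from
# neural_numint._build_graph (the trailing dimension of 2 matches the two
# omega values used for the local HF features); the input values below are
# arbitrary placeholders.
def _load_exported_model_example(export_path: str, num_points: int = 4):
  import numpy as np
  import tensorflow.compat.v1 as tf
  model = tf.saved_model.load_v2(export_path)
  inputs = {
      'rho_a': tf.constant(0.1 * np.ones((6, num_points)), dtype=tf.float32),
      'rho_b': tf.constant(0.1 * np.ones((6, num_points)), dtype=tf.float32),
      'hfx_a': tf.constant(np.zeros((num_points, 2)), dtype=tf.float32),
      'hfx_b': tf.constant(np.zeros((num_points, 2)), dtype=tf.float32),
      'grid_coords': tf.constant(np.zeros((num_points, 3)), dtype=tf.float32),
      'grid_weights': tf.constant(np.ones(num_points), dtype=tf.float32),
  }
  output_tensors = model.signatures['default'](**inputs)
  with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    return session.run(output_tensors)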
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
export(_FUNCTIONAL.value, _OUT_DIR.value, _BATCH_SIZE.value)
if __name__ == '__main__':
app.run(main)
deepmind-research-master | density_functional_approximation_dm21/density_functional_approximation_dm21/export_saved_model.py
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for neural_numint."""
import os
from absl.testing import parameterized
import attr
from pyscf import dft
from pyscf import gto
from pyscf import lib
import tensorflow.compat.v1 as tf
from density_functional_approximation_dm21 import neural_numint
class NeuralNumintTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
lib.param.TMPDIR = None
lib.num_threads(1)
# Golden values were obtained using the version of PySCF (including integral
# generation) reported in the DM21 paper.
@parameterized.parameters(
{
'functional': neural_numint.Functional.DM21,
'expected_energy': -126.898521
},
{
'functional': neural_numint.Functional.DM21m,
'expected_energy': -126.907332
},
{
'functional': neural_numint.Functional.DM21mc,
'expected_energy': -126.922127
},
{
'functional': neural_numint.Functional.DM21mu,
'expected_energy': -126.898178
},
)
def test_rks(self, functional, expected_energy):
ni = neural_numint.NeuralNumInt(functional)
mol = gto.Mole()
mol.atom = [['Ne', 0., 0., 0.]]
mol.basis = 'sto-3g'
mol.build()
mf = dft.RKS(mol)
mf.small_rho_cutoff = 1.e-20
mf._numint = ni
mf.run()
self.assertAlmostEqual(mf.e_tot, expected_energy, delta=2.e-4)
@parameterized.parameters(
{
'functional': neural_numint.Functional.DM21,
'expected_energy': -37.34184876
},
{
'functional': neural_numint.Functional.DM21m,
'expected_energy': -37.3377766
},
{
'functional': neural_numint.Functional.DM21mc,
'expected_energy': -37.33489173
},
{
'functional': neural_numint.Functional.DM21mu,
'expected_energy': -37.34015315
},
)
def test_uks(self, functional, expected_energy):
ni = neural_numint.NeuralNumInt(functional)
mol = gto.Mole()
mol.atom = [['C', 0., 0., 0.]]
mol.spin = 2
mol.basis = 'sto-3g'
mol.build()
mf = dft.UKS(mol)
mf.small_rho_cutoff = 1.e-20
mf._numint = ni
mf.run()
self.assertAlmostEqual(mf.e_tot, expected_energy, delta=2.e-4)
def test_exported_model(self):
mol = gto.Mole()
mol.atom = [['C', 0., 0., 0.]]
mol.spin = 2
mol.basis = 'sto-3g'
mol.build()
ni = neural_numint.NeuralNumInt(neural_numint.Functional.DM21)
mf = dft.UKS(mol)
mf.small_rho_cutoff = 1.e-20
mf._numint = ni
mf.run()
dms = mf.make_rdm1()
ao = ni.eval_ao(mol, mf.grids.coords, deriv=2)
rho_a = ni.eval_rho(mol, ao, dms[0], xctype='MGGA')
rho_b = ni.eval_rho(mol, ao, dms[1], xctype='MGGA')
inputs, _ = ni.construct_functional_inputs(
mol=mol,
dms=dms,
spin=1,
coords=mf.grids.coords,
weights=mf.grids.weights,
rho=(rho_a, rho_b),
ao=ao[0])
feed_dict = dict(
zip(
attr.asdict(ni._placeholders).values(),
attr.asdict(inputs).values(),
))
with ni._graph.as_default():
outputs = ni._session.run(
{
'vxc': ni._vxc,
'vrho': ni._vrho,
'vsigma': ni._vsigma,
'vtau': ni._vtau,
'vhf': ni._vhf
},
feed_dict=feed_dict)
export_path = os.path.join(self.get_temp_dir(), 'export')
ni.export_functional_and_derivatives(export_path)
model = tf.saved_model.load_v2(export_path)
tensor_inputs = {
k: tf.constant(v, dtype=tf.float32)
for k, v in attr.asdict(inputs).items()
}
exported_output_tensors = model.signatures['default'](**tensor_inputs)
with tf.Session() as session:
session.run(tf.global_variables_initializer())
exported_outputs = session.run(exported_output_tensors)
self.assertAllClose(outputs, exported_outputs, atol=5.e-5, rtol=1.e-5)
if __name__ == '__main__':
tf.test.main()
deepmind-research-master | density_functional_approximation_dm21/density_functional_approximation_dm21/neural_numint_test.py
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An interface to DM21 family of exchange-correlation functionals for PySCF."""
from density_functional_approximation_dm21.neural_numint import Functional
from density_functional_approximation_dm21.neural_numint import NeuralNumInt
deepmind-research-master | density_functional_approximation_dm21/density_functional_approximation_dm21/__init__.py
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An interface to DM21 family of exchange-correlation functionals for PySCF."""
import enum
import os
from typing import Generator, Optional, Sequence, Tuple, Union
import attr
import numpy as np
from pyscf import dft
from pyscf import gto
from pyscf.dft import numint
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
from density_functional_approximation_dm21 import compute_hfx_density
tf.disable_v2_behavior()
# TODO(b/196260242): avoid depending upon private function
_dot_ao_ao = numint._dot_ao_ao # pylint: disable=protected-access
@enum.unique
class Functional(enum.Enum):
"""Enum for exchange-correlation functionals in the DM21 family.
Attributes:
DM21: trained on the molecules dataset and on fractional charge and
fractional spin constraints.
DM21m: trained on the molecules dataset only.
DM21mc: trained on the molecules dataset and on fractional charge constraints.
DM21mu: trained on the molecules dataset and on electron gas constraints.
"""
# Break pylint's preferred naming pattern to match the functional names used
# in the paper.
# pylint: disable=invalid-name
DM21 = enum.auto()
DM21m = enum.auto()
DM21mc = enum.auto()
DM21mu = enum.auto()
# pylint: enable=invalid-name
# We use attr.s here instead of dataclasses.dataclass as
# dataclasses.asdict returns a deepcopy of the attributes. This is wasteful in
# memory if they are large and breaks (as in the case of tf.Tensors) if they are
# not serializable. attr.asdict does not perform this copy and so works with
# both np.ndarrays and tf.Tensors.
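# A minimal illustration of the point above (the class and array below are
# hypothetical): attr.asdict keeps references to the original values, whereas
# dataclasses.asdict would deep-copy them.
def _attr_asdict_keeps_references():
  @attr.s(auto_attribs=True)
  class _Example:
    x: np.ndarray
  array = np.zeros(3)
  assert attr.asdict(_Example(x=array))['x'] is array  # No copy is made.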
@attr.s(auto_attribs=True)
class FunctionalInputs:
r""""Inputs required for DM21 functionals.
Depending upon the context, this is either a set of numpy arrays (feature
construction) or TF tensors (constructing placeholders/running functionals).
Attributes:
rho_a: Density information for the alpha electrons.
PySCF for meta-GGAs supplies a single array for the total density
(restricted calculations) and a pair of arrays, one for each spin channel
(unrestricted calculations).
Each array/tensor is of shape (6, N) and contains the density and density
derivatives, where:
rho(0, :) - density at each grid point
rho(1, :) - derivative of the density along x at each grid point
rho(2, :) - derivative of the density along y at each grid point
rho(3, :) - derivative of the density along z at each grid point
rho(4, :) - \nabla^2 \rho [not used]
rho(5, :) - tau, the kinetic energy density 1/2 \sum_i |\nabla \phi_i|^2,
at each grid point.
See pyscf.dft.numint.eval_rho for more details.
We require separate inputs for both alpha- and beta-spin densities, even
in restricted calculations (where rho_a = rho_b = rho/2, where rho is the
total density).
rho_b: as for rho_a for the beta electrons.
hfx_a: local Hartree-Fock energy density at each grid point for the alpha-
spin density for each value of omega. Shape [N, len(omega_values)].
See compute_hfx_density for more details.
hfx_b: as for hfx_a for the beta-spin density.
grid_coords: grid coordinates at which to evaluate the density. Shape
(N, 3), where N is the number of grid points. Note that this is currently
unused by the functional, but is still a required input.
grid_weights: weight of each grid point. Shape (N).
"""
rho_a: Union[tf.Tensor, np.ndarray]
rho_b: Union[tf.Tensor, np.ndarray]
hfx_a: Union[tf.Tensor, np.ndarray]
hfx_b: Union[tf.Tensor, np.ndarray]
grid_coords: Union[tf.Tensor, np.ndarray]
grid_weights: Union[tf.Tensor, np.ndarray]
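# Illustrative sketch (mirroring neural_numint_test.py) of how the (6, N) rho
# arrays described above are typically produced for an unrestricted
# calculation. `mol` is a pyscf.gto.Mole and `mf` is a converged dft.UKS
# object; both are placeholders supplied by the caller.
def _example_unrestricted_rho(mol, mf):
  dms = mf.make_rdm1()
  ao = numint.eval_ao(mol, mf.grids.coords, deriv=2)
  rho_a = numint.eval_rho(mol, ao, dms[0], xctype='MGGA')
  rho_b = numint.eval_rho(mol, ao, dms[1], xctype='MGGA')
  return rho_a, rho_b  # Each has shape (6, N).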
@attr.s(auto_attribs=True)
class _GridState:
"""Internal state required for the numerical grid.
Attributes:
coords: coordinates of the grid. Shape (N, 3), where N is the number of grid
points.
weight: weight associated with each grid point. Shape (N).
mask: mask indicating whether a shell is zero at a grid point. Shape
(N, nbas) where nbas is the number of shells in the basis set. See
pyscf.dft.gen_grids.make_mask.
ao: atomic orbitals evaluated on the grid. Shape (N, nao), where nao is the
number of atomic orbitals, or shape (:, N, nao), where the 0-th element
contains the ao values, the next three elements contain the first
derivatives, and so on.
"""
coords: np.ndarray
weight: np.ndarray
mask: np.ndarray
ao: np.ndarray
@attr.s(auto_attribs=True)
class _SystemState:
"""Internal state required for system of interest.
Attributes:
mol: PySCF molecule
dms: density matrix or matrices (unrestricted calculations only).
Restricted calculations: shape (nao, nao), where nao is the number of
atomic orbitals.
Unrestricted calculations: shape (2, nao, nao) or a sequence (length 2) of
arrays of shape (nao, nao), and dms[0] and dms[1] are the density matrices
of the alpha and beta electrons respectively.
"""
mol: gto.Mole
dms: Union[np.ndarray, Sequence[np.ndarray]]
def _get_number_of_density_matrices(dms):
"""Returns the number of density matrices in dms."""
# See pyscf.numint.NumInt._gen_rho_evaluator
if isinstance(dms, np.ndarray) and dms.ndim == 2:
return 1
return len(dms)
class NeuralNumInt(numint.NumInt):
"""A wrapper around pyscf.dft.numint.NumInt for the DM21 functionals.
In order to supply the local Hartree-Fock features required for the DM21
functionals, we lightly wrap the NumInt class. The actual evaluation of the
exchange-correlation functional is performed in NeuralNumInt.eval_xc.
Usage:
mf = dft.RKS(...) # dft.ROKS and dft.UKS are also supported.
# Specify the functional by monkey-patching mf._numint rather than using
# mf.xc or mf.define_xc_.
mf._numint = NeuralNumInt(Functional.DM21)
mf.kernel()
"""
def __init__(self,
functional: Functional,
*,
checkpoint_path: Optional[str] = None):
"""Constructs a NeuralNumInt object.
Args:
functional: member of Functional enum giving the name of the
functional.
checkpoint_path: Optional path to specify the directory containing the
checkpoints of the DM21 family of functionals. If not specified, attempt
to find the checkpoints using a path relative to the source code.
"""
self._functional_name = functional.name
if checkpoint_path:
self._model_path = os.path.join(checkpoint_path, self._functional_name)
else:
self._model_path = os.path.join(
os.path.dirname(__file__), 'checkpoints', self._functional_name)
# All DM21 functionals use local Hartree-Fock features with a non-range
# separated 1/r kernel and a range-separated kernel with \omega = 0.4.
# Note an omega of 0.0 is interpreted by PySCF and libcint to indicate no
# range-separation.
self._omega_values = [0.0, 0.4]
self._graph = tf.Graph()
with self._graph.as_default():
self._build_graph()
self._session = tf.Session()
self._session.run(tf.global_variables_initializer())
self._grid_state = None
self._system_state = None
self._vmat_hf = None
super().__init__()
def _build_graph(self, batch_dim: Optional[int] = None):
"""Builds the TensorFlow graph for evaluating the functional.
Args:
batch_dim: the batch dimension of the grid to use in the model. Default:
None (determine at runtime). This should only be set if building a model
in order to export and ahead-of-time compile it into a standalone
library.
"""
self._functional = hub.Module(spec=self._model_path)
grid_coords = tf.placeholder(
tf.float32, shape=[batch_dim, 3], name='grid_coords')
grid_weights = tf.placeholder(
tf.float32, shape=[batch_dim], name='grid_weights')
# Density information.
rho_a = tf.placeholder(tf.float32, shape=[6, batch_dim], name='rho_a')
rho_b = tf.placeholder(tf.float32, shape=[6, batch_dim], name='rho_b')
# Split into corresponding terms.
rho_only_a, grad_a_x, grad_a_y, grad_a_z, _, tau_a = tf.unstack(
rho_a, axis=0)
rho_only_b, grad_b_x, grad_b_y, grad_b_z, _, tau_b = tf.unstack(
rho_b, axis=0)
# Evaluate |\nabla \rho|^2 for each spin density and for the total density.
norm_grad_a = (grad_a_x**2 + grad_a_y**2 + grad_a_z**2)
norm_grad_b = (grad_b_x**2 + grad_b_y**2 + grad_b_z**2)
grad_x = grad_a_x + grad_b_x
grad_y = grad_a_y + grad_b_y
grad_z = grad_a_z + grad_b_z
norm_grad = (grad_x**2 + grad_y**2 + grad_z**2)
# The local Hartree-Fock energy densities at each grid point for the alpha-
# and beta-spin densities for each value of omega.
# Note an omega of 0 indicates no screening of the Coulomb potential.
hfxa = tf.placeholder(
tf.float32, shape=[batch_dim, len(self._omega_values)], name='hfxa')
hfxb = tf.placeholder(
tf.float32, shape=[batch_dim, len(self._omega_values)], name='hfxb')
# Make all features 2D arrays on input for ease of handling inside the
# functional.
features = {
'grid_coords': grid_coords,
'grid_weights': tf.expand_dims(grid_weights, 1),
'rho_a': tf.expand_dims(rho_only_a, 1),
'rho_b': tf.expand_dims(rho_only_b, 1),
'tau_a': tf.expand_dims(tau_a, 1),
'tau_b': tf.expand_dims(tau_b, 1),
'norm_grad_rho_a': tf.expand_dims(norm_grad_a, 1),
'norm_grad_rho_b': tf.expand_dims(norm_grad_b, 1),
'norm_grad_rho': tf.expand_dims(norm_grad, 1),
'hfxa': hfxa,
'hfxb': hfxb,
}
tensor_dict = {f'tensor_dict${k}': v for k, v in features.items()}
predictions = self._functional(tensor_dict, as_dict=True)
local_xc = predictions['grid_contribution']
weighted_local_xc = local_xc * grid_weights
unweighted_xc = tf.reduce_sum(local_xc, axis=0)
xc = tf.reduce_sum(weighted_local_xc, axis=0)
# PySCF expects the XC energy density per particle, i.e. the local XC energy
# divided by the total density. Add a small constant to guard against
# division by zero where the density vanishes.
self._vxc = local_xc / (rho_only_a + rho_only_b + 1E-12)
# The derivatives of the exchange-correlation (XC) energy with respect to
# input features. PySCF weights the (standard) derivatives by the grid
# weights, so we need to compute this with respect to the unweighted sum
# over grid points.
self._vrho = tf.gradients(
unweighted_xc, [features['rho_a'], features['rho_b']],
name='GRAD_RHO',
unconnected_gradients=tf.UnconnectedGradients.ZERO)
self._vsigma = tf.gradients(
unweighted_xc, [
features['norm_grad_rho_a'], features['norm_grad_rho_b'],
features['norm_grad_rho']
],
name='GRAD_SIGMA',
unconnected_gradients=tf.UnconnectedGradients.ZERO)
self._vtau = tf.gradients(
unweighted_xc, [features['tau_a'], features['tau_b']],
name='GRAD_TAU',
unconnected_gradients=tf.UnconnectedGradients.ZERO)
# Standard meta-GGAs do not have a dependency on local HF, so we need to
# compute the contribution to the Fock matrix ourselves. Just use the
# weighted XC energy to avoid having to weight this later.
self._vhf = tf.gradients(
xc, [features['hfxa'], features['hfxb']],
name='GRAD_HFX',
unconnected_gradients=tf.UnconnectedGradients.ZERO)
self._placeholders = FunctionalInputs(
rho_a=rho_a,
rho_b=rho_b,
hfx_a=hfxa,
hfx_b=hfxb,
grid_coords=grid_coords,
grid_weights=grid_weights)
outputs = {
'vxc': self._vxc,
'vrho': tf.stack(self._vrho),
'vsigma': tf.stack(self._vsigma),
'vtau': tf.stack(self._vtau),
'vhf': tf.stack(self._vhf),
}
# Create the signature for TF-Hub, including both the energy and functional
# derivatives.
# This is a no-op if _build_graph is called outside of
# hub.create_module_spec.
hub.add_signature(
inputs=attr.asdict(self._placeholders), outputs=outputs)
def export_functional_and_derivatives(
self,
export_path: str,
batch_dim: Optional[int] = None,
):
"""Exports the TensorFlow graph containing the functional and derivatives.
The hub modules supplied contain the TensorFlow operations for the
evaluation of the exchange-correlation energy. Evaluation of the functional
derivatives, required for a self-consistent calculation, are added in
_build_graph. The module created by export_functional_and_derivatives
contains the evaluation of the functional and the functional derivatives.
This is much simpler to use from languages other than Python, e.g. using the
C or C++ TensorFlow API, or using tfcompile to create a standalone C++
library.
Args:
export_path: path to write the Hub model to. The exported model can be
loaded using either TF-Hub or SavedModel APIs.
batch_dim: the batch dimension of the grid to use in the model. Default:
None (determine at runtime). This should only be set if the exported
model is to be ahead-of-time compiled into a standalone library.
"""
with tf.Graph().as_default():
spec = hub.create_module_spec(
self._build_graph, tags_and_args=[(set(), {'batch_dim': batch_dim})])
functional_and_derivatives = hub.Module(spec=spec)
with tf.Session() as session:
session.run(tf.global_variables_initializer())
functional_and_derivatives.export(export_path, session)
# DM21* functionals include the hybrid term directly, so set the
# range-separated and hybrid parameters expected by PySCF to 0 so PySCF
# doesn't also add these contributions in separately.
def rsh_coeff(self, *args):
"""Returns the range separated parameters, omega, alpha, beta."""
return [0.0, 0.0, 0.0]
def hybrid_coeff(self, *args, **kwargs):
"""Returns the fraction of Hartree-Fock exchange to include."""
return 0.0
def _xc_type(self, *args, **kwargs):
return 'MGGA'
def nr_rks(self,
mol: gto.Mole,
grids: dft.Grids,
xc_code: str,
dms: Union[np.ndarray, Sequence[np.ndarray]],
relativity: int = 0,
hermi: int = 0,
max_memory: float = 20000,
verbose=None) -> Tuple[float, float, np.ndarray]:
"""Calculates RKS XC functional and potential matrix on a given grid.
Args:
mol: PySCF molecule.
grids: grid on which to evaluate the functional.
xc_code: XC code. Unused. NeuralNumInt hard codes the XC functional
based upon the functional argument given to the constructor.
dms: the density matrix or sequence of density matrices. Multiple density
matrices are not currently supported. Shape (nao, nao), where nao is the
number of atomic orbitals.
relativity: Unused. (pyscf.numint.NumInt.nr_rks does not currently use
this argument.)
hermi: 1 if the density matrix is Hermitian, 0 otherwise.
max_memory: the maximum cache to use, in MB.
verbose: verbosity level. Unused. (PySCF currently does not handle the
verbosity level passed in here.)
Returns:
nelec, excsum, vmat, where
nelec is the number of electrons obtained by numerical integration of
the density matrix.
excsum is the functional's XC energy.
vmat is the functional's XC potential matrix, shape (nao, nao).
Raises:
NotImplementedError: if multiple density matrices are supplied.
"""
# Wrap nr_rks so we can store internal variables required to evaluate the
# contribution to the XC potential from local Hartree-Fock features.
# See pyscf.dft.numint.nr_rks for more details.
ndms = _get_number_of_density_matrices(dms)
if ndms > 1:
raise NotImplementedError(
'NeuralNumInt does not support multiple density matrices. '
'Only ground state DFT calculations are currently implemented.')
nao = mol.nao_nr()
self._vmat_hf = np.zeros((nao, nao))
self._system_state = _SystemState(mol=mol, dms=dms)
nelec, excsum, vmat = super().nr_rks(
mol=mol,
grids=grids,
xc_code=xc_code,
dms=dms,
relativity=relativity,
hermi=hermi,
max_memory=max_memory,
verbose=verbose)
vmat += self._vmat_hf + self._vmat_hf.T
# Clear internal state to prevent accidental re-use.
self._system_state = None
self._grid_state = None
return nelec, excsum, vmat
def nr_uks(self,
mol: gto.Mole,
grids: dft.Grids,
xc_code: str,
dms: Union[Sequence[np.ndarray], Sequence[Sequence[np.ndarray]]],
relativity: int = 0,
hermi: int = 0,
max_memory: float = 20000,
verbose=None) -> Tuple[np.ndarray, float, np.ndarray]:
"""Calculates UKS XC functional and potential matrix on a given grid.
Args:
mol: PySCF molecule.
grids: grid on which to evaluate the functional.
xc_code: XC code. Unused. NeuralNumInt hard codes the XC functional
based upon the functional argument given to the constructor.
dms: the density matrix or sequence of density matrices for each spin
channel. Multiple density matrices for each spin channel are not
currently supported. Each density matrix is shape (nao, nao), where nao
is the number of atomic orbitals.
relativity: Unused. (pyscf.dft.numint.NumInt.nr_uks does not currently use
this argument.)
hermi: 1 if the density matrix is Hermitian, 0 otherwise.
max_memory: the maximum cache to use, in MB.
verbose: verbosity level. Unused. (PySCF currently does not handle the
verbosity level passed in here.)
Returns:
nelec, excsum, vmat, where
nelec is the number of alpha, beta electrons obtained by numerical
integration of the density matrix as an array of size 2.
excsum is the functional's XC energy.
vmat is the functional's XC potential matrix, shape (2, nao, nao), where
vmat[0] and vmat[1] are the potential matrices for the alpha and beta
spin channels respectively.
Raises:
NotImplementedError: if multiple density matrices for each spin channel
are supplied.
"""
# Wrap nr_uks so we can store internal variables required to evaluate the
# contribution to the XC potential from local Hartree-Fock features.
# See pyscf.dft.numint.nr_uks for more details.
if isinstance(dms, np.ndarray) and dms.ndim == 2: # RHF DM
ndms = _get_number_of_density_matrices(dms)
else:
ndms = _get_number_of_density_matrices(dms[0])
if ndms > 1:
raise NotImplementedError(
'NeuralNumInt does not support multiple density matrices. '
'Only ground state DFT calculations are currently implemented.')
nao = mol.nao_nr()
self._vmat_hf = np.zeros((2, nao, nao))
self._system_state = _SystemState(mol=mol, dms=dms)
nelec, excsum, vmat = super().nr_uks(
mol=mol,
grids=grids,
xc_code=xc_code,
dms=dms,
relativity=relativity,
hermi=hermi,
max_memory=max_memory,
verbose=verbose)
vmat[0] += self._vmat_hf[0] + self._vmat_hf[0].T
vmat[1] += self._vmat_hf[1] + self._vmat_hf[1].T
# Clear internal state to prevent accidental re-use.
self._system_state = None
self._grid_state = None
self._vmat_hf = None
return nelec, excsum, vmat
def block_loop(
self,
mol: gto.Mole,
grids: dft.Grids,
nao: Optional[int] = None,
deriv: int = 0,
max_memory: float = 2000,
non0tab: Optional[np.ndarray] = None,
blksize: Optional[int] = None,
buf: Optional[np.ndarray] = None
) -> Generator[Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray], None,
None]:
"""Loops over the grid by blocks. See pyscf.dft.numint.NumInt.block_loop.
Args:
mol: PySCF molecule.
grids: grid on which to evaluate the functional.
nao: number of basis functions. If None, obtained from mol.
deriv: order of derivatives of the atomic orbitals to evaluate on the grid
(passed through to pyscf.dft.numint.NumInt.block_loop).
max_memory: the maximum cache to use for the information on the grid, in
MB. Determines the size of each block if blksize is None.
non0tab: mask determining if a shell in the basis set is zero at a grid
point. Shape (N, nbas), where N is the number of grid points and nbas
the number of shells in the basis set. Obtained from grids if not
supplied.
blksize: size of each block. Calculated from max_memory if None.
buf: buffer to use for storing ao. If None, a new array for ao is created
for each block.
Yields:
ao, mask, weight, coords: information on a block of the grid containing N'
points, where
ao: atomic orbitals evaluated on the grid. Shape (N', nao), where nao is
the number of atomic orbitals.
mask: mask indicating whether a shell in the basis set is zero at a grid
point. Shape (N', nbas).
weight: weight associated with each grid point. Shape (N').
coords: coordinates of the grid. Shape (N', 3).
"""
# Wrap block_loop so we can store internal variables required to evaluate
# the contribution to the XC potential from local Hartree-Fock features.
for ao, mask, weight, coords in super().block_loop(
mol=mol,
grids=grids,
nao=nao,
deriv=deriv,
max_memory=max_memory,
non0tab=non0tab,
blksize=blksize,
buf=buf):
# Cache the current block so we can access it in eval_xc.
self._grid_state = _GridState(
ao=ao, mask=mask, weight=weight, coords=coords)
yield ao, mask, weight, coords
def construct_functional_inputs(
self,
mol: gto.Mole,
dms: Union[np.ndarray, Sequence[np.ndarray]],
spin: int,
coords: np.ndarray,
weights: np.ndarray,
rho: Union[np.ndarray, Tuple[np.ndarray, np.ndarray]],
ao: Optional[np.ndarray] = None,
) -> Tuple[FunctionalInputs, Tuple[np.ndarray, np.ndarray]]:
"""Constructs the input features required for the functional.
Args:
mol: PySCF molecule.
dms: density matrix of shape (nao, nao) (restricted calculations) or of
shape (2, nao, nao) (unrestricted calculations) or tuple of density
matrices for each spin channel, each of shape (nao, nao) (unrestricted
calculations).
spin: 0 for a spin-unpolarized (restricted Kohn-Sham) calculation, non-zero
for a spin-polarized (unrestricted) calculation.
coords: coordinates of the grid. Shape (N, 3), where N is the number of
grid points.
weights: weight associated with each grid point. Shape (N).
rho: density and density derivatives at each grid point. Single array
containing the total density for restricted calculations, tuple of
arrays for each spin channel for unrestricted calculations. Each array
has shape (6, N). See pyscf.dft.numint.eval_rho and comments in
FunctionalInputs for more details.
ao: The atomic orbitals evaluated on the grid, shape (N, nao). Computed if
not supplied.
Returns:
inputs, fxx, where
inputs: FunctionalInputs object containing the inputs (as np.ndarrays)
for the functional.
fxx: intermediates, shape (N, nao) for the alpha- and beta-spin
channels, required for computing the first derivative of the local
Hartree-Fock density with respect to the density matrices. See
compute_hfx_density for more details.
"""
if spin == 0:
# RKS
rhoa = rho / 2
rhob = rho / 2
else:
# UKS
rhoa, rhob = rho
# Local HF features.
exxa, exxb = [], []
fxxa, fxxb = [], []
for omega in sorted(self._omega_values):
hfx_results = compute_hfx_density.get_hf_density(
mol,
dms,
coords=coords,
omega=omega,
deriv=1,
ao=ao)
exxa.append(hfx_results.exx[0])
exxb.append(hfx_results.exx[1])
fxxa.append(hfx_results.fxx[0])
fxxb.append(hfx_results.fxx[1])
exxa = np.stack(exxa, axis=-1)
fxxa = np.stack(fxxa, axis=-1)
if spin == 0:
exx = (exxa, exxa)
fxx = (fxxa, fxxa)
else:
exxb = np.stack(exxb, axis=-1)
fxxb = np.stack(fxxb, axis=-1)
exx = (exxa, exxb)
fxx = (fxxa, fxxb)
return FunctionalInputs(
rho_a=rhoa,
rho_b=rhob,
hfx_a=exx[0],
hfx_b=exx[1],
grid_coords=coords,
grid_weights=weights), fxx
def eval_xc(
self,
xc_code: str,
rho: Union[np.ndarray, Tuple[np.ndarray, np.ndarray]],
spin: int = 0,
relativity: int = 0,
deriv: int = 1,
omega: Optional[float] = None,
verbose=None
) -> Tuple[np.ndarray, Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray],
None, None]:
"""Evaluates the XC energy and functional derivatives.
See pyscf.dft.libxc.eval_xc for more details on the interface.
Note: this also updates self._vmat_hf, which contains the contribution to
the potential matrix from the local Hartree-Fock terms in the functional.
Args:
xc_code: unused.
rho: density and density derivatives at each grid point. Single array
containing the total density for restricted calculations, tuple of
arrays for each spin channel for unrestricted calculations. Each array
has shape (6, N), where N is the number of grid points. See
pyscf.dft.numint.eval_rho and comments in FunctionalInputs for more
details.
spin: 0 for a spin-unpolarized (restricted Kohn-Sham) calculation, non-zero
for a spin-polarized (unrestricted) calculation.
relativity: Not supported.
deriv: unused. The first functional derivatives are always computed.
omega: RSH parameter. Not supported.
verbose: unused.
Returns:
exc, vxc, fxc, kxc, where:
exc is the exchange-correlation energy density per particle evaluated at
each grid point, shape (N).
vxc is (vrho, vgamma, vlapl, vtau), the first-order functional
derivatives evaluated at each grid point, each shape (N).
fxc is set to None. (The second-order functional derivatives are not
computed.)
kxc is set to None. (The third-order functional derivatives are not
computed.)
"""
del xc_code, verbose, deriv # unused
if relativity != 0:
raise NotImplementedError('Relativistic calculations are not implemented '
'for DM21 functionals.')
if omega is not None:
raise NotImplementedError('User-specified range separation parameters are '
'not implemented for DM21 functionals.')
# Retrieve cached state.
ao = self._grid_state.ao
if ao.ndim == 3:
# Just need the AO values, not the gradients.
ao = ao[0]
if self._grid_state.weight is None:
weights = np.array([1.])
else:
weights = self._grid_state.weight
mask = self._grid_state.mask
inputs, (fxxa, fxxb) = self.construct_functional_inputs(
mol=self._system_state.mol,
dms=self._system_state.dms,
spin=spin,
rho=rho,
weights=weights,
coords=self._grid_state.coords,
ao=ao)
with self._graph.as_default():
feed_dict = dict(
zip(
attr.asdict(self._placeholders).values(),
attr.asdict(inputs).values(),
))
tensor_list = [
self._vxc,
self._vrho,
self._vsigma,
self._vtau,
self._vhf,
]
exc, vrho, vsigma, vtau, vhf = (
self._session.run(tensor_list, feed_dict=feed_dict))
mol = self._system_state.mol
shls_slice = (0, mol.nbas)
ao_loc_nr = mol.ao_loc_nr()
# Note: tf.gradients returns a list of gradients.
# vrho, vsigma, vtau are derivatives of objects that had
# tf.expand_dims(..., 1) applied. The [:, 0] indexing undoes this by
# selecting the 0-th (and only) element from the second dimension.
if spin == 0:
vxc_0 = (vrho[0][:, 0] + vrho[1][:, 0]) / 2.
# PySCF expects the derivative with respect to
# sigma = grad_rho . grad_rho.
# The functional takes sigma_aa, sigma_bb and the total
# grad_(rho_a + rho_b) . grad_(rho_a + rho_b) as inputs; with
# rho_a = rho_b = rho / 2 the chain rule gives the factors of 1/4 below.
vxc_1 = (vsigma[0][:, 0] / 4. + vsigma[1][:, 0] / 4. + vsigma[2][:, 0])
vxc_3 = (vtau[0][:, 0] + vtau[1][:, 0]) / 2.
vxc_2 = np.zeros_like(vxc_3)
vhfs = (vhf[0] + vhf[1]) / 2.
# Local Hartree-Fock terms
for i in range(len(self._omega_values)):
# Factor of 1/2 is to account for adding vmat_hf + vmat_hf.T to vmat,
# which we do to match existing PySCF style. Unlike other terms, vmat_hf
# is already symmetric though.
aow = np.einsum('pi,p->pi', fxxa[:, :, i], -0.5 * vhfs[:, i])
self._vmat_hf += _dot_ao_ao(mol, ao, aow, mask, shls_slice,
ao_loc_nr)
else:
vxc_0 = np.stack([vrho[0][:, 0], vrho[1][:, 0]], axis=1)
# pyscf expects derivatives with respect to:
# grad_rho_a . grad_rho_a
# grad_rho_a . grad_rho_b
# grad_rho_b . grad_rho_b
# The functional uses the first and last as inputs, but then has
# grad_(rho_a + rho_b) . grad_(rho_a + rho_b)
# as input. The following computes the correct total derivatives.
vxc_1 = np.stack([
vsigma[0][:, 0] + vsigma[2][:, 0], 2. * vsigma[2][:, 0],
vsigma[1][:, 0] + vsigma[2][:, 0]
],
axis=1)
vxc_3 = np.stack([vtau[0][:, 0], vtau[1][:, 0]], axis=1)
vxc_2 = np.zeros_like(vxc_3)
vhfs = np.stack([vhf[0], vhf[1]], axis=2)
for i in range(len(self._omega_values)):
# Factors of 1/2 are due to the same reason as in the spin=0 case.
aow = np.einsum('pi,p->pi', fxxa[:, :, i], -0.5 * vhfs[:, i, 0])
self._vmat_hf[0] += _dot_ao_ao(mol, ao, aow, mask, shls_slice,
ao_loc_nr)
aow = np.einsum('pi,p->pi', fxxb[:, :, i], -0.5 * vhfs[:, i, 1])
self._vmat_hf[1] += _dot_ao_ao(mol, ao, aow, mask, shls_slice,
ao_loc_nr)
fxc = None # Second derivative not implemented
kxc = None  # Third derivative not implemented
# PySCF C routines expect float64.
exc = exc.astype(np.float64)
vxc = tuple(v.astype(np.float64) for v in (vxc_0, vxc_1, vxc_2, vxc_3))
return exc, vxc, fxc, kxc
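# Usage sketch (mirrors NeuralNumintTest.test_rks in neural_numint_test.py):
# a restricted DM21 calculation on a neon atom. Assumes PySCF and the packaged
# checkpoints are available; the atom and basis are arbitrary examples.
if __name__ == '__main__':
  example_mol = gto.M(atom='Ne 0. 0. 0.', basis='sto-3g')
  example_mf = dft.RKS(example_mol)
  example_mf._numint = NeuralNumInt(Functional.DM21)
  example_mf.kernel()
  print('DM21 total energy (Hartree):', example_mf.e_tot)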
deepmind-research-master | density_functional_approximation_dm21/density_functional_approximation_dm21/neural_numint.py
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for compute_hfx_density."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from pyscf import dft
from pyscf import gto
from pyscf import lib
from pyscf import scf
import scipy
from density_functional_approximation_dm21 import compute_hfx_density
class ComputeHfxDensityTest(parameterized.TestCase):
def setUp(self):
super().setUp()
lib.param.TMPDIR = None
lib.num_threads(1)
@parameterized.named_parameters(
{'testcase_name': 'local_hf', 'omega': 0.},
{'testcase_name': 'range_separated_local_hf_0.5', 'omega': 0.5},
{'testcase_name': 'range_separated_local_hf_1.0', 'omega': 1.0},
{'testcase_name': 'range_separated_local_hf_2.0', 'omega': 2.0},
)
def test_closed_shell(self, omega):
mol = gto.M(atom='He 0. 0. 0.', basis='3-21g')
solver = dft.RKS(mol)
solver.grids.level = 2
solver.grids.build()
solver.kernel()
dm = solver.make_rdm1()
with mol.with_range_coulomb(omega=omega):
target_j, target_k = scf.hf.get_jk(mol, dm)
target_hf = -0.25 * np.einsum('ij,ji', dm, target_k)
target_coulomb = np.einsum('ij,ji', dm, target_j)
coords = solver.grids.coords
weights = solver.grids.weights
results = compute_hfx_density.get_hf_density(
mol, dm, coords, omega=omega, weights=weights)
coulomb = np.einsum('ij,ji', dm, results.coulomb)
hf = -0.25 * np.einsum('ij,ji', dm, results.exchange)
predicted_hf = np.sum((results.exx[0] + results.exx[1]) * weights)
with self.subTest('test_hf_density'):
self.assertAlmostEqual(target_hf, predicted_hf)
with self.subTest('test_get_jk'):
np.testing.assert_allclose(results.coulomb, target_j)
np.testing.assert_allclose(results.exchange, target_k)
self.assertAlmostEqual(coulomb, target_coulomb)
self.assertAlmostEqual(hf, target_hf)
@parameterized.named_parameters(
{'testcase_name': 'local_hf', 'omega': 0.},
{'testcase_name': 'range_separated_local_hf_0.5', 'omega': 0.5},
{'testcase_name': 'range_separated_local_hf_1.0', 'omega': 1.0},
{'testcase_name': 'range_separated_local_hf_2.0', 'omega': 2.0},
)
def test_hf_density_on_open_shell(self, omega):
mol = gto.M(atom='He 0. 0. 0.', basis='3-21g', charge=1, spin=1)
solver = dft.UKS(mol)
solver.grids.level = 2
solver.grids.build()
solver.kernel()
dm = solver.make_rdm1()
with mol.with_range_coulomb(omega=omega):
target_j, target_k = scf.hf.get_jk(mol, dm)
target_hf = -0.5 * (
np.einsum('ij,ji', dm[0], target_k[0]) +
np.einsum('ij,ji', dm[1], target_k[1]))
target_coulomb = np.einsum('ij,ji', dm[0], target_j[0]) + np.einsum(
'ij,ji', dm[1], target_j[1])
coords = solver.grids.coords
weights = solver.grids.weights
results = compute_hfx_density.get_hf_density(
mol, dm, coords, omega=omega, weights=weights)
predicted_hf = np.sum((results.exx[0] + results.exx[1]) * weights)
coulomb = (
np.einsum('ij,ji', dm[0], results.coulomb[0]) +
np.einsum('ij,ji', dm[1], results.coulomb[1]))
hf = -0.5 * (
np.einsum('ij,ji', dm[0], results.exchange[0]) +
np.einsum('ij,ji', dm[1], results.exchange[1]))
with self.subTest('test_hf_density'):
self.assertAlmostEqual(target_hf, predicted_hf, places=3)
with self.subTest('test_get_jk'):
np.testing.assert_allclose(results.coulomb[0], target_j[0])
np.testing.assert_allclose(results.coulomb[1], target_j[1])
np.testing.assert_allclose(results.exchange[0], target_k[0])
np.testing.assert_allclose(results.exchange[1], target_k[1])
self.assertAlmostEqual(coulomb, target_coulomb)
self.assertAlmostEqual(hf, target_hf)
def _nu_test_systems():
systems = [
{
'atom': 'N 0 0 0; N 0 0 2.4',
'charge': 0,
'spin': 0,
'basis': 'cc-pVDZ',
'num_grids': -1
},
{
'atom': 'N 0 0 0; N 0 0 2.4',
'charge': 0,
'spin': 0,
'basis': 'cc-pVDZ',
'num_grids': 1
},
{
'atom': 'N 0 0 0; N 0 0 2.4',
'charge': 0,
'spin': 0,
'basis': 'cc-pVDZ',
'num_grids': 2
},
{
'atom': 'N 0 0 0; N 0 0 2.4',
'charge': 0,
'spin': 0,
'basis': 'cc-pVDZ',
'num_grids': 10
},
{
'atom': 'N 0 0 0; N 0 0 2.4',
'charge': 0,
'spin': 0,
'basis': 'cc-pVDZ',
'num_grids': 32
},
{
'atom': 'N 0 0 0; N 0 0 2.4',
'charge': 0,
'spin': 0,
'basis': 'cc-pVDZ',
'num_grids': 33
},
{
'atom': 'Li 0 0 0',
'charge': 0,
'spin': 1,
'basis': 'cc-pVTZ',
'num_grids': -1
},
{
'atom': 'H 0 0 0',
'charge': 0,
'spin': 1,
'basis': 'cc-pVQZ',
'num_grids': -1
},
]
system_names = ['N2', 'N2_1', 'N2_2', 'N2_10', 'N2_32', 'N2_33', 'Li', 'H']
for name, system in zip(system_names, systems):
yield {'testcase_name': f'{name}_non_hermitian', 'hermi': 0, **system}
yield {'testcase_name': f'{name}_hermitian', 'hermi': 1, **system}
class NuTest(parameterized.TestCase):
def setUp(self):
super(NuTest, self).setUp()
lib.param.TMPDIR = None
lib.num_threads(1)
@parameterized.named_parameters(_nu_test_systems())
def test_nu_integrals(self, atom, charge, spin, basis, num_grids, hermi):
mol = gto.M(atom=atom, charge=charge, spin=spin, basis=basis)
mf = dft.UKS(mol)
mf.grids.build()
if num_grids == -1:
test_coords = mf.grids.coords
else:
test_coords = mf.grids.coords[0:num_grids]
nu_slow = compute_hfx_density._evaluate_nu_slow(
mol, test_coords, omega=0.0, hermi=hermi)
nu_fast = compute_hfx_density._evaluate_nu(
mol, test_coords, omega=0.0, hermi=hermi)
np.testing.assert_allclose(nu_slow, nu_fast, atol=1E-13)
def test_range_separated_nu(self):
mol = gto.M(atom='He 0 0 0', basis='cc-pVDZ')
r0 = np.array([[0.1, 0.2, 1.]])
omega = 1.
result = np.squeeze(compute_hfx_density._evaluate_nu(mol, r0, omega=omega))
solver = dft.RKS(mol)
solver.grids.level = 2
solver.grids.build()
coords = solver.grids.coords
weights = solver.grids.weights
ao_value = dft.numint.eval_ao(mol, coords, deriv=0)
dist = np.linalg.norm(coords - r0, axis=1)
erf = scipy.special.erf(omega * dist) / dist
expected_result = np.squeeze(
np.einsum('g,ga,gb->ab', weights * erf, ao_value, ao_value))
np.testing.assert_allclose(result, expected_result)
if __name__ == '__main__':
absltest.main()
deepmind-research-master | density_functional_approximation_dm21/density_functional_approximation_dm21/compute_hfx_density_test.py
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""nest utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
from tensorflow.contrib import framework as contrib_framework
nest = contrib_framework.nest
def _nest_apply_over_list(list_of_nests, fn):
"""Equivalent to fn, but works on list-of-nests.
Transforms a list-of-nests to a nest-of-lists, then applies `fn`
to each of the inner lists.
It is assumed that all nests have the same structure. Elements of the nest may
be None, in which case they are ignored, i.e. they do not form part of the
stack. This is useful when stacking agent states where parts of the state nest
have been filtered.
Args:
list_of_nests: A Python list of nests.
fn: the function applied on the list of leaves.
Returns:
A nest-of-arrays, where the arrays are formed by `fn`ing a list.
"""
list_of_flat_nests = [nest.flatten(n) for n in list_of_nests]
flat_nest_of_stacks = []
for position in range(len(list_of_flat_nests[0])):
new_list = [flat_nest[position] for flat_nest in list_of_flat_nests]
new_list = [x for x in new_list if x is not None]
flat_nest_of_stacks.append(fn(new_list))
return nest.pack_sequence_as(
structure=list_of_nests[0], flat_sequence=flat_nest_of_stacks)
def _take_indices(inputs, indices):
return nest.map_structure(lambda t: np.take(t, indices, axis=0), inputs)
def nest_stack(list_of_nests, axis=0):
"""Equivalent to np.stack, but works on list-of-nests.
Transforms a list-of-nests to a nest-of-lists, then applies `np.stack`
to each of the inner lists.
It is assumed that all nests have the same structure. Elements of the nest may
be None, in which case they are ignored, i.e. they do not form part of the
stack. This is useful when stacking agent states where parts of the state nest
have been filtered.
Args:
list_of_nests: A Python list of nests.
axis: Optional, the `axis` argument for `np.stack`.
Returns:
A nest-of-arrays, where the arrays are formed by `np.stack`ing a list.
"""
return _nest_apply_over_list(list_of_nests, lambda l: np.stack(l, axis=axis))
def nest_unstack(batched_inputs, batch_size):
"""Splits a sequence of numpy arrays along 0th dimension."""
return [_take_indices(batched_inputs, idx) for idx in range(batch_size)]
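# Illustrative usage sketch (not part of the original module); the nest names
# and shapes below are hypothetical:
#   step_a = {'obs': np.zeros((3,)), 'reward': np.array(1.0)}
#   step_b = {'obs': np.ones((3,)), 'reward': np.array(2.0)}
#   batched = nest_stack([step_a, step_b])
#   # batched['obs'].shape == (2, 3); batched['reward'].shape == (2,)
#   singles = nest_unstack(batched, batch_size=2)
#   # singles is a list of two nests, each matching the structure of step_a.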
| deepmind-research-master | tvt/nest_utils.py |
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Memory Reader/Writer for RMA."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import sonnet as snt
import tensorflow.compat.v1 as tf
ReadInformation = collections.namedtuple(
'ReadInformation', ('weights', 'indices', 'keys', 'strengths'))
class MemoryWriter(snt.RNNCore):
"""Memory Writer Module."""
def __init__(self, mem_shape, name='memory_writer'):
"""Initializes the `MemoryWriter`.
Args:
mem_shape: The shape of the memory `(num_rows, memory_width)`.
name: The name to use for the Sonnet module.
"""
super(MemoryWriter, self).__init__(name=name)
self._mem_shape = mem_shape
def _build(self, inputs, state):
"""Inserts z into the argmin row of usage markers and updates all rows.
Returns an operation that, when executed, correctly updates the internal
state and usage markers.
Args:
inputs: A tuple consisting of:
* z, the value to write at this timestep
* mem_state, the state of the memory at this timestep before writing
state: The state is just the write_counter.
Returns:
      A tuple of the updated memory tensor and the next state (write counter).
"""
z, mem_state = inputs
# Stop gradient on writes to memory.
z = tf.stop_gradient(z)
prev_write_counter = state
new_row_value = z
# Find the index to insert the next row into.
num_mem_rows = self._mem_shape[0]
write_index = tf.cast(prev_write_counter, dtype=tf.int32) % num_mem_rows
one_hot_row = tf.one_hot(write_index, num_mem_rows)
write_counter = prev_write_counter + 1
    # Insert the new value into the selected row: broadcast the one-hot row
    # mask and the new row value up to the full memory shape, then combine.
insert_new_row = lambda mem, o_hot, z: mem - (o_hot * mem) + (o_hot * z)
new_mem = insert_new_row(mem_state,
tf.expand_dims(one_hot_row, axis=-1),
tf.expand_dims(new_row_value, axis=-2))
new_state = write_counter
return new_mem, new_state
@property
def state_size(self):
"""Returns a description of the state size, without batch dimension."""
return tf.TensorShape([])
@property
def output_size(self):
"""Returns a description of the output size, without batch dimension."""
return self._mem_shape
class MemoryReader(snt.AbstractModule):
"""Memory Reader Module."""
def __init__(self,
memory_word_size,
num_read_heads,
top_k=0,
memory_size=None,
name='memory_reader'):
"""Initializes the `MemoryReader`.
Args:
memory_word_size: The dimension of the 1-D read keys this memory reader
should produce. Each row of the memory is of length `memory_word_size`.
num_read_heads: The number of reads to perform.
      top_k: If > 0, the softmax and summation when reading are restricted to
        the top k most similar entries in memory. top_k=0 (default) means
        dense reads, i.e. no top_k restriction.
memory_size: Number of rows in memory.
name: The name for this Sonnet module.
"""
super(MemoryReader, self).__init__(name=name)
self._memory_word_size = memory_word_size
self._num_read_heads = num_read_heads
self._top_k = top_k
# This is not an RNNCore but it is useful to expose the output size.
self._output_size = num_read_heads * memory_word_size
num_read_weights = top_k if top_k > 0 else memory_size
self._read_info_size = ReadInformation(
weights=tf.TensorShape([num_read_heads, num_read_weights]),
indices=tf.TensorShape([num_read_heads, num_read_weights]),
keys=tf.TensorShape([num_read_heads, memory_word_size]),
strengths=tf.TensorShape([num_read_heads]),
)
with self._enter_variable_scope():
# Transforms to value-based read for each read head.
output_dim = (memory_word_size + 1) * num_read_heads
self._keys_and_read_strengths_generator = snt.Linear(output_dim)
def _build(self, inputs):
"""Looks up rows in memory.
In the args list, we have the following conventions:
B: batch size
M: number of slots in a row of the memory matrix
R: number of rows in the memory matrix
H: number of read heads in the memory controller
Args:
inputs: A tuple of
* read_inputs, a tensor of shape [B, ...] that will be flattened and
passed through a linear layer to get read keys/read_strengths for
each head.
* mem_state, the primary memory tensor. Of shape [B, R, M].
Returns:
The read from the memory (concatenated across read heads) and read
information.
"""
# Assert input shapes are compatible and separate inputs.
_assert_compatible_memory_reader_input(inputs)
read_inputs, mem_state = inputs
# Determine the read weightings for each key.
flat_outputs = self._keys_and_read_strengths_generator(
snt.BatchFlatten()(read_inputs))
# Separate the read_strengths from the rest of the weightings.
h = self._num_read_heads
flat_keys = flat_outputs[:, :-h]
read_strengths = tf.nn.softplus(flat_outputs[:, -h:])
# Reshape the weights.
read_shape = (self._num_read_heads, self._memory_word_size)
read_keys = snt.BatchReshape(read_shape)(flat_keys)
# Read from memory.
memory_reads, read_weights, read_indices, read_strengths = (
read_from_memory(read_keys, read_strengths, mem_state, self._top_k))
concatenated_reads = snt.BatchFlatten()(memory_reads)
return concatenated_reads, ReadInformation(
weights=read_weights,
indices=read_indices,
keys=read_keys,
strengths=read_strengths)
@property
def output_size(self):
"""Returns a description of the output size, without batch dimension."""
return self._output_size, self._read_info_size
def read_from_memory(read_keys, read_strengths, mem_state, top_k):
"""Function for cosine similarity content based reading from memory matrix.
In the args list, we have the following conventions:
B: batch size
M: number of slots in a row of the memory matrix
R: number of rows in the memory matrix
H: number of read heads (of the controller or the policy)
K: top_k if top_k>0
Args:
read_keys: the read keys of shape [B, H, M].
read_strengths: the coefficients used to compute the normalised weighting
vector of shape [B, H].
mem_state: the primary memory tensor. Of shape [B, R, M].
top_k: only use top k read matches, other reads do not go into softmax and
are zeroed out in the output. top_k=0 (default) means use dense reads.
Returns:
    The memory reads [B, H, M], read weights [B, H, top k], read indices
    [B, H, top k], and read strengths [B, H].
"""
_assert_compatible_read_from_memory_inputs(read_keys, read_strengths,
mem_state)
batch_size = read_keys.shape[0]
num_read_heads = read_keys.shape[1]
with tf.name_scope('memory_reading'):
# Scale such that all rows are L2-unit vectors, for memory and read query.
scaled_read_keys = tf.math.l2_normalize(read_keys, axis=-1) # [B, H, M]
scaled_mem = tf.math.l2_normalize(mem_state, axis=-1) # [B, R, M]
# The cosine distance is then their dot product.
# Find the cosine distance between each read head and each row of memory.
cosine_distances = tf.matmul(
scaled_read_keys, scaled_mem, transpose_b=True) # [B, H, R]
# The rank must match cosine_distances for broadcasting to work.
read_strengths = tf.expand_dims(read_strengths, axis=-1) # [B, H, 1]
weighted_distances = read_strengths * cosine_distances # [B, H, R]
if top_k:
# Get top k indices (row indices with top k largest weighted distances).
top_k_output = tf.nn.top_k(weighted_distances, top_k, sorted=False)
read_indices = top_k_output.indices # [B, H, K]
# Create a sub-memory for each read head with only the top k rows.
# Each batch_gather is [B, K, M] and the list stacks to [B, H, K, M].
topk_mem_per_head = [tf.batch_gather(mem_state, ri_this_head)
for ri_this_head in tf.unstack(read_indices, axis=1)]
topk_mem = tf.stack(topk_mem_per_head, axis=1) # [B, H, K, M]
topk_scaled_mem = tf.math.l2_normalize(topk_mem, axis=-1) # [B, H, K, M]
# Calculate read weights for each head's top k sub-memory.
expanded_scaled_read_keys = tf.expand_dims(
scaled_read_keys, axis=2) # [B, H, 1, M]
topk_cosine_distances = tf.reduce_sum(
expanded_scaled_read_keys * topk_scaled_mem, axis=-1) # [B, H, K]
topk_weighted_distances = (
read_strengths * topk_cosine_distances) # [B, H, K]
read_weights = tf.nn.softmax(
topk_weighted_distances, axis=-1) # [B, H, K]
# For each head, read using the sub-memories and corresponding weights.
expanded_weights = tf.expand_dims(read_weights, axis=-1) # [B, H, K, 1]
memory_reads = tf.reduce_sum(
expanded_weights * topk_mem, axis=2) # [B, H, M]
else:
read_weights = tf.nn.softmax(weighted_distances, axis=-1)
num_rows_memory = mem_state.shape[1]
all_indices = tf.range(num_rows_memory, dtype=tf.int32)
all_indices = tf.reshape(all_indices, [1, 1, num_rows_memory])
read_indices = tf.tile(all_indices, [batch_size, num_read_heads, 1])
# This is the actual memory access.
# Note that matmul automatically batch applies for us.
memory_reads = tf.matmul(read_weights, mem_state)
read_keys.shape.assert_is_compatible_with(memory_reads.shape)
read_strengths = tf.squeeze(read_strengths, axis=-1) # [B, H, 1] -> [B, H]
return memory_reads, read_weights, read_indices, read_strengths
def _assert_compatible_read_from_memory_inputs(read_keys, read_strengths,
mem_state):
read_keys.shape.assert_has_rank(3)
b_shape, h_shape, m_shape = read_keys.shape
mem_state.shape.assert_has_rank(3)
r_shape = mem_state.shape[1]
read_strengths.shape.assert_is_compatible_with(
tf.TensorShape([b_shape, h_shape]))
mem_state.shape.assert_is_compatible_with(
tf.TensorShape([b_shape, r_shape, m_shape]))
def _assert_compatible_memory_reader_input(input_tensors):
"""Asserts MemoryReader's _build has been given the correct shapes."""
assert len(input_tensors) == 2
_, mem_state = input_tensors
mem_state.shape.assert_has_rank(3)
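# Illustrative usage sketch (not part of the original module); shapes and
# hyperparameters below are hypothetical:
#   writer = MemoryWriter(mem_shape=(1000, 200))
#   reader = MemoryReader(memory_word_size=200, num_read_heads=3, top_k=50,
#                         memory_size=1000)
#   # Writing: inputs are (z, memory) with z of shape [B, 200] and memory of
#   # shape [B, 1000, 200]; the state is the per-batch write counter.
#   new_memory, new_counter = writer((z, memory), write_counter)
#   # Reading: inputs are (read_query, memory); returns the concatenated reads
#   # of shape [B, 3 * 200] plus a ReadInformation namedtuple.
#   reads, read_info = reader((controller_output, memory))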
| deepmind-research-master | tvt/memory.py |
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Temporal Value Transport implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from concurrent import futures
import numpy as np
from six.moves import range
from six.moves import zip
def _unstack(array, axis):
"""Opposite of np.stack."""
split_array = np.split(array, array.shape[axis], axis=axis)
return [np.squeeze(a, axis=axis) for a in split_array]
def _top_k_args(array, k):
"""Return top k arguments or all arguments if array size is less than k."""
if len(array) <= k:
return np.arange(len(array))
return np.argpartition(array, kth=-k)[-k:]
def _threshold_read_event_times(read_strengths, threshold):
"""Return the times of max read strengths within one threshold read event."""
chosen_times = []
over_threshold = False
max_read_strength = 0.
# Wait until the threshold is crossed then keep track of max read strength and
# time of max read strength until the read strengths go back under the
# threshold, then add that max read strength time to the chosen times. Wait
# until threshold is crossed again and then repeat the process.
for time, strength in enumerate(read_strengths):
if strength > threshold:
over_threshold = True
if strength > max_read_strength:
max_read_strength = strength
max_read_strength_time = time
else:
# If coming back under threshold, add the time of the last max read.
if over_threshold:
chosen_times.append(max_read_strength_time)
max_read_strength = 0.
over_threshold = False
# Add max read strength time if episode finishes before going under threshold.
if over_threshold:
chosen_times.append(max_read_strength_time)
return np.array(chosen_times)
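# Illustrative example of the thresholding above (not part of the original
# code): with read_strengths = [0.1, 3.0, 5.0, 2.5, 0.5, 4.0] and
# threshold = 2.0 there are two above-threshold events, [3.0, 5.0, 2.5] and
# [4.0]; the returned times are the argmax within each event, i.e. [2, 5].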
def _tvt_rewards_single_head(read_weights, read_strengths, read_times,
baselines, alpha, top_k_t1,
read_strength_threshold, no_transport_period):
"""Compute TVT rewards for a single read head, no batch dimension.
This performs the updates for one read head.
`t1` and `t2` refer to times to where and from where the value is being
transported, respectively. I.e. the rewards at `t1` times are being modified
based on values at times `t2`.
Args:
read_weights: shape (ep_length, top_k).
read_strengths: shape (ep_length,).
read_times: shape (ep_length, top_k).
baselines: shape (ep_length,).
alpha: The multiplier for the temporal value transport rewards.
top_k_t1: For each read event time, this determines how many time points
to send tvt reward to.
read_strength_threshold: Read strengths below this value are ignored.
no_transport_period: Length of no_transport_period.
Returns:
An array of TVT rewards with shape (ep_length,).
"""
tvt_rewards = np.zeros_like(baselines)
# Mask read_weights for reads that read back to times within
# no_transport_period of current time.
ep_length = read_times.shape[0]
times = np.arange(ep_length)
# Expand dims for correct broadcasting when subtracting read_times.
times = np.expand_dims(times, -1)
read_past_no_transport_period = (times - read_times) > no_transport_period
read_weights_masked = np.where(read_past_no_transport_period,
read_weights,
np.zeros_like(read_weights))
# Find t2 times with maximum read weights. Ignore t2 times whose maximum
# read weights fall inside the no_transport_period.
max_read_weight_args = np.argmax(read_weights, axis=1) # (ep_length,)
times = np.arange(ep_length)
max_read_weight_times = read_times[times,
max_read_weight_args] # (ep_length,)
read_strengths_cut = np.where(
times - max_read_weight_times > no_transport_period,
read_strengths,
np.zeros_like(read_strengths))
# Filter t2 candidates to perform value transport on local maximums
# above a threshold.
t2_times_with_largest_reads = _threshold_read_event_times(
read_strengths_cut, read_strength_threshold)
# Loop through all t2 candidates and transport value to top_k_t1 read times.
for t2 in t2_times_with_largest_reads:
try:
baseline_value_when_reading = baselines[t2]
except IndexError:
raise RuntimeError("Attempting to access baselines array with length {}"
" at index {}. Make sure output_baseline is set in"
" the agent config.".format(len(baselines), t2))
read_times_from_t2 = read_times[t2]
read_weights_from_t2 = read_weights_masked[t2]
# Find the top_k_t1 read times for this t2 and their corresponding read
# weights. The call to _top_k_args() here gives the array indices for the
# times and weights of the top_k_t1 reads from t2.
top_t1_indices = _top_k_args(read_weights_from_t2, top_k_t1)
top_t1_read_times = np.take(read_times_from_t2, top_t1_indices)
top_t1_read_weights = np.take(read_weights_from_t2, top_t1_indices)
# For each of the top_k_t1 read times t and corresponding read weight w,
# find the trajectory that contains step_num (t + shift) and modify the
# reward at step_num (t + shift) using w and the baseline value at t2.
# We ignore any read times t >= t2. These can emerge because if nothing
# in memory matches positively with the read query, the top reads may be
# in the empty region of the memory.
for step_num, read_weight in zip(top_t1_read_times, top_t1_read_weights):
if step_num >= t2:
# Skip this step_num as it is not really a memory time.
continue
# Compute the tvt reward and add it on.
tvt_reward = alpha * read_weight * baseline_value_when_reading
tvt_rewards[step_num] += tvt_reward
return tvt_rewards
def _compute_tvt_rewards_from_read_info(
read_weights, read_strengths, read_times, baselines, gamma,
alpha=0.9, top_k_t1=50,
read_strength_threshold=2.,
no_transport_period_when_gamma_1=25):
"""Compute TVT rewards given supplied read information, no batch dimension.
Args:
read_weights: shape (ep_length, num_read_heads, top_k).
read_strengths: shape (ep_length, num_read_heads).
read_times: shape (ep_length, num_read_heads, top_k).
baselines: shape (ep_length,).
gamma: Scalar discount factor used to calculate the no_transport_period.
alpha: The multiplier for the temporal value transport rewards.
top_k_t1: For each read event time, this determines how many time points
to send tvt reward to.
read_strength_threshold: Read strengths below this value are ignored.
no_transport_period_when_gamma_1: no transport period when gamma == 1.
Returns:
An array of TVT rewards with shape (ep_length,).
"""
if gamma < 1:
no_transport_period = int(1 / (1 - gamma))
else:
if no_transport_period_when_gamma_1 is None:
raise ValueError("No transport period must be defined when gamma == 1.")
no_transport_period = no_transport_period_when_gamma_1
# Split read infos by read head.
num_read_heads = read_weights.shape[1]
read_weights = _unstack(read_weights, axis=1)
read_strengths = _unstack(read_strengths, axis=1)
read_times = _unstack(read_times, axis=1)
  # Calculate TVT rewards for each read head separately and add to total.
tvt_rewards = np.zeros_like(baselines)
for i in range(num_read_heads):
tvt_rewards += _tvt_rewards_single_head(
read_weights[i], read_strengths[i], read_times[i],
baselines, alpha, top_k_t1, read_strength_threshold,
no_transport_period)
return tvt_rewards
def compute_tvt_rewards(read_infos, baselines, gamma=.96):
"""Compute TVT rewards from EpisodeOutputs.
Args:
read_infos: A memory_reader.ReadInformation namedtuple, where each element
has shape (ep_length, batch_size, num_read_heads, ...).
baselines: A numpy float array with shape (ep_length, batch_size).
gamma: Discount factor.
Returns:
    An array of TVT rewards with shape (ep_length, batch_size).
"""
if not read_infos:
return np.zeros_like(baselines)
  # TVT reward computation is done without a batch dimension, so we need to
  # split read_infos and baselines into per-batch-element components.
batch_size = baselines.shape[1]
# Split each element of read info on batch dim.
read_weights = _unstack(read_infos.weights, axis=1)
read_strengths = _unstack(read_infos.strengths, axis=1)
read_indices = _unstack(read_infos.indices, axis=1)
# Split baselines on batch dim.
baselines = _unstack(baselines, axis=1)
  # Compute TVT rewards for each element in the batch (threading over batch).
tvt_rewards = []
with futures.ThreadPoolExecutor(max_workers=batch_size) as executor:
for i in range(batch_size):
tvt_rewards.append(
executor.submit(
_compute_tvt_rewards_from_read_info,
read_weights[i],
read_strengths[i],
read_indices[i],
baselines[i],
gamma)
)
tvt_rewards = [f.result() for f in tvt_rewards]
# Process TVT rewards back into an array of shape (ep_length, batch_size).
return np.stack(tvt_rewards, axis=1)
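# Illustrative usage sketch (not part of the original module); the shapes are
# hypothetical and follow the docstrings above:
#   read_infos: ReadInformation with elements of shape
#     (ep_length, batch_size, num_read_heads, ...)
#   baselines: float array of shape (ep_length, batch_size)
#   tvt_rewards = compute_tvt_rewards(read_infos, baselines, gamma=0.96)
#   # tvt_rewards has shape (ep_length, batch_size) and can be added to the
#   # environment rewards before computing returns.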
| deepmind-research-master | tvt/tvt_rewards.py |
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Threaded batch environment wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from concurrent import futures
from six.moves import range
from six.moves import zip
from tvt import nest_utils
class BatchEnv(object):
"""Wrapper that steps multiple environments in separate threads.
The threads are stepped in lock step, so all threads progress by one step
before any move to the next step.
"""
def __init__(self, batch_size, env_builder, **env_kwargs):
self.batch_size = batch_size
self._envs = [env_builder(**env_kwargs) for _ in range(batch_size)]
self._num_actions = self._envs[0].num_actions
self._observation_shape = self._envs[0].observation_shape
self._episode_length = self._envs[0].episode_length
self._executor = futures.ThreadPoolExecutor(max_workers=self.batch_size)
def reset(self):
"""Reset the entire batch of environments."""
def reset_environment(env):
return env.reset()
try:
output_list = []
for env in self._envs:
output_list.append(self._executor.submit(reset_environment, env))
output_list = [env_output.result() for env_output in output_list]
except KeyboardInterrupt:
self._executor.shutdown(wait=True)
raise
observations, rewards = nest_utils.nest_stack(output_list)
return observations, rewards
def step(self, action_list):
"""Step batch of envs.
Args:
action_list: A list of actions, one per environment in the batch. Each one
        should be a scalar int or a numpy scalar int.
Returns:
A tuple (observations, rewards):
observations: A nest of observations, each one a numpy array where the
first dimension has size equal to the number of environments in the
batch.
rewards: An array of rewards with size equal to the number of
environments in the batch.
"""
def step_environment(env, action):
return env.step(action)
try:
output_list = []
for env, action in zip(self._envs, action_list):
output_list.append(self._executor.submit(step_environment, env, action))
output_list = [env_output.result() for env_output in output_list]
except KeyboardInterrupt:
self._executor.shutdown(wait=True)
raise
observations, rewards = nest_utils.nest_stack(output_list)
return observations, rewards
@property
def observation_shape(self):
"""Observation shape per environment, i.e. with no batch dimension."""
return self._observation_shape
@property
def num_actions(self):
return self._num_actions
@property
def episode_length(self):
return self._episode_length
def last_phase_rewards(self):
return [env.last_phase_reward() for env in self._envs]
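# Illustrative usage sketch (not part of the original module); the env_builder
# and its kwargs below are hypothetical placeholders:
#   env = BatchEnv(batch_size=16, env_builder=MyEnv, seed=0)
#   observations, rewards = env.reset()
#   actions = [0] * env.batch_size
#   observations, rewards = env.step(actions)
# Each thread steps one environment; results are stacked along a leading batch
# dimension via nest_utils.nest_stack.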
| deepmind-research-master | tvt/batch_env.py |
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""RMA agent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from six.moves import range
from six.moves import zip
import sonnet as snt
import tensorflow.compat.v1 as tf
import trfl
from tvt import losses
from tvt import memory as memory_module
from tensorflow.contrib import framework as contrib_framework
nest = contrib_framework.nest
PolicyOutputs = collections.namedtuple(
'PolicyOutputs', ['policy', 'action', 'baseline'])
StepOutput = collections.namedtuple(
'StepOutput', ['action', 'baseline', 'read_info'])
AgentState = collections.namedtuple(
'AgentState', ['core_state', 'prev_action'])
Observation = collections.namedtuple(
'Observation', ['image', 'last_action', 'last_reward'])
RNNStateNoMem = collections.namedtuple(
'RNNStateNoMem', ['controller_outputs', 'h_controller'])
RNNState = collections.namedtuple(
'RNNState',
list(RNNStateNoMem._fields) + ['memory', 'mem_reads', 'h_mem_writer'])
CoreOutputs = collections.namedtuple(
'CoreOutputs', ['action', 'policy', 'baseline', 'z', 'read_info'])
def rnn_inputs_to_static_rnn_inputs(inputs):
"""Converts time major tensors to timestep lists."""
# Inputs to core build method are expected to be a tensor or tuple of tensors.
if isinstance(inputs, tuple):
num_timesteps = inputs[0].shape.as_list()[0]
converted_inputs = [tf.unstack(input_, num_timesteps) for input_ in inputs]
return list(zip(*converted_inputs))
else:
return tf.unstack(inputs)
def static_rnn_outputs_to_core_outputs(outputs):
"""Convert from length T list of nests to nest of tensors with first dim T."""
list_of_flats = [nest.flatten(n) for n in outputs]
new_outputs = list()
for i in range(len(list_of_flats[0])):
new_outputs.append(tf.stack([flat_nest[i] for flat_nest in list_of_flats]))
return nest.pack_sequence_as(structure=outputs[0], flat_sequence=new_outputs)
def unroll(core, initial_state, inputs, dtype=tf.float32):
"""Perform a static unroll of the core."""
static_rnn_inputs = rnn_inputs_to_static_rnn_inputs(inputs)
static_outputs, _ = tf.nn.static_rnn(
core,
inputs=static_rnn_inputs,
initial_state=initial_state,
dtype=dtype)
core_outputs = static_rnn_outputs_to_core_outputs(static_outputs)
return core_outputs
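# Illustrative note on the conversion above (not part of the original code):
# a time-major input tuple (features,) with features of shape [T, B, D] is
# unstacked into a length-T list of per-timestep tuples, fed to
# tf.nn.static_rnn, and the resulting length-T list of output nests is
# restacked so that every leaf regains a leading time dimension of size T.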
class ImageEncoderDecoder(snt.AbstractModule):
"""Image Encoder/Decoder module."""
def __init__(
self,
image_code_size,
name='image_encoder_decoder'):
"""Initialize the image encoder/decoder."""
super(ImageEncoderDecoder, self).__init__(name=name)
# This is set by a call to `encode`. `decode` will fail before this is set.
self._convnet_output_shape = None
with self._enter_variable_scope():
self._convnet = snt.nets.ConvNet2D(
output_channels=(16, 32),
kernel_shapes=(3, 3),
strides=(1, 1),
paddings=('SAME',))
self._post_convnet_layer = snt.Linear(image_code_size, name='final_layer')
@snt.reuse_variables
def encode(self, image):
"""Encode the image observation."""
convnet_output = self._convnet(image)
# Store unflattened convnet output shape for use in decoder.
self._convnet_output_shape = convnet_output.shape[1:]
# Flatten convnet outputs and pass through final layer to get image code.
return self._post_convnet_layer(snt.BatchFlatten()(convnet_output))
@snt.reuse_variables
def decode(self, code):
"""Decode the image observation from a latent code."""
if self._convnet_output_shape is None:
raise ValueError('Must call `encode` before `decode`.')
transpose_convnet_in_flat = snt.Linear(
self._convnet_output_shape.num_elements(),
name='decode_initial_linear')(
code)
transpose_convnet_in_flat = tf.nn.relu(transpose_convnet_in_flat)
transpose_convnet_in = snt.BatchReshape(
self._convnet_output_shape.as_list())(transpose_convnet_in_flat)
return self._convnet.transpose(None)(transpose_convnet_in)
def _build(self, *args): # Unused. Use encode/decode instead.
raise NotImplementedError('Use encode/decode methods instead of __call__.')
class Policy(snt.AbstractModule):
"""A policy module possibly containing a read-only DNC."""
def __init__(self,
num_actions,
num_policy_hiddens=(),
num_baseline_hiddens=(),
activation=tf.nn.tanh,
policy_clip_abs_value=10.0,
name='Policy'):
"""Construct a policy module possibly containing a read-only DNC.
Args:
num_actions: Number of discrete actions to choose from.
num_policy_hiddens: Tuple or List, sizes of policy MLP hidden layers.
num_baseline_hiddens: Tuple or List, sizes of baseline MLP hidden layers.
An empty tuple/list results in a linear layer instead of an MLP.
activation: Callable, e.g. tf.nn.tanh.
policy_clip_abs_value: float, Policy gradient clip value.
name: A string, the module's name
"""
super(Policy, self).__init__(name=name)
self._num_actions = num_actions
self._policy_layers = tuple(num_policy_hiddens) + (num_actions,)
self._baseline_layers = tuple(num_baseline_hiddens) + (1,)
self._policy_clip_abs_value = policy_clip_abs_value
self._activation = activation
def _build(self, inputs):
(shared_inputs, extra_policy_inputs) = inputs
policy_in = tf.concat([shared_inputs, extra_policy_inputs], axis=1)
policy = snt.nets.MLP(
output_sizes=self._policy_layers,
activation=self._activation,
name='policy_mlp')(
policy_in)
# Sample an action from the policy logits.
action = tf.multinomial(policy, num_samples=1, output_dtype=tf.int32)
action = tf.squeeze(action, 1) # [B, 1] -> [B]
if self._policy_clip_abs_value > 0:
policy = snt.clip_gradient(
net=policy,
clip_value_min=-self._policy_clip_abs_value,
clip_value_max=self._policy_clip_abs_value)
baseline_in = tf.concat([shared_inputs, tf.stop_gradient(policy)], axis=1)
baseline = snt.nets.MLP(
self._baseline_layers,
activation=self._activation,
name='baseline_mlp')(
baseline_in)
baseline = tf.squeeze(baseline, axis=-1) # [B, 1] -> [B]
if self._policy_clip_abs_value > 0:
baseline = snt.clip_gradient(
net=baseline,
clip_value_min=-self._policy_clip_abs_value,
clip_value_max=self._policy_clip_abs_value)
outputs = PolicyOutputs(
policy=policy,
action=action,
baseline=baseline)
return outputs
class _RMACore(snt.RNNCore):
"""RMA RNN Core."""
def __init__(self,
num_actions,
with_memory=True,
name='rma_core'):
super(_RMACore, self).__init__(name=name)
# MLP activation as callable.
mlp_activation = tf.nn.tanh
# Size of latent code written to memory (if using it) and used to
# reconstruct from (if including reconstructions).
num_latents = 200
# Value function decode settings.
baseline_mlp_num_hiddens = (200,)
# Policy settings.
num_policy_hiddens = (200,) # Only used for non-recurrent core.
# Controller settings.
control_hidden_size = 256
control_num_layers = 2
# Memory settings (only used if with_memory=True).
memory_size = 1000
memory_num_reads = 3
memory_top_k = 50
self._with_memory = with_memory
with self._enter_variable_scope():
# Construct the features -> latent encoder.
self._z_encoder_mlp = snt.nets.MLP(
output_sizes=(2 * num_latents, num_latents),
activation=mlp_activation,
activate_final=False,
name='z_encoder_mlp')
# Construct controller.
rnn_cores = [snt.LSTM(control_hidden_size)
for _ in range(control_num_layers)]
self._controller = snt.DeepRNN(
rnn_cores, skip_connections=True, name='controller')
# Construct memory.
if self._with_memory:
memory_dim = num_latents # Each write to memory is of size memory_dim.
self._mem_shape = (memory_size, memory_dim)
self._memory_reader = memory_module.MemoryReader(
memory_word_size=memory_dim,
num_read_heads=memory_num_reads,
top_k=memory_top_k,
memory_size=memory_size)
self._memory_writer = memory_module.MemoryWriter(
mem_shape=self._mem_shape)
# Construct policy, starting with policy_core and policy_action_head.
# `extra_inputs` in this case will be mem_out from current time step (note
# that mem_out is just the controller output if with_memory=False).
self._policy = Policy(
num_policy_hiddens=num_policy_hiddens,
num_actions=num_actions,
num_baseline_hiddens=baseline_mlp_num_hiddens,
activation=mlp_activation,
policy_clip_abs_value=10.0,)
# Set state_size and output_size.
controller_out_size = self._controller.output_size
controller_state_size = self._controller.state_size
self._state_size = RNNStateNoMem(controller_outputs=controller_out_size,
h_controller=controller_state_size)
read_info_size = ()
if self._with_memory:
mem_reads_size, read_info_size = self._memory_reader.output_size
mem_writer_state_size = self._memory_writer.state_size
self._state_size = RNNState(memory=tf.TensorShape(self._mem_shape),
mem_reads=mem_reads_size,
h_mem_writer=mem_writer_state_size,
**self._state_size._asdict())
z_size = num_latents
self._output_size = CoreOutputs(
action=tf.TensorShape([]), # Scalar tensor shapes must be explicit.
policy=num_actions,
baseline=tf.TensorShape([]), # Scalar tensor shapes must be explicit.
z=z_size,
read_info=read_info_size)
def _build(self, inputs, h_prev):
features = inputs
z_net_inputs = [features, h_prev.controller_outputs]
if self._with_memory:
z_net_inputs.append(h_prev.mem_reads)
z_net_inputs_concat = tf.concat(z_net_inputs, axis=1)
z = self._z_encoder_mlp(z_net_inputs_concat)
controller_out, h_controller = self._controller(z, h_prev.h_controller)
read_info = ()
if self._with_memory:
# Perform a memory read/write step before generating the policy_modules.
mem_reads, read_info = self._memory_reader((controller_out,
h_prev.memory))
memory, h_mem_writer = self._memory_writer((z, h_prev.memory),
h_prev.h_mem_writer)
policy_extra_input = tf.concat([controller_out, mem_reads], axis=1)
else:
policy_extra_input = controller_out
# Get policy, action and (possible empty) baseline from policy module.
policy_inputs = (z, policy_extra_input)
policy_outputs = self._policy(policy_inputs)
core_outputs = CoreOutputs(
z=z,
read_info=read_info,
**policy_outputs._asdict())
h_next = RNNStateNoMem(controller_outputs=controller_out,
h_controller=h_controller)
if self._with_memory:
h_next = RNNState(memory=memory,
mem_reads=mem_reads,
h_mem_writer=h_mem_writer,
**h_next._asdict())
return core_outputs, h_next
def initial_state(self, batch_size):
"""Use initial state for RNN modules, otherwise use zero state."""
zero_state = self.zero_state(batch_size, dtype=tf.float32)
controller_out = zero_state.controller_outputs
h_controller = self._controller.initial_state(batch_size)
state = RNNStateNoMem(controller_outputs=controller_out,
h_controller=h_controller)
if self._with_memory:
memory = zero_state.memory
mem_reads = zero_state.mem_reads
h_mem_writer = self._memory_writer.initial_state(batch_size)
state = RNNState(memory=memory,
mem_reads=mem_reads,
h_mem_writer=h_mem_writer,
**state._asdict())
return state
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
class Agent(snt.AbstractModule):
"""Myriad RMA agent.
`latents` here refers to a purely deterministic encoding of the inputs, rather
than VAE-like latents in e.g. the MERLIN agent.
"""
def __init__(self,
batch_size,
with_reconstructions=True,
with_memory=True,
image_code_size=500,
image_cost_weight=50.,
num_actions=None,
observation_shape=None,
entropy_cost=0.01,
return_cost_weight=0.4,
gamma=0.96,
read_strength_cost=5e-5,
read_strength_tolerance=2.,
name='rma_agent'):
super(Agent, self).__init__(name=name)
self._batch_size = batch_size
self._with_reconstructions = with_reconstructions
self._image_cost_weight = image_cost_weight
self._image_code_size = image_code_size
self._entropy_cost = entropy_cost
self._return_cost_weight = return_cost_weight
self._gamma = gamma
self._read_strength_cost = read_strength_cost
self._read_strength_tolerance = read_strength_tolerance
self._num_actions = num_actions
self._name = name
self._logged_values = {}
# Store total number of pixels across channels (for image loss scaling).
self._total_num_pixels = np.prod(observation_shape)
with self._enter_variable_scope():
# Construct image encoder/decoder.
self._image_encoder_decoder = ImageEncoderDecoder(
image_code_size=image_code_size)
self._core = _RMACore(
num_actions=self._num_actions,
with_memory=with_memory)
def initial_state(self, batch_size):
with tf.name_scope(self._name + '/initial_state'):
return AgentState(
core_state=self._core.initial_state(batch_size),
prev_action=tf.zeros(shape=(batch_size,), dtype=tf.int32))
def _prepare_observations(self, observation, last_reward, last_action):
image = observation
# Make sure the entries are in [0, 1) range.
if image.dtype.is_integer:
image = tf.cast(image, tf.float32) / 255.
if last_reward is None:
# For some envs, in the first timestep the last_reward can be None.
batch_size = observation.shape[0]
last_reward = tf.zeros((batch_size,), dtype=tf.float32)
return Observation(
image=image,
last_action=last_action,
last_reward=last_reward)
@snt.reuse_variables
def _encode(self, observation, last_reward, last_action):
inputs = self._prepare_observations(observation, last_reward, last_action)
# Encode image observation.
obs_code = self._image_encoder_decoder.encode(inputs.image)
# Encode last action.
action_code = tf.one_hot(inputs.last_action, self._num_actions)
# Encode last reward.
reward_code = tf.expand_dims(inputs.last_reward, -1)
features = tf.concat([obs_code, action_code, reward_code], axis=1)
return inputs, features
@snt.reuse_variables
def _decode(self, z):
# Decode image.
image_recon = self._image_encoder_decoder.decode(z)
# Decode action.
action_recon = snt.Linear(self._num_actions, name='action_recon_linear')(z)
# Decode reward.
reward_recon = snt.Linear(1, name='reward_recon_linear')(z)
# Full reconstructions.
recons = Observation(
image=image_recon,
last_reward=reward_recon,
last_action=action_recon)
return recons
def step(self, reward, observation, prev_state):
with tf.name_scope(self._name + '/step'):
_, features = self._encode(observation, reward, prev_state.prev_action)
core_outputs, next_core_state = self._core(
features, prev_state.core_state)
action = core_outputs.action
step_output = StepOutput(
action=action,
baseline=core_outputs.baseline,
read_info=core_outputs.read_info)
agent_state = AgentState(
core_state=next_core_state,
prev_action=action)
return step_output, agent_state
@snt.reuse_variables
def loss(self, observations, rewards, actions, additional_rewards=None):
"""Compute the loss."""
dummy_zeroth_step_actions = tf.zeros_like(actions[:1])
all_actions = tf.concat([dummy_zeroth_step_actions, actions], axis=0)
inputs, features = snt.BatchApply(self._encode)(
observations, rewards, all_actions)
rewards = rewards[1:] # Zeroth step reward not correlated to actions.
if additional_rewards is not None:
# Additional rewards are not passed to the encoder (above) in order to be
# consistent with the step, nor to the recon loss so that recons are
# consistent with the observations. Thus, additional rewards only affect
# the returns used to learn the value function.
rewards += additional_rewards
initial_state = self._core.initial_state(self._batch_size)
rnn_inputs = features
core_outputs = unroll(self._core, initial_state, rnn_inputs)
# Remove final timestep of outputs.
core_outputs = nest.map_structure(lambda t: t[:-1], core_outputs)
if self._with_reconstructions:
recons = snt.BatchApply(self._decode)(core_outputs.z)
recon_targets = nest.map_structure(lambda t: t[:-1], inputs)
recon_loss, recon_logged_values = losses.reconstruction_losses(
recons=recons,
targets=recon_targets,
image_cost=self._image_cost_weight / self._total_num_pixels,
action_cost=1.,
reward_cost=1.)
else:
recon_loss = tf.constant(0.0)
recon_logged_values = dict()
if core_outputs.read_info is not tuple():
read_reg_loss, read_reg_logged_values = (
losses.read_regularization_loss(
read_info=core_outputs.read_info,
strength_cost=self._read_strength_cost,
strength_tolerance=self._read_strength_tolerance,
strength_reg_mode='L1',
key_norm_cost=0.,
key_norm_tolerance=1.))
else:
read_reg_loss = tf.constant(0.0)
read_reg_logged_values = dict()
# Bootstrap value is at end of episode so is zero.
bootstrap_value = tf.zeros(shape=(self._batch_size,), dtype=tf.float32)
discounts = self._gamma * tf.ones_like(rewards)
a2c_loss, a2c_loss_extra = trfl.sequence_advantage_actor_critic_loss(
policy_logits=core_outputs.policy,
baseline_values=core_outputs.baseline,
actions=actions,
rewards=rewards,
pcontinues=discounts,
bootstrap_value=bootstrap_value,
lambda_=self._gamma,
entropy_cost=self._entropy_cost,
baseline_cost=self._return_cost_weight,
name='SequenceA2CLoss')
a2c_loss = tf.reduce_mean(a2c_loss) # Average over batch.
total_loss = a2c_loss + recon_loss + read_reg_loss
a2c_loss_logged_values = dict(
pg_loss=tf.reduce_mean(a2c_loss_extra.policy_gradient_loss),
baseline_loss=tf.reduce_mean(a2c_loss_extra.baseline_loss),
entropy_loss=tf.reduce_mean(a2c_loss_extra.entropy_loss))
agent_loss_log = losses.combine_logged_values(
a2c_loss_logged_values,
recon_logged_values,
read_reg_logged_values)
agent_loss_log['total_loss'] = total_loss
return total_loss, agent_loss_log
def _build(self, *args): # Unused.
# pylint: disable=no-value-for-parameter
return self.step(*args)
# pylint: enable=no-value-for-parameter
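# Illustrative usage sketch (not part of the original module); the arguments
# are hypothetical and mirror how main.py drives the agent:
#   agent = Agent(batch_size=16, num_actions=4, observation_shape=(5, 5, 3))
#   state = agent.initial_state(batch_size=16)
#   step_output, state = agent.step(reward, observation, state)
#   loss, logs = agent.loss(observations, rewards, actions,
#                           additional_rewards=tvt_rewards)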
| deepmind-research-master | tvt/rma.py |
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Loss functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import tensorflow.compat.v1 as tf
def sum_time_average_batch(tensor, name=None):
"""Computes the mean over B assuming tensor is of shape [T, B]."""
tensor.get_shape().assert_has_rank(2)
return tf.reduce_mean(tf.reduce_sum(tensor, axis=0), axis=0, name=name)
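# Illustrative example (not part of the original code): for a [T, B] tensor
# [[1., 2.], [3., 4.]] the per-batch time sums are [4., 6.], so the returned
# value is their mean, 5.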
def combine_logged_values(*logged_values_dicts):
"""Combine logged values dicts. Throws if there are any repeated keys."""
combined_dict = dict()
for logged_values in logged_values_dicts:
for k, v in six.iteritems(logged_values):
if k in combined_dict:
raise ValueError('Key "%s" is repeated in loss logging.' % k)
combined_dict[k] = v
return combined_dict
def reconstruction_losses(
recons,
targets,
image_cost,
action_cost,
reward_cost):
"""Reconstruction losses."""
if image_cost > 0.0:
# Neg log prob of obs image given Bernoulli(recon image) distribution.
negative_image_log_prob = tf.nn.sigmoid_cross_entropy_with_logits(
labels=targets.image, logits=recons.image)
nll_per_time = tf.reduce_sum(negative_image_log_prob, [-3, -2, -1])
image_loss = image_cost * nll_per_time
image_loss = sum_time_average_batch(image_loss)
else:
image_loss = tf.constant(0.)
if action_cost > 0.0 and recons.last_action is not tuple():
# Labels have shape (T, B), logits have shape (T, B, num_actions).
action_loss = action_cost * tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=targets.last_action, logits=recons.last_action)
action_loss = sum_time_average_batch(action_loss)
else:
action_loss = tf.constant(0.)
if reward_cost > 0.0 and recons.last_reward is not tuple():
# MSE loss for reward.
recon_last_reward = recons.last_reward
recon_last_reward = tf.squeeze(recon_last_reward, -1)
reward_loss = 0.5 * reward_cost * tf.square(
recon_last_reward - targets.last_reward)
reward_loss = sum_time_average_batch(reward_loss)
else:
reward_loss = tf.constant(0.)
total_loss = image_loss + action_loss + reward_loss
logged_values = dict(
recon_loss_image=image_loss,
recon_loss_action=action_loss,
recon_loss_reward=reward_loss,
total_reconstruction_loss=total_loss,)
return total_loss, logged_values
def read_regularization_loss(
read_info,
strength_cost,
strength_tolerance,
strength_reg_mode,
key_norm_cost,
key_norm_tolerance):
"""Computes the sum of read strength and read key regularization losses."""
if (strength_cost <= 0.) and (key_norm_cost <= 0.):
read_reg_loss = tf.constant(0.)
return read_reg_loss, dict(read_regularization_loss=read_reg_loss)
if hasattr(read_info, 'read_strengths'):
read_strengths = read_info.read_strengths
read_keys = read_info.read_keys
else:
read_strengths = read_info.strengths
read_keys = read_info.keys
if read_info == tuple():
raise ValueError('Make sure read regularization costs are zero when '
'not outputting read info.')
read_reg_loss = tf.constant(0.)
if strength_cost > 0.:
strength_hinged = tf.maximum(strength_tolerance, read_strengths)
if strength_reg_mode == 'L2':
strength_loss = 0.5 * tf.square(strength_hinged)
elif strength_reg_mode == 'L1':
# Read strengths are always positive.
strength_loss = strength_hinged
else:
raise ValueError(
'Strength regularization mode "{}" is not supported.'.format(
strength_reg_mode))
# Sum across read heads to reduce from [T, B, n_reads] to [T, B].
strength_loss = strength_cost * tf.reduce_sum(strength_loss, axis=2)
if key_norm_cost > 0.:
key_norm_norms = tf.norm(read_keys, axis=-1)
key_norm_norms_hinged = tf.maximum(key_norm_tolerance, key_norm_norms)
key_norm_loss = 0.5 * tf.square(key_norm_norms_hinged)
# Sum across read heads to reduce from [T, B, n_reads] to [T, B].
key_norm_loss = key_norm_cost * tf.reduce_sum(key_norm_loss, axis=2)
read_reg_loss += key_norm_cost * key_norm_loss
if strength_cost > 0.:
strength_loss = sum_time_average_batch(strength_loss)
else:
strength_loss = tf.constant(0.)
if key_norm_cost > 0.:
key_norm_loss = sum_time_average_batch(key_norm_loss)
else:
key_norm_loss = tf.constant(0.)
read_reg_loss = strength_loss + key_norm_loss
logged_values = dict(
read_reg_strength_loss=strength_loss,
read_reg_key_norm_loss=key_norm_loss,
total_read_reg_loss=read_reg_loss)
return read_reg_loss, logged_values
| deepmind-research-master | tvt/losses.py |
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Batched synchronous actor/learner training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl import app
from absl import flags
from absl import logging
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from tvt import batch_env
from tvt import nest_utils
from tvt import rma
from tvt import tvt_rewards as tvt_module
from tvt.pycolab import env as pycolab_env
from tensorflow.contrib import framework as contrib_framework
nest = contrib_framework.nest
FLAGS = flags.FLAGS
flags.DEFINE_integer('logging_frequency', 1,
'Log training progress every logging_frequency episodes.')
flags.DEFINE_string('logdir', None, 'Directory for tensorboard logging.')
flags.DEFINE_boolean('with_memory', True,
                     'Whether or not the agent has external memory.')
flags.DEFINE_boolean('with_reconstruction', True,
                     'Whether or not the agent reconstructs the observation.')
flags.DEFINE_float('gamma', 0.92, 'Agent discount factor')
flags.DEFINE_float('entropy_cost', 0.05, 'weight of the entropy loss')
flags.DEFINE_float('image_cost_weight', 50., 'image recon cost weight.')
flags.DEFINE_float('read_strength_cost', 5e-5,
'Cost weight of the memory read strength.')
flags.DEFINE_float('read_strength_tolerance', 2.,
'The tolerance of hinge loss of the read_strength_cost.')
flags.DEFINE_boolean('do_tvt', True, 'Whether or not to do TVT.')
flags.DEFINE_enum('pycolab_game', 'key_to_door',
['key_to_door', 'active_visual_match'],
'The name of the game in pycolab environment')
flags.DEFINE_integer('num_episodes', None,
'Number of episodes to train for. None means run forever.')
flags.DEFINE_integer('batch_size', 16, 'Batch size')
flags.DEFINE_float('learning_rate', 2e-4, 'Adam optimizer learning rate')
flags.DEFINE_float('beta1', 0., 'Adam optimizer beta1')
flags.DEFINE_float('beta2', 0.95, 'Adam optimizer beta2')
flags.DEFINE_float('epsilon', 1e-6, 'Adam optimizer epsilon')
# Pycolab-specific flags:
flags.DEFINE_integer('pycolab_num_apples', 10,
'Number of apples to sample from the distractor grid.')
flags.DEFINE_float('pycolab_apple_reward_min', 1.,
'A reward range [min, max) to uniformly sample from.')
flags.DEFINE_float('pycolab_apple_reward_max', 10.,
'A reward range [min, max) to uniformly sample from.')
flags.DEFINE_boolean('pycolab_fix_apple_reward_in_episode', True,
'Fix the sampled apple reward within an episode.')
flags.DEFINE_float('pycolab_final_reward', 10.,
'Reward obtained at the last phase.')
flags.DEFINE_boolean('pycolab_crop', True,
'Whether to crop observations or not.')
def main(_):
batch_size = FLAGS.batch_size
env_builder = pycolab_env.PycolabEnvironment
env_kwargs = {
'game': FLAGS.pycolab_game,
'num_apples': FLAGS.pycolab_num_apples,
'apple_reward': [FLAGS.pycolab_apple_reward_min,
FLAGS.pycolab_apple_reward_max],
'fix_apple_reward_in_episode': FLAGS.pycolab_fix_apple_reward_in_episode,
'final_reward': FLAGS.pycolab_final_reward,
'crop': FLAGS.pycolab_crop
}
env = batch_env.BatchEnv(batch_size, env_builder, **env_kwargs)
ep_length = env.episode_length
agent = rma.Agent(batch_size=batch_size,
num_actions=env.num_actions,
observation_shape=env.observation_shape,
with_reconstructions=FLAGS.with_reconstruction,
gamma=FLAGS.gamma,
read_strength_cost=FLAGS.read_strength_cost,
read_strength_tolerance=FLAGS.read_strength_tolerance,
entropy_cost=FLAGS.entropy_cost,
with_memory=FLAGS.with_memory,
image_cost_weight=FLAGS.image_cost_weight)
# Agent step placeholders and agent step.
batch_shape = (batch_size,)
observation_ph = tf.placeholder(
dtype=tf.uint8, shape=batch_shape + env.observation_shape, name='obs')
reward_ph = tf.placeholder(
dtype=tf.float32, shape=batch_shape, name='reward')
state_ph = nest.map_structure(
lambda s: tf.placeholder(dtype=s.dtype, shape=s.shape, name='state'),
agent.initial_state(batch_size=batch_size))
step_outputs, state = agent.step(reward_ph, observation_ph, state_ph)
# Update op placeholders and update op.
observations_ph = tf.placeholder(
dtype=tf.uint8, shape=(ep_length + 1, batch_size) + env.observation_shape,
name='observations')
rewards_ph = tf.placeholder(
dtype=tf.float32, shape=(ep_length + 1, batch_size), name='rewards')
actions_ph = tf.placeholder(
dtype=tf.int64, shape=(ep_length, batch_size), name='actions')
tvt_rewards_ph = tf.placeholder(
dtype=tf.float32, shape=(ep_length, batch_size), name='tvt_rewards')
loss, loss_logs = agent.loss(
observations_ph, rewards_ph, actions_ph, tvt_rewards_ph)
optimizer = tf.train.AdamOptimizer(
learning_rate=FLAGS.learning_rate,
beta1=FLAGS.beta1,
beta2=FLAGS.beta2,
epsilon=FLAGS.epsilon)
update_op = optimizer.minimize(loss)
initial_state = agent.initial_state(batch_size)
if FLAGS.logdir:
if not tf.io.gfile.exists(FLAGS.logdir):
tf.io.gfile.makedirs(FLAGS.logdir)
summary_writer = tf.summary.FileWriter(FLAGS.logdir)
# Do init
init_ops = (tf.global_variables_initializer(),
tf.local_variables_initializer())
tf.get_default_graph().finalize()
sess = tf.Session()
sess.run(init_ops)
run = True
ep_num = 0
prev_logging_time = time.time()
while run:
observation, reward = env.reset()
agent_state = sess.run(initial_state)
# Initialise episode data stores.
observations = [observation]
rewards = [reward]
actions = []
baselines = []
read_infos = []
for _ in range(ep_length):
step_feed = {reward_ph: reward, observation_ph: observation}
for ph, ar in zip(nest.flatten(state_ph), nest.flatten(agent_state)):
step_feed[ph] = ar
step_output, agent_state = sess.run(
(step_outputs, state), feed_dict=step_feed)
action = step_output.action
baseline = step_output.baseline
read_info = step_output.read_info
# Take step in environment, append results.
observation, reward = env.step(action)
observations.append(observation)
rewards.append(reward)
actions.append(action)
baselines.append(baseline)
if read_info is not None:
read_infos.append(read_info)
# Stack the lists of length ep_length so that each array (or each element
    # of nest structure for read_infos) has shape (ep_length, batch_size, ...).
observations = np.stack(observations)
rewards = np.array(rewards)
actions = np.array(actions)
baselines = np.array(baselines)
read_infos = nest_utils.nest_stack(read_infos)
# Compute TVT rewards.
if FLAGS.do_tvt:
tvt_rewards = tvt_module.compute_tvt_rewards(read_infos,
baselines,
gamma=FLAGS.gamma)
else:
tvt_rewards = np.squeeze(np.zeros_like(baselines))
# Run update op.
loss_feed = {observations_ph: observations,
rewards_ph: rewards,
actions_ph: actions,
tvt_rewards_ph: tvt_rewards}
ep_loss, _, ep_loss_logs = sess.run([loss, update_op, loss_logs],
feed_dict=loss_feed)
# Log episode results.
if ep_num % FLAGS.logging_frequency == 0:
steps_per_second = (
FLAGS.logging_frequency * ep_length * batch_size / (
time.time() - prev_logging_time))
mean_reward = np.mean(np.sum(rewards, axis=0))
mean_last_phase_reward = np.mean(env.last_phase_rewards())
mean_tvt_reward = np.mean(np.sum(tvt_rewards, axis=0))
logging.info('Episode %d. SPS: %s', ep_num, steps_per_second)
logging.info('Episode %d. Mean episode reward: %f', ep_num, mean_reward)
logging.info('Episode %d. Last phase reward: %f', ep_num,
mean_last_phase_reward)
logging.info('Episode %d. Mean TVT episode reward: %f', ep_num,
mean_tvt_reward)
logging.info('Episode %d. Loss: %s', ep_num, ep_loss)
logging.info('Episode %d. Loss logs: %s', ep_num, ep_loss_logs)
if FLAGS.logdir:
summary = tf.Summary()
summary.value.add(tag='reward', simple_value=mean_reward)
summary.value.add(tag='last phase reward',
simple_value=mean_last_phase_reward)
summary.value.add(tag='tvt reward', simple_value=mean_tvt_reward)
summary.value.add(tag='total loss', simple_value=ep_loss)
for k, v in ep_loss_logs.items():
summary.value.add(tag='loss - {}'.format(k), simple_value=v)
# Tensorboard x-axis is total number of episodes run.
summary_writer.add_summary(summary, ep_num * batch_size)
summary_writer.flush()
prev_logging_time = time.time()
ep_num += 1
if FLAGS.num_episodes and ep_num >= FLAGS.num_episodes:
run = False
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | tvt/main.py |
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Pycolab env."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from pycolab import rendering
from tvt.pycolab import active_visual_match
from tvt.pycolab import key_to_door
from tensorflow.contrib import framework as contrib_framework
nest = contrib_framework.nest
class PycolabEnvironment(object):
"""A simple environment adapter for pycolab games."""
def __init__(self, game,
num_apples=10,
apple_reward=1.,
fix_apple_reward_in_episode=False,
final_reward=10.,
crop=True,
default_reward=0):
"""Construct a `environment.Base` adapter that wraps a pycolab game."""
rng = np.random.RandomState()
if game == 'key_to_door':
self._game = key_to_door.Game(rng,
num_apples,
apple_reward,
fix_apple_reward_in_episode,
final_reward,
crop)
elif game == 'active_visual_match':
self._game = active_visual_match.Game(rng,
num_apples,
apple_reward,
fix_apple_reward_in_episode,
final_reward)
else:
raise ValueError('Unsupported game "%s".' % game)
self._default_reward = default_reward
self._num_actions = self._game.num_actions
# Agents expect HWC uint8 observations, Pycolab uses CHW float observations.
colours = nest.map_structure(lambda c: float(c) * 255 / 1000,
self._game.colours)
self._rgb_converter = rendering.ObservationToArray(
value_mapping=colours, permute=(1, 2, 0), dtype=np.uint8)
episode = self._game.make_episode()
observation, _, _ = episode.its_showtime()
self._image_shape = self._rgb_converter(observation).shape
def _process_outputs(self, observation, reward):
if reward is None:
reward = self._default_reward
image = self._rgb_converter(observation)
return image, reward
def reset(self):
"""Start a new episode."""
self._episode = self._game.make_episode()
observation, reward, _ = self._episode.its_showtime()
return self._process_outputs(observation, reward)
def step(self, action):
"""Take step in episode."""
observation, reward, _ = self._episode.play(action)
return self._process_outputs(observation, reward)
@property
def num_actions(self):
return self._num_actions
@property
def observation_shape(self):
return self._image_shape
@property
def episode_length(self):
return self._game.episode_length
def last_phase_reward(self):
    # These Pycolab games only track chapter_reward for the final chapter.
return float(self._episode.the_plot['chapter_reward'])
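# Usage sketch (not part of the original module): constructs the adapter and
# takes a single step. The shapes and values below are illustrative; they
# assume the default 5x5 scrolling cropper used by both games.
def _pycolab_environment_demo():
  """Builds an environment, resets it and takes one ACTION_NORTH step."""
  env = PycolabEnvironment(game='active_visual_match')
  image, reward = env.reset()
  # `image` is an HWC uint8 array, e.g. (5, 5, 3); `reward` is a float.
  image, reward = env.step(0)  # Action 0 is ACTION_NORTH in common.py.
  return image, reward, env.num_actions, env.episode_length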
|
deepmind-research-master
|
tvt/pycolab/env.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Pycolab Game interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class AbstractGame(object):
"""Abstract base class for Pycolab games."""
@abc.abstractmethod
def __init__(self, rng, **settings):
"""Initialize the game."""
@abc.abstractproperty
def num_actions(self):
"""Number of possible actions in the game."""
@abc.abstractproperty
def colours(self):
"""Symbol to colour map for the game."""
@abc.abstractmethod
def make_episode(self):
"""Factory method for generating new episodes of the game."""
|
deepmind-research-master
|
tvt/pycolab/game.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Key to door task.
The game is split up into three phases:
1. (exploration phase) player can collect a key,
2. (distractor phase) player is collecting apples,
3. (reward phase) player can open the door and get the reward if the key was
collected in the exploration phase.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pycolab import ascii_art
from pycolab import storytelling
from pycolab import things as plab_things
from tvt.pycolab import common
from tvt.pycolab import game
from tvt.pycolab import objects
COLOURS = {
'i': (1000, 1000, 1000), # Indicator.
}
EXPLORE_GRID = [
' ####### ',
' #kkkkk# ',
' #kkkkk# ',
' ## ## ',
' #+++++# ',
' #+++++# ',
' ####### '
]
REWARD_GRID = [
' ',
' ##d## ',
' # # ',
' # + # ',
' # # ',
' ##### ',
' ',
]
class KeySprite(plab_things.Sprite):
"""Sprite for the key."""
def update(self, actions, board, layers, backdrop, things, the_plot):
player_position = things[common.PLAYER].position
pick_up = self.position == player_position
if self.visible and pick_up:
# Pass information to all phases.
the_plot['has_key'] = True
self._visible = False
class DoorSprite(plab_things.Sprite):
"""Sprite for the door."""
def __init__(self, corner, position, character, pickup_reward):
super(DoorSprite, self).__init__(corner, position, character)
self._pickup_reward = pickup_reward
def update(self, actions, board, layers, backdrop, things, the_plot):
player_position = things[common.PLAYER].position
pick_up = self.position == player_position
if pick_up and the_plot.get('has_key'):
the_plot.add_reward(self._pickup_reward)
# The key is lost after the first time opening the door
# to ensure only one reward per episode.
the_plot['has_key'] = False
class PlayerSprite(common.PlayerSprite):
"""Sprite for the actor."""
def __init__(self, corner, position, character):
super(PlayerSprite, self).__init__(
corner, position, character,
impassable=common.BORDER + common.INDICATOR + common.DOOR)
def update(self, actions, board, layers, backdrop, things, the_plot):
    # Allow moving through the door if the key was previously collected.
if common.DOOR in self.impassable and the_plot.get('has_key'):
self._impassable.remove(common.DOOR)
super(PlayerSprite, self).update(actions, board, layers, backdrop, things,
the_plot)
class Game(game.AbstractGame):
"""Key To Door Game."""
def __init__(self,
rng,
num_apples=10,
apple_reward=(1, 10),
fix_apple_reward_in_episode=True,
final_reward=10.,
crop=True,
max_frames=common.DEFAULT_MAX_FRAMES_PER_PHASE):
del rng # Each episode is identical and colours are not randomised.
self._num_apples = num_apples
self._apple_reward = apple_reward
self._fix_apple_reward_in_episode = fix_apple_reward_in_episode
self._final_reward = final_reward
self._crop = crop
self._max_frames = max_frames
self._episode_length = sum(self._max_frames.values())
self._num_actions = common.NUM_ACTIONS
self._colours = common.FIXED_COLOURS.copy()
self._colours.update(COLOURS)
self._extra_observation_fields = ['chapter_reward_as_string']
@property
def extra_observation_fields(self):
"""The field names of extra observations."""
return self._extra_observation_fields
@property
def num_actions(self):
"""Number of possible actions in the game."""
return self._num_actions
@property
def episode_length(self):
return self._episode_length
@property
def colours(self):
"""Symbol to colour map for key to door."""
return self._colours
def _make_explore_phase(self):
# Keep only one key and one player position.
explore_grid = common.keep_n_characters_in_grid(
EXPLORE_GRID, common.KEY, 1)
explore_grid = common.keep_n_characters_in_grid(
explore_grid, common.PLAYER, 1)
return ascii_art.ascii_art_to_game(
art=explore_grid,
what_lies_beneath=' ',
sprites={
common.PLAYER: PlayerSprite,
common.KEY: KeySprite,
common.INDICATOR: ascii_art.Partial(objects.IndicatorObjectSprite,
char_to_track=common.KEY,
override_position=(0, 5)),
common.TIMER: ascii_art.Partial(common.TimerSprite,
self._max_frames['explore']),
},
update_schedule=[
common.PLAYER, common.KEY, common.INDICATOR, common.TIMER],
z_order=[common.KEY, common.INDICATOR, common.PLAYER, common.TIMER],
)
def _make_distractor_phase(self):
return common.distractor_phase(
player_sprite=PlayerSprite,
num_apples=self._num_apples,
max_frames=self._max_frames['distractor'],
apple_reward=self._apple_reward,
fix_apple_reward_in_episode=self._fix_apple_reward_in_episode)
def _make_reward_phase(self):
return ascii_art.ascii_art_to_game(
art=REWARD_GRID,
what_lies_beneath=' ',
sprites={
common.PLAYER: PlayerSprite,
common.DOOR: ascii_art.Partial(DoorSprite,
pickup_reward=self._final_reward),
common.TIMER: ascii_art.Partial(common.TimerSprite,
self._max_frames['reward'],
track_chapter_reward=True),
},
update_schedule=[common.PLAYER, common.DOOR, common.TIMER],
z_order=[common.PLAYER, common.DOOR, common.TIMER],
)
def make_episode(self):
"""Factory method for generating new episodes of the game."""
if self._crop:
croppers = common.get_cropper()
else:
croppers = None
return storytelling.Story([
self._make_explore_phase,
self._make_distractor_phase,
self._make_reward_phase,
], croppers=croppers)
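# Usage sketch (not part of the original module): builds the three-phase story
# and starts it with the raw pycolab API. The rng argument is unused by this
# game, so None is passed purely for illustration.
def _key_to_door_demo():
  """Creates an uncropped episode and returns its first observation."""
  game_instance = Game(rng=None, crop=False)
  episode = game_instance.make_episode()
  observation, reward, discount = episode.its_showtime()
  # episode_length is the sum of the per-phase frame budgets (120 by default).
  del reward, discount
  return observation, game_instance.episode_length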
|
deepmind-research-master
|
tvt/pycolab/key_to_door.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Active visual match task.
The game is split up into three phases:
1. (exploration phase) player is in one room and a colour is shown in the other,
2. (distractor phase) player is collecting apples,
3. (reward phase) player sees three doors of different colours and has to select
the one matching the colour shown in the exploration phase.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pycolab import ascii_art
from pycolab import storytelling
from tvt.pycolab import common
from tvt.pycolab import game
from tvt.pycolab import objects
SYMBOLS_TO_SHUFFLE = ['b', 'c', 'e']
EXPLORE_GRID = [
' ppppppp ',
' p p ',
' p p ',
' pp pp ',
' p+++++p ',
' p+++++p ',
' ppppppp '
]
REWARD_GRID = [
'###########',
'# b c e #',
'# #',
'# #',
'#### ####',
' # + # ',
' ##### '
]
class Game(game.AbstractGame):
"""Image Match Passive Game."""
def __init__(self,
rng,
num_apples=10,
apple_reward=(1, 10),
fix_apple_reward_in_episode=True,
final_reward=10.,
max_frames=common.DEFAULT_MAX_FRAMES_PER_PHASE):
self._rng = rng
self._num_apples = num_apples
self._apple_reward = apple_reward
self._fix_apple_reward_in_episode = fix_apple_reward_in_episode
self._final_reward = final_reward
self._max_frames = max_frames
self._episode_length = sum(self._max_frames.values())
self._num_actions = common.NUM_ACTIONS
self._colours = common.FIXED_COLOURS.copy()
self._colours.update(
common.get_shuffled_symbol_colour_map(rng, SYMBOLS_TO_SHUFFLE))
self._extra_observation_fields = ['chapter_reward_as_string']
@property
def extra_observation_fields(self):
"""The field names of extra observations."""
return self._extra_observation_fields
@property
def num_actions(self):
"""Number of possible actions in the game."""
return self._num_actions
@property
def episode_length(self):
return self._episode_length
@property
def colours(self):
"""Symbol to colour map for key to door."""
return self._colours
def _make_explore_phase(self, target_char):
# Keep only one coloured position and one player position.
grid = common.keep_n_characters_in_grid(EXPLORE_GRID, 'p', 1, common.BORDER)
grid = common.keep_n_characters_in_grid(grid, 'p', 0, target_char)
grid = common.keep_n_characters_in_grid(grid, common.PLAYER, 1)
return ascii_art.ascii_art_to_game(
grid,
what_lies_beneath=' ',
sprites={
common.PLAYER:
ascii_art.Partial(
common.PlayerSprite,
impassable=common.BORDER + target_char),
target_char:
objects.ObjectSprite,
common.TIMER:
ascii_art.Partial(common.TimerSprite,
self._max_frames['explore']),
},
update_schedule=[common.PLAYER, target_char, common.TIMER],
z_order=[target_char, common.PLAYER, common.TIMER],
)
def _make_distractor_phase(self):
return common.distractor_phase(
player_sprite=common.PlayerSprite,
num_apples=self._num_apples,
max_frames=self._max_frames['distractor'],
apple_reward=self._apple_reward,
fix_apple_reward_in_episode=self._fix_apple_reward_in_episode)
def _make_reward_phase(self, target_char):
return ascii_art.ascii_art_to_game(
REWARD_GRID,
what_lies_beneath=' ',
sprites={
common.PLAYER: common.PlayerSprite,
'b': objects.ObjectSprite,
'c': objects.ObjectSprite,
'e': objects.ObjectSprite,
common.TIMER: ascii_art.Partial(common.TimerSprite,
self._max_frames['reward'],
track_chapter_reward=True),
target_char: ascii_art.Partial(objects.ObjectSprite,
reward=self._final_reward),
},
update_schedule=[common.PLAYER, 'b', 'c', 'e', common.TIMER],
z_order=[common.PLAYER, 'b', 'c', 'e', common.TIMER],
)
def make_episode(self):
"""Factory method for generating new episodes of the game."""
target_char = self._rng.choice(SYMBOLS_TO_SHUFFLE)
return storytelling.Story([
lambda: self._make_explore_phase(target_char),
self._make_distractor_phase,
lambda: self._make_reward_phase(target_char),
], croppers=common.get_cropper())
|
deepmind-research-master
|
tvt/pycolab/active_visual_match.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Common utilities for Pycolab games."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorsys
import numpy as np
from pycolab import ascii_art
from pycolab import cropping
from pycolab import things as plab_things
from pycolab.prefab_parts import sprites as prefab_sprites
from six.moves import zip
from tensorflow.contrib import framework as contrib_framework
nest = contrib_framework.nest
# Actions.
# Those with a negative ID are not allowed for the agent.
ACTION_QUIT = -2
ACTION_DELAY = -1
ACTION_NORTH = 0
ACTION_SOUTH = 1
ACTION_WEST = 2
ACTION_EAST = 3
NUM_ACTIONS = 4
DEFAULT_MAX_FRAMES_PER_PHASE = {
'explore': 15,
'distractor': 90,
'reward': 15
}
# Reserved symbols.
PLAYER = '+'
BORDER = '#'
BACKGROUND = ' '
KEY = 'k'
DOOR = 'd'
APPLE = 'a'
TIMER = 't'
INDICATOR = 'i'
FIXED_COLOURS = {
PLAYER: (898, 584, 430),
BORDER: (100, 100, 100),
BACKGROUND: (800, 800, 800),
KEY: (627, 321, 176),
DOOR: (529, 808, 922),
APPLE: (550, 700, 0),
}
APPLE_DISTRACTOR_GRID = [
'###########',
'#a a a a a#',
'# a a a a #',
'#a a a a a#',
'# a a a a #',
'#a a + a a#',
'###########'
]
DEFAULT_APPLE_RESPAWN_TIME = 20
DEFAULT_APPLE_REWARD = 1.
def get_shuffled_symbol_colour_map(rng_or_seed, symbols,
num_potential_colours=None):
"""Get a randomized mapping between symbols and colours.
Args:
rng_or_seed: A random state or random seed.
symbols: List of symbols.
num_potential_colours: Number of equally spaced colours to choose from.
Defaults to number of symbols. Colours are generated deterministically.
Returns:
Randomized mapping between symbols and colours.
"""
num_symbols = len(symbols)
num_potential_colours = num_potential_colours or num_symbols
if isinstance(rng_or_seed, np.random.RandomState):
rng = rng_or_seed
else:
rng = np.random.RandomState(rng_or_seed)
# Generate a range of colours.
step = 1. / num_potential_colours
hues = np.arange(0, num_potential_colours) * step
potential_colours = [colorsys.hsv_to_rgb(h, 1.0, 1.0) for h in hues]
# Randomly draw num_symbols colours without replacement.
rng.shuffle(potential_colours)
colours = potential_colours[:num_symbols]
symbol_to_colour_map = dict(list(zip(symbols, colours)))
# Multiply each colour value by 1000.
return nest.map_structure(lambda c: int(c * 1000), symbol_to_colour_map)
def get_cropper():
return cropping.ScrollingCropper(
rows=5,
cols=5,
to_track=PLAYER,
pad_char=BACKGROUND,
scroll_margins=(2, 2))
def distractor_phase(player_sprite, num_apples, max_frames,
apple_reward=DEFAULT_APPLE_REWARD,
fix_apple_reward_in_episode=False,
respawn_every=DEFAULT_APPLE_RESPAWN_TIME):
"""Distractor phase engine factory.
Args:
player_sprite: Player sprite class.
num_apples: Number of apples to sample from the apple distractor grid.
max_frames: Maximum duration of the distractor phase in frames.
apple_reward: Can either be a scalar specifying the reward or a reward range
[min, max), given as a list or tuple, to uniformly sample from.
fix_apple_reward_in_episode: The apple reward is constant throughout each
episode.
respawn_every: respawn frequency of apples.
Returns:
Distractor phase engine.
"""
distractor_grid = keep_n_characters_in_grid(APPLE_DISTRACTOR_GRID, APPLE,
num_apples)
engine = ascii_art.ascii_art_to_game(
distractor_grid,
what_lies_beneath=BACKGROUND,
sprites={
PLAYER: player_sprite,
TIMER: ascii_art.Partial(TimerSprite, max_frames),
},
drapes={
APPLE: ascii_art.Partial(
AppleDrape,
reward=apple_reward,
fix_apple_reward_in_episode=fix_apple_reward_in_episode,
respawn_every=respawn_every)
},
update_schedule=[PLAYER, APPLE, TIMER],
z_order=[APPLE, PLAYER, TIMER],
)
return engine
def replace_grid_symbols(grid, old_to_new_map):
"""Replaces symbols in the grid.
If mapping is not defined the symbol is not updated.
Args:
grid: Represented as a list of strings.
old_to_new_map: Mapping between symbols.
Returns:
Updated grid.
"""
def symbol_map(x):
if x in old_to_new_map:
return old_to_new_map[x]
return x
new_grid = []
for row in grid:
new_grid.append(''.join(symbol_map(i) for i in row))
return new_grid
def keep_n_characters_in_grid(grid, character, n, backdrop_char=BACKGROUND):
"""Keeps only a sample of characters `character` in the grid."""
np_grid = np.array([list(i) for i in grid])
char_positions = np.argwhere(np_grid == character)
# Randomly select parts to remove.
num_empty_positions = char_positions.shape[0] - n
if num_empty_positions < 0:
raise ValueError('Not enough characters `{}` in grid.'.format(character))
empty_pos = np.random.permutation(char_positions)[:num_empty_positions]
# Remove characters.
grid = [list(row) for row in grid]
for (i, j) in empty_pos:
grid[i][j] = backdrop_char
return [''.join(row) for row in grid]
class PlayerSprite(prefab_sprites.MazeWalker):
"""Sprite for the actor."""
def __init__(self, corner, position, character, impassable=BORDER):
super(PlayerSprite, self).__init__(
corner, position, character, impassable=impassable,
confined_to_board=True)
def update(self, actions, board, layers, backdrop, things, the_plot):
the_plot.add_reward(0.)
if actions == ACTION_QUIT:
the_plot.next_chapter = None
the_plot.terminate_episode()
if actions == ACTION_WEST:
self._west(board, the_plot)
elif actions == ACTION_EAST:
self._east(board, the_plot)
elif actions == ACTION_NORTH:
self._north(board, the_plot)
elif actions == ACTION_SOUTH:
self._south(board, the_plot)
class AppleDrape(plab_things.Drape):
"""Drape for the apples used in the distractor phase."""
def __init__(self,
curtain,
character,
respawn_every,
reward,
fix_apple_reward_in_episode):
"""Constructor.
Args:
curtain: Array specifying locations of apples. Obtained from ascii grid.
character: Character representing the drape.
respawn_every: respawn frequency of apples.
reward: Can either be a scalar specifying the reward or a reward range
[min, max), given as a list or tuple, to uniformly sample from.
fix_apple_reward_in_episode: If set to True, then only sample the apple's
reward once in the episode and then fix the value.
"""
super(AppleDrape, self).__init__(curtain, character)
self._respawn_every = respawn_every
if not isinstance(reward, (list, tuple)):
# Assuming scalar.
self._reward = [reward, reward]
else:
if len(reward) != 2:
raise ValueError('Reward must be a scalar or a two element list/tuple.')
self._reward = reward
self._fix_apple_reward_in_episode = fix_apple_reward_in_episode
# Grid specifying for each apple the last frame it was picked up.
    # Initialized to infinity for cells with apples and -1 for cells without.
self._last_pickup = np.where(curtain,
np.inf * np.ones_like(curtain),
-1. * np.ones_like(curtain))
def update(self, actions, board, layers, backdrop, things, the_plot):
player_position = things[PLAYER].position
# decide the apple_reward
if (self._fix_apple_reward_in_episode and
not the_plot.get('sampled_apple_reward', None)):
the_plot['sampled_apple_reward'] = np.random.choice((self._reward[0],
self._reward[1]))
if self.curtain[player_position]:
self._last_pickup[player_position] = the_plot.frame
self.curtain[player_position] = False
if not self._fix_apple_reward_in_episode:
the_plot.add_reward(np.random.uniform(*self._reward))
else:
the_plot.add_reward(the_plot['sampled_apple_reward'])
if self._respawn_every:
respawn_cond = the_plot.frame > self._last_pickup + self._respawn_every
respawn_cond &= self._last_pickup >= 0
self.curtain[respawn_cond] = True
class TimerSprite(plab_things.Sprite):
"""Sprite for the timer.
The timer is in charge of stopping the current chapter. Timer sprite should be
placed last in the update order to make sure everything is updated before the
chapter terminates.
"""
def __init__(self, corner, position, character, max_frames,
track_chapter_reward=False):
super(TimerSprite, self).__init__(corner, position, character)
if not isinstance(max_frames, int):
raise ValueError('max_frames must be of type integer.')
self._max_frames = max_frames
self._visible = False
self._track_chapter_reward = track_chapter_reward
self._total_chapter_reward = 0.
def update(self, actions, board, layers, backdrop, things, the_plot):
directives = the_plot._get_engine_directives() # pylint: disable=protected-access
if self._track_chapter_reward:
self._total_chapter_reward += directives.summed_reward or 0.
# Every chapter starts at frame = 0.
if the_plot.frame >= self._max_frames or directives.game_over:
# Calculate the reward obtained in this phase and send it through the
# extra observations channel.
if self._track_chapter_reward:
the_plot['chapter_reward'] = self._total_chapter_reward
the_plot.terminate_episode()
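# Usage sketch (not part of the original module): the grid helpers and the
# colour-map helper are pure functions, so they can be exercised directly. The
# example grid below is made up for illustration.
def _grid_helpers_demo():
  """Thins apples, renames the player symbol and builds a colour map."""
  grid = ['#a#',
          '#a#',
          '#+#']
  thinned = keep_n_characters_in_grid(grid, APPLE, 1)  # One apple remains.
  renamed = replace_grid_symbols(thinned, {PLAYER: 'P'})  # '+' becomes 'P'.
  # Deterministic symbol -> RGB map (values in [0, 1000]) for a fixed seed.
  colour_map = get_shuffled_symbol_colour_map(0, ['b', 'c'])
  return thinned, renamed, colour_map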
|
deepmind-research-master
|
tvt/pycolab/common.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Pycolab human player."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import curses
from absl import app
from absl import flags
import numpy as np
from pycolab import human_ui
from tvt.pycolab import active_visual_match
from tvt.pycolab import common
from tvt.pycolab import key_to_door
FLAGS = flags.FLAGS
flags.DEFINE_enum('game', 'key_to_door',
['key_to_door', 'active_visual_match'],
'The name of the game')
def main(unused_argv):
rng = np.random.RandomState()
if FLAGS.game == 'key_to_door':
game = key_to_door.Game(rng)
elif FLAGS.game == 'active_visual_match':
game = active_visual_match.Game(rng)
else:
raise ValueError('Unsupported game "%s".' % FLAGS.game)
episode = game.make_episode()
ui = human_ui.CursesUi(
keys_to_actions={
curses.KEY_UP: common.ACTION_NORTH,
curses.KEY_DOWN: common.ACTION_SOUTH,
curses.KEY_LEFT: common.ACTION_WEST,
curses.KEY_RIGHT: common.ACTION_EAST,
-1: common.ACTION_DELAY,
'q': common.ACTION_QUIT,
'Q': common.ACTION_QUIT},
delay=-1,
colour_fg=game.colours
)
ui.play(episode)
if __name__ == '__main__':
app.run(main)
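# Example invocation (a sketch): play either game in a terminal using the
# arrow keys; `q` or `Q` quits. The module path assumes the repository root is
# on PYTHONPATH.
#
#   python -m tvt.pycolab.human_player --game=key_to_door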
|
deepmind-research-master
|
tvt/pycolab/human_player.py
|
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Pycolab sprites."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pycolab import things as plab_things
from pycolab.prefab_parts import sprites as prefab_sprites
import six
from tvt.pycolab import common
class PlayerSprite(prefab_sprites.MazeWalker):
"""Sprite representing the agent."""
def __init__(self, corner, position, character,
max_steps_per_act, moving_player):
"""Indicates to the superclass that we can't walk off the board."""
super(PlayerSprite, self).__init__(
corner, position, character, impassable=[common.BORDER],
confined_to_board=True)
self._moving_player = moving_player
self._max_steps_per_act = max_steps_per_act
self._num_steps = 0
def update(self, actions, board, layers, backdrop, things, the_plot):
del backdrop # Unused.
if actions is not None:
assert actions in common.ACTIONS
the_plot.log("Step {} | Action {}".format(self._num_steps, actions))
the_plot.add_reward(0.0)
self._num_steps += 1
if actions == common.ACTION_QUIT:
the_plot.terminate_episode()
if self._moving_player:
if actions == common.ACTION_WEST:
self._west(board, the_plot)
elif actions == common.ACTION_EAST:
self._east(board, the_plot)
elif actions == common.ACTION_NORTH:
self._north(board, the_plot)
elif actions == common.ACTION_SOUTH:
self._south(board, the_plot)
if self._max_steps_per_act == self._num_steps:
the_plot.terminate_episode()
class ObjectSprite(plab_things.Sprite):
"""Sprite for a generic object which can be collectable."""
def __init__(self, corner, position, character, reward=0., collectable=True,
terminate=True):
super(ObjectSprite, self).__init__(corner, position, character)
self._reward = reward # Reward on pickup.
self._collectable = collectable
def set_visibility(self, visible):
self._visible = visible
def update(self, actions, board, layers, backdrop, things, the_plot):
player_position = things[common.PLAYER].position
pick_up = self.position == player_position
if pick_up and self.visible:
the_plot.add_reward(self._reward)
if self._collectable:
self.set_visibility(False)
# set all other objects to be invisible
for v in six.itervalues(things):
if isinstance(v, ObjectSprite):
v.set_visibility(False)
class IndicatorObjectSprite(plab_things.Sprite):
"""Sprite for the indicator object.
The indicator object is an object that spawns at a designated position once
the player picks up an object defined by the `char_to_track` argument.
The indicator object is spawned for just a single frame.
"""
def __init__(self, corner, position, character, char_to_track,
override_position=None):
super(IndicatorObjectSprite, self).__init__(corner, position, character)
if override_position is not None:
self._position = override_position
self._char_to_track = char_to_track
self._visible = False
self._pickup_frame = None
def update(self, actions, board, layers, backdrop, things, the_plot):
player_position = things[common.PLAYER].position
pick_up = things[self._char_to_track].position == player_position
if self._pickup_frame:
self._visible = False
if pick_up and not self._pickup_frame:
self._visible = True
self._pickup_frame = the_plot.frame
|
deepmind-research-master
|
tvt/pycolab/objects.py
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Two dimensional convolutional neural net layers."""
from absl import logging
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
def weight_variable(shape, stddev=0.01):
"""Returns the weight variable."""
logging.vlog(1, 'weight init for shape %s', str(shape))
return tf.get_variable(
'w', shape, initializer=tf.random_normal_initializer(stddev=stddev))
def bias_variable(shape):
return tf.get_variable(
'b', shape, initializer=tf.zeros_initializer())
def conv2d(x, w, atrou_rate=1, data_format='NHWC'):
if atrou_rate > 1:
return tf.nn.convolution(
x,
w,
dilation_rate=[atrou_rate] * 2,
padding='SAME',
data_format=data_format)
else:
return tf.nn.conv2d(
x, w, strides=[1, 1, 1, 1], padding='SAME', data_format=data_format)
def make_conv_sep2d_layer(input_node,
in_channels,
channel_multiplier,
out_channels,
layer_name,
filter_size,
filter_size_2=None,
batch_norm=False,
is_training=True,
atrou_rate=1,
data_format='NHWC',
stddev=0.01):
"""Use separable convolutions."""
if filter_size_2 is None:
filter_size_2 = filter_size
logging.vlog(1, 'layer %s in %d out %d chan mult %d', layer_name, in_channels,
out_channels, channel_multiplier)
with tf.variable_scope(layer_name):
with tf.variable_scope('depthwise'):
w_depthwise = weight_variable(
[filter_size, filter_size_2, in_channels, channel_multiplier],
stddev=stddev)
with tf.variable_scope('pointwise'):
w_pointwise = weight_variable(
[1, 1, in_channels * channel_multiplier, out_channels], stddev=stddev)
h_conv = tf.nn.separable_conv2d(
input_node,
w_depthwise,
w_pointwise,
padding='SAME',
strides=[1, 1, 1, 1],
rate=[atrou_rate, atrou_rate],
data_format=data_format)
if batch_norm:
h_conv = batch_norm_layer(
h_conv, layer_name=layer_name, is_training=is_training,
data_format=data_format)
else:
b_conv = bias_variable([out_channels])
h_conv = tf.nn.bias_add(h_conv, b_conv, data_format=data_format)
return h_conv
def batch_norm_layer(h_conv, layer_name, is_training=True, data_format='NCHW'):
"""Batch norm layer."""
logging.vlog(1, 'batch norm for layer %s', layer_name)
return tf.contrib.layers.batch_norm(
h_conv,
is_training=is_training,
fused=True,
decay=0.999,
scope=layer_name,
data_format=data_format)
def make_conv_layer(input_node,
in_channels,
out_channels,
layer_name,
filter_size,
filter_size_2=None,
non_linearity=True,
batch_norm=False,
is_training=True,
atrou_rate=1,
data_format='NHWC',
stddev=0.01):
"""Creates a convolution layer."""
if filter_size_2 is None:
filter_size_2 = filter_size
logging.vlog(
1, 'layer %s in %d out %d', layer_name, in_channels, out_channels)
with tf.variable_scope(layer_name):
w_conv = weight_variable(
[filter_size, filter_size_2, in_channels, out_channels], stddev=stddev)
h_conv = conv2d(
input_node, w_conv, atrou_rate=atrou_rate, data_format=data_format)
if batch_norm:
h_conv = batch_norm_layer(
h_conv, layer_name=layer_name, is_training=is_training,
data_format=data_format)
else:
b_conv = bias_variable([out_channels])
h_conv = tf.nn.bias_add(h_conv, b_conv, data_format=data_format)
if non_linearity:
h_conv = tf.nn.elu(h_conv)
return h_conv
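# Usage sketch (not part of the original module): builds one dilated 3x3
# convolution on a placeholder. Assumes TF1-style graph mode, matching the
# tf.variable_scope / tf.get_variable calls above; the shapes are illustrative.
def _make_conv_layer_demo():
  """Returns a [1, 64, 64, 32] activation from a [1, 64, 64, 40] input."""
  x = tf.placeholder(tf.float32, shape=[1, 64, 64, 40])  # NHWC input.
  return make_conv_layer(
      x,
      in_channels=40,
      out_channels=32,
      layer_name='demo_conv1',
      filter_size=3,
      atrou_rate=2)  # Dilation rate 2 triggers tf.nn.convolution above.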
|
deepmind-research-master
|
alphafold_casp13/two_dim_convnet.py
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code to run distogram inference."""
import collections
import os
import time
from absl import app
from absl import flags
from absl import logging
import numpy as np
import six
import sonnet as snt
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from alphafold_casp13 import config_dict
from alphafold_casp13 import contacts_experiment
from alphafold_casp13 import distogram_io
from alphafold_casp13 import secstruct
flags.DEFINE_string('config_path', None, 'Path of the JSON config file.')
flags.DEFINE_string('checkpoint_path', None, 'Checkpoint path for evaluation.')
flags.DEFINE_boolean('cpu', False, 'Force onto CPU.')
flags.DEFINE_string('output_path', None,
'Base path where all output files will be saved to.')
flags.DEFINE_string('eval_sstable', None,
'Path of the SSTable to read the input tf.Examples from.')
flags.DEFINE_string('stats_file', None,
'Path of the statistics file to use for normalization.')
FLAGS = flags.FLAGS
# A named tuple to store the outputs of a single prediction run.
Prediction = collections.namedtuple(
'Prediction', [
'single_message', # A debugging message.
'num_crops_local', # The number of crops used to make this prediction.
'sequence', # The amino acid sequence.
'filebase', # The chain name. All output files will use this name.
'softmax_probs', # Softmax of the distogram.
'ss', # Secondary structure prediction.
'asa', # ASA prediction.
'torsions', # Torsion prediction.
])
def evaluate(crop_size_x, crop_size_y, feature_normalization, checkpoint_path,
normalization_exclusion, eval_config, network_config):
"""Main evaluation loop."""
experiment = contacts_experiment.Contacts(
tfrecord=eval_config.eval_sstable,
stats_file=eval_config.stats_file,
network_config=network_config,
crop_size_x=crop_size_x,
crop_size_y=crop_size_y,
feature_normalization=feature_normalization,
normalization_exclusion=normalization_exclusion)
checkpoint = snt.get_saver(experiment.model, collections=[
tf.GraphKeys.GLOBAL_VARIABLES,
tf.GraphKeys.MOVING_AVERAGE_VARIABLES])
with tf.train.SingularMonitoredSession(hooks=[]) as sess:
logging.info('Restoring from checkpoint %s', checkpoint_path)
checkpoint.restore(sess, checkpoint_path)
logging.info('Writing output to %s', eval_config.output_path)
eval_begin_time = time.time()
_run_evaluation(sess=sess,
experiment=experiment,
eval_config=eval_config,
output_dir=eval_config.output_path,
min_range=network_config.min_range,
max_range=network_config.max_range,
num_bins=network_config.num_bins,
torsion_bins=network_config.torsion_bins)
logging.info('Finished eval %.1fs', (time.time() - eval_begin_time))
def _run_evaluation(
sess, experiment, eval_config, output_dir, min_range, max_range, num_bins,
torsion_bins):
"""Evaluate a contact map by aggregating crops.
Args:
sess: A tf.train.Session.
experiment: An experiment class.
eval_config: A config dict of eval parameters.
output_dir: Directory to save the predictions to.
min_range: The minimum range in Angstroms to consider in distograms.
max_range: The maximum range in Angstroms to consider in distograms, see
num_bins below for clarification.
num_bins: The number of bins in the distance histogram being predicted.
We divide the min_range--(min_range + max_range) Angstrom range into this
many bins.
torsion_bins: The number of bins the torsion angles are discretised into.
"""
tf.io.gfile.makedirs(os.path.join(output_dir, 'pickle_files'))
logging.info('Eval config is %s\nnum_bins: %d', eval_config, num_bins)
num_examples = 0
num_crops = 0
start_all_time = time.time()
# Either do the whole test set, or up to a specified limit.
max_examples = experiment.num_eval_examples
if eval_config.max_num_examples > 0:
max_examples = min(max_examples, eval_config.max_num_examples)
while num_examples < max_examples:
one_prediction = compute_one_prediction(
num_examples, experiment, sess, eval_config, num_bins, torsion_bins)
single_message = one_prediction.single_message
num_crops_local = one_prediction.num_crops_local
sequence = one_prediction.sequence
filebase = one_prediction.filebase
softmax_probs = one_prediction.softmax_probs
ss = one_prediction.ss
asa = one_prediction.asa
torsions = one_prediction.torsions
num_examples += 1
num_crops += num_crops_local
# Save the output files.
filename = os.path.join(output_dir,
'pickle_files', '%s.pickle' % filebase)
distogram_io.save_distance_histogram(
filename, softmax_probs, filebase, sequence,
min_range=min_range, max_range=max_range, num_bins=num_bins)
if experiment.model.torsion_multiplier > 0:
torsions_dir = os.path.join(output_dir, 'torsions')
tf.io.gfile.makedirs(torsions_dir)
distogram_io.save_torsions(torsions_dir, filebase, sequence, torsions)
if experiment.model.secstruct_multiplier > 0:
ss_dir = os.path.join(output_dir, 'secstruct')
tf.io.gfile.makedirs(ss_dir)
secstruct.save_secstructs(ss_dir, filebase, None, sequence, ss)
if experiment.model.asa_multiplier > 0:
asa_dir = os.path.join(output_dir, 'asa')
tf.io.gfile.makedirs(asa_dir)
secstruct.save_secstructs(asa_dir, filebase, None, sequence,
np.expand_dims(asa, 1), label='Deepmind 2D ASA')
time_spent = time.time() - start_all_time
logging.info(
'Evaluate %d examples, %d crops %.1f crops/ex. '
'Took %.1fs, %.3f s/example %.3f crops/s\n%s',
num_examples, num_crops, num_crops / float(num_examples), time_spent,
time_spent / num_examples, num_crops / time_spent, single_message)
logging.info('Tested on %d', num_examples)
def compute_one_prediction(
num_examples, experiment, sess, eval_config, num_bins, torsion_bins):
"""Find the contact map for a single domain."""
num_crops_local = 0
debug_steps = 0
start = time.time()
output_fetches = {'probs': experiment.eval_probs}
output_fetches['softmax_probs'] = experiment.eval_probs_softmax
# Add the auxiliary outputs if present.
experiment.model.update_crop_fetches(output_fetches)
# Get data.
batch = experiment.get_one_example(sess)
length = batch['sequence_lengths'][0]
batch_size = batch['sequence_lengths'].shape[0]
domain = batch['domain_name'][0][0].decode('utf-8')
chain = batch['chain_name'][0][0].decode('utf-8')
filebase = domain or chain
sequence = six.ensure_str(batch['sequences'][0][0])
  logging.info('Working on %d %s %s %d', num_examples, domain, chain, length)
inputs_1d = batch['inputs_1d']
if 'residue_index' in batch:
logging.info('Getting residue_index from features')
residue_index = np.squeeze(
batch['residue_index'], axis=2).astype(np.int32)
else:
logging.info('Generating residue_index')
residue_index = np.tile(np.expand_dims(
np.arange(length, dtype=np.int32), 0), [batch_size, 1])
assert batch_size == 1
num_examples += batch_size
# Crops.
prob_accum = np.zeros((length, length, 2))
ss_accum = np.zeros((length, 8))
torsions_accum = np.zeros((length, torsion_bins**2))
asa_accum = np.zeros((length,))
weights_1d_accum = np.zeros((length,))
softmax_prob_accum = np.zeros((length, length, num_bins), dtype=np.float32)
crop_size_x = experiment.crop_size_x
crop_step_x = crop_size_x // eval_config.crop_shingle_x
crop_size_y = experiment.crop_size_y
crop_step_y = crop_size_y // eval_config.crop_shingle_y
prob_weights = 1
if eval_config.pyramid_weights > 0:
sx = np.expand_dims(np.linspace(1.0 / crop_size_x, 1, crop_size_x), 1)
sy = np.expand_dims(np.linspace(1.0 / crop_size_y, 1, crop_size_y), 0)
prob_weights = np.minimum(np.minimum(sx, np.flipud(sx)),
np.minimum(sy, np.fliplr(sy)))
prob_weights /= np.max(prob_weights)
prob_weights = np.minimum(prob_weights, eval_config.pyramid_weights)
logging.log_first_n(logging.INFO, 'Crop: %dx%d step %d,%d pyr %.2f',
debug_steps,
crop_size_x, crop_size_y,
crop_step_x, crop_step_y, eval_config.pyramid_weights)
# Accumulate all crops, starting and ending half off the square.
for i in range(-crop_size_x // 2, length - crop_size_x // 2, crop_step_x):
for j in range(-crop_size_y // 2, length - crop_size_y // 2, crop_step_y):
# The ideal crop.
patch = compute_one_patch(
sess, experiment, output_fetches, inputs_1d, residue_index,
prob_weights, batch, length, i, j, crop_size_x, crop_size_y)
# Assemble the crops into a final complete prediction.
ic = max(0, i)
jc = max(0, j)
ic_to = ic + patch['prob'].shape[1]
jc_to = jc + patch['prob'].shape[0]
prob_accum[jc:jc_to, ic:ic_to, 0] += patch['prob'] * patch['weight']
prob_accum[jc:jc_to, ic:ic_to, 1] += patch['weight']
softmax_prob_accum[jc:jc_to, ic:ic_to, :] += (
patch['softmax'] * np.expand_dims(patch['weight'], 2))
weights_1d_accum[jc:jc_to] += 1
weights_1d_accum[ic:ic_to] += 1
if 'asa_x' in patch:
asa_accum[ic:ic + patch['asa_x'].shape[0]] += np.squeeze(
patch['asa_x'], axis=1)
asa_accum[jc:jc + patch['asa_y'].shape[0]] += np.squeeze(
patch['asa_y'], axis=1)
if 'ss_x' in patch:
ss_accum[ic:ic + patch['ss_x'].shape[0]] += patch['ss_x']
ss_accum[jc:jc + patch['ss_y'].shape[0]] += patch['ss_y']
if 'torsions_x' in patch:
torsions_accum[
ic:ic + patch['torsions_x'].shape[0]] += patch['torsions_x']
torsions_accum[
jc:jc + patch['torsions_y'].shape[0]] += patch['torsions_y']
num_crops_local += 1
single_message = (
'Constructed %s len %d from %d chunks [%d, %d x %d, %d] '
'in %5.1fs' % (
filebase, length, num_crops_local,
crop_size_x, crop_step_x, crop_size_y, crop_step_y,
time.time() - start))
logging.info(single_message)
logging.info('prob_accum[:, :, 1]: %s', prob_accum[:, :, 1])
assert (prob_accum[:, :, 1] > 0.0).all()
probs = prob_accum[:, :, 0] / prob_accum[:, :, 1]
softmax_probs = softmax_prob_accum[:, :, :] / prob_accum[:, :, 1:2]
asa_accum /= weights_1d_accum
ss_accum /= np.expand_dims(weights_1d_accum, 1)
torsions_accum /= np.expand_dims(weights_1d_accum, 1)
# The probs are symmetrical.
probs = (probs + probs.transpose()) / 2
if num_bins > 1:
softmax_probs = (softmax_probs + np.transpose(
softmax_probs, axes=[1, 0, 2])) / 2
return Prediction(
single_message=single_message,
num_crops_local=num_crops_local,
sequence=sequence,
filebase=filebase,
softmax_probs=softmax_probs,
ss=ss_accum,
asa=asa_accum,
torsions=torsions_accum)
def compute_one_patch(sess, experiment, output_fetches, inputs_1d,
residue_index, prob_weights, batch, length, i, j,
crop_size_x, crop_size_y):
"""Compute the output predictions for a single crop."""
# Note that these are allowed to go off the end of the protein.
end_x = i + crop_size_x
end_y = j + crop_size_y
crop_limits = np.array([[i, end_x, j, end_y]], dtype=np.int32)
ic = max(0, i)
jc = max(0, j)
end_x_cropped = min(length, end_x)
end_y_cropped = min(length, end_y)
prepad_x = max(0, -i)
prepad_y = max(0, -j)
postpad_x = end_x - end_x_cropped
postpad_y = end_y - end_y_cropped
# Precrop the 2D features:
inputs_2d = np.pad(batch['inputs_2d'][
:, jc:end_y, ic:end_x, :],
[[0, 0],
[prepad_y, postpad_y],
[prepad_x, postpad_x],
[0, 0]], mode='constant')
assert inputs_2d.shape[1] == crop_size_y
assert inputs_2d.shape[2] == crop_size_x
# Generate the corresponding crop, but it might be truncated.
cxx = batch['inputs_2d'][:, ic:end_x, ic:end_x, :]
cyy = batch['inputs_2d'][:, jc:end_y, jc:end_y, :]
if cxx.shape[1] < inputs_2d.shape[1]:
cxx = np.pad(cxx, [[0, 0],
[prepad_x, max(0, i + crop_size_y - length)],
[prepad_x, postpad_x],
[0, 0]], mode='constant')
assert cxx.shape[1] == crop_size_y
assert cxx.shape[2] == crop_size_x
if cyy.shape[2] < inputs_2d.shape[2]:
cyy = np.pad(cyy, [[0, 0],
[prepad_y, postpad_y],
[prepad_y, max(0, j + crop_size_x - length)],
[0, 0]], mode='constant')
assert cyy.shape[1] == crop_size_y
assert cyy.shape[2] == crop_size_x
inputs_2d = np.concatenate([inputs_2d, cxx, cyy], 3)
output_results = sess.run(output_fetches, feed_dict={
experiment.inputs_1d_placeholder: inputs_1d,
experiment.residue_index_placeholder: residue_index,
experiment.inputs_2d_placeholder: inputs_2d,
experiment.crop_placeholder: crop_limits,
})
# Crop out the "live" region of the probs.
prob_patch = output_results['probs'][
0, prepad_y:crop_size_y - postpad_y,
prepad_x:crop_size_x - postpad_x]
weight_patch = prob_weights[prepad_y:crop_size_y - postpad_y,
prepad_x:crop_size_x - postpad_x]
patch = {'prob': prob_patch, 'weight': weight_patch}
if 'softmax_probs' in output_results:
patch['softmax'] = output_results['softmax_probs'][
0, prepad_y:crop_size_y - postpad_y,
prepad_x:crop_size_x - postpad_x]
if 'secstruct_probs' in output_results:
patch['ss_x'] = output_results['secstruct_probs'][
0, prepad_x:crop_size_x - postpad_x]
patch['ss_y'] = output_results['secstruct_probs'][
0, crop_size_x + prepad_y:crop_size_x + crop_size_y - postpad_y]
if 'torsion_probs' in output_results:
patch['torsions_x'] = output_results['torsion_probs'][
0, prepad_x:crop_size_x - postpad_x]
patch['torsions_y'] = output_results['torsion_probs'][
0, crop_size_x + prepad_y:crop_size_x + crop_size_y - postpad_y]
if 'asa_output' in output_results:
patch['asa_x'] = output_results['asa_output'][
0, prepad_x:crop_size_x - postpad_x]
patch['asa_y'] = output_results['asa_output'][
0, crop_size_x + prepad_y:crop_size_x + crop_size_y - postpad_y]
return patch
def main(argv):
del argv # Unused.
logging.info('Loading a JSON config from: %s', FLAGS.config_path)
with tf.io.gfile.GFile(FLAGS.config_path, 'r') as f:
config = config_dict.ConfigDict.from_json(f.read())
# Redefine the relevant output fields.
if FLAGS.eval_sstable:
config.eval_config.eval_sstable = FLAGS.eval_sstable
if FLAGS.stats_file:
config.eval_config.stats_file = FLAGS.stats_file
if FLAGS.output_path:
config.eval_config.output_path = FLAGS.output_path
with tf.device('/cpu:0' if FLAGS.cpu else None):
evaluate(checkpoint_path=FLAGS.checkpoint_path, **config)
if __name__ == '__main__':
app.run(main)
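# Example invocation (a sketch): these are the flags defined in this file; the
# paths are placeholders and must point at a distogram config, checkpoint and
# input data.
#
#   python -m alphafold_casp13.contacts \
#     --config_path=/path/to/config.json \
#     --checkpoint_path=/path/to/checkpoint \
#     --eval_sstable=/path/to/input_examples \
#     --stats_file=/path/to/stats_file \
#     --output_path=/tmp/contacts_output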
|
deepmind-research-master
|
alphafold_casp13/contacts.py
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layer for modelling and scoring secondary structure."""
import os
from absl import logging
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
# 8-class classes (Q8)
SECONDARY_STRUCTURES = '-HETSGBI'
# Equivalence classes for 3-class (Q3) from Li & Yu 2016.
# See http://www.cmbi.ru.nl/dssp.html for letter explanations.
Q3_MAP = ['-TSGIB', 'H', 'E']
def make_q3_matrices():
"""Generate mapping matrices for secstruct Q8:Q3 equivalence classes."""
dimension = len(SECONDARY_STRUCTURES)
q3_map_matrix = np.zeros((dimension, len(Q3_MAP)))
q3_lookup = np.zeros((dimension,), dtype=np.int32)
for i, eclass in enumerate(Q3_MAP): # equivalence classes
for m in eclass: # Members of the class.
ss_type = SECONDARY_STRUCTURES.index(m)
q3_map_matrix[ss_type, i] = 1.0
q3_lookup[ss_type] = i
return q3_map_matrix, q3_lookup
class Secstruct(object):
"""Make a layer that computes hierarchical secstruct."""
# Build static, shared structures:
q3_map_matrix, q3_lookup = make_q3_matrices()
static_dimension = len(SECONDARY_STRUCTURES)
def __init__(self, name='secstruct'):
self.name = name
self._dimension = Secstruct.static_dimension
def make_layer_new(self, activations):
"""Make the layer."""
with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
logging.info('Creating secstruct %s', activations)
self.logits = tf.contrib.layers.linear(activations, self._dimension)
self.ss_q8_probs = tf.nn.softmax(self.logits)
self.ss_q3_probs = tf.matmul(
self.ss_q8_probs, tf.constant(self.q3_map_matrix, dtype=tf.float32))
def get_q8_probs(self):
return self.ss_q8_probs
def save_secstructs(dump_dir_path, name, index, sequence, probs,
label='Deepmind secstruct'):
"""Write secstruct prob distributions to an ss2 file.
Can be overloaded to write out asa values too.
Args:
dump_dir_path: directory where to write files.
name: name of domain
index: index number of multiple samples. (or None for no index)
sequence: string of L residue labels
probs: L x D matrix of probabilities. L is length of sequence,
D is probability dimension (usually 3).
label: A label for the file.
"""
filename = os.path.join(dump_dir_path, '%s.ss2' % name)
if index is not None:
filename = os.path.join(dump_dir_path, '%s_%04d.ss2' % (name, index))
with tf.io.gfile.GFile(filename, 'w') as gf:
logging.info('Saving secstruct to %s', filename)
gf.write('# %s CLASSES [%s] %s sample %s\n\n' % (
label, ''.join(SECONDARY_STRUCTURES[:probs.shape[1]]), name, index))
for l in range(probs.shape[0]):
ss = SECONDARY_STRUCTURES[np.argmax(probs[l, :])]
gf.write('%4d %1s %1s %s\n' % (l + 1, sequence[l], ss, ''.join(
[('%6.3f' % p) for p in probs[l, :]])))
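# Worked example (a sketch, not part of the original module): a one-hot Q8
# distribution for helix 'H' collapses onto Q3 class 1 under Q3_MAP above.
def _q3_mapping_demo():
  """Maps a one-hot Q8 'H' prediction onto the 3-class representation."""
  q3_map_matrix, q3_lookup = make_q3_matrices()
  q8 = np.zeros((1, len(SECONDARY_STRUCTURES)))
  q8[0, SECONDARY_STRUCTURES.index('H')] = 1.0
  q3 = q8.dot(q3_map_matrix)  # -> [[0., 1., 0.]]
  return q3, q3_lookup[SECONDARY_STRUCTURES.index('H')]  # Second value is 1.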
|
deepmind-research-master
|
alphafold_casp13/secstruct.py
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for storing configuration flags."""
import json
class ConfigDict(dict):
"""Configuration dictionary with convenient dot element access."""
def __init__(self, *args, **kwargs):
super(ConfigDict, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for key, value in arg.items():
self._add(key, value)
for key, value in kwargs.items():
self._add(key, value)
def _add(self, key, value):
if isinstance(value, dict):
self[key] = ConfigDict(value)
else:
self[key] = value
def __getattr__(self, attr):
try:
return self[attr]
except KeyError as e:
raise AttributeError(e)
def __setattr__(self, key, value):
self.__setitem__(key, value)
def __setitem__(self, key, value):
super(ConfigDict, self).__setitem__(key, value)
self.__dict__.update({key: value})
def __delattr__(self, item):
self.__delitem__(item)
def __delitem__(self, key):
super(ConfigDict, self).__delitem__(key)
del self.__dict__[key]
def to_json(self):
return json.dumps(self)
@classmethod
def from_json(cls, json_string):
return cls(json.loads(json_string))
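# Usage sketch (not part of the original module): dot access works on nested
# dictionaries and configurations round-trip through JSON.
def _config_dict_demo():
  """Builds a nested config, mutates it and restores it from JSON."""
  config = ConfigDict({'eval_config': {'max_num_examples': 4},
                       'crop_size_x': 64})
  config.crop_size_x = 128  # Attribute-style write.
  assert config.eval_config.max_num_examples == 4
  restored = ConfigDict.from_json(config.to_json())
  assert restored.crop_size_x == 128
  return restored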
|
deepmind-research-master
|
alphafold_casp13/config_dict.py
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for predicting Accessible Surface Area."""
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
class ASAOutputLayer(object):
"""An output layer to predict Accessible Surface Area."""
def __init__(self, name='asa'):
self.name = name
def compute_asa_output(self, activations):
"""Just compute the logits and outputs given activations."""
asa_logits = tf.contrib.layers.linear(
activations, 1,
weights_initializer=tf.random_uniform_initializer(-0.01, 0.01),
scope='ASALogits')
self.asa_output = tf.nn.relu(asa_logits, name='ASA_output_relu')
return asa_logits
|
deepmind-research-master
|
alphafold_casp13/asa_output.py
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""2D Resnet."""
from absl import logging
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from alphafold_casp13 import two_dim_convnet
def make_sep_res_layer(
input_node,
in_channels,
out_channels,
layer_name,
filter_size,
filter_size_2=None,
batch_norm=False,
is_training=True,
divide_channels_by=2,
atrou_rate=1,
channel_multiplier=0,
data_format='NHWC',
stddev=0.01,
dropout_keep_prob=1.0):
"""A separable resnet block."""
with tf.name_scope(layer_name):
input_times_almost_1 = input_node
h_conv = input_times_almost_1
if batch_norm:
h_conv = two_dim_convnet.batch_norm_layer(
h_conv, layer_name=layer_name, is_training=is_training,
data_format=data_format)
h_conv = tf.nn.elu(h_conv)
if filter_size_2 is None:
filter_size_2 = filter_size
# 1x1 with half size
h_conv = two_dim_convnet.make_conv_layer(
h_conv,
in_channels=in_channels,
        out_channels=in_channels // divide_channels_by,  # Integer channels.
layer_name=layer_name + '_1x1h',
filter_size=1,
filter_size_2=1,
non_linearity=True,
batch_norm=batch_norm,
is_training=is_training,
data_format=data_format,
stddev=stddev)
# 3x3 with half size
if channel_multiplier == 0:
h_conv = two_dim_convnet.make_conv_layer(
h_conv,
          in_channels=in_channels // divide_channels_by,
          out_channels=in_channels // divide_channels_by,
layer_name=layer_name + '_%dx%dh' % (filter_size, filter_size_2),
filter_size=filter_size,
filter_size_2=filter_size_2,
non_linearity=True,
batch_norm=batch_norm,
is_training=is_training,
atrou_rate=atrou_rate,
data_format=data_format,
stddev=stddev)
else:
# We use separable convolution for 3x3
h_conv = two_dim_convnet.make_conv_sep2d_layer(
h_conv,
          in_channels=in_channels // divide_channels_by,
          channel_multiplier=channel_multiplier,
          out_channels=in_channels // divide_channels_by,
layer_name=layer_name + '_sep%dx%dh' % (filter_size, filter_size_2),
filter_size=filter_size,
filter_size_2=filter_size_2,
batch_norm=batch_norm,
is_training=is_training,
atrou_rate=atrou_rate,
data_format=data_format,
stddev=stddev)
# 1x1 back to normal size without relu
h_conv = two_dim_convnet.make_conv_layer(
h_conv,
        in_channels=in_channels // divide_channels_by,
out_channels=out_channels,
layer_name=layer_name + '_1x1',
filter_size=1,
filter_size_2=1,
non_linearity=False,
batch_norm=False,
is_training=is_training,
data_format=data_format,
stddev=stddev)
if dropout_keep_prob < 1.0:
logging.info('dropout keep prob %f', dropout_keep_prob)
h_conv = tf.nn.dropout(h_conv, keep_prob=dropout_keep_prob)
return h_conv + input_times_almost_1
def make_two_dim_resnet(
input_node,
num_residues=50,
num_features=40,
num_predictions=1,
num_channels=32,
num_layers=2,
filter_size=3,
filter_size_2=None,
final_non_linearity=False,
name_prefix='',
fancy=True,
batch_norm=False,
is_training=False,
atrou_rates=None,
channel_multiplier=0,
divide_channels_by=2,
resize_features_with_1x1=False,
data_format='NHWC',
stddev=0.01,
dropout_keep_prob=1.0):
"""Two dim resnet towers."""
del num_residues # Unused.
if atrou_rates is None:
atrou_rates = [1]
if not fancy:
    raise ValueError('The non-fancy code path is deprecated; use fancy=True.')
logging.info('atrou rates %s', atrou_rates)
logging.info('name prefix %s', name_prefix)
x_image = input_node
previous_layer = x_image
non_linearity = True
for i_layer in range(num_layers):
in_channels = num_channels
out_channels = num_channels
curr_atrou_rate = atrou_rates[i_layer % len(atrou_rates)]
if i_layer == 0:
in_channels = num_features
if i_layer == num_layers - 1:
out_channels = num_predictions
non_linearity = final_non_linearity
if i_layer == 0 or i_layer == num_layers - 1:
layer_name = name_prefix + 'conv%d' % (i_layer + 1)
initial_filter_size = filter_size
if resize_features_with_1x1:
initial_filter_size = 1
previous_layer = two_dim_convnet.make_conv_layer(
input_node=previous_layer,
in_channels=in_channels,
out_channels=out_channels,
layer_name=layer_name,
filter_size=initial_filter_size,
filter_size_2=filter_size_2,
non_linearity=non_linearity,
atrou_rate=curr_atrou_rate,
data_format=data_format,
stddev=stddev)
else:
layer_name = name_prefix + 'res%d' % (i_layer + 1)
previous_layer = make_sep_res_layer(
input_node=previous_layer,
in_channels=in_channels,
out_channels=out_channels,
layer_name=layer_name,
filter_size=filter_size,
filter_size_2=filter_size_2,
batch_norm=batch_norm,
is_training=is_training,
atrou_rate=curr_atrou_rate,
channel_multiplier=channel_multiplier,
divide_channels_by=divide_channels_by,
data_format=data_format,
stddev=stddev,
dropout_keep_prob=dropout_keep_prob)
y = previous_layer
return y
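# Illustrative usage sketch, not part of the original module: the layer sizes
# and input shape below are assumptions chosen only to show how the builder is
# typically wired up.
def _example_two_dim_resnet_tower(features_2d):
  """Builds a small tower over NHWC features of shape [batch, L, L, 40]."""
  return make_two_dim_resnet(
      input_node=features_2d,
      num_features=40,  # Must match the channel dimension of `features_2d`.
      num_predictions=64,  # E.g. the number of distogram bins.
      num_channels=32,
      num_layers=4,  # First/last are plain convs, the middle two are blocks.
      atrou_rates=[1, 2, 4, 8],
      batch_norm=False,
      is_training=False)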
|
deepmind-research-master
|
alphafold_casp13/two_dim_resnet.py
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Form a weighted average of several distograms.
Can also, or instead, form a weighted average of a set of distance histogram
pickle files, so long as they have identical hyperparameters.
"""
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from alphafold_casp13 import distogram_io
from alphafold_casp13 import parsers
flags.DEFINE_list(
'pickle_dirs', [],
'Comma separated list of directories with pickle files to ensemble.')
flags.DEFINE_list(
'weights', [],
'Comma separated list of weights for the pickle files from different dirs.')
flags.DEFINE_string(
'output_dir', None, 'Directory where to save results of the evaluation.')
FLAGS = flags.FLAGS
def ensemble_distance_histograms(pickle_dirs, weights, output_dir):
"""Find all the contact maps in the first dir, then ensemble across dirs."""
if len(pickle_dirs) <= 1:
logging.warning('Pointless to ensemble %d pickle_dirs %s',
len(pickle_dirs), pickle_dirs)
# Carry on if there's one dir, otherwise do nothing.
if not pickle_dirs:
return
tf.io.gfile.makedirs(output_dir)
one_dir_pickle_files = tf.io.gfile.glob(
os.path.join(pickle_dirs[0], '*.pickle'))
assert one_dir_pickle_files, pickle_dirs[0]
original_files = len(one_dir_pickle_files)
logging.info('Found %d files %d in first of %d dirs',
original_files, len(one_dir_pickle_files), len(pickle_dirs))
targets = [os.path.splitext(os.path.basename(f))[0]
for f in one_dir_pickle_files]
skipped = 0
wrote = 0
for t in targets:
dump_file = os.path.join(output_dir, t + '.pickle')
pickle_files = [os.path.join(pickle_dir, t + '.pickle')
for pickle_dir in pickle_dirs]
_, new_dict = ensemble_one_distance_histogram(pickle_files, weights)
if new_dict is not None:
wrote += 1
distogram_io.save_distance_histogram_from_dict(dump_file, new_dict)
msg = 'Distograms Wrote %s %d / %d Skipped %d %s' % (
t, wrote, len(one_dir_pickle_files), skipped, dump_file)
logging.info(msg)
def ensemble_one_distance_histogram(pickle_files, weights):
"""Average the given pickle_files and dump."""
dicts = []
sequence = None
max_dim = None
for picklefile in pickle_files:
if not tf.io.gfile.exists(picklefile):
logging.warning('missing %s', picklefile)
break
logging.info('loading pickle file %s', picklefile)
distance_histogram_dict = parsers.parse_distance_histogram_dict(picklefile)
if sequence is None:
sequence = distance_histogram_dict['sequence']
else:
assert sequence == distance_histogram_dict['sequence'], '%s vs %s' % (
sequence, distance_histogram_dict['sequence'])
dicts.append(distance_histogram_dict)
assert dicts[-1]['probs'].shape[0] == dicts[-1]['probs'].shape[1], (
'%d vs %d' % (dicts[-1]['probs'].shape[0], dicts[-1]['probs'].shape[1]))
assert (dicts[0]['probs'].shape[0:2] == dicts[-1]['probs'].shape[0:2]
), ('%d vs %d' % (dicts[0]['probs'].shape, dicts[-1]['probs'].shape))
if max_dim is None or max_dim < dicts[-1]['probs'].shape[2]:
max_dim = dicts[-1]['probs'].shape[2]
if len(dicts) != len(pickle_files):
logging.warning('length mismatch\n%s\nVS\n%s', dicts, pickle_files)
return sequence, None
ensemble_hist = (
sum(w * c['probs'] for w, c in zip(weights, dicts)) / sum(weights))
new_dict = dict(dicts[0])
new_dict['probs'] = ensemble_hist
return sequence, new_dict
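def _example_weighted_average():
  """Minimal numeric sketch of the bin-wise weighted mean computed above.

  Not part of the original tool; the shapes and weights are made up purely for
  illustration.
  """
  import numpy as np  # Local import: numpy is not otherwise used in this file.
  p1 = np.full((5, 5, 10), 0.3, dtype=np.float32)
  p2 = np.full((5, 5, 10), 0.6, dtype=np.float32)
  weights = [2.0, 1.0]
  # Same arithmetic as ensemble_one_distance_histogram:
  # sum(w * probs) / sum(w); every entry here equals 0.4.
  return (weights[0] * p1 + weights[1] * p2) / sum(weights)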
def main(argv):
del argv # Unused.
num_dirs = len(FLAGS.pickle_dirs)
if FLAGS.weights:
assert len(FLAGS.weights) == num_dirs, (
'Supply as many weights as pickle_dirs, or no weights')
weights = [float(w) for w in FLAGS.weights]
else:
weights = [1.0 for w in range(num_dirs)]
ensemble_distance_histograms(
pickle_dirs=FLAGS.pickle_dirs,
weights=weights,
output_dir=FLAGS.output_dir)
if __name__ == '__main__':
app.run(main)
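# Example invocation (paths and weights below are hypothetical):
#   python -m alphafold_casp13.ensemble_contact_maps \
#     --pickle_dirs=/tmp/run_a,/tmp/run_b --weights=2,1 --output_dir=/tmp/ens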
|
deepmind-research-master
|
alphafold_casp13/ensemble_contact_maps.py
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parsers for various standard biology or AlphaFold-specific formats."""
import pickle
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
def distance_histogram_dict(f):
"""Parses distance histogram dict pickle.
Distance histograms are stored as pickles of dicts.
Write one of these with contacts/write_rr_file.write_pickle_file()
Args:
f: File-like handle to distance histogram dict pickle.
Returns:
Dict with fields:
probs: (an L x L x num_bins) histogram.
num_bins: number of bins for each residue pair
min_range: left hand edge of the distance histogram
max_range: the extent of the histogram NOT the right hand edge.
"""
contact_dict = pickle.load(f, encoding='latin1')
num_res = len(contact_dict['sequence'])
if not all(key in contact_dict.keys()
for key in ['probs', 'num_bins', 'min_range', 'max_range']):
raise ValueError('The pickled contact dict doesn\'t contain all required '
'keys: probs, num_bins, min_range, max_range but %s.' %
contact_dict.keys())
if contact_dict['probs'].ndim != 3:
raise ValueError(
'Probs is not rank 3 but %d' % contact_dict['probs'].ndim)
if contact_dict['num_bins'] != contact_dict['probs'].shape[2]:
raise ValueError(
'The probs shape doesn\'t match num_bins in the third dimension. '
'Expected %d got %d.' % (contact_dict['num_bins'],
contact_dict['probs'].shape[2]))
if contact_dict['probs'].shape[:2] != (num_res, num_res):
raise ValueError(
'The first two probs dims (%i, %i) aren\'t equal to len(sequence) %i'
% (contact_dict['probs'].shape[0], contact_dict['probs'].shape[1],
num_res))
return contact_dict
def parse_distance_histogram_dict(filepath):
  """Parses distance histogram pickle from filepath."""
with tf.io.gfile.GFile(filepath, 'rb') as f:
return distance_histogram_dict(f)
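# Illustrative usage sketch, not part of the original module; the path is
# hypothetical:
#   histogram = parse_distance_histogram_dict('/tmp/T1000.pickle')
#   probs = histogram['probs']  # Shape [L, L, num_bins].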
|
deepmind-research-master
|
alphafold_casp13/parsers.py
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contact prediction convnet experiment example."""
from absl import logging
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from alphafold_casp13 import contacts_dataset
from alphafold_casp13 import contacts_network
def _int_ph(shape, name):
return tf.placeholder(
dtype=tf.int32, shape=shape, name=('%s_placeholder' % name))
def _float_ph(shape, name):
return tf.placeholder(
dtype=tf.float32, shape=shape, name=('%s_placeholder' % name))
class Contacts(object):
"""Contact prediction experiment."""
def __init__(
self, tfrecord, stats_file, network_config, crop_size_x, crop_size_y,
feature_normalization, normalization_exclusion):
"""Builds the TensorFlow graph."""
self.network_config = network_config
self.crop_size_x = crop_size_x
self.crop_size_y = crop_size_y
self._feature_normalization = feature_normalization
self._normalization_exclusion = normalization_exclusion
self._model = contacts_network.ContactsNet(**network_config)
self._features = network_config.features
self._scalars = network_config.scalars
self._targets = network_config.targets
# Add extra targets we need.
required_targets = ['domain_name', 'resolution', 'chain_name']
if self.model.torsion_multiplier > 0:
required_targets.extend([
'phi_angles', 'phi_mask', 'psi_angles', 'psi_mask'])
if self.model.secstruct_multiplier > 0:
required_targets.extend(['sec_structure', 'sec_structure_mask'])
if self.model.asa_multiplier > 0:
required_targets.extend(['solv_surf', 'solv_surf_mask'])
extra_targets = [t for t in required_targets if t not in self._targets]
if extra_targets:
targets = list(self._targets)
targets.extend(extra_targets)
self._targets = tuple(targets)
logging.info('Targets %s %s extra %s',
type(self._targets), self._targets, extra_targets)
logging.info('Evaluating on %s, stats: %s', tfrecord, stats_file)
self._build_evaluation_graph(tfrecord=tfrecord, stats_file=stats_file)
@property
def model(self):
return self._model
def _get_feature_normalization(self, features):
return {key: self._feature_normalization
for key in features
if key not in list(self._normalization_exclusion)}
def _build_evaluation_graph(self, tfrecord, stats_file):
"""Constructs the graph in pieces so it can be fed."""
with tf.name_scope('competitionsep'):
# Construct the dataset and mapping ops.
dataset = contacts_dataset.create_tf_dataset(
tf_record_filename=tfrecord,
features=tuple(self._features) + tuple(
self._scalars) + tuple(self._targets))
def normalize(data):
return contacts_dataset.normalize_from_stats_file(
features=data,
stats_file_path=stats_file,
feature_normalization=self._get_feature_normalization(
self._features),
copy_unnormalized=list(set(self._features) & set(self._targets)))
def convert_to_legacy(features):
return contacts_dataset.convert_to_legacy_proteins_dataset_format(
features, self._features, self._scalars, self._targets)
dataset = dataset.map(normalize)
dataset = dataset.map(convert_to_legacy)
dataset = dataset.batch(1)
# Get a batch of tensors in the legacy ProteinsDataset format.
iterator = tf.data.make_one_shot_iterator(dataset)
self._input_batch = iterator.get_next()
self.num_eval_examples = sum(
1 for _ in tf.python_io.tf_record_iterator(tfrecord))
logging.info('Eval batch:\n%s', self._input_batch)
feature_dim_1d = self._input_batch.inputs_1d.shape.as_list()[-1]
feature_dim_2d = self._input_batch.inputs_2d.shape.as_list()[-1]
feature_dim_2d *= 3 # The diagonals will be stacked before feeding.
# Now placeholders for the graph to compute the outputs for one crop.
self.inputs_1d_placeholder = _float_ph(
shape=[None, None, feature_dim_1d], name='inputs_1d')
self.residue_index_placeholder = _int_ph(
shape=[None, None], name='residue_index')
self.inputs_2d_placeholder = _float_ph(
shape=[None, None, None, feature_dim_2d], name='inputs_2d')
# 4 ints: x_start, x_end, y_start, y_end.
self.crop_placeholder = _int_ph(shape=[None, 4], name='crop')
# Finally placeholders for the graph to score the complete contact map.
self.probs_placeholder = _float_ph(shape=[None, None, None], name='probs')
self.softmax_probs_placeholder = _float_ph(
shape=[None, None, None, self.network_config.num_bins],
name='softmax_probs')
self.cb_placeholder = _float_ph(shape=[None, None, 3], name='cb')
self.cb_mask_placeholder = _float_ph(shape=[None, None], name='cb_mask')
self.lengths_placeholder = _int_ph(shape=[None], name='lengths')
if self.model.secstruct_multiplier > 0:
self.sec_structure_placeholder = _float_ph(
shape=[None, None, 8], name='sec_structure')
self.sec_structure_logits_placeholder = _float_ph(
shape=[None, None, 8], name='sec_structure_logits')
self.sec_structure_mask_placeholder = _float_ph(
shape=[None, None, 1], name='sec_structure_mask')
if self.model.asa_multiplier > 0:
self.solv_surf_placeholder = _float_ph(
shape=[None, None, 1], name='solv_surf')
self.solv_surf_logits_placeholder = _float_ph(
shape=[None, None, 1], name='solv_surf_logits')
self.solv_surf_mask_placeholder = _float_ph(
shape=[None, None, 1], name='solv_surf_mask')
if self.model.torsion_multiplier > 0:
self.torsions_truth_placeholder = _float_ph(
shape=[None, None, 2], name='torsions_truth')
self.torsions_mask_placeholder = _float_ph(
shape=[None, None, 1], name='torsions_mask')
self.torsion_logits_placeholder = _float_ph(
shape=[None, None, self.network_config.torsion_bins ** 2],
name='torsion_logits')
# Build a dict to pass all the placeholders into build.
placeholders = {
'inputs_1d_placeholder': self.inputs_1d_placeholder,
'residue_index_placeholder': self.residue_index_placeholder,
'inputs_2d_placeholder': self.inputs_2d_placeholder,
'crop_placeholder': self.crop_placeholder,
'probs_placeholder': self.probs_placeholder,
'softmax_probs_placeholder': self.softmax_probs_placeholder,
'cb_placeholder': self.cb_placeholder,
'cb_mask_placeholder': self.cb_mask_placeholder,
'lengths_placeholder': self.lengths_placeholder,
}
if self.model.secstruct_multiplier > 0:
placeholders.update({
'sec_structure': self.sec_structure_placeholder,
'sec_structure_logits_placeholder':
self.sec_structure_logits_placeholder,
'sec_structure_mask': self.sec_structure_mask_placeholder,})
if self.model.asa_multiplier > 0:
placeholders.update({
'solv_surf': self.solv_surf_placeholder,
'solv_surf_logits_placeholder': self.solv_surf_logits_placeholder,
'solv_surf_mask': self.solv_surf_mask_placeholder,})
if self.model.torsion_multiplier > 0:
placeholders.update({
'torsions_truth': self.torsions_truth_placeholder,
'torsion_logits_placeholder': self.torsion_logits_placeholder,
'torsions_truth_mask': self.torsions_mask_placeholder,})
activations = self._model(
crop_size_x=self.crop_size_x,
crop_size_y=self.crop_size_y,
placeholders=placeholders)
self.eval_probs_softmax = tf.nn.softmax(
activations[:, :, :, :self.network_config.num_bins])
self.eval_probs = tf.reduce_sum(
self.eval_probs_softmax[:, :, :, :self._model.quant_threshold()],
axis=3)
def get_one_example(self, sess):
"""Pull one example off the queue so we can feed it for evaluation."""
request_dict = {
'inputs_1d': self._input_batch.inputs_1d,
'inputs_2d': self._input_batch.inputs_2d,
'sequence_lengths': self._input_batch.sequence_lengths,
'beta_positions': self._input_batch.targets.beta_positions,
'beta_mask': self._input_batch.targets.beta_mask,
'domain_name': self._input_batch.targets.domain_name,
'chain_name': self._input_batch.targets.chain_name,
'sequences': self._input_batch.sequences,
}
if hasattr(self._input_batch.targets, 'residue_index'):
request_dict.update(
{'residue_index': self._input_batch.targets.residue_index})
if hasattr(self._input_batch.targets, 'phi_angles'):
request_dict.update(
{'phi_angles': self._input_batch.targets.phi_angles,
'psi_angles': self._input_batch.targets.psi_angles,
'phi_mask': self._input_batch.targets.phi_mask,
'psi_mask': self._input_batch.targets.psi_mask})
if hasattr(self._input_batch.targets, 'sec_structure'):
request_dict.update(
{'sec_structure': self._input_batch.targets.sec_structure,
'sec_structure_mask': self._input_batch.targets.sec_structure_mask,})
if hasattr(self._input_batch.targets, 'solv_surf'):
request_dict.update(
{'solv_surf': self._input_batch.targets.solv_surf,
'solv_surf_mask': self._input_batch.targets.solv_surf_mask,})
if hasattr(self._input_batch.targets, 'alpha_positions'):
request_dict.update(
{'alpha_positions': self._input_batch.targets.alpha_positions,
'alpha_mask': self._input_batch.targets.alpha_mask,})
batch = sess.run(request_dict)
return batch
|
deepmind-research-master
|
alphafold_casp13/contacts_experiment.py
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Write contact map predictions to a tf.io.gfile.
Either write a binary contact map as an RR format text file, or a
histogram prediction as a pickle of a dict containing a numpy array.
"""
import os
import numpy as np
import six.moves.cPickle as pickle
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
RR_FORMAT = """PFRMAT RR
TARGET {}
AUTHOR DM-ORIGAMI-TEAM
METHOD {}
MODEL 1
{}
"""
def save_rr_file(filename, probs, domain, sequence,
method='dm-contacts-resnet'):
"""Save a contact probability matrix as an RR file."""
assert len(sequence) == probs.shape[0]
assert len(sequence) == probs.shape[1]
with tf.io.gfile.GFile(filename, 'w') as f:
f.write(RR_FORMAT.format(domain, method, sequence))
for i in range(probs.shape[0]):
for j in range(i + 1, probs.shape[1]):
f.write('{:d} {:d} {:d} {:d} {:f}\n'.format(
i + 1, j + 1, 0, 8, probs[j, i]))
f.write('END\n')
def save_torsions(torsions_dir, filebase, sequence, torsions_probs):
"""Save Torsions to a file as pickle of a dict."""
filename = os.path.join(torsions_dir, filebase + '.torsions')
t_dict = dict(probs=torsions_probs, sequence=sequence)
with tf.io.gfile.GFile(filename, 'w') as fh:
pickle.dump(t_dict, fh, protocol=2)
def save_distance_histogram(
filename, probs, domain, sequence, min_range, max_range, num_bins):
"""Save a distance histogram prediction matrix as a pickle file."""
dh_dict = {
'min_range': min_range,
'max_range': max_range,
'num_bins': num_bins,
'domain': domain,
'sequence': sequence,
'probs': probs.astype(np.float32)}
save_distance_histogram_from_dict(filename, dh_dict)
def save_distance_histogram_from_dict(filename, dh_dict):
"""Save a distance histogram prediction matrix as a pickle file."""
fields = ['min_range', 'max_range', 'num_bins', 'domain', 'sequence', 'probs']
missing_fields = [f for f in fields if f not in dh_dict]
assert not missing_fields, 'Fields {} missing from dictionary'.format(
missing_fields)
assert len(dh_dict['sequence']) == dh_dict['probs'].shape[0]
assert len(dh_dict['sequence']) == dh_dict['probs'].shape[1]
assert dh_dict['num_bins'] == dh_dict['probs'].shape[2]
assert dh_dict['min_range'] >= 0.0
assert dh_dict['max_range'] > 0.0
with tf.io.gfile.GFile(filename, 'wb') as fw:
pickle.dump(dh_dict, fw, protocol=2)
def contact_map_from_distogram(distogram_dict):
  """Compute a contact map from a distogram, splitting the 8A boundary bin."""
num_bins = distogram_dict['probs'].shape[-1]
bin_size_angstrom = distogram_dict['max_range'] / num_bins
threshold_cts = (8.0 - distogram_dict['min_range']) / bin_size_angstrom
threshold_bin = int(threshold_cts) # Round down
pred_contacts = np.sum(distogram_dict['probs'][:, :, :threshold_bin], axis=-1)
if threshold_bin < threshold_cts: # Add on the fraction of the boundary bin.
pred_contacts += distogram_dict['probs'][:, :, threshold_bin] * (
threshold_cts - threshold_bin)
return pred_contacts
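def _example_contact_map_split():
  """Illustrative sketch of the boundary-bin split above; not original code.

  Assumed settings: min_range=2.0 A, max_range=20.0 A and 10 bins, so each bin
  spans 2.0 A and the 8 A threshold lands exactly 3 bins in. The first three
  bins are summed in full and no fractional boundary mass is added, because
  threshold_cts == threshold_bin == 3 in this case.
  """
  probs = np.full((4, 4, 10), 0.1, dtype=np.float32)
  distogram = {'probs': probs, 'min_range': 2.0, 'max_range': 20.0}
  return contact_map_from_distogram(distogram)  # Every entry is ~0.3.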
|
deepmind-research-master
|
alphafold_casp13/distogram_io.py
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF wrapper for protein tf.Example datasets."""
import collections
import enum
import json
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
_ProteinDescription = collections.namedtuple(
'_ProteinDescription', (
'sequence_lengths', 'key', 'sequences', 'inputs_1d', 'inputs_2d',
'inputs_2d_diagonal', 'crops', 'scalars', 'targets'))
class FeatureType(enum.Enum):
ZERO_DIM = 0 # Shape [x]
ONE_DIM = 1 # Shape [num_res, x]
TWO_DIM = 2 # Shape [num_res, num_res, x]
# Placeholder values that will be replaced with their true value at runtime.
NUM_RES = 'num residues placeholder'
# Sizes of the protein features. NUM_RES is allowed as a placeholder to be
# replaced with the number of residues.
FEATURES = {
'aatype': (tf.float32, [NUM_RES, 21]),
'alpha_mask': (tf.int64, [NUM_RES, 1]),
'alpha_positions': (tf.float32, [NUM_RES, 3]),
'beta_mask': (tf.int64, [NUM_RES, 1]),
'beta_positions': (tf.float32, [NUM_RES, 3]),
'between_segment_residues': (tf.int64, [NUM_RES, 1]),
'chain_name': (tf.string, [1]),
'deletion_probability': (tf.float32, [NUM_RES, 1]),
'domain_name': (tf.string, [1]),
'gap_matrix': (tf.float32, [NUM_RES, NUM_RES, 1]),
'hhblits_profile': (tf.float32, [NUM_RES, 22]),
'hmm_profile': (tf.float32, [NUM_RES, 30]),
'key': (tf.string, [1]),
'mutual_information': (tf.float32, [NUM_RES, NUM_RES, 1]),
'non_gapped_profile': (tf.float32, [NUM_RES, 21]),
'num_alignments': (tf.int64, [NUM_RES, 1]),
'num_effective_alignments': (tf.float32, [1]),
'phi_angles': (tf.float32, [NUM_RES, 1]),
'phi_mask': (tf.int64, [NUM_RES, 1]),
'profile': (tf.float32, [NUM_RES, 21]),
'profile_with_prior': (tf.float32, [NUM_RES, 22]),
'profile_with_prior_without_gaps': (tf.float32, [NUM_RES, 21]),
'pseudo_bias': (tf.float32, [NUM_RES, 22]),
'pseudo_frob': (tf.float32, [NUM_RES, NUM_RES, 1]),
'pseudolikelihood': (tf.float32, [NUM_RES, NUM_RES, 484]),
'psi_angles': (tf.float32, [NUM_RES, 1]),
'psi_mask': (tf.int64, [NUM_RES, 1]),
'residue_index': (tf.int64, [NUM_RES, 1]),
'resolution': (tf.float32, [1]),
'reweighted_profile': (tf.float32, [NUM_RES, 22]),
'sec_structure': (tf.int64, [NUM_RES, 8]),
'sec_structure_mask': (tf.int64, [NUM_RES, 1]),
'seq_length': (tf.int64, [NUM_RES, 1]),
'sequence': (tf.string, [1]),
'solv_surf': (tf.float32, [NUM_RES, 1]),
'solv_surf_mask': (tf.int64, [NUM_RES, 1]),
'superfamily': (tf.string, [1]),
}
FEATURE_TYPES = {k: v[0] for k, v in FEATURES.items()}
FEATURE_SIZES = {k: v[1] for k, v in FEATURES.items()}
def shape(feature_name, num_residues, features=None):
"""Get the shape for the given feature name.
Args:
feature_name: String identifier for the feature. If the feature name ends
      with "_unnormalized", this suffix is stripped off.
num_residues: The number of residues in the current domain - some elements
of the shape can be dynamic and will be replaced by this value.
features: A feature_name to (tf_dtype, shape) lookup; defaults to FEATURES.
Returns:
    List of ints representing the tensor size.
"""
features = features or FEATURES
if feature_name.endswith('_unnormalized'):
feature_name = feature_name[:-13]
unused_dtype, raw_sizes = features[feature_name]
replacements = {NUM_RES: num_residues}
sizes = [replacements.get(dimension, dimension) for dimension in raw_sizes]
return sizes
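# Illustrative examples, not part of the original module:
#   shape('profile', num_residues=120)              -> [120, 21]
#   shape('gap_matrix', num_residues=120)           -> [120, 120, 1]
#   shape('profile_unnormalized', num_residues=120) -> [120, 21]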
def dim(feature_name):
"""Determine the type of feature.
Args:
feature_name: String identifier for the feature to lookup. If the feature
      name ends with "_unnormalized", this suffix is stripped off.
Returns:
A FeatureType enum describing whether the feature is of size num_res or
num_res * num_res.
Raises:
ValueError: If the feature is of an unknown type.
"""
if feature_name.endswith('_unnormalized'):
feature_name = feature_name[:-13]
num_dims = len(FEATURE_SIZES[feature_name])
if num_dims == 1:
return FeatureType.ZERO_DIM
elif num_dims == 2 and FEATURE_SIZES[feature_name][0] == NUM_RES:
return FeatureType.ONE_DIM
elif num_dims == 3 and FEATURE_SIZES[feature_name][0] == NUM_RES:
return FeatureType.TWO_DIM
else:
raise ValueError('Expect feature sizes to be 2 or 3, got %i' %
len(FEATURE_SIZES[feature_name]))
def _concat_or_zeros(tensor_list, axis, tensor_shape, name):
"""Concatenates the tensors if given, otherwise returns a tensor of zeros."""
if tensor_list:
return tf.concat(tensor_list, axis=axis, name=name)
return tf.zeros(tensor_shape, name=name + '_zeros')
def parse_tfexample(raw_data, features):
"""Read a single TF Example proto and return a subset of its features.
Args:
raw_data: A serialized tf.Example proto.
features: A dictionary of features, mapping string feature names to a tuple
(dtype, shape). This dictionary should be a subset of
protein_features.FEATURES (or the dictionary itself for all features).
Returns:
A dictionary of features mapping feature names to features. Only the given
features are returned, all other ones are filtered out.
"""
feature_map = {
k: tf.io.FixedLenSequenceFeature(shape=(), dtype=v[0], allow_missing=True)
for k, v in features.items()
}
parsed_features = tf.io.parse_single_example(raw_data, feature_map)
# Find out what is the number of sequences and the number of alignments.
num_residues = tf.cast(parsed_features['seq_length'][0], dtype=tf.int32)
# Reshape the tensors according to the sequence length and num alignments.
for k, v in parsed_features.items():
new_shape = shape(feature_name=k, num_residues=num_residues)
# Make sure the feature we are reshaping is not empty.
assert_non_empty = tf.assert_greater(
tf.size(v), 0, name='assert_%s_non_empty' % k,
message='The feature %s is not set in the tf.Example. Either do not '
'request the feature or use a tf.Example that has the feature set.' % k)
with tf.control_dependencies([assert_non_empty]):
parsed_features[k] = tf.reshape(v, new_shape, name='reshape_%s' % k)
return parsed_features
def create_tf_dataset(tf_record_filename, features):
  """Creates an instance of tf.data.Dataset backed by a protein TFRecord file.
Args:
tf_record_filename: A string with filename of the TFRecord file.
features: A list of strings of feature names to be returned in the dataset.
Returns:
A tf.data.Dataset object. Its items are dictionaries from feature names to
feature values.
"""
# Make sure these features are always read.
required_features = ['aatype', 'sequence', 'seq_length']
features = list(set(features) | set(required_features))
features = {name: FEATURES[name] for name in features}
tf_dataset = tf.data.TFRecordDataset(filenames=[tf_record_filename])
tf_dataset = tf_dataset.map(lambda raw: parse_tfexample(raw, features))
return tf_dataset
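# Illustrative usage sketch, not part of the original module; the TFRecord path
# and feature list are hypothetical:
#   dataset = create_tf_dataset(
#       tf_record_filename='/tmp/casp13_eval.tfrecord',
#       features=['profile', 'gap_matrix', 'beta_positions', 'beta_mask'])
# Items are dicts mapping feature names to tensors shaped as in FEATURES, plus
# the always-read 'aatype', 'sequence' and 'seq_length'.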
def normalize_from_stats_file(
features, stats_file_path, feature_normalization, copy_unnormalized=None):
"""Normalizes the features set in the feature_normalization by the norm stats.
Args:
features: A dictionary mapping feature names to feature tensors.
stats_file_path: A string with the path of the statistics JSON file.
feature_normalization: A dictionary specifying the normalization type for
each input feature. Acceptable values are 'std' and 'none'. If not
      specified, defaults to 'none'. Any extra features that are not present in
features will be ignored.
copy_unnormalized: A list of features whose unnormalized copy should be
added. For any feature F in this list a feature F + "_unnormalized" will
be added in the output dictionary containing the unnormalized feature.
This is useful if you have a feature you want to have both in
desired_features (normalized) and also in desired_targets (unnormalized).
See convert_to_legacy_proteins_dataset_format for more details.
Returns:
A dictionary mapping features names to feature tensors. The ones that were
specified in feature_normalization will be normalized.
Raises:
ValueError: If an unknown normalization mode is used.
"""
with tf.io.gfile.GFile(stats_file_path, 'r') as f:
norm_stats = json.loads(f.read())
if not copy_unnormalized:
copy_unnormalized = []
# We need this unnormalized in convert_to_legacy_proteins_dataset_format.
copy_unnormalized.append('num_alignments')
for feature in copy_unnormalized:
if feature in features:
features[feature + '_unnormalized'] = features[feature]
range_epsilon = 1e-12
for key, value in features.items():
if key not in feature_normalization or feature_normalization[key] == 'none':
pass
elif feature_normalization[key] == 'std':
value = tf.cast(value, dtype=tf.float32)
train_mean = tf.cast(norm_stats['mean'][key], dtype=tf.float32)
train_range = tf.sqrt(tf.cast(norm_stats['var'][key], dtype=tf.float32))
value -= train_mean
value = tf.where(
train_range > range_epsilon, value / train_range, value)
features[key] = value
else:
raise ValueError('Unknown normalization mode %s for feature %s.'
% (feature_normalization[key], key))
return features
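# Illustrative sketch of the stats-file layout consumed above. This structure
# is an assumption inferred from the norm_stats['mean'][key] and
# norm_stats['var'][key] lookups; the values are made up:
#   {
#     "mean": {"num_alignments": 1000.0, "profile": 0.05},
#     "var": {"num_alignments": 250000.0, "profile": 0.01}
#   }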
def convert_to_legacy_proteins_dataset_format(
    features, desired_features, desired_scalars, desired_targets):
  """Converts the output of tf.data.Dataset to the legacy format.
Args:
features: A dictionary mapping feature names to feature tensors.
desired_features: A list with the names of the desired features. These will
be filtered out of features and returned in one of the inputs_1d or
inputs_2d. The features concatenated in `inputs_1d`, `inputs_2d` will be
concatenated in the same order as they were given in `desired_features`.
desired_scalars: A list naming the desired scalars. These will
be filtered out of features and returned in scalars. If features contain
an unnormalized version of a desired scalar, it will be used.
desired_targets: A list naming the desired targets. These will
be filtered out of features and returned in targets. If features contain
an unnormalized version of a desired target, it will be used.
Returns:
A _ProteinDescription namedtuple consisting of:
sequence_length: A scalar int32 tensor with the sequence length.
      key: A string tensor with the sequence key, or empty if not set.
sequences: A string tensor with the protein sequence.
inputs_1d: All 1D features in a single tensor of shape
[num_res, 1d_channels].
inputs_2d: All 2D features in a single tensor of shape
[num_res, num_res, 2d_channels].
inputs_2d_diagonal: All 2D diagonal features in a single tensor of shape
[num_res, num_res, 2d_diagonal_channels]. If no diagonal features found
in features, the tensor will be set to inputs_2d.
      crops: An int32 tensor with the crop positions. If not set in features,
it will be set to [0, num_res, 0, num_res].
scalars: All requested scalar tensors in a list.
targets: All requested target tensors in a list.
Raises:
ValueError: If the feature size is invalid.
"""
tensors_1d = []
tensors_2d = []
tensors_2d_diagonal = []
for key in desired_features:
# Determine if the feature is 1D or 2D.
feature_dim = dim(key)
if feature_dim == FeatureType.ONE_DIM:
tensors_1d.append(tf.cast(features[key], dtype=tf.float32))
elif feature_dim == FeatureType.TWO_DIM:
if key not in features:
if not(key + '_cropped' in features and key + '_diagonal' in features):
raise ValueError(
'The 2D feature %s is not in the features dictionary and neither '
'are its cropped and diagonal versions.' % key)
else:
tensors_2d.append(
tf.cast(features[key + '_cropped'], dtype=tf.float32))
tensors_2d_diagonal.append(
tf.cast(features[key + '_diagonal'], dtype=tf.float32))
else:
tensors_2d.append(tf.cast(features[key], dtype=tf.float32))
else:
raise ValueError('Unexpected FeatureType returned: %s' % str(feature_dim))
# Determine num_res from the sequence as seq_length was possibly normalized.
num_res = tf.strings.length(features['sequence'])[0]
# Concatenate feature tensors into a single tensor
inputs_1d = _concat_or_zeros(
tensors_1d, axis=1, tensor_shape=[num_res, 0],
name='inputs_1d_concat')
inputs_2d = _concat_or_zeros(
tensors_2d, axis=2, tensor_shape=[num_res, num_res, 0],
name='inputs_2d_concat')
if tensors_2d_diagonal:
# The legacy dataset outputs the two diagonal crops stacked
    # A1, B1, C1, A2, B2, C2. So convert from the A1, A2, B1, B2, C1, C2 format.
diagonal_crops1 = [t[:, :, :(t.shape[2] // 2)] for t in tensors_2d_diagonal]
diagonal_crops2 = [t[:, :, (t.shape[2] // 2):] for t in tensors_2d_diagonal]
inputs_2d_diagonal = tf.concat(diagonal_crops1 + diagonal_crops2, axis=2)
else:
inputs_2d_diagonal = inputs_2d
sequence = features['sequence']
sequence_key = features.get('key', tf.constant(['']))[0]
if 'crops' in features:
crops = features['crops']
else:
crops = tf.stack([0, tf.shape(sequence)[0], 0, tf.shape(sequence)[0]])
scalar_tensors = []
for key in desired_scalars:
scalar_tensors.append(features.get(key + '_unnormalized', features[key]))
target_tensors = []
for key in desired_targets:
target_tensors.append(features.get(key + '_unnormalized', features[key]))
scalar_class = collections.namedtuple('_ScalarClass', desired_scalars)
target_class = collections.namedtuple('_TargetClass', desired_targets)
return _ProteinDescription(
sequence_lengths=num_res,
key=sequence_key,
sequences=sequence,
inputs_1d=inputs_1d,
inputs_2d=inputs_2d,
inputs_2d_diagonal=inputs_2d_diagonal,
crops=crops,
scalars=scalar_class(*scalar_tensors),
targets=target_class(*target_tensors))
|
deepmind-research-master
|
alphafold_casp13/contacts_dataset.py
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Combines predictions by pasting."""
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
import six
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from alphafold_casp13 import distogram_io
from alphafold_casp13 import parsers
flags.DEFINE_string("pickle_input_dir", None,
"Directory to read pickle distance histogram files from.")
flags.DEFINE_string("output_dir", None, "Directory to write chain RR files to.")
flags.DEFINE_string("tfrecord_path", "",
"If provided, construct the average weighted by number of "
"alignments.")
flags.DEFINE_string("crop_sizes", "64,128,256", "The crop sizes to use.")
flags.DEFINE_integer("crop_step", 32, "The step size for cropping.")
FLAGS = flags.FLAGS
def generate_domains(target, sequence, crop_sizes, crop_step):
  """Generate crop domain definitions for the given target sequence."""
logging.info("Generating crop domains for target %s", target)
windows = [int(x) for x in crop_sizes.split(",")]
num_residues = len(sequence)
domains = []
domains.append({"name": target, "description": (1, num_residues)})
for window in windows:
starts = list(range(0, num_residues - window, crop_step))
# Append a last crop to ensure we get all the way to the end of the
# sequence, even when num_residues - window is not divisible by crop_step.
if num_residues >= window:
starts += [num_residues - window]
for start in starts:
name = "%s-l%i_s%i" % (target, window, start)
domains.append({"name": name, "description": (start + 1, start + window)})
return domains
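def _example_generate_domains():
  """Illustrative sketch only; not part of the original tool.

  For a hypothetical 100-residue target with a single crop size of 64 and a
  step of 32, the generated descriptions are (1, 100) for the full chain plus
  the crops (1, 64), (33, 96) and (37, 100); the final crop is appended so the
  tail of the sequence is always covered.
  """
  return generate_domains(
      target="T0000", sequence="A" * 100, crop_sizes="64", crop_step=32)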
def get_weights(path):
"""Fetch all the weights from a TFRecord."""
if not path:
return {}
logging.info("Getting weights from %s", path)
weights = {}
record_iterator = tf.python_io.tf_record_iterator(path=path)
for serialized_tfexample in record_iterator:
example = tf.train.Example()
example.ParseFromString(serialized_tfexample)
domain_name = six.ensure_str(
example.features.feature["domain_name"].bytes_list.value[0])
weights[domain_name] = float(
example.features.feature["num_alignments"].int64_list.value[0])
logging.info("Weight %s: %d", domain_name, weights[domain_name])
logging.info("Loaded %d weights", len(weights))
return weights
def paste_distance_histograms(
input_dir, output_dir, weights, crop_sizes, crop_step):
"""Paste together distograms for given domains of given targets and write.
  Domain distance histograms are 'pasted', meaning they are substituted
directly into the contact map. The order is determined by the order in the
domain definition file.
Args:
input_dir: String, path to directory containing chain and domain-level
distogram files.
    output_dir: String, path to directory to write out chain-level distogram
files.
weights: A dictionary with weights.
crop_sizes: The crop sizes.
crop_step: The step size for cropping.
Raises:
ValueError: if histogram parameters don't match.
"""
tf.io.gfile.makedirs(output_dir)
targets = tf.io.gfile.glob(os.path.join(input_dir, "*.pickle"))
targets = [os.path.splitext(os.path.basename(t))[0] for t in targets]
targets = set([t.split("-")[0] for t in targets])
logging.info("Pasting distance histograms for %d targets", len(targets))
for target in sorted(targets):
logging.info("%s as chain", target)
chain_pickle_path = os.path.join(input_dir, "%s.pickle" % target)
distance_histogram_dict = parsers.parse_distance_histogram_dict(
chain_pickle_path)
combined_cmap = np.array(distance_histogram_dict["probs"])
# Make the counter map 1-deep but still rank 3.
counter_map = np.ones_like(combined_cmap[:, :, 0:1])
sequence = distance_histogram_dict["sequence"]
target_domains = generate_domains(
target=target, sequence=sequence, crop_sizes=crop_sizes,
crop_step=crop_step)
# Paste in each domain.
for domain in sorted(target_domains, key=lambda x: x["name"]):
if domain["name"] == target:
logging.info("Skipping %s as domain", target)
continue
if "," in domain["description"]:
logging.info("Skipping multisegment domain %s",
domain["name"])
continue
crop_start, crop_end = domain["description"]
domain_pickle_path = os.path.join(input_dir, "%s.pickle" % domain["name"])
weight = weights.get(domain["name"], 1e9)
logging.info("Pasting %s: %d-%d. weight: %f", domain_pickle_path,
crop_start, crop_end, weight)
domain_distance_histogram_dict = parsers.parse_distance_histogram_dict(
domain_pickle_path)
for field in ["num_bins", "min_range", "max_range"]:
if domain_distance_histogram_dict[field] != distance_histogram_dict[
field]:
raise ValueError("Field {} does not match {} {}".format(
field,
domain_distance_histogram_dict[field],
distance_histogram_dict[field]))
weight_matrix_size = crop_end - crop_start + 1
weight_matrix = np.ones(
(weight_matrix_size, weight_matrix_size), dtype=np.float32) * weight
combined_cmap[crop_start - 1:crop_end, crop_start - 1:crop_end, :] += (
domain_distance_histogram_dict["probs"] *
np.expand_dims(weight_matrix, 2))
counter_map[crop_start - 1:crop_end,
crop_start - 1:crop_end, 0] += weight_matrix
# Broadcast across the histogram bins.
combined_cmap /= counter_map
# Write out full-chain cmap for folding.
output_chain_pickle_path = os.path.join(output_dir,
"{}.pickle".format(target))
logging.info("Writing to %s", output_chain_pickle_path)
distance_histogram_dict["probs"] = combined_cmap
distance_histogram_dict["target"] = target
# Save the distogram pickle file.
distogram_io.save_distance_histogram_from_dict(
output_chain_pickle_path, distance_histogram_dict)
# Compute the contact map and save it as an RR file.
contact_probs = distogram_io.contact_map_from_distogram(
distance_histogram_dict)
rr_path = os.path.join(output_dir, "%s.rr" % target)
distogram_io.save_rr_file(
filename=rr_path,
probs=contact_probs,
domain=target,
sequence=distance_histogram_dict["sequence"])
def main(argv):
del argv # Unused.
flags.mark_flag_as_required("pickle_input_dir")
weights = get_weights(FLAGS.tfrecord_path)
paste_distance_histograms(
FLAGS.pickle_input_dir, FLAGS.output_dir, weights, FLAGS.crop_sizes,
FLAGS.crop_step)
if __name__ == "__main__":
app.run(main)
|
deepmind-research-master
|
alphafold_casp13/paste_contact_maps.py
|
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Network for predicting C-beta contacts."""
from absl import logging
import sonnet
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from alphafold_casp13 import asa_output
from alphafold_casp13 import secstruct
from alphafold_casp13 import two_dim_convnet
from alphafold_casp13 import two_dim_resnet
def call_on_tuple(f):
"""Unpacks a tuple input parameter into arguments for a function f.
Mimics tuple unpacking in lambdas, which existed in Python 2 but has been
removed in Python 3.
Args:
f: A function taking multiple arguments.
Returns:
A function equivalent to f accepting a tuple, which is then unpacked.
"""
return lambda args: f(*args)
class ContactsNet(sonnet.AbstractModule):
"""A network to go from sequence to distance histograms."""
def __init__(self,
binary_code_bits,
data_format,
distance_multiplier,
features,
features_forward,
max_range,
min_range,
num_bins,
reshape_layer,
resolution_noise_scale,
scalars,
targets,
network_2d_deep,
torsion_bins=None,
skip_connect=0,
position_specific_bias_size=0,
filters_1d=(),
collapsed_batch_norm=False,
is_ca_feature=False,
asa_multiplier=0.0,
secstruct_multiplier=0.0,
torsion_multiplier=0.0,
name='contacts_net'):
"""Construct position prediction network."""
super(ContactsNet, self).__init__(name=name)
self._filters_1d = filters_1d
self._collapsed_batch_norm = collapsed_batch_norm
self._is_ca_feature = is_ca_feature
self._binary_code_bits = binary_code_bits
self._data_format = data_format
self._distance_multiplier = distance_multiplier
self._features = features
self._features_forward = features_forward
self._max_range = max_range
self._min_range = min_range
self._num_bins = num_bins
self._position_specific_bias_size = position_specific_bias_size
self._reshape_layer = reshape_layer
self._resolution_noise_scale = resolution_noise_scale
self._scalars = scalars
self._torsion_bins = torsion_bins
self._skip_connect = skip_connect
self._targets = targets
self._network_2d_deep = network_2d_deep
self.asa_multiplier = asa_multiplier
self.secstruct_multiplier = secstruct_multiplier
self.torsion_multiplier = torsion_multiplier
with self._enter_variable_scope():
if self.secstruct_multiplier > 0:
self._secstruct = secstruct.Secstruct()
if self.asa_multiplier > 0:
self._asa = asa_output.ASAOutputLayer()
if self._position_specific_bias_size:
self._position_specific_bias = tf.get_variable(
'position_specific_bias',
[self._position_specific_bias_size, self._num_bins or 1],
initializer=tf.zeros_initializer())
  def quant_threshold(self, threshold=8.0):
    """Find the bin that is 8A+: mass below this bin sums to the contact prob.
Args:
threshold: The distance threshold.
Returns:
Index of bin.
"""
# Note that this misuses the max_range as the range.
return int(
(threshold - self._min_range) * self._num_bins / float(self._max_range))
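  # Illustrative arithmetic with assumed settings (not from a real config):
  # with min_range=2.0, max_range=20.0 and num_bins=10, quant_threshold(8.0)
  # returns int((8.0 - 2.0) * 10 / 20.0) == 3, so the contact probability is
  # the summed mass of bins 0, 1 and 2.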
def _build(self, crop_size_x=0, crop_size_y=0, placeholders=None):
"""Puts the network into the graph.
Args:
crop_size_x: Crop a chunk out in one dimension. 0 means no cropping.
crop_size_y: Crop a chunk out in one dimension. 0 means no cropping.
placeholders: A dict containing the placeholders needed.
Returns:
      A Tensor of logits with shape [batch, crop_size_y, crop_size_x, output_dim].
"""
crop_placeholder = placeholders['crop_placeholder']
inputs_1d = placeholders['inputs_1d_placeholder']
if self._is_ca_feature and 'aatype' in self._features:
logging.info('Collapsing aatype to is_ca_feature %s',
inputs_1d.shape.as_list()[-1])
assert inputs_1d.shape.as_list()[-1] <= 21 + (
1 if 'seq_length' in self._features else 0)
inputs_1d = inputs_1d[:, :, 7:8]
logits = self.compute_outputs(
inputs_1d=inputs_1d,
residue_index=placeholders['residue_index_placeholder'],
inputs_2d=placeholders['inputs_2d_placeholder'],
crop_x=crop_placeholder[:, 0:2],
crop_y=crop_placeholder[:, 2:4],
use_on_the_fly_stats=True,
crop_size_x=crop_size_x,
crop_size_y=crop_size_y,
data_format='NHWC', # Force NHWC for evals.
)
return logits
def compute_outputs(self, inputs_1d, residue_index, inputs_2d, crop_x, crop_y,
use_on_the_fly_stats, crop_size_x, crop_size_y,
data_format='NHWC'):
"""Given the inputs for a block, compute the network outputs."""
hidden_1d = inputs_1d
hidden_1d_list = [hidden_1d]
if len(hidden_1d_list) != 1:
hidden_1d = tf.concat(hidden_1d_list, 2)
output_dimension = self._num_bins or 1
if self._distance_multiplier > 0:
output_dimension += 1
logits, activations = self._build_2d_embedding(
hidden_1d=hidden_1d,
residue_index=residue_index,
inputs_2d=inputs_2d,
output_dimension=output_dimension,
use_on_the_fly_stats=use_on_the_fly_stats,
crop_x=crop_x,
crop_y=crop_y,
crop_size_x=crop_size_x, crop_size_y=crop_size_y,
data_format=data_format)
logits = tf.debugging.check_numerics(
logits, 'NaN in resnet activations', name='resnet_activations')
if (self.secstruct_multiplier > 0 or
self.asa_multiplier > 0 or
self.torsion_multiplier > 0):
# Make a 1d embedding by reducing the 2D activations.
# We do this in the x direction and the y direction separately.
collapse_dim = 1
join_dim = -1
embedding_1d = tf.concat(
# First targets are crop_x (axis 2) which we must reduce on axis 1
[tf.concat([tf.reduce_max(activations, axis=collapse_dim),
tf.reduce_mean(activations, axis=collapse_dim)],
axis=join_dim),
# Next targets are crop_y (axis 1) which we must reduce on axis 2
tf.concat([tf.reduce_max(activations, axis=collapse_dim+1),
tf.reduce_mean(activations, axis=collapse_dim+1)],
axis=join_dim)],
axis=collapse_dim) # Join the two crops together.
if self._collapsed_batch_norm:
embedding_1d = tf.contrib.layers.batch_norm(
embedding_1d, is_training=use_on_the_fly_stats,
fused=True, decay=0.999, scope='collapsed_batch_norm',
data_format='NHWC')
for i, nfil in enumerate(self._filters_1d):
embedding_1d = tf.contrib.layers.fully_connected(
embedding_1d,
num_outputs=nfil,
normalizer_fn=(
tf.contrib.layers.batch_norm if self._collapsed_batch_norm
else None),
normalizer_params={'is_training': use_on_the_fly_stats,
'updates_collections': None},
scope='collapsed_embed_%d' % i)
if self.torsion_multiplier > 0:
self.torsion_logits = tf.contrib.layers.fully_connected(
embedding_1d,
num_outputs=self._torsion_bins * self._torsion_bins,
activation_fn=None,
scope='torsion_logits')
self.torsion_output = tf.nn.softmax(self.torsion_logits)
if self.secstruct_multiplier > 0:
self._secstruct.make_layer_new(embedding_1d)
if self.asa_multiplier > 0:
self.asa_logits = self._asa.compute_asa_output(embedding_1d)
return logits
@staticmethod
def _concatenate_2d(hidden_1d, residue_index, hidden_2d, crop_x, crop_y,
binary_code_bits, crop_size_x, crop_size_y):
# Form the pairwise expansion of the 1D embedding
# And the residue offsets and (one) absolute position.
with tf.name_scope('Features2D'):
range_scale = 100.0 # Crude normalization factor.
n = tf.shape(hidden_1d)[1]
# pylint: disable=g-long-lambda
hidden_1d_cropped_y = tf.map_fn(
call_on_tuple(lambda c, h: tf.pad(
h[tf.maximum(0, c[0]):c[1]],
[[tf.maximum(0, -c[0]),
tf.maximum(0, crop_size_y -(n - c[0]))], [0, 0]])),
elems=(crop_y, hidden_1d), dtype=tf.float32,
back_prop=True)
range_n_y = tf.map_fn(
call_on_tuple(lambda ri, c: tf.pad(
ri[tf.maximum(0, c[0]):c[1]],
[[tf.maximum(0, -c[0]),
tf.maximum(0, crop_size_y -(n - c[0]))]])),
elems=(residue_index, crop_y), dtype=tf.int32,
back_prop=False)
hidden_1d_cropped_x = tf.map_fn(
call_on_tuple(lambda c, h: tf.pad(
h[tf.maximum(0, c[0]):c[1]],
[[tf.maximum(0, -c[0]),
tf.maximum(0, crop_size_x -(n - c[0]))], [0, 0]])),
elems=(crop_x, hidden_1d), dtype=tf.float32,
back_prop=True)
range_n_x = tf.map_fn(
call_on_tuple(lambda ri, c: tf.pad(
ri[tf.maximum(0, c[0]):c[1]],
[[tf.maximum(0, -c[0]),
tf.maximum(0, crop_size_x -(n - c[0]))]])),
elems=(residue_index, crop_x), dtype=tf.int32,
back_prop=False)
# pylint: enable=g-long-lambda
n_x = crop_size_x
n_y = crop_size_y
offset = (tf.expand_dims(tf.cast(range_n_x, tf.float32), 1) -
tf.expand_dims(tf.cast(range_n_y, tf.float32), 2)) / range_scale
position_features = [
tf.tile(
tf.reshape(
(tf.cast(range_n_y, tf.float32) - range_scale) / range_scale,
[-1, n_y, 1, 1]), [1, 1, n_x, 1],
name='TileRange'),
tf.tile(
tf.reshape(offset, [-1, n_y, n_x, 1]), [1, 1, 1, 1],
name='TileOffset')
]
channels = 2
if binary_code_bits:
# Binary coding of position.
exp_range_n_y = tf.expand_dims(range_n_y, 2)
bin_y = tf.stop_gradient(
tf.concat([tf.math.floormod(exp_range_n_y // (1 << i), 2)
for i in range(binary_code_bits)], 2))
exp_range_n_x = tf.expand_dims(range_n_x, 2)
bin_x = tf.stop_gradient(
tf.concat([tf.math.floormod(exp_range_n_x // (1 << i), 2)
for i in range(binary_code_bits)], 2))
position_features += [
tf.tile(
tf.expand_dims(tf.cast(bin_y, tf.float32), 2), [1, 1, n_x, 1],
name='TileBinRangey'),
tf.tile(
tf.expand_dims(tf.cast(bin_x, tf.float32), 1), [1, n_y, 1, 1],
name='TileBinRangex')
]
channels += 2 * binary_code_bits
augmentation_features = position_features + [
tf.tile(tf.expand_dims(hidden_1d_cropped_x, 1),
[1, n_y, 1, 1], name='Tile1Dx'),
tf.tile(tf.expand_dims(hidden_1d_cropped_y, 2),
[1, 1, n_x, 1], name='Tile1Dy')]
channels += 2 * hidden_1d.shape.as_list()[-1]
channels += hidden_2d.shape.as_list()[-1]
hidden_2d = tf.concat(
[hidden_2d] + augmentation_features, 3, name='Stack2Dfeatures')
logging.info('2d stacked features are depth %d %s', channels, hidden_2d)
hidden_2d.set_shape([None, None, None, channels])
return hidden_2d
def _build_2d_embedding(self, hidden_1d, residue_index, inputs_2d,
output_dimension, use_on_the_fly_stats, crop_x,
crop_y, crop_size_x, crop_size_y, data_format):
"""Returns NHWC logits and NHWC preactivations."""
logging.info('2d %s %s', inputs_2d, data_format)
# Stack with diagonal has already happened.
inputs_2d_cropped = inputs_2d
features_forward = None
hidden_2d = inputs_2d_cropped
hidden_2d = self._concatenate_2d(
hidden_1d, residue_index, hidden_2d, crop_x, crop_y,
self._binary_code_bits, crop_size_x, crop_size_y)
config_2d_deep = self._network_2d_deep
num_features = hidden_2d.shape.as_list()[3]
if data_format == 'NCHW':
logging.info('NCHW shape deep pre %s', hidden_2d)
hidden_2d = tf.transpose(hidden_2d, perm=[0, 3, 1, 2])
hidden_2d.set_shape([None, num_features, None, None])
logging.info('NCHW shape deep post %s', hidden_2d)
layers_forward = None
if config_2d_deep.extra_blocks:
# Optionally put some extra double-size blocks at the beginning.
with tf.variable_scope('Deep2DExtra'):
hidden_2d = two_dim_resnet.make_two_dim_resnet(
input_node=hidden_2d,
num_residues=None, # Unused
num_features=num_features,
num_predictions=2 * config_2d_deep.num_filters,
num_channels=2 * config_2d_deep.num_filters,
num_layers=config_2d_deep.extra_blocks *
config_2d_deep.num_layers_per_block,
filter_size=3,
batch_norm=config_2d_deep.use_batch_norm,
is_training=use_on_the_fly_stats,
fancy=True,
final_non_linearity=True,
atrou_rates=[1, 2, 4, 8],
data_format=data_format,
dropout_keep_prob=1.0
)
num_features = 2 * config_2d_deep.num_filters
if self._skip_connect:
layers_forward = hidden_2d
if features_forward is not None:
hidden_2d = tf.concat([hidden_2d, features_forward], 1
if data_format == 'NCHW' else 3)
with tf.variable_scope('Deep2D'):
logging.info('2d hidden shape is %s', str(hidden_2d.shape.as_list()))
contact_pre_logits = two_dim_resnet.make_two_dim_resnet(
input_node=hidden_2d,
num_residues=None, # Unused
num_features=num_features,
num_predictions=(config_2d_deep.num_filters
if self._reshape_layer else output_dimension),
num_channels=config_2d_deep.num_filters,
num_layers=config_2d_deep.num_blocks *
config_2d_deep.num_layers_per_block,
filter_size=3,
batch_norm=config_2d_deep.use_batch_norm,
is_training=use_on_the_fly_stats,
fancy=True,
final_non_linearity=self._reshape_layer,
atrou_rates=[1, 2, 4, 8],
data_format=data_format,
dropout_keep_prob=1.0
)
contact_logits = self._output_from_pre_logits(
contact_pre_logits, features_forward, layers_forward,
output_dimension, data_format, crop_x, crop_y, use_on_the_fly_stats)
if data_format == 'NCHW':
contact_pre_logits = tf.transpose(contact_pre_logits, perm=[0, 2, 3, 1])
# Both of these will be NHWC
return contact_logits, contact_pre_logits
def _output_from_pre_logits(self, contact_pre_logits, features_forward,
layers_forward, output_dimension, data_format,
crop_x, crop_y, use_on_the_fly_stats):
"""Given pre-logits, compute the final distogram/contact activations."""
config_2d_deep = self._network_2d_deep
if self._reshape_layer:
in_channels = config_2d_deep.num_filters
concat_features = [contact_pre_logits]
if features_forward is not None:
concat_features.append(features_forward)
in_channels += self._features_forward
if layers_forward is not None:
concat_features.append(layers_forward)
in_channels += 2 * config_2d_deep.num_filters
if len(concat_features) > 1:
contact_pre_logits = tf.concat(concat_features,
1 if data_format == 'NCHW' else 3)
contact_logits = two_dim_convnet.make_conv_layer(
contact_pre_logits,
in_channels=in_channels,
out_channels=output_dimension,
layer_name='output_reshape_1x1h',
filter_size=1,
filter_size_2=1,
non_linearity=False,
batch_norm=config_2d_deep.use_batch_norm,
is_training=use_on_the_fly_stats,
data_format=data_format)
else:
contact_logits = contact_pre_logits
if data_format == 'NCHW':
contact_logits = tf.transpose(contact_logits, perm=[0, 2, 3, 1])
if self._position_specific_bias_size:
# Make 2D pos-specific biases: NHWC.
biases = build_crops_biases(
self._position_specific_bias_size,
self._position_specific_bias, crop_x, crop_y, back_prop=True)
contact_logits += biases
# Will be NHWC.
return contact_logits
def update_crop_fetches(self, fetches):
"""Add auxiliary outputs for a crop to the fetches."""
if self.secstruct_multiplier > 0:
fetches['secstruct_probs'] = self._secstruct.get_q8_probs()
if self.asa_multiplier > 0:
fetches['asa_output'] = self._asa.asa_output
if self.torsion_multiplier > 0:
fetches['torsion_probs'] = self.torsion_output
def build_crops_biases(bias_size, raw_biases, crop_x, crop_y, back_prop):
"""Take the offset-specific biases and reshape them to match current crops.
Args:
bias_size: how many bias variables we're storing.
raw_biases: the bias variable
crop_x: B x 2 array of start/end for the batch
crop_y: B x 2 array of start/end for the batch
back_prop: whether to backprop through the map_fn.
Returns:
Reshaped biases.
"""
# First pad the biases with a copy of the final value to the maximum length.
max_off_diag = tf.reduce_max(
tf.maximum(tf.abs(crop_x[:, 1] - crop_y[:, 0]),
tf.abs(crop_y[:, 1] - crop_x[:, 0])))
padded_bias_size = tf.maximum(bias_size, max_off_diag)
biases = tf.concat(
[raw_biases,
tf.tile(raw_biases[-1:, :],
[padded_bias_size - bias_size, 1])], axis=0)
# Now prepend a mirror image (excluding 0th elt) for below-diagonal.
biases = tf.concat([tf.reverse(biases[1:, :], axis=[0]), biases], axis=0)
# Which diagonal of the full matrix each crop starts on (top left):
start_diag = crop_x[:, 0:1] - crop_y[:, 0:1] # B x 1
crop_size_x = tf.reduce_max(crop_x[:, 1] - crop_x[:, 0])
crop_size_y = tf.reduce_max(crop_y[:, 1] - crop_y[:, 0])
# Relative offset of each row within a crop:
# (off-diagonal decreases as y increases)
increment = tf.expand_dims(-tf.range(0, crop_size_y), 0) # 1 x crop_size_y
# Index of diagonal of first element of each row, flattened.
row_offsets = tf.reshape(start_diag + increment, [-1]) # B*crop_size_y
logging.info('row_offsets %s', row_offsets)
# Make it relative to the start of the biases array. (0-th diagonal is in
# the middle at position padded_bias_size - 1)
row_offsets += padded_bias_size - 1
# Map_fn to build the individual rows.
# B*cropsizey x cropsizex x num_bins
cropped_biases = tf.map_fn(lambda i: biases[i:i+crop_size_x, :],
elems=row_offsets, dtype=tf.float32,
back_prop=back_prop)
logging.info('cropped_biases %s', cropped_biases)
return tf.reshape(
cropped_biases, [-1, crop_size_y, crop_size_x, tf.shape(raw_biases)[-1]])
|
deepmind-research-master
|
alphafold_casp13/contacts_network.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `bernoulli.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import tree
from gated_linear_networks import bernoulli
def _get_dataset(input_size, batch_size=None):
"""Get mock dataset."""
if batch_size:
inputs = jnp.ones([batch_size, input_size])
side_info = jnp.ones([batch_size, input_size])
targets = jnp.ones([batch_size])
else:
inputs = jnp.ones([input_size])
side_info = jnp.ones([input_size])
targets = jnp.ones([])
return inputs, side_info, targets
class GatedLinearNetworkTest(parameterized.TestCase):
# TODO(b/170843789): Factor out common test utilities.
def setUp(self):
super(GatedLinearNetworkTest, self).setUp()
self._name = "test_network"
self._rng = hk.PRNGSequence(jax.random.PRNGKey(42))
self._output_sizes = (4, 5, 6)
self._context_dim = 2
def gln_factory():
return bernoulli.GatedLinearNetwork(
output_sizes=self._output_sizes,
context_dim=self._context_dim,
name=self._name)
def inference_fn(inputs, side_info):
return gln_factory().inference(inputs, side_info)
def batch_inference_fn(inputs, side_info):
return jax.vmap(inference_fn, in_axes=(0, 0))(inputs, side_info)
def update_fn(inputs, side_info, label, learning_rate):
params, predictions, unused_loss = gln_factory().update(
inputs, side_info, label, learning_rate)
return predictions, params
def batch_update_fn(inputs, side_info, label, learning_rate):
predictions, params = jax.vmap(
update_fn, in_axes=(0, 0, 0, None))(inputs, side_info, label,
learning_rate)
avg_params = tree.map_structure(lambda x: jnp.mean(x, axis=0), params)
return predictions, avg_params
# Haiku transform functions.
self._init_fn, inference_fn_ = hk.without_apply_rng(
hk.transform_with_state(inference_fn))
self._batch_init_fn, batch_inference_fn_ = hk.without_apply_rng(
hk.transform_with_state(batch_inference_fn))
_, update_fn_ = hk.without_apply_rng(hk.transform_with_state(update_fn))
_, batch_update_fn_ = hk.without_apply_rng(
hk.transform_with_state(batch_update_fn))
self._inference_fn = jax.jit(inference_fn_)
self._batch_inference_fn = jax.jit(batch_inference_fn_)
self._update_fn = jax.jit(update_fn_)
self._batch_update_fn = jax.jit(batch_update_fn_)
@parameterized.named_parameters(("Online mode", None), ("Batch mode", 3))
def test_shapes(self, batch_size):
"""Test shapes in online and batch regimes."""
if batch_size is None:
init_fn = self._init_fn
inference_fn = self._inference_fn
else:
init_fn = self._batch_init_fn
inference_fn = self._batch_inference_fn
input_size = 10
inputs, side_info, _ = _get_dataset(input_size, batch_size)
input_size = inputs.shape[-1]
# Initialize network.
gln_params, gln_state = init_fn(next(self._rng), inputs, side_info)
# Test shapes of parameters layer-wise.
layer_input_size = input_size
for layer_idx, output_size in enumerate(self._output_sizes):
name = "{}/~/{}_layer_{}".format(self._name, self._name, layer_idx)
weights = gln_params[name]["weights"]
expected_shape = (output_size, 2**self._context_dim, layer_input_size + 1)
self.assertEqual(weights.shape, expected_shape)
layer_input_size = output_size
# Test shape of output.
output_size = sum(self._output_sizes)
predictions, _ = inference_fn(gln_params, gln_state, inputs, side_info)
expected_shape = (batch_size, output_size) if batch_size else (output_size,)
self.assertEqual(predictions.shape, expected_shape)
@parameterized.named_parameters(("Online mode", None), ("Batch mode", 3))
def test_update(self, batch_size):
"""Test network updates in online and batch regimes."""
if batch_size is None:
init_fn = self._init_fn
inference_fn = self._inference_fn
update_fn = self._update_fn
else:
init_fn = self._batch_init_fn
inference_fn = self._batch_inference_fn
update_fn = self._batch_update_fn
input_size = 10
inputs, side_info, targets = _get_dataset(input_size, batch_size)
# Initialize network.
initial_params, gln_state = init_fn(next(self._rng), inputs, side_info)
# Initial predictions.
initial_predictions, _ = inference_fn(initial_params, gln_state, inputs,
side_info)
# Test that params remain valid after consecutive updates.
gln_params = initial_params
for _ in range(3):
(_, gln_params), gln_state = update_fn(
gln_params, gln_state, inputs, side_info, targets, learning_rate=1e-4)
# Check updated weights layer-wise.
for layer_idx in range(len(self._output_sizes)):
name = "{}/~/{}_layer_{}".format(self._name, self._name, layer_idx)
initial_weights = initial_params[name]["weights"]
new_weights = gln_params[name]["weights"]
# Shape consistency.
self.assertEqual(new_weights.shape, initial_weights.shape)
# Check that different weights yield different predictions.
new_predictions, _ = inference_fn(gln_params, gln_state, inputs,
side_info)
self.assertFalse(np.array_equal(new_predictions, initial_predictions))
def test_batch_consistency(self):
"""Test consistency between online and batch updates."""
input_size = 10
batch_size = 3
inputs, side_info, targets = _get_dataset(input_size, batch_size)
# Initialize network.
gln_params, gln_state = self._batch_init_fn(
next(self._rng), inputs, side_info)
test_layer = "{}/~/{}_layer_0".format(self._name, self._name)
for _ in range(10):
# Update on full batch.
(expected_predictions, expected_params), _ = self._batch_update_fn(
gln_params, gln_state, inputs, side_info, targets, learning_rate=1e-3)
# Average updates across batch and check equivalence.
accum_predictions = []
accum_weights = []
for inputs_, side_info_, targets_ in zip(inputs, side_info, targets):
(predictions, params), _ = self._update_fn(
gln_params,
gln_state,
inputs_,
side_info_,
targets_,
learning_rate=1e-3)
accum_predictions.append(predictions)
accum_weights.append(params[test_layer]["weights"])
# Check prediction equivalence.
actual_predictions = np.stack(accum_predictions, axis=0)
np.testing.assert_array_almost_equal(actual_predictions,
expected_predictions)
# Check weight equivalence.
actual_weights = np.mean(np.stack(accum_weights, axis=0), axis=0)
expected_weights = expected_params[test_layer]["weights"]
np.testing.assert_array_almost_equal(actual_weights, expected_weights)
gln_params = expected_params
if __name__ == "__main__":
absltest.main()
|
deepmind-research-master
|
gated_linear_networks/bernoulli_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bernoulli Gated Linear Network."""
from typing import List, Text, Tuple
import chex
import jax
import jax.numpy as jnp
import rlax
import tensorflow_probability as tfp
from gated_linear_networks import base
tfp = tfp.experimental.substrates.jax
tfd = tfp.distributions
Array = chex.Array
GLN_EPS = 0.01
MAX_WEIGHT = 200.
class GatedLinearNetwork(base.GatedLinearNetwork):
"""Bernoulli Gated Linear Network."""
def __init__(self,
output_sizes: List[int],
context_dim: int,
name: Text = "bernoulli_gln"):
"""Initialize a Bernoulli GLN."""
super(GatedLinearNetwork, self).__init__(
output_sizes,
context_dim,
inference_fn=GatedLinearNetwork._inference_fn,
update_fn=GatedLinearNetwork._update_fn,
init=jnp.zeros,
dtype=jnp.float32,
name=name)
def _add_bias(self, inputs):
return jnp.append(inputs, rlax.sigmoid(1.))
@staticmethod
def _inference_fn(
inputs: Array, # [input_size]
side_info: Array, # [side_info_size]
weights: Array, # [2**context_dim, input_size]
hyperplanes: Array, # [context_dim, side_info_size]
hyperplane_bias: Array, # [context_dim]
) -> Array:
"""Inference step for a single Beurnolli neuron."""
weight_index = GatedLinearNetwork._compute_context(side_info, hyperplanes,
hyperplane_bias)
used_weights = weights[weight_index]
inputs = rlax.logit(jnp.clip(inputs, GLN_EPS, 1. - GLN_EPS))
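    # A Bernoulli GLN neuron is a gated geometric mixture: it predicts
    # sigmoid(w . logit(p)), with the weight vector w chosen by the context
    # computed from the side information above.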
prediction = rlax.sigmoid(jnp.dot(used_weights, inputs))
return prediction
@staticmethod
def _update_fn(
inputs: Array, # [input_size]
side_info: Array, # [side_info_size]
weights: Array, # [2**context_dim, num_features]
hyperplanes: Array, # [context_dim, side_info_size]
hyperplane_bias: Array, # [context_dim]
target: Array, # []
learning_rate: float,
) -> Tuple[Array, Array, Array]:
"""Update step for a single Bernoulli neuron."""
def log_loss_fn(inputs, side_info, weights, hyperplanes, hyperplane_bias,
target):
"""Log loss for a single Bernoulli neuron."""
prediction = GatedLinearNetwork._inference_fn(inputs, side_info, weights,
hyperplanes,
hyperplane_bias)
prediction = jnp.clip(prediction, GLN_EPS, 1. - GLN_EPS)
return rlax.log_loss(prediction, target), prediction
grad_log_loss = jax.value_and_grad(log_loss_fn, argnums=2, has_aux=True)
((log_loss, prediction),
dloss_dweights) = grad_log_loss(inputs, side_info, weights, hyperplanes,
hyperplane_bias, target)
delta_weights = learning_rate * dloss_dweights
new_weights = jnp.clip(weights - delta_weights, -MAX_WEIGHT, MAX_WEIGHT)
return new_weights, prediction, log_loss
class LastNeuronAggregator(base.LastNeuronAggregator):
"""Bernoulli last neuron aggregator, implemented by the super class."""
pass
|
deepmind-research-master
|
gated_linear_networks/bernoulli.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `gaussian.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import tree
from gated_linear_networks import gaussian
def _get_dataset(input_size, batch_size=None):
"""Get mock dataset."""
if batch_size:
inputs = jnp.ones([batch_size, input_size, 2])
side_info = jnp.ones([batch_size, input_size])
targets = 0.8 * jnp.ones([batch_size])
else:
inputs = jnp.ones([input_size, 2])
side_info = jnp.ones([input_size])
targets = jnp.ones([])
return inputs, side_info, targets
class UtilsTest(absltest.TestCase):
def test_packing_identity(self):
mu = jnp.array([1., 2., 3., 4., 5.])
sigma_sq = jnp.array([6., 7., 8., 9., 10.])
mu_2, sigma_sq_2 = gaussian._unpack_inputs(
gaussian._pack_inputs(mu, sigma_sq))
np.testing.assert_array_equal(mu, mu_2)
np.testing.assert_array_equal(sigma_sq, sigma_sq_2)
class GatedLinearNetworkTest(parameterized.TestCase):
# TODO(b/170843789): Factor out common test utilities.
def setUp(self):
super(GatedLinearNetworkTest, self).setUp()
self._name = "test_network"
self._rng = hk.PRNGSequence(jax.random.PRNGKey(42))
self._output_sizes = (4, 5, 6)
self._context_dim = 2
self._bias_len = 3
def gln_factory():
return gaussian.GatedLinearNetwork(
output_sizes=self._output_sizes,
context_dim=self._context_dim,
bias_len=self._bias_len,
name=self._name,
)
def inference_fn(inputs, side_info):
return gln_factory().inference(inputs, side_info, 0.5)
def batch_inference_fn(inputs, side_info):
return jax.vmap(inference_fn, in_axes=(0, 0))(inputs, side_info)
def update_fn(inputs, side_info, label, learning_rate):
params, predictions, unused_loss = gln_factory().update(
inputs, side_info, label, learning_rate, 0.5)
return predictions, params
def batch_update_fn(inputs, side_info, label, learning_rate):
predictions, params = jax.vmap(
update_fn, in_axes=(0, 0, 0, None))(
inputs,
side_info,
label,
learning_rate)
avg_params = tree.map_structure(lambda x: jnp.mean(x, axis=0), params)
return predictions, avg_params
# Haiku transform functions.
self._init_fn, inference_fn_ = hk.without_apply_rng(
hk.transform_with_state(inference_fn))
self._batch_init_fn, batch_inference_fn_ = hk.without_apply_rng(
hk.transform_with_state(batch_inference_fn))
_, update_fn_ = hk.without_apply_rng(hk.transform_with_state(update_fn))
_, batch_update_fn_ = hk.without_apply_rng(
hk.transform_with_state(batch_update_fn))
self._inference_fn = jax.jit(inference_fn_)
self._batch_inference_fn = jax.jit(batch_inference_fn_)
self._update_fn = jax.jit(update_fn_)
self._batch_update_fn = jax.jit(batch_update_fn_)
@parameterized.named_parameters(("Online mode", None), ("Batch mode", 3))
def test_shapes(self, batch_size):
"""Test shapes in online and batch regimes."""
if batch_size is None:
init_fn = self._init_fn
inference_fn = self._inference_fn
else:
init_fn = self._batch_init_fn
inference_fn = self._batch_inference_fn
input_size = 10
inputs, side_info, _ = _get_dataset(input_size, batch_size)
# Initialize network.
gln_params, gln_state = init_fn(next(self._rng), inputs, side_info)
# Test shapes of parameters layer-wise.
layer_input_size = input_size
for layer_idx, output_size in enumerate(self._output_sizes):
name = "{}/~/{}_layer_{}".format(self._name, self._name, layer_idx)
weights = gln_params[name]["weights"]
expected_shape = (output_size, 2**self._context_dim,
layer_input_size + self._bias_len)
self.assertEqual(weights.shape, expected_shape)
layer_input_size = output_size
# Test shape of output.
output_size = sum(self._output_sizes)
predictions, _ = inference_fn(gln_params, gln_state, inputs, side_info)
expected_shape = (batch_size, output_size,
2) if batch_size else (output_size, 2)
self.assertEqual(predictions.shape, expected_shape)
@parameterized.named_parameters(("Online mode", None), ("Batch mode", 3))
def test_update(self, batch_size):
"""Test network updates in online and batch regimes."""
if batch_size is None:
init_fn = self._init_fn
inference_fn = self._inference_fn
update_fn = self._update_fn
else:
init_fn = self._batch_init_fn
inference_fn = self._batch_inference_fn
update_fn = self._batch_update_fn
inputs, side_info, targets = _get_dataset(10, batch_size)
# Initialize network.
initial_params, gln_state = init_fn(next(self._rng), inputs, side_info)
# Initial predictions.
initial_predictions, _ = inference_fn(initial_params, gln_state, inputs,
side_info)
# Test that params remain valid after consecutive updates.
gln_params = initial_params
for _ in range(3):
(_, gln_params), _ = update_fn(
gln_params, gln_state, inputs, side_info, targets, learning_rate=1e-4)
# Check updated weights layer-wise.
for layer_idx in range(len(self._output_sizes)):
name = "{}/~/{}_layer_{}".format(self._name, self._name, layer_idx)
initial_weights = initial_params[name]["weights"]
new_weights = gln_params[name]["weights"]
# Shape consistency.
self.assertEqual(new_weights.shape, initial_weights.shape)
# Check that different weights yield different predictions.
new_predictions, _ = inference_fn(gln_params, gln_state, inputs,
side_info)
self.assertFalse(np.array_equal(new_predictions, initial_predictions))
def test_batch_consistency(self):
"""Test consistency between online and batch updates."""
batch_size = 3
inputs, side_info, targets = _get_dataset(10, batch_size)
# Initialize network.
gln_params, gln_state = self._batch_init_fn(
next(self._rng), inputs, side_info)
test_layer = "{}/~/{}_layer_0".format(self._name, self._name)
for _ in range(10):
# Update on full batch.
(expected_predictions, expected_params), _ = self._batch_update_fn(
gln_params, gln_state, inputs, side_info, targets, learning_rate=1e-3)
# Average updates across batch and check equivalence.
accum_predictions = []
accum_weights = []
for inputs_, side_info_, targets_ in zip(inputs, side_info, targets):
(predictions, params), _ = self._update_fn(
gln_params,
gln_state,
inputs_,
side_info_,
targets_,
learning_rate=1e-3)
accum_predictions.append(predictions)
accum_weights.append(params[test_layer]["weights"])
# Check prediction equivalence.
actual_predictions = np.stack(accum_predictions, axis=0)
np.testing.assert_array_almost_equal(actual_predictions,
expected_predictions)
# Check weight equivalence.
actual_weights = np.mean(np.stack(accum_weights, axis=0), axis=0)
expected_weights = expected_params[test_layer]["weights"]
np.testing.assert_array_almost_equal(actual_weights, expected_weights)
gln_params = expected_params
if __name__ == "__main__":
absltest.main()
|
deepmind-research-master
|
gated_linear_networks/gaussian_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for Gated Linear Networks."""
import abc
import collections
import functools
import inspect
from typing import Any, Callable, Optional, Sequence, Tuple
import chex
import haiku as hk
import jax
import jax.numpy as jnp
Array = chex.Array
DType = Any
Initializer = hk.initializers.Initializer
Shape = Sequence[int]
EPS = 1e-12
MIN_ALPHA = 1e-5
def _l2_normalize(x: Array, axis: int) -> Array:
return x / jnp.sqrt(jnp.maximum(jnp.sum(x**2, axis, keepdims=True), EPS))
def _wrapped_fn_argnames(fun):
"""Returns list of argnames of a (possibly wrapped) function."""
return tuple(inspect.signature(fun).parameters)
def _vmap(fun, in_axes=0, out_axes=0, parameters=None):
"""JAX vmap with human-friendly axes."""
def _axes(fun, d):
"""Maps dict {kwarg_i, : val_i} to [None, ..., val_i, ..., None]."""
argnames = _wrapped_fn_argnames(fun) if not parameters else parameters
for key in d:
if key not in argnames:
raise ValueError(f"{key} is not a valid axis.")
return tuple(d.get(key, None) for key in argnames)
in_axes = _axes(fun, in_axes) if isinstance(in_axes, dict) else in_axes
return jax.vmap(fun, in_axes, out_axes)
# Map a neuron-level function across a layer.
_layer_vmap = functools.partial(
_vmap,
in_axes=({
"weights": 0,
"hyperplanes": 0,
"hyperplane_bias": 0,
}))
class NormalizedRandomNormal(hk.initializers.RandomNormal):
"""Random normal initializer with l2-normalization."""
def __init__(self,
stddev: float = 1.,
mean: float = 0.,
normalize_axis: int = 0):
super(NormalizedRandomNormal, self).__init__(stddev, mean)
self._normalize_axis = normalize_axis
def __call__(self, shape: Shape, dtype: DType) -> Array:
if self._normalize_axis >= len(shape):
raise ValueError("Cannot normalize axis {} for ndim = {}.".format(
self._normalize_axis, len(shape)))
weights = super(NormalizedRandomNormal, self).__call__(shape, dtype)
return _l2_normalize(weights, axis=self._normalize_axis)
class ShapeScaledConstant(hk.initializers.Initializer):
"""Initializes with a constant dependent on last dimension of input shape."""
def __call__(self, shape: Shape, dtype: DType) -> jnp.ndarray:
constant = 1. / shape[-1]
return jnp.broadcast_to(constant, shape).astype(dtype)
class LocalUpdateModule(hk.Module):
"""Abstract base class for GLN variants and utils."""
def __init__(self, name: Optional[str] = None):
if hasattr(self, "__call__"):
raise ValueError("Do not implement `__call__` for a LocalUpdateModule." +
" Implement `inference` and `update` instead.")
super(LocalUpdateModule, self).__init__(name)
@abc.abstractmethod
def inference(self, *args, **kwargs):
"""Module inference step."""
@abc.abstractmethod
def update(self, *args, **kwargs):
"""Module update step."""
@property
@abc.abstractmethod
def output_sizes(self) -> Shape:
"""Returns network output sizes."""
class GatedLinearNetwork(LocalUpdateModule):
"""Abstract base class for a multi-layer Gated Linear Network."""
def __init__(self,
output_sizes: Shape,
context_dim: int,
inference_fn: Callable[..., Array],
update_fn: Callable[..., Array],
init: Initializer,
hyp_w_init: Optional[Initializer] = None,
hyp_b_init: Optional[Initializer] = None,
dtype: DType = jnp.float32,
name: str = "gated_linear_network"):
"""Initialize a GatedLinearNetwork as a sequence of GatedLinearLayers."""
super(GatedLinearNetwork, self).__init__(name=name)
self._layers = []
self._output_sizes = output_sizes
for i, output_size in enumerate(self._output_sizes):
layer = _GatedLinearLayer(
output_size=output_size,
context_dim=context_dim,
update_fn=update_fn,
inference_fn=inference_fn,
init=init,
hyp_w_init=hyp_w_init,
hyp_b_init=hyp_b_init,
dtype=dtype,
name=name + "_layer_{}".format(i))
self._layers.append(layer)
self._name = name
@abc.abstractmethod
def _add_bias(self, inputs):
pass
def inference(self, inputs: Array, side_info: Array, *args,
**kwargs) -> Array:
"""GatedLinearNetwork inference."""
predictions_per_layer = []
predictions = inputs
for layer in self._layers:
predictions = self._add_bias(predictions)
predictions = layer.inference(predictions, side_info, *args, **kwargs)
predictions_per_layer.append(predictions)
return jnp.concatenate(predictions_per_layer, axis=0)
def update(self, inputs, side_info, target, learning_rate, *args, **kwargs):
"""GatedLinearNetwork update."""
all_params = []
all_predictions = []
all_losses = []
predictions = inputs
for layer in self._layers:
predictions = self._add_bias(predictions)
# Note: This is correct because returned predictions are pre-update.
params, predictions, log_loss = layer.update(predictions, side_info,
target, learning_rate, *args,
**kwargs)
all_params.append(params)
all_predictions.append(predictions)
all_losses.append(log_loss)
new_params = dict(collections.ChainMap(*all_params))
predictions = jnp.concatenate(all_predictions, axis=0)
log_loss = jnp.concatenate(all_losses, axis=0)
return new_params, predictions, log_loss
@property
def output_sizes(self):
return self._output_sizes
@staticmethod
def _compute_context(
side_info: Array, # [side_info_size]
hyperplanes: Array, # [context_dim, side_info_size]
hyperplane_bias: Array, # [context_dim]
) -> Array:
# Index weights by side information.
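    # Each of the context_dim hyperplanes contributes one bit: the side info
    # is projected onto the hyperplane and thresholded against its bias. The
    # bits are read as a binary number indexing one of the 2**context_dim
    # weight vectors, e.g. context_dim = 2 and bits = [1, 0] gives
    # weight_index = 1*1 + 0*2 = 1.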
context_dim = hyperplane_bias.shape[0]
proj = jnp.dot(hyperplanes, side_info)
bits = (proj > hyperplane_bias).astype(jnp.int32)
weight_index = jnp.sum(
bits *
jnp.array([2**i for i in range(context_dim)])) if context_dim else 0
return weight_index
class _GatedLinearLayer(LocalUpdateModule):
"""A single layer of a Gated Linear Network."""
def __init__(self,
output_size: int,
context_dim: int,
inference_fn: Callable[..., Array],
update_fn: Callable[..., Array],
init: Initializer,
hyp_w_init: Optional[Initializer] = None,
hyp_b_init: Optional[Initializer] = None,
dtype: DType = jnp.float32,
name: str = "gated_linear_layer"):
"""Initialize a GatedLinearLayer."""
super(_GatedLinearLayer, self).__init__(name=name)
self._output_size = output_size
self._context_dim = context_dim
self._inference_fn = inference_fn
self._update_fn = update_fn
self._init = init
self._hyp_w_init = hyp_w_init
self._hyp_b_init = hyp_b_init
self._dtype = dtype
self._name = name
def _get_weights(self, input_size):
"""Get (or initialize) weight parameters."""
weights = hk.get_parameter(
"weights",
shape=(self._output_size, 2**self._context_dim, input_size),
dtype=self._dtype,
init=self._init,
)
return weights
def _get_hyperplanes(self, side_info_size):
"""Get (or initialize) hyperplane weights and bias."""
hyp_w_init = self._hyp_w_init or NormalizedRandomNormal(
stddev=1., normalize_axis=1)
hyperplanes = hk.get_state(
"hyperplanes",
shape=(self._output_size, self._context_dim, side_info_size),
init=hyp_w_init)
hyp_b_init = self._hyp_b_init or hk.initializers.RandomNormal(stddev=0.05)
hyperplane_bias = hk.get_state(
"hyperplane_bias",
shape=(self._output_size, self._context_dim),
init=hyp_b_init)
return hyperplanes, hyperplane_bias
def inference(self, inputs: Array, side_info: Array, *args,
**kwargs) -> Array:
"""GatedLinearLayer inference."""
# Initialize layer weights.
weights = self._get_weights(inputs.shape[0])
# Initialize fixed random hyperplanes.
side_info_size = side_info.shape[0]
hyperplanes, hyperplane_bias = self._get_hyperplanes(side_info_size)
# Perform layer-wise inference by mapping along output_size (num_neurons).
layer_inference = _layer_vmap(self._inference_fn)
predictions = layer_inference(inputs, side_info, weights, hyperplanes,
hyperplane_bias, *args, **kwargs)
return predictions
def update(self, inputs: Array, side_info: Array, target: Array,
learning_rate: float, *args,
**kwargs) -> Tuple[Array, Array, Array]:
"""GatedLinearLayer update."""
# Fetch layer weights.
weights = self._get_weights(inputs.shape[0])
# Fetch fixed random hyperplanes.
side_info_size = side_info.shape[0]
hyperplanes, hyperplane_bias = self._get_hyperplanes(side_info_size)
# Perform layer-wise update by mapping along output_size (num_neurons).
layer_update = _layer_vmap(self._update_fn)
new_weights, predictions, log_loss = layer_update(inputs, side_info,
weights, hyperplanes,
hyperplane_bias, target,
learning_rate, *args,
**kwargs)
assert new_weights.shape == weights.shape
params = {self.module_name: {"weights": new_weights}}
return params, predictions, log_loss
@property
def output_sizes(self):
return self._output_size
class Mutator(LocalUpdateModule):
"""Abstract base class for GLN Mutators."""
def __init__(
self,
network_factory: Callable[..., LocalUpdateModule],
name: str,
):
super(Mutator, self).__init__(name=name)
self._network = network_factory()
self._name = name
@property
def output_sizes(self):
return self._network.output_sizes
class LastNeuronAggregator(Mutator):
"""Last neuron aggregator: network output is read from the last neuron."""
def __init__(
self,
network_factory: Callable[..., LocalUpdateModule],
name: str = "last_neuron",
):
super(LastNeuronAggregator, self).__init__(network_factory, name)
if self._network.output_sizes[-1] != 1:
raise ValueError(
"LastNeuronAggregator requires the last GLN layer to have"
" output_size = 1.")
def inference(self, *args, **kwargs) -> Array:
predictions = self._network.inference(*args, **kwargs)
return predictions[-1]
def update(self, *args, **kwargs) -> Tuple[Array, Array, Array]:
params_t, predictions_tm1, loss_tm1 = self._network.update(*args, **kwargs)
return params_t, predictions_tm1[-1], loss_tm1[-1]
|
deepmind-research-master
|
gated_linear_networks/base.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gaussian Gated Linear Network."""
from typing import Callable, List, Text, Tuple
import chex
import jax
import jax.numpy as jnp
import tensorflow_probability as tfp
from gated_linear_networks import base
tfp = tfp.experimental.substrates.jax
tfd = tfp.distributions
Array = chex.Array
MIN_SIGMA_SQ_AGGREGATOR = 0.5
MAX_SIGMA_SQ = 1e5
MAX_WEIGHT = 1e3
MIN_WEIGHT = -1e3
def _unpack_inputs(inputs: Array) -> Tuple[Array, Array]:
inputs = jnp.atleast_2d(inputs)
chex.assert_rank(inputs, 2)
(mu, sigma_sq) = [jnp.squeeze(x, 1) for x in jnp.hsplit(inputs, 2)]
return mu, sigma_sq
def _pack_inputs(mu: Array, sigma_sq: Array) -> Array:
mu = jnp.atleast_1d(mu)
sigma_sq = jnp.atleast_1d(sigma_sq)
chex.assert_rank([mu, sigma_sq], 1)
return jnp.vstack([mu, sigma_sq]).T
class GatedLinearNetwork(base.GatedLinearNetwork):
"""Gaussian Gated Linear Network."""
def __init__(
self,
output_sizes: List[int],
context_dim: int,
bias_len: int = 3,
bias_max_mu: float = 1.,
bias_sigma_sq: float = 1.,
name: Text = "gaussian_gln"):
"""Initialize a Gaussian GLN."""
super(GatedLinearNetwork, self).__init__(
output_sizes,
context_dim,
inference_fn=GatedLinearNetwork._inference_fn,
update_fn=GatedLinearNetwork._update_fn,
init=base.ShapeScaledConstant(),
dtype=jnp.float64,
name=name)
self._bias_len = bias_len
self._bias_max_mu = bias_max_mu
self._bias_sigma_sq = bias_sigma_sq
def _add_bias(self, inputs):
mu = jnp.linspace(-1. * self._bias_max_mu, self._bias_max_mu,
self._bias_len)
sigma_sq = self._bias_sigma_sq * jnp.ones_like(mu)
bias = _pack_inputs(mu, sigma_sq)
return jnp.concatenate([inputs, bias], axis=0)
@staticmethod
def _inference_fn(
inputs: Array, # [input_size, 2]
side_info: Array, # [side_info_size]
weights: Array, # [2**context_dim, input_size]
hyperplanes: Array, # [context_dim, side_info_size]
hyperplane_bias: Array, # [context_dim]
min_sigma_sq: float,
) -> Array:
"""Inference step for a single Gaussian neuron."""
mu_in, sigma_sq_in = _unpack_inputs(inputs)
weight_index = GatedLinearNetwork._compute_context(side_info, hyperplanes,
hyperplane_bias)
used_weights = weights[weight_index]
# This projection operation is differentiable and affects the gradients.
used_weights = GatedLinearNetwork._project_weights(inputs, used_weights,
min_sigma_sq)
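    # The output is a precision-weighted product of Gaussians:
    #   1 / sigma_sq_out = sum_i w_i / sigma_sq_in_i
    #   mu_out = sigma_sq_out * sum_i (w_i * mu_in_i / sigma_sq_in_i)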
sigma_sq_out = 1. / jnp.sum(used_weights / sigma_sq_in)
mu_out = sigma_sq_out * jnp.sum((used_weights * mu_in) / sigma_sq_in)
prediction = jnp.hstack((mu_out, sigma_sq_out))
return prediction
@staticmethod
  def _project_weights(inputs: Array, # [input_size, 2]
weights: Array, # [2**context_dim, num_features]
min_sigma_sq: float) -> Array:
"""Implements hard projection."""
# This projection should be performed before the sigma related ones.
weights = jnp.minimum(jnp.maximum(MIN_WEIGHT, weights), MAX_WEIGHT)
_, sigma_sq_in = _unpack_inputs(inputs)
lambda_in = 1. / sigma_sq_in
sigma_sq_out = 1. / weights.dot(lambda_in)
    # If sigma_sq_out < min_sigma_sq, linearly project w so that
    # sigma_sq_out = min_sigma_sq.
weights = jnp.where(
sigma_sq_out < min_sigma_sq, weights - lambda_in *
(1. / sigma_sq_out - 1. / min_sigma_sq) / jnp.sum(lambda_in**2),
weights)
    # If sigma_sq_out > MAX_SIGMA_SQ, linearly project w so that
    # sigma_sq_out = MAX_SIGMA_SQ.
weights = jnp.where(
sigma_sq_out > MAX_SIGMA_SQ, weights - lambda_in *
(1. / sigma_sq_out - 1. / MAX_SIGMA_SQ) / jnp.sum(lambda_in**2),
weights)
return weights
@staticmethod
def _update_fn(
      inputs: Array, # [input_size, 2]
side_info: Array, # [side_info_size]
weights: Array, # [2**context_dim, num_features]
hyperplanes: Array, # [context_dim, side_info_size]
hyperplane_bias: Array, # [context_dim]
target: Array, # []
learning_rate: float,
min_sigma_sq: float, # needed for inference (weight projection)
) -> Tuple[Array, Array, Array]:
"""Update step for a single Gaussian neuron."""
def log_loss_fn(inputs, side_info, weights, hyperplanes, hyperplane_bias,
target):
"""Log loss for a single Gaussian neuron."""
prediction = GatedLinearNetwork._inference_fn(inputs, side_info, weights,
hyperplanes,
hyperplane_bias,
min_sigma_sq)
mu, sigma_sq = prediction.T
loss = -tfd.Normal(mu, jnp.sqrt(sigma_sq)).log_prob(target)
return loss, prediction
grad_log_loss = jax.value_and_grad(log_loss_fn, argnums=2, has_aux=True)
(log_loss,
prediction), dloss_dweights = grad_log_loss(inputs, side_info, weights,
hyperplanes, hyperplane_bias,
target)
delta_weights = learning_rate * dloss_dweights
return weights - delta_weights, prediction, log_loss
class ConstantInputSigma(base.Mutator):
"""Input pre-processing by concatenating a constant sigma^2."""
def __init__(
self,
network_factory: Callable[..., GatedLinearNetwork],
input_sigma_sq: float,
name: Text = "constant_input_sigma",
):
super(ConstantInputSigma, self).__init__(network_factory, name)
self._input_sigma_sq = input_sigma_sq
def inference(self, inputs, *args, **kwargs):
"""ConstantInputSigma inference."""
chex.assert_rank(inputs, 1)
sigma_sq = self._input_sigma_sq * jnp.ones_like(inputs)
return self._network.inference(_pack_inputs(inputs, sigma_sq), *args,
**kwargs)
def update(self, inputs, *args, **kwargs):
"""ConstantInputSigma update."""
chex.assert_rank(inputs, 1)
sigma_sq = self._input_sigma_sq * jnp.ones_like(inputs)
return self._network.update(_pack_inputs(inputs, sigma_sq), *args, **kwargs)
class LastNeuronAggregator(base.LastNeuronAggregator):
"""Gaussian last neuron aggregator, implemented by the super class."""
pass
|
deepmind-research-master
|
gated_linear_networks/gaussian.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `utils.py`."""
from absl.testing import absltest
import haiku as hk
import jax
import numpy as np
from gated_linear_networks.examples import utils
class MeanStdEstimator(absltest.TestCase):
def test_statistics(self):
num_features = 100
feature_size = 3
samples = np.random.normal(
loc=5., scale=2., size=(num_features, feature_size))
true_mean = np.mean(samples, axis=0)
true_std = np.std(samples, axis=0)
def tick_(sample):
return utils.MeanStdEstimator()(sample)
init_fn, apply_fn = hk.without_apply_rng(hk.transform_with_state(tick_))
tick = jax.jit(apply_fn)
params, state = init_fn(rng=None, sample=samples[0])
for sample in samples:
(mean, std), state = tick(params, state, sample)
np.testing.assert_array_almost_equal(mean, true_mean, decimal=5)
np.testing.assert_array_almost_equal(std, true_std, decimal=5)
if __name__ == '__main__':
absltest.main()
|
deepmind-research-master
|
gated_linear_networks/examples/utils_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Online MNIST classification example with Bernoulli GLN."""
from absl import app
from absl import flags
import haiku as hk
import jax
import jax.numpy as jnp
import rlax
from gated_linear_networks import bernoulli
from gated_linear_networks.examples import utils
MAX_TRAIN_STEPS = flags.DEFINE_integer(
name='max_train_steps',
default=None,
help='Maximum number of training steps to perform (None=no limit)',
)
# Small example network, achieves ~95% test set accuracy =======================
# Network parameters.
NUM_LAYERS = flags.DEFINE_integer(
name='num_layers',
default=2,
help='Number of network layers',
)
NEURONS_PER_LAYER = flags.DEFINE_integer(
name='neurons_per_layer',
default=100,
help='Number of neurons per layer',
)
CONTEXT_DIM = flags.DEFINE_integer(
name='context_dim',
default=1,
help='Context vector size',
)
# Learning rate schedule.
MAX_LR = flags.DEFINE_float(
name='max_lr',
default=0.003,
help='Maximum learning rate',
)
LR_CONSTANT = flags.DEFINE_float(
name='lr_constant',
default=1.0,
help='Learning rate constant parameter',
)
LR_DECAY = flags.DEFINE_float(
name='lr_decay',
default=0.1,
help='Learning rate decay parameter',
)
# Logging parameters.
EVALUATE_EVERY = flags.DEFINE_integer(
name='evaluate_every',
default=1000,
help='Number of training steps per evaluation epoch',
)
def main(unused_argv):
# Load MNIST dataset =========================================================
mnist_data, info = utils.load_deskewed_mnist(
name='mnist', batch_size=-1, with_info=True)
num_classes = info.features['label'].num_classes
(train_images, train_labels) = (mnist_data['train']['image'],
mnist_data['train']['label'])
(test_images, test_labels) = (mnist_data['test']['image'],
mnist_data['test']['label'])
# Build a (binary) GLN classifier ============================================
def network_factory():
def gln_factory():
output_sizes = [NEURONS_PER_LAYER.value] * NUM_LAYERS.value + [1]
return bernoulli.GatedLinearNetwork(
output_sizes=output_sizes, context_dim=CONTEXT_DIM.value)
return bernoulli.LastNeuronAggregator(gln_factory)
def extract_features(image):
mean, stddev = utils.MeanStdEstimator()(image)
standardized_img = (image - mean) / (stddev + 1.)
inputs = rlax.sigmoid(standardized_img)
side_info = standardized_img
return inputs, side_info
def inference_fn(image, *args, **kwargs):
inputs, side_info = extract_features(image)
return network_factory().inference(inputs, side_info, *args, **kwargs)
def update_fn(image, *args, **kwargs):
inputs, side_info = extract_features(image)
return network_factory().update(inputs, side_info, *args, **kwargs)
init_, inference_ = hk.without_apply_rng(
hk.transform_with_state(inference_fn))
_, update_ = hk.without_apply_rng(hk.transform_with_state(update_fn))
# Map along class dimension to create a one-vs-all classifier ================
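  # Each class gets its own copy of the binary GLN parameters: vmapping over
  # the leading class axis trains num_classes independent one-vs-all models,
  # and `accuracy` below picks the class with the highest probability.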
@jax.jit
def init(dummy_image, key):
"""One-vs-all classifier init fn."""
dummy_images = jnp.stack([dummy_image] * num_classes, axis=0)
keys = jax.random.split(key, num_classes)
return jax.vmap(init_, in_axes=(0, 0))(keys, dummy_images)
@jax.jit
def accuracy(params, state, image, label):
"""One-vs-all classifier inference fn."""
fn = jax.vmap(inference_, in_axes=(0, 0, None))
predictions, unused_state = fn(params, state, image)
return (jnp.argmax(predictions) == label).astype(jnp.float32)
@jax.jit
def update(params, state, step, image, label):
"""One-vs-all classifier update fn."""
# Learning rate schedules.
learning_rate = jnp.minimum(
MAX_LR.value, LR_CONSTANT.value / (1. + LR_DECAY.value * step))
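    # Worked example with the default flags (max_lr=0.003, lr_constant=1.0,
    # lr_decay=0.1): the schedule stays clipped at 0.003 until roughly step
    # 3.3k, after which it decays like ~10/step (about 0.001 at step 10k).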
# Update weights and report log-loss.
targets = hk.one_hot(jnp.asarray(label), num_classes)
fn = jax.vmap(update_, in_axes=(0, 0, None, 0, None))
out = fn(params, state, image, targets, learning_rate)
(params, unused_predictions, log_loss), state = out
return (jnp.mean(log_loss), params), state
# Train on train split =======================================================
dummy_image = train_images[0]
params, state = init(dummy_image, jax.random.PRNGKey(42))
for step, (image, label) in enumerate(zip(train_images, train_labels), 1):
(unused_loss, params), state = update(
params,
state,
step,
image,
label,
)
# Evaluate on test split ===================================================
if not step % EVALUATE_EVERY.value:
batch_accuracy = jax.vmap(accuracy, in_axes=(None, None, 0, 0))
accuracies = batch_accuracy(params, state, test_images, test_labels)
total_accuracy = float(jnp.mean(accuracies))
# Report statistics.
print({
'step': step,
'accuracy': float(total_accuracy),
})
if MAX_TRAIN_STEPS.value is not None and step >= MAX_TRAIN_STEPS.value:
return
if __name__ == '__main__':
app.run(main)
|
deepmind-research-master
|
gated_linear_networks/examples/bernoulli_mnist.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Haiku modules for feature processing."""
import copy
from typing import Tuple
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from scipy.ndimage import interpolation
import tensorflow_datasets as tfds
Array = chex.Array
def _moments(image):
"""Compute the first and second moments of a given image."""
c0, c1 = np.mgrid[:image.shape[0], :image.shape[1]]
total_image = np.sum(image)
m0 = np.sum(c0 * image) / total_image
m1 = np.sum(c1 * image) / total_image
m00 = np.sum((c0 - m0)**2 * image) / total_image
m11 = np.sum((c1 - m1)**2 * image) / total_image
m01 = np.sum((c0 - m0) * (c1 - m1) * image) / total_image
mu_vector = np.array([m0, m1])
covariance_matrix = np.array([[m00, m01], [m01, m11]])
return mu_vector, covariance_matrix
def _deskew(image):
"""Image deskew."""
c, v = _moments(image)
alpha = v[0, 1] / v[0, 0]
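  # alpha is the shear that removes the slant: the (row, column) covariance
  # of the pixel mass divided by its row variance.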
affine = np.array([[1, 0], [alpha, 1]])
ocenter = np.array(image.shape) / 2.0
offset = c - np.dot(affine, ocenter)
return interpolation.affine_transform(image, affine, offset=offset)
def _deskew_dataset(dataset):
"""Dataset deskew."""
deskewed = copy.deepcopy(dataset)
for k, before in dataset.items():
images = before["image"]
num_images = images.shape[0]
after = np.stack([_deskew(i) for i in np.squeeze(images, axis=-1)], axis=0)
deskewed[k]["image"] = np.reshape(after, (num_images, -1))
return deskewed
def load_deskewed_mnist(*a, **k):
"""Returns deskewed MNIST numpy dataset."""
mnist_data, info = tfds.load(*a, **k)
mnist_data = tfds.as_numpy(mnist_data)
deskewed_data = _deskew_dataset(mnist_data)
return deskewed_data, info
class MeanStdEstimator(hk.Module):
"""Online mean and standard deviation estimator using Welford's algorithm."""
def __call__(self, sample: jax.Array) -> Tuple[Array, Array]:
if len(sample.shape) > 1:
raise ValueError("sample must be a rank 0 or 1 DeviceArray.")
count = hk.get_state("count", shape=(), dtype=jnp.int32, init=jnp.zeros)
mean = hk.get_state(
"mean", shape=sample.shape, dtype=jnp.float32, init=jnp.zeros)
m2 = hk.get_state(
"m2", shape=sample.shape, dtype=jnp.float32, init=jnp.zeros)
count += 1
delta = sample - mean
mean += delta / count
delta_2 = sample - mean
m2 += delta * delta_2
hk.set_state("count", count)
hk.set_state("mean", mean)
hk.set_state("m2", m2)
stddev = jnp.sqrt(m2 / count)
return mean, stddev
|
deepmind-research-master
|
gated_linear_networks/examples/utils.py
|
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GAN modules."""
import collections
import math
import sonnet as snt
import tensorflow.compat.v1 as tf
from cs_gan import utils
class GAN(object):
"""Standard generative adversarial network setup.
The aim of the generator is to generate samples which fool a discriminator.
Does not make any assumptions about the discriminator and generator loss
functions.
Trained module components:
* discriminator
* generator
For the standard GAN algorithm, generator_inputs is a vector of noise (either
Gaussian or uniform).
"""
def __init__(self, discriminator, generator,
num_z_iters=None, z_step_size=None,
z_project_method=None, optimisation_cost_weight=None):
"""Constructs the module.
Args:
discriminator: The discriminator network. A sonnet module. See `nets.py`.
generator: The generator network. A sonnet module. For examples, see
`nets.py`.
num_z_iters: an integer, the number of latent optimisation steps.
      z_step_size: a float, the latent optimisation step size.
z_project_method: the method for projecting latent after optimisation,
a string from {'norm', 'clip'}.
optimisation_cost_weight: a float, how much to penalise the distance of z
moved by latent optimisation.
"""
self._discriminator = discriminator
self.generator = generator
self.num_z_iters = num_z_iters
self.z_project_method = z_project_method
if z_step_size:
self._log_step_size_module = snt.TrainableVariable(
[],
initializers={'w': tf.constant_initializer(math.log(z_step_size))})
self.z_step_size = tf.exp(self._log_step_size_module())
self._optimisation_cost_weight = optimisation_cost_weight
def connect(self, data, generator_inputs):
"""Connects the components and returns the losses, outputs and debug ops.
Args:
      data: a `tf.Tensor`: `[batch_size, ...]`. There are no constraints on
        the rank of this tensor, but it has to be compatible with the shapes
        expected by the discriminator.
      generator_inputs: a `tf.Tensor`: `[g_in_batch_size, ...]`. It does not
        have to have the same batch size as the `data` tensor. There are no
        constraints on the rank of this tensor, but it has to be compatible
        with the shapes the generator network supports as inputs.
Returns:
      A `ModelOutputs` instance.
"""
samples, optimised_z = utils.optimise_and_sample(
generator_inputs, self, data, is_training=True)
optimisation_cost = utils.get_optimisation_cost(generator_inputs,
optimised_z)
# Pass in the labels to the discriminator in case we are using a
# discriminator which makes use of labels. The labels can be None.
disc_data_logits = self._discriminator(data)
disc_sample_logits = self._discriminator(samples)
disc_data_loss = utils.cross_entropy_loss(
disc_data_logits,
tf.ones(tf.shape(disc_data_logits[:, 0]), dtype=tf.int32))
disc_sample_loss = utils.cross_entropy_loss(
disc_sample_logits,
tf.zeros(tf.shape(disc_sample_logits[:, 0]), dtype=tf.int32))
disc_loss = disc_data_loss + disc_sample_loss
generator_loss = utils.cross_entropy_loss(
disc_sample_logits,
tf.ones(tf.shape(disc_sample_logits[:, 0]), dtype=tf.int32))
optimization_components = self._build_optimization_components(
discriminator_loss=disc_loss, generator_loss=generator_loss,
optimisation_cost=optimisation_cost)
debug_ops = {}
debug_ops['disc_data_loss'] = disc_data_loss
debug_ops['disc_sample_loss'] = disc_sample_loss
debug_ops['disc_loss'] = disc_loss
debug_ops['gen_loss'] = generator_loss
debug_ops['opt_cost'] = optimisation_cost
if hasattr(self, 'z_step_size'):
debug_ops['z_step_size'] = self.z_step_size
return utils.ModelOutputs(
optimization_components, debug_ops)
def gen_loss_fn(self, data, samples):
"""Generator loss as latent optimisation's error function."""
del data
disc_sample_logits = self._discriminator(samples)
generator_loss = utils.cross_entropy_loss(
disc_sample_logits,
tf.ones(tf.shape(disc_sample_logits[:, 0]), dtype=tf.int32))
return generator_loss
def _build_optimization_components(
self, generator_loss=None, discriminator_loss=None,
optimisation_cost=None):
"""Create the optimization components for this module."""
discriminator_vars = _get_and_check_variables(self._discriminator)
generator_vars = _get_and_check_variables(self.generator)
if hasattr(self, '_log_step_size_module'):
step_vars = _get_and_check_variables(self._log_step_size_module)
generator_vars += step_vars
optimization_components = collections.OrderedDict()
optimization_components['disc'] = utils.OptimizationComponent(
discriminator_loss, discriminator_vars)
if self._optimisation_cost_weight:
generator_loss += self._optimisation_cost_weight * optimisation_cost
optimization_components['gen'] = utils.OptimizationComponent(
generator_loss, generator_vars)
return optimization_components
def get_variables(self):
disc_vars = _get_and_check_variables(self._discriminator)
gen_vars = _get_and_check_variables(self.generator)
return disc_vars, gen_vars
def _get_and_check_variables(module):
module_variables = module.get_all_variables()
if not module_variables:
raise ValueError(
'Module {} has no variables! Variables needed for training.'.format(
module.module_name))
# TensorFlow optimizers require lists to be passed in.
return list(module_variables)
|
deepmind-research-master
|
cs_gan/gan.py
|
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from cs_gan import cs
from cs_gan import file_utils
from cs_gan import utils
tfd = tfp.distributions
flags.DEFINE_string(
'mode', 'recons', 'Model mode.')
flags.DEFINE_integer(
'num_training_iterations', 10000000,
'Number of training iterations.')
flags.DEFINE_integer(
'batch_size', 64, 'Training batch size.')
flags.DEFINE_integer(
'num_measurements', 25, 'The number of measurements')
flags.DEFINE_integer(
'num_latents', 100, 'The number of latents')
flags.DEFINE_integer(
'num_z_iters', 3, 'The number of latent optimisation steps.')
flags.DEFINE_float(
'z_step_size', 0.01, 'Step size for latent optimisation.')
flags.DEFINE_string(
'z_project_method', 'norm', 'The method to project z.')
flags.DEFINE_integer(
'summary_every_step', 1000,
'The interval at which to log debug ops.')
flags.DEFINE_integer(
'export_every', 10,
'The interval at which to export samples.')
flags.DEFINE_string(
    'dataset', 'mnist', 'The dataset used for learning (cifar|mnist).')
flags.DEFINE_float('learning_rate', 1e-4, 'Learning rate.')
flags.DEFINE_string(
'output_dir', '/tmp/cs_gan/cs', 'Location where to save output files.')
FLAGS = flags.FLAGS
# Log info level (for Hooks).
tf.logging.set_verbosity(tf.logging.INFO)
def main(argv):
del argv
utils.make_output_dir(FLAGS.output_dir)
data_processor = utils.DataProcessor()
images = utils.get_train_dataset(data_processor, FLAGS.dataset,
FLAGS.batch_size)
  logging.info('Learning rate: %g', FLAGS.learning_rate)
# Construct optimizers.
optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
# Create the networks and models.
generator = utils.get_generator(FLAGS.dataset)
metric_net = utils.get_metric_net(FLAGS.dataset, FLAGS.num_measurements)
model = cs.CS(metric_net, generator,
FLAGS.num_z_iters, FLAGS.z_step_size, FLAGS.z_project_method)
prior = utils.make_prior(FLAGS.num_latents)
generator_inputs = prior.sample(FLAGS.batch_size)
model_output = model.connect(images, generator_inputs)
optimization_components = model_output.optimization_components
debug_ops = model_output.debug_ops
reconstructions, _ = utils.optimise_and_sample(
generator_inputs, model, images, is_training=False)
global_step = tf.train.get_or_create_global_step()
update_op = optimizer.minimize(
optimization_components.loss,
var_list=optimization_components.vars,
global_step=global_step)
sample_exporter = file_utils.FileExporter(
os.path.join(FLAGS.output_dir, 'reconstructions'))
# Hooks.
debug_ops['it'] = global_step
# Abort training on Nans.
nan_hook = tf.train.NanTensorHook(optimization_components.loss)
# Step counter.
  step_counter_hook = tf.train.StepCounterHook()
checkpoint_saver_hook = tf.train.CheckpointSaverHook(
checkpoint_dir=utils.get_ckpt_dir(FLAGS.output_dir), save_secs=10 * 60)
loss_summary_saver_hook = tf.train.SummarySaverHook(
save_steps=FLAGS.summary_every_step,
output_dir=os.path.join(FLAGS.output_dir, 'summaries'),
summary_op=utils.get_summaries(debug_ops))
  hooks = [checkpoint_saver_hook, nan_hook, step_counter_hook,
loss_summary_saver_hook]
# Start training.
with tf.train.MonitoredSession(hooks=hooks) as sess:
logging.info('starting training')
for i in range(FLAGS.num_training_iterations):
sess.run(update_op)
if i % FLAGS.export_every == 0:
reconstructions_np, data_np = sess.run([reconstructions, images])
# Create an object which gets data and does the processing.
data_np = data_processor.postprocess(data_np)
reconstructions_np = data_processor.postprocess(reconstructions_np)
sample_exporter.save(reconstructions_np, 'reconstructions')
sample_exporter.save(data_np, 'data')
if __name__ == '__main__':
app.run(main)
|
deepmind-research-master
|
cs_gan/main_cs.py
|
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
from cs_gan import file_utils
from cs_gan import gan
from cs_gan import image_metrics
from cs_gan import utils
flags.DEFINE_integer(
'num_training_iterations', 1200000,
'Number of training iterations.')
flags.DEFINE_string(
'ode_mode', 'rk4', 'Integration method.')
flags.DEFINE_integer(
'batch_size', 64, 'Training batch size.')
flags.DEFINE_float(
    'grad_reg_weight', 0.02, 'Weight of the discriminator gradient regulariser.')
flags.DEFINE_string(
'opt_name', 'gd', 'Name of the optimiser (gd|adam).')
flags.DEFINE_bool(
    'schedule_lr', True, 'Whether to use a piecewise learning rate schedule.')
flags.DEFINE_bool(
'reg_first_grad_only', True, 'Whether only to regularise the first grad.')
flags.DEFINE_integer(
'num_latents', 128, 'The number of latents')
flags.DEFINE_integer(
'summary_every_step', 1000,
'The interval at which to log debug ops.')
flags.DEFINE_integer(
'image_metrics_every_step', 1000,
'The interval at which to log (expensive) image metrics.')
flags.DEFINE_integer(
'export_every', 10,
'The interval at which to export samples.')
# Use 50k to reproduce scores from the paper. Default to 10k here to avoid the
# runtime error caused by too large graph with 50k samples on some machines.
flags.DEFINE_integer(
'num_eval_samples', 10000,
'The number of samples used to evaluate FID/IS.')
flags.DEFINE_string(
'dataset', 'cifar', 'The dataset used for learning (cifar|mnist).')
flags.DEFINE_string(
'output_dir', '/tmp/ode_gan/gan', 'Location where to save output files.')
flags.DEFINE_float('disc_lr', 4e-2, 'Discriminator Learning rate.')
flags.DEFINE_float('gen_lr', 4e-2, 'Generator Learning rate.')
flags.DEFINE_bool(
'run_real_data_metrics', False,
'Whether or not to run image metrics on real data.')
flags.DEFINE_bool(
'run_sample_metrics', True,
'Whether or not to run image metrics on samples.')
FLAGS = flags.FLAGS
# Log info level (for Hooks).
tf.logging.set_verbosity(tf.logging.INFO)
def _copy_vars(v_list):
"""Copy variables in v_list."""
t_list = []
for v in v_list:
t_list.append(tf.identity(v))
return t_list
def _restore_vars(v_list, t_list):
"""Restore variables in v_list from t_list."""
ops = []
for v, t in zip(v_list, t_list):
ops.append(v.assign(t))
return ops
def _scale_vars(s, v_list):
"""Scale all variables in v_list by s."""
return [s * v for v in v_list]
def _acc_grads(g_sum, g_w, g):
"""Accumulate gradients in g, weighted by g_w."""
return [g_sum_i + g_w * g_i for g_sum_i, g_i in zip(g_sum, g)]
def _compute_reg_grads(gen_grads, disc_vars):
"""Compute gradients norm (this is an upper-bpund of the full-batch norm)."""
gen_norm = tf.accumulate_n([tf.reduce_sum(u * u) for u in gen_grads])
disc_reg_grads = tf.gradients(gen_norm, disc_vars)
return disc_reg_grads
def run_model(prior, images, model, disc_reg_weight):
"""Run the model with new data and samples.
Args:
prior: the noise source as the generator input.
images: images sampled from dataset.
model: a GAN model defined in gan.py.
    disc_reg_weight: regularisation weight for discriminator gradients.
Returns:
    debug_ops: statistics from the model, see gan.py for more details.
disc_grads: discriminator gradients.
gen_grads: generator gradients.
"""
generator_inputs = prior.sample(FLAGS.batch_size)
model_output = model.connect(images, generator_inputs)
optimization_components = model_output.optimization_components
disc_grads = tf.gradients(
optimization_components['disc'].loss,
optimization_components['disc'].vars)
gen_grads = tf.gradients(
optimization_components['gen'].loss,
optimization_components['gen'].vars)
if disc_reg_weight > 0.0:
reg_grads = _compute_reg_grads(gen_grads,
optimization_components['disc'].vars)
disc_grads = _acc_grads(disc_grads, disc_reg_weight, reg_grads)
debug_ops = model_output.debug_ops
return debug_ops, disc_grads, gen_grads
def update_model(model, disc_grads, gen_grads, disc_opt, gen_opt,
global_step, update_scale):
"""Update model with gradients."""
disc_vars, gen_vars = model.get_variables()
with tf.control_dependencies(gen_grads + disc_grads):
disc_update_op = disc_opt.apply_gradients(
zip(_scale_vars(update_scale, disc_grads),
disc_vars))
gen_update_op = gen_opt.apply_gradients(
zip(_scale_vars(update_scale, gen_grads),
gen_vars),
global_step=global_step)
update_op = tf.group([disc_update_op, gen_update_op])
return update_op
def main(argv):
del argv
utils.make_output_dir(FLAGS.output_dir)
data_processor = utils.DataProcessor()
# Compute the batch-size multiplier
if FLAGS.ode_mode == 'rk2':
batch_mul = 2
elif FLAGS.ode_mode == 'rk4':
batch_mul = 4
else:
batch_mul = 1
images = utils.get_train_dataset(data_processor, FLAGS.dataset,
int(FLAGS.batch_size * batch_mul))
image_splits = tf.split(images, batch_mul)
  logging.info('Generator learning rate: %g', FLAGS.gen_lr)
  logging.info('Discriminator learning rate: %g', FLAGS.disc_lr)
global_step = tf.train.get_or_create_global_step()
# Construct optimizers.
if FLAGS.opt_name == 'adam':
disc_opt = tf.train.AdamOptimizer(FLAGS.disc_lr, beta1=0.5, beta2=0.999)
gen_opt = tf.train.AdamOptimizer(FLAGS.gen_lr, beta1=0.5, beta2=0.999)
elif FLAGS.opt_name == 'gd':
if FLAGS.schedule_lr:
gd_disc_lr = tf.train.piecewise_constant(
global_step,
values=[FLAGS.disc_lr / 4., FLAGS.disc_lr, FLAGS.disc_lr / 2.],
boundaries=[500, 400000])
gd_gen_lr = tf.train.piecewise_constant(
global_step,
values=[FLAGS.gen_lr / 4., FLAGS.gen_lr, FLAGS.gen_lr / 2.],
boundaries=[500, 400000])
else:
gd_disc_lr = FLAGS.disc_lr
gd_gen_lr = FLAGS.gen_lr
disc_opt = tf.train.GradientDescentOptimizer(gd_disc_lr)
gen_opt = tf.train.GradientDescentOptimizer(gd_gen_lr)
else:
    raise ValueError('Unknown optimiser name!')
# Create the networks and models.
generator = utils.get_generator(FLAGS.dataset)
metric_net = utils.get_metric_net(FLAGS.dataset, use_sn=False)
model = gan.GAN(metric_net, generator)
prior = utils.make_prior(FLAGS.num_latents)
# Setup ODE parameters.
if FLAGS.ode_mode == 'rk2':
ode_grad_weights = [0.5, 0.5]
step_scale = [1.0]
elif FLAGS.ode_mode == 'rk4':
ode_grad_weights = [1. / 6., 1. / 3., 1. / 3., 1. / 6.]
step_scale = [0.5, 0.5, 1.]
elif FLAGS.ode_mode == 'euler':
# Euler update
ode_grad_weights = [1.0]
step_scale = []
else:
raise ValueError('Unknown ODE mode!')
# Extra steps for RK updates.
num_extra_steps = len(step_scale)
if FLAGS.reg_first_grad_only:
first_reg_weight = FLAGS.grad_reg_weight / ode_grad_weights[0]
other_reg_weight = 0.0
else:
first_reg_weight = FLAGS.grad_reg_weight
other_reg_weight = FLAGS.grad_reg_weight
debug_ops, disc_grads, gen_grads = run_model(prior, image_splits[0],
model, first_reg_weight)
disc_vars, gen_vars = model.get_variables()
final_disc_grads = _scale_vars(ode_grad_weights[0], disc_grads)
final_gen_grads = _scale_vars(ode_grad_weights[0], gen_grads)
restore_ops = []
# Preparing for further RK steps.
if num_extra_steps > 0:
# copy the variables before they are changed by update_op
saved_disc_vars = _copy_vars(disc_vars)
saved_gen_vars = _copy_vars(gen_vars)
# Enter RK loop.
with tf.control_dependencies(saved_disc_vars + saved_gen_vars):
step_deps = []
for i_step in range(num_extra_steps):
with tf.control_dependencies(step_deps):
# Compute gradient steps for intermediate updates.
update_op = update_model(
model, disc_grads, gen_grads, disc_opt, gen_opt,
None, step_scale[i_step])
with tf.control_dependencies([update_op]):
_, disc_grads, gen_grads = run_model(
prior, image_splits[i_step + 1], model, other_reg_weight)
            # Accumulate gradients for final update.
final_disc_grads = _acc_grads(final_disc_grads,
ode_grad_weights[i_step + 1],
disc_grads)
final_gen_grads = _acc_grads(final_gen_grads,
ode_grad_weights[i_step + 1],
gen_grads)
# Make new restore_op for each step.
restore_ops = []
restore_ops += _restore_vars(disc_vars, saved_disc_vars)
restore_ops += _restore_vars(gen_vars, saved_gen_vars)
step_deps = restore_ops
with tf.control_dependencies(restore_ops):
update_op = update_model(
model, final_disc_grads, final_gen_grads, disc_opt, gen_opt,
global_step, 1.0)
samples = generator(prior.sample(FLAGS.batch_size), is_training=False)
# Get data needed to compute FID. We also compute metrics on
# real data as a sanity check and as a reference point.
eval_real_data = utils.get_real_data_for_eval(FLAGS.num_eval_samples,
FLAGS.dataset,
split='train')
def sample_fn(x):
return utils.optimise_and_sample(x, module=model,
data=None, is_training=False)[0]
if FLAGS.run_sample_metrics:
sample_metrics = image_metrics.get_image_metrics_for_samples(
eval_real_data, sample_fn,
prior, data_processor,
num_eval_samples=FLAGS.num_eval_samples)
else:
sample_metrics = {}
if FLAGS.run_real_data_metrics:
data_metrics = image_metrics.get_image_metrics(
eval_real_data, eval_real_data)
else:
data_metrics = {}
sample_exporter = file_utils.FileExporter(
os.path.join(FLAGS.output_dir, 'samples'))
# Hooks.
debug_ops['it'] = global_step
# Abort training on Nans.
nan_disc_hook = tf.train.NanTensorHook(debug_ops['disc_loss'])
nan_gen_hook = tf.train.NanTensorHook(debug_ops['gen_loss'])
# Step counter.
  step_counter_hook = tf.train.StepCounterHook()
checkpoint_saver_hook = tf.train.CheckpointSaverHook(
checkpoint_dir=utils.get_ckpt_dir(FLAGS.output_dir), save_secs=10 * 60)
loss_summary_saver_hook = tf.train.SummarySaverHook(
save_steps=FLAGS.summary_every_step,
output_dir=os.path.join(FLAGS.output_dir, 'summaries'),
summary_op=utils.get_summaries(debug_ops))
metrics_summary_saver_hook = tf.train.SummarySaverHook(
save_steps=FLAGS.image_metrics_every_step,
output_dir=os.path.join(FLAGS.output_dir, 'summaries'),
summary_op=utils.get_summaries(sample_metrics))
  hooks = [checkpoint_saver_hook, metrics_summary_saver_hook,
           nan_disc_hook, nan_gen_hook, step_counter_hook,
           loss_summary_saver_hook]
# Start training.
with tf.train.MonitoredSession(hooks=hooks) as sess:
logging.info('starting training')
for key, value in sess.run(data_metrics).items():
      logging.info('%s: %f', key, value)
for i in range(FLAGS.num_training_iterations):
sess.run(update_op)
if i % FLAGS.export_every == 0:
samples_np, data_np = sess.run([samples, image_splits[0]])
# Create an object which gets data and does the processing.
data_np = data_processor.postprocess(data_np)
samples_np = data_processor.postprocess(samples_np)
sample_exporter.save(samples_np, 'samples')
sample_exporter.save(data_np, 'data')
if __name__ == '__main__':
tf.enable_resource_variables()
app.run(main)
|
deepmind-research-master
|
cs_gan/main_ode.py
|
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GAN modules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sonnet as snt
import tensorflow.compat.v1 as tf
from cs_gan import utils
class CS(object):
"""Compressed Sensing Module."""
def __init__(self, metric_net, generator,
num_z_iters, z_step_size, z_project_method):
"""Constructs the module.
Args:
metric_net: the measurement network.
generator: The generator network. A sonnet module. For examples, see
`nets.py`.
num_z_iters: an integer, the number of latent optimisation steps.
      z_step_size: a float, the latent optimisation step size.
z_project_method: the method for projecting latent after optimisation,
a string from {'norm', 'clip'}.
"""
self._measure = metric_net
self.generator = generator
self.num_z_iters = num_z_iters
self.z_project_method = z_project_method
self._log_step_size_module = snt.TrainableVariable(
[],
initializers={'w': tf.constant_initializer(math.log(z_step_size))})
self.z_step_size = tf.exp(self._log_step_size_module())
def connect(self, data, generator_inputs):
"""Connects the components and returns the losses, outputs and debug ops.
Args:
      data: a `tf.Tensor`: `[batch_size, ...]`. There are no constraints on
        the rank of this tensor, but it has to be compatible with the shapes
        expected by the discriminator.
      generator_inputs: a `tf.Tensor`: `[g_in_batch_size, ...]`. It does not
        have to have the same batch size as the `data` tensor. There are no
        constraints on the rank of this tensor, but it has to be compatible
        with the shapes the generator network supports as inputs.
Returns:
      A `ModelOutputs` instance.
"""
samples, optimised_z = utils.optimise_and_sample(
generator_inputs, self, data, is_training=True)
optimisation_cost = utils.get_optimisation_cost(generator_inputs,
optimised_z)
debug_ops = {}
initial_samples = self.generator(generator_inputs, is_training=True)
generator_loss = tf.reduce_mean(self.gen_loss_fn(data, samples))
# compute the RIP loss
# (\sqrt{F(x_1 - x_2)^2} - \sqrt{(x_1 - x_2)^2})^2
# as a triplet loss for 3 pairs of images.
r1 = self._get_rip_loss(samples, initial_samples)
r2 = self._get_rip_loss(samples, data)
r3 = self._get_rip_loss(initial_samples, data)
rip_loss = tf.reduce_mean((r1 + r2 + r3) / 3.0)
total_loss = generator_loss + rip_loss
optimization_components = self._build_optimization_components(
generator_loss=total_loss)
debug_ops['rip_loss'] = rip_loss
debug_ops['recons_loss'] = tf.reduce_mean(
tf.norm(snt.BatchFlatten()(samples)
- snt.BatchFlatten()(data), axis=-1))
debug_ops['z_step_size'] = self.z_step_size
debug_ops['opt_cost'] = optimisation_cost
debug_ops['gen_loss'] = generator_loss
return utils.ModelOutputs(
optimization_components, debug_ops)
def _get_rip_loss(self, img1, img2):
r"""Compute the RIP loss from two images.
The RIP loss: (\sqrt{F(x_1 - x_2)^2} - \sqrt{(x_1 - x_2)^2})^2
Args:
img1: an image (x_1), 4D tensor of shape [batch_size, W, H, C].
      img2: another image (x_2), 4D tensor of shape [batch_size, W, H, C].
    Returns:
      A [batch_size] tensor with the RIP loss for each pair of images.
    """
m1 = self._measure(img1)
m2 = self._measure(img2)
img_diff_norm = tf.norm(snt.BatchFlatten()(img1)
- snt.BatchFlatten()(img2), axis=-1)
m_diff_norm = tf.norm(m1 - m2, axis=-1)
return tf.square(img_diff_norm - m_diff_norm)
def _get_measurement_error(self, target_img, sample_img):
"""Compute the measurement error of sample images given the targets."""
m_targets = self._measure(target_img)
m_samples = self._measure(sample_img)
return tf.reduce_sum(tf.square(m_targets - m_samples), -1)
def gen_loss_fn(self, data, samples):
"""Generator loss as latent optimisation's error function."""
return self._get_measurement_error(data, samples)
def _build_optimization_components(
self, generator_loss=None, discriminator_loss=None):
"""Create the optimization components for this module."""
metric_vars = _get_and_check_variables(self._measure)
generator_vars = _get_and_check_variables(self.generator)
step_vars = _get_and_check_variables(self._log_step_size_module)
assert discriminator_loss is None
optimization_components = utils.OptimizationComponent(
generator_loss, generator_vars + metric_vars + step_vars)
return optimization_components
def _get_and_check_variables(module):
module_variables = module.get_all_variables()
if not module_variables:
raise ValueError(
'Module {} has no variables! Variables needed for training.'.format(
module.module_name))
# TensorFlow optimizers require lists to be passed in.
return list(module_variables)
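# Illustrative sketch (not part of the original module): the RIP loss used by
# `CS._get_rip_loss` penalises the gap between distances before and after
# measurement, (||F(x1 - x2)|| - ||x1 - x2||)^2. A toy numpy version with a
# random linear measurement F, using made-up shapes:
def _rip_loss_demo():
  import numpy as np
  rng = np.random.RandomState(0)
  x1, x2 = rng.randn(8), rng.randn(8)
  f = rng.randn(4, 8) / np.sqrt(4.)  # Toy linear measurement operator.
  img_diff_norm = np.linalg.norm(x1 - x2)
  m_diff_norm = np.linalg.norm(f.dot(x1 - x2))
  return (img_diff_norm - m_diff_norm) ** 2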
|
deepmind-research-master
|
cs_gan/cs.py
|
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for latent optimisation."""
import collections
import os
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from cs_gan import nets
tfd = tfp.distributions
class ModelOutputs(
collections.namedtuple('AdversarialModelOutputs',
['optimization_components', 'debug_ops'])):
"""All the information produced by the adversarial module.
Fields:
* `optimization_components`: A dictionary. Each entry in this dictionary
corresponds to a module to train using their own optimizer. The keys are
names of the components, and the values are `common.OptimizationComponent`
instances. The keys of this dict can be made keys of the configuration
used by the main train loop, to define the configuration of the
optimization details for each module.
* `debug_ops`: A dictionary, from string to a scalar `tf.Tensor`. Quantities
used for tracking training.
"""
class OptimizationComponent(
collections.namedtuple('OptimizationComponent', ['loss', 'vars'])):
"""Information needed by the optimizer to train modules.
Usage:
`optimizer.minimize(
opt_compoment.loss, var_list=opt_component.vars)`
Fields:
* `loss`: A `tf.Tensor` the loss of the module.
* `vars`: A list of variables, the ones which will be used to minimize the
loss.
"""
def cross_entropy_loss(logits, expected):
"""The cross entropy classification loss between logits and expected values.
The loss proposed by the original GAN paper: https://arxiv.org/abs/1406.2661.
Args:
logits: a `tf.Tensor`, the model produced logits.
expected: a `tf.Tensor`, the expected output.
Returns:
A scalar `tf.Tensor`, the average loss obtained on the given inputs.
Raises:
ValueError: if the logits do not have shape [batch_size, 2].
"""
num_logits = logits.get_shape()[1]
if num_logits != 2:
raise ValueError(('Invalid number of logits for cross_entropy_loss! '
'cross_entropy_loss supports only 2 output logits!'))
return tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=expected))
def optimise_and_sample(init_z, module, data, is_training):
"""Optimising generator latent variables and sample."""
if module.num_z_iters is None or module.num_z_iters == 0:
z_final = init_z
else:
init_loop_vars = (0, _project_z(init_z, module.z_project_method))
loop_cond = lambda i, _: i < module.num_z_iters
def loop_body(i, z):
loop_samples = module.generator(z, is_training)
gen_loss = module.gen_loss_fn(data, loop_samples)
z_grad = tf.gradients(gen_loss, z)[0]
z -= module.z_step_size * z_grad
z = _project_z(z, module.z_project_method)
return i + 1, z
# Use the following static loop for debugging
# z = init_z
    # for _ in range(module.num_z_iters):
# _, z = loop_body(0, z)
# z_final = z
_, z_final = tf.while_loop(loop_cond,
loop_body,
init_loop_vars)
return module.generator(z_final, is_training), z_final
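# Illustrative sketch (not part of the original module): the while-loop above
# runs a few gradient-descent steps on z before sampling. A toy numpy analogue
# with a linear "generator" G(z) = W z and a squared-error loss; all shapes and
# values are made up for illustration.
def _latent_optimisation_demo(num_steps=3, step_size=0.1):
  rng = np.random.RandomState(0)
  w = rng.randn(10, 4)
  z = rng.randn(4)
  target = rng.randn(10)
  for _ in range(num_steps):
    residual = w.dot(z) - target
    z = z - step_size * 2.0 * w.T.dot(residual)  # Gradient of ||Wz - target||^2.
    z = np.clip(z, -1, 1)  # Mirrors _project_z(z, 'clip').
  return z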
def get_optimisation_cost(initial_z, optimised_z):
optimisation_cost = tf.reduce_mean(
tf.reduce_sum((optimised_z - initial_z)**2, -1))
return optimisation_cost
def _project_z(z, project_method='clip'):
"""To be used for projected gradient descent over z."""
if project_method == 'norm':
z_p = tf.nn.l2_normalize(z, axis=-1)
elif project_method == 'clip':
z_p = tf.clip_by_value(z, -1, 1)
else:
raise ValueError('Unknown project_method: {}'.format(project_method))
return z_p
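# Illustrative sketch (not part of the original module): 'norm' projects z onto
# the unit sphere, while 'clip' projects it into the cube [-1, 1].
def _project_z_demo():
  z = tf.constant([[3.0, 4.0]])
  # 'norm' gives [[0.6, 0.8]]; 'clip' gives [[1.0, 1.0]].
  return _project_z(z, 'norm'), _project_z(z, 'clip')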
class DataProcessor(object):
def preprocess(self, x):
return x * 2 - 1
def postprocess(self, x):
return (x + 1) / 2.
def _get_np_data(data_processor, dataset, split='train'):
"""Get the dataset as numpy arrays."""
index = 0 if split == 'train' else 1
if dataset == 'mnist':
# Construct the dataset.
x, _ = tf.keras.datasets.mnist.load_data()[index]
    # Note: the images are loaded as uint8, so we convert them to float.
x = x.astype(np.float32)
x = x / 255.
x = x.reshape((-1, 28, 28, 1))
if dataset == 'cifar':
x, _ = tf.keras.datasets.cifar10.load_data()[index]
x = x.astype(np.float32)
x = x / 255.
if data_processor:
# Normalize data if a processor is given.
x = data_processor.preprocess(x)
return x
def make_output_dir(output_dir):
logging.info('Creating output dir %s', output_dir)
if not tf.gfile.IsDirectory(output_dir):
tf.gfile.MakeDirs(output_dir)
def get_ckpt_dir(output_dir):
ckpt_dir = os.path.join(output_dir, 'ckpt')
if not tf.gfile.IsDirectory(ckpt_dir):
tf.gfile.MakeDirs(ckpt_dir)
return ckpt_dir
def get_real_data_for_eval(num_eval_samples, dataset, split='valid'):
data = _get_np_data(data_processor=None, dataset=dataset, split=split)
data = data[:num_eval_samples]
return tf.constant(data)
def get_summaries(ops):
summaries = []
for name, op in ops.items():
    # Ensure the value ops are logged before they are written to the summary.
# We do this instead of a hook to ensure IS/FID are never computed twice.
print_op = tf.print(name, [op], output_stream=tf.logging.info)
with tf.control_dependencies([print_op]):
summary = tf.summary.scalar(name, op)
summaries.append(summary)
return summaries
def get_train_dataset(data_processor, dataset, batch_size):
"""Creates the training data tensors."""
x_train = _get_np_data(data_processor, dataset, split='train')
# Create the TF dataset.
dataset = tf.data.Dataset.from_tensor_slices(x_train)
# Shuffle and repeat the dataset for training.
# This is required because we want to do multiple passes through the entire
# dataset when training.
dataset = dataset.shuffle(100000).repeat()
# Batch the data and return the data batch.
one_shot_iterator = dataset.batch(batch_size).make_one_shot_iterator()
data_batch = one_shot_iterator.get_next()
return data_batch
def get_generator(dataset):
if dataset == 'mnist':
return nets.MLPGeneratorNet()
if dataset == 'cifar':
return nets.ConvGenNet()
def get_metric_net(dataset, num_outputs=2, use_sn=True):
if dataset == 'mnist':
return nets.MLPMetricNet(num_outputs)
if dataset == 'cifar':
return nets.ConvMetricNet(num_outputs, use_sn)
def make_prior(num_latents):
# Zero mean, unit variance prior.
  prior_mean = tf.zeros(shape=(num_latents,), dtype=tf.float32)
  prior_scale = tf.ones(shape=(num_latents,), dtype=tf.float32)
return tfd.Normal(loc=prior_mean, scale=prior_scale)
|
deepmind-research-master
|
cs_gan/utils.py
|
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute image metrics: IS, FID."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow_gan as tfgan
def get_image_metrics_for_samples(
real_images, generator, prior, data_processor, num_eval_samples):
"""Compute inception score and FID."""
max_classifier_batch = 10
num_batches = num_eval_samples // max_classifier_batch
def sample_fn(arg):
del arg
samples = generator(prior.sample(max_classifier_batch))
# Samples must be in [-1, 1], as expected by TFGAN.
# Resizing to appropriate size is done by TFGAN.
return samples
fake_outputs = tfgan.eval.sample_and_run_inception(
sample_fn,
sample_inputs=[1.0] * num_batches) # Dummy inputs.
fake_logits = fake_outputs['logits']
inception_score = tfgan.eval.classifier_score_from_logits(fake_logits)
real_outputs = tfgan.eval.run_inception(
data_processor.preprocess(real_images), num_batches=num_batches)
fid = tfgan.eval.frechet_classifier_distance_from_activations(
real_outputs['pool_3'], fake_outputs['pool_3'])
return {
'inception_score': inception_score,
'fid': fid}
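# Note: cs_gan/main.py and main_ode.py also reference
# `image_metrics.get_image_metrics` for real-vs-real sanity checks; that
# function is not included in this dump. The sketch below is an assumption,
# not the original implementation, and assumes both image batches are already
# scaled to [-1, 1] as expected by TFGAN.
def _get_image_metrics_sketch(real_images, other_images, num_batches=1):
  real_outputs = tfgan.eval.run_inception(real_images, num_batches=num_batches)
  other_outputs = tfgan.eval.run_inception(
      other_images, num_batches=num_batches)
  inception_score = tfgan.eval.classifier_score_from_logits(
      other_outputs['logits'])
  fid = tfgan.eval.frechet_classifier_distance_from_activations(
      real_outputs['pool_3'], other_outputs['pool_3'])
  return {
      'inception_score': inception_score,
      'fid': fid}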
|
deepmind-research-master
|
cs_gan/image_metrics.py
|
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File utilities."""
import math
import os
import numpy as np
from PIL import Image
class FileExporter(object):
"""File exporter utilities."""
def __init__(self, path, grid_height=None, zoom=1):
"""Constructor.
Arguments:
path: The directory to save data to.
grid_height: How many data elements tall to make the grid, if appropriate.
The width will be chosen based on height. If None, automatically
determined.
zoom: How much to zoom in each data element by, if appropriate.
"""
if not os.path.exists(path):
os.makedirs(path)
self._path = path
self._zoom = zoom
self._grid_height = grid_height
def _reshape(self, data):
"""Reshape given data into image format."""
batch_size, height, width, n_channels = data.shape
if self._grid_height:
grid_height = self._grid_height
else:
grid_height = int(math.floor(math.sqrt(batch_size)))
grid_width = int(math.ceil(batch_size/grid_height))
if n_channels == 1:
data = np.tile(data, (1, 1, 1, 3))
n_channels = 3
if n_channels != 3:
raise ValueError('Image batch must have either 1 or 3 channels, but '
'was {}'.format(n_channels))
shape = (height * grid_height, width * grid_width, n_channels)
buf = np.full(shape, 255, dtype=np.uint8)
multiplier = 1 if data.dtype in (np.int32, np.int64) else 255
for k in range(batch_size):
i = k // grid_width
j = k % grid_width
arr = data[k]
x, y = i * height, j * width
buf[x:x + height, y:y + width, :] = np.clip(
multiplier * arr, 0, 255).astype(np.uint8)
if self._zoom > 1:
buf = buf.repeat(self._zoom, axis=0).repeat(self._zoom, axis=1)
return buf
def save(self, data, name):
data = self._reshape(data)
relative_name = '{}_last.png'.format(name)
target_file = os.path.join(self._path, relative_name)
img = Image.fromarray(data)
img.save(target_file, format='PNG')
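# Illustrative sketch (not part of the original module): exporting a batch of
# 16 random greyscale "images" writes a 4x4 grid PNG named 'demo_last.png' into
# a throwaway temporary directory; all values here are made up.
def _file_exporter_demo():
  import tempfile
  batch = np.random.uniform(size=(16, 8, 8, 1))
  exporter = FileExporter(tempfile.mkdtemp())
  exporter.save(batch, 'demo')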
|
deepmind-research-master
|
cs_gan/file_utils.py
|
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
from cs_gan import file_utils
from cs_gan import gan
from cs_gan import image_metrics
from cs_gan import utils
flags.DEFINE_integer(
'num_training_iterations', 200000,
'Number of training iterations.')
flags.DEFINE_integer(
'batch_size', 64, 'Training batch size.')
flags.DEFINE_integer(
'num_latents', 128, 'The number of latents')
flags.DEFINE_integer(
'summary_every_step', 1000,
'The interval at which to log debug ops.')
flags.DEFINE_integer(
'image_metrics_every_step', 2000,
'The interval at which to log (expensive) image metrics.')
flags.DEFINE_integer(
'export_every', 10,
'The interval at which to export samples.')
flags.DEFINE_integer(
'num_eval_samples', 10000,
'The number of samples used to evaluate FID/IS')
flags.DEFINE_string(
    'dataset', 'cifar', 'The dataset used for learning (cifar|mnist).')
flags.DEFINE_float(
'optimisation_cost_weight', 3., 'weight for latent optimisation cost.')
flags.DEFINE_integer(
'num_z_iters', 3, 'The number of latent optimisation steps.'
'It falls back to vanilla GAN when num_z_iters is set to 0.')
flags.DEFINE_float(
'z_step_size', 0.01, 'Step size for latent optimisation.')
flags.DEFINE_string(
'z_project_method', 'norm', 'The method to project z.')
flags.DEFINE_string(
'output_dir', '/tmp/cs_gan/gan', 'Location where to save output files.')
flags.DEFINE_float('disc_lr', 2e-4, 'Discriminator Learning rate.')
flags.DEFINE_float('gen_lr', 2e-4, 'Generator Learning rate.')
flags.DEFINE_bool(
'run_real_data_metrics', False,
'Whether or not to run image metrics on real data.')
flags.DEFINE_bool(
'run_sample_metrics', True,
'Whether or not to run image metrics on samples.')
FLAGS = flags.FLAGS
# Log info level (for Hooks).
tf.logging.set_verbosity(tf.logging.INFO)
def main(argv):
del argv
utils.make_output_dir(FLAGS.output_dir)
data_processor = utils.DataProcessor()
images = utils.get_train_dataset(data_processor, FLAGS.dataset,
FLAGS.batch_size)
  logging.info('Generator learning rate: %g', FLAGS.gen_lr)
  logging.info('Discriminator learning rate: %g', FLAGS.disc_lr)
# Construct optimizers.
disc_optimizer = tf.train.AdamOptimizer(FLAGS.disc_lr, beta1=0.5, beta2=0.999)
gen_optimizer = tf.train.AdamOptimizer(FLAGS.gen_lr, beta1=0.5, beta2=0.999)
# Create the networks and models.
generator = utils.get_generator(FLAGS.dataset)
metric_net = utils.get_metric_net(FLAGS.dataset)
model = gan.GAN(metric_net, generator,
FLAGS.num_z_iters, FLAGS.z_step_size,
FLAGS.z_project_method, FLAGS.optimisation_cost_weight)
prior = utils.make_prior(FLAGS.num_latents)
generator_inputs = prior.sample(FLAGS.batch_size)
model_output = model.connect(images, generator_inputs)
optimization_components = model_output.optimization_components
debug_ops = model_output.debug_ops
samples = generator(generator_inputs, is_training=False)
global_step = tf.train.get_or_create_global_step()
# We pass the global step both to the disc and generator update ops.
# This means that the global step will not be the same as the number of
# iterations, but ensures that hooks which rely on global step work correctly.
disc_update_op = disc_optimizer.minimize(
optimization_components['disc'].loss,
var_list=optimization_components['disc'].vars,
global_step=global_step)
gen_update_op = gen_optimizer.minimize(
optimization_components['gen'].loss,
var_list=optimization_components['gen'].vars,
global_step=global_step)
# Get data needed to compute FID. We also compute metrics on
# real data as a sanity check and as a reference point.
eval_real_data = utils.get_real_data_for_eval(FLAGS.num_eval_samples,
FLAGS.dataset,
split='train')
def sample_fn(x):
return utils.optimise_and_sample(x, module=model,
data=None, is_training=False)[0]
if FLAGS.run_sample_metrics:
sample_metrics = image_metrics.get_image_metrics_for_samples(
eval_real_data, sample_fn,
prior, data_processor,
num_eval_samples=FLAGS.num_eval_samples)
else:
sample_metrics = {}
if FLAGS.run_real_data_metrics:
data_metrics = image_metrics.get_image_metrics(
eval_real_data, eval_real_data)
else:
data_metrics = {}
sample_exporter = file_utils.FileExporter(
os.path.join(FLAGS.output_dir, 'samples'))
# Hooks.
debug_ops['it'] = global_step
# Abort training on Nans.
nan_disc_hook = tf.train.NanTensorHook(optimization_components['disc'].loss)
nan_gen_hook = tf.train.NanTensorHook(optimization_components['gen'].loss)
# Step counter.
  step_counter_hook = tf.train.StepCounterHook()
checkpoint_saver_hook = tf.train.CheckpointSaverHook(
checkpoint_dir=utils.get_ckpt_dir(FLAGS.output_dir), save_secs=10 * 60)
loss_summary_saver_hook = tf.train.SummarySaverHook(
save_steps=FLAGS.summary_every_step,
output_dir=os.path.join(FLAGS.output_dir, 'summaries'),
summary_op=utils.get_summaries(debug_ops))
metrics_summary_saver_hook = tf.train.SummarySaverHook(
save_steps=FLAGS.image_metrics_every_step,
output_dir=os.path.join(FLAGS.output_dir, 'summaries'),
summary_op=utils.get_summaries(sample_metrics))
  hooks = [checkpoint_saver_hook, metrics_summary_saver_hook,
           nan_disc_hook, nan_gen_hook, step_counter_hook,
           loss_summary_saver_hook]
# Start training.
with tf.train.MonitoredSession(hooks=hooks) as sess:
logging.info('starting training')
for key, value in sess.run(data_metrics).items():
      logging.info('%s: %f', key, value)
for i in range(FLAGS.num_training_iterations):
sess.run(disc_update_op)
sess.run(gen_update_op)
if i % FLAGS.export_every == 0:
samples_np, data_np = sess.run([samples, images])
# Create an object which gets data and does the processing.
data_np = data_processor.postprocess(data_np)
samples_np = data_processor.postprocess(samples_np)
sample_exporter.save(samples_np, 'samples')
sample_exporter.save(data_np, 'data')
if __name__ == '__main__':
app.run(main)
|
deepmind-research-master
|
cs_gan/main.py
|
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Network utilities."""
import functools
import re
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf
import tensorflow_gan as tfgan
def _sn_custom_getter():
def name_filter(name):
match = re.match(r'.*w(_.*)?$', name)
return match is not None
return tfgan.features.spectral_normalization_custom_getter(
name_filter=name_filter)
class ConvGenNet(snt.AbstractModule):
"""As in the SN paper."""
def __init__(self, name='conv_gen'):
super(ConvGenNet, self).__init__(name=name)
def _build(self, inputs, is_training):
batch_size = inputs.get_shape().as_list()[0]
first_shape = [4, 4, 512]
norm_ctor = snt.BatchNormV2
norm_ctor_config = {'scale': True}
up_tensor = snt.Linear(np.prod(first_shape))(inputs)
first_tensor = tf.reshape(up_tensor, shape=[batch_size] + first_shape)
net = snt.nets.ConvNet2DTranspose(
output_channels=[256, 128, 64, 3],
output_shapes=[(8, 8), (16, 16), (32, 32), (32, 32)],
kernel_shapes=[(4, 4), (4, 4), (4, 4), (3, 3)],
strides=[2, 2, 2, 1],
normalization_ctor=norm_ctor,
normalization_kwargs=norm_ctor_config,
normalize_final=False,
paddings=[snt.SAME], activate_final=False, activation=tf.nn.relu)
output = net(first_tensor, is_training=is_training)
return tf.nn.tanh(output)
class ConvMetricNet(snt.AbstractModule):
"""Convolutional discriminator (metric) architecture."""
def __init__(self, num_outputs=2, use_sn=True, name='sn_metric'):
super(ConvMetricNet, self).__init__(name=name)
self._num_outputs = num_outputs
self._use_sn = use_sn
def _build(self, inputs):
def build_net():
net = snt.nets.ConvNet2D(
output_channels=[64, 64, 128, 128, 256, 256, 512],
kernel_shapes=[
(3, 3), (4, 4), (3, 3), (4, 4), (3, 3), (4, 4), (3, 3)],
strides=[1, 2, 1, 2, 1, 2, 1],
paddings=[snt.SAME], activate_final=True,
activation=functools.partial(tf.nn.leaky_relu, alpha=0.1))
linear = snt.Linear(self._num_outputs)
output = linear(snt.BatchFlatten()(net(inputs)))
return output
if self._use_sn:
with tf.variable_scope('', custom_getter=_sn_custom_getter()):
output = build_net()
else:
output = build_net()
return output
class MLPGeneratorNet(snt.AbstractModule):
"""MNIST generator net."""
def __init__(self, name='mlp_generator'):
super(MLPGeneratorNet, self).__init__(name=name)
def _build(self, inputs, is_training=True):
del is_training
net = snt.nets.MLP([500, 500, 784], activation=tf.nn.leaky_relu)
out = net(inputs)
out = tf.nn.tanh(out)
return snt.BatchReshape([28, 28, 1])(out)
class MLPMetricNet(snt.AbstractModule):
"""Same as in Grover and Ermon, ICLR workshop 2017."""
def __init__(self, num_outputs=2, name='mlp_metric'):
super(MLPMetricNet, self).__init__(name=name)
self._layer_size = [500, 500, num_outputs]
def _build(self, inputs):
net = snt.nets.MLP(self._layer_size,
activation=tf.nn.leaky_relu)
output = net(snt.BatchFlatten()(inputs))
return output
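# Illustrative sketch (not part of the original module): typical wiring of
# these modules, mirroring `get_generator` and `get_metric_net` in
# cs_gan/utils.py. The batch size and latent size are toy values.
def _nets_demo():
  latents = tf.random.normal([8, 128])
  images = ConvGenNet()(latents, is_training=True)  # Shape [8, 32, 32, 3].
  logits = ConvMetricNet(num_outputs=2)(images)     # Shape [8, 2].
  return images, logits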
|
deepmind-research-master
|
cs_gan/nets.py
|
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sonnet as snt
import tensorflow.compat.v1 as tf
from cs_gan import gan
class DummyGenerator(snt.AbstractModule):
def __init__(self):
super(DummyGenerator, self).__init__(name='dummy_generator')
def _build(self, inputs, is_training):
return snt.Linear(10)(inputs)
class GanTest(tf.test.TestCase):
def testConnect(self):
discriminator = snt.Linear(2)
generator = DummyGenerator()
model = gan.GAN(
discriminator, generator,
num_z_iters=0, z_step_size=0.1,
z_project_method='none', optimisation_cost_weight=0.0)
generator_inputs = tf.ones((16, 3), dtype=tf.float32)
data = tf.ones((16, 10))
    opt_components, _ = model.connect(data, generator_inputs)
    self.assertIn('disc', opt_components)
    self.assertIn('gen', opt_components)
    self.assertCountEqual(
        opt_components['disc'].vars,
        discriminator.get_variables())
    self.assertCountEqual(
        opt_components['gen'].vars,
        generator.get_variables() + model._log_step_size_module.get_variables())
if __name__ == '__main__':
tf.test.main()
|
deepmind-research-master
|
cs_gan/tests/gan_test.py
|
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for the Transporter module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import tensorflow.compat.v1 as tf
from transporter import transporter
IMAGE_H = 16
IMAGE_W = 16
IMAGE_C = 3
BATCH_SIZE = 4
IMAGE_BATCH_SHAPE = (BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_C)
FILTERS = (16, 16, 32, 32, 64, 64)
STRIDES = (1, 1, 2, 1, 2, 1)
KERNEL_SIZES = (7, 3, 3, 3, 3, 3)
class TransporterTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
{'norm_type': 'batch'},
{'norm_type': 'layer'},
{'norm_type': 'instance'})
def test_output_shape(self, norm_type):
encoder_ctor = transporter.Encoder
encoder_kwargs = {
'filters': FILTERS,
'strides': STRIDES,
'kernel_sizes': KERNEL_SIZES,
'norm_type': norm_type,
}
decoder_filters = 4
num_keypoints = 5
gauss_std = 0.1
encoder = encoder_ctor(name='encoder', **encoder_kwargs)
keypoint_encoder = encoder_ctor(name='keypoint_encoder', **encoder_kwargs)
keypointer = transporter.KeyPointer(keypoint_encoder=keypoint_encoder,
num_keypoints=num_keypoints,
gauss_std=gauss_std)
decoder = transporter.Decoder(initial_filters=decoder_filters,
output_size=[IMAGE_H, IMAGE_W],
output_channels=IMAGE_C,
norm_type=norm_type)
model = transporter.Transporter(encoder=encoder,
decoder=decoder,
keypointer=keypointer)
image_a = tf.random.normal(IMAGE_BATCH_SHAPE)
image_b = tf.random.normal(IMAGE_BATCH_SHAPE)
transporter_results = model(image_a, image_b, is_training=True)
reconstructed_image_b = transporter_results['reconstructed_image_b']
self.assertEqual(reconstructed_image_b.shape, IMAGE_BATCH_SHAPE)
def testIncorrectEncoderShapes(self):
"""Test that a possible misconfiguration throws an error as expected.
If the two encoders used produce different spatial sizes for their
feature maps, this should cause an error when multiplying tensors together.
"""
decoder_filters = 4
num_keypoints = 5
gauss_std = 0.1
encoder = transporter.Encoder(
filters=FILTERS,
strides=STRIDES,
kernel_sizes=KERNEL_SIZES)
# Use less conv layers in this, in particular one less stride 2 layer, so
# we will get a different spatial output resolution.
keypoint_encoder = transporter.Encoder(
filters=FILTERS[:-2],
strides=STRIDES[:-2],
kernel_sizes=KERNEL_SIZES[:-2])
keypointer = transporter.KeyPointer(
keypoint_encoder=keypoint_encoder,
num_keypoints=num_keypoints,
gauss_std=gauss_std)
decoder = transporter.Decoder(
initial_filters=decoder_filters,
output_size=[IMAGE_H, IMAGE_W],
output_channels=IMAGE_C)
model = transporter.Transporter(
encoder=encoder,
decoder=decoder,
keypointer=keypointer)
with self.assertRaisesRegexp(ValueError, 'Dimensions must be equal'):
model(tf.random.normal(IMAGE_BATCH_SHAPE),
tf.random.normal(IMAGE_BATCH_SHAPE),
is_training=True)
class EncoderTest(tf.test.TestCase):
def test_output_shape(self):
image_batch = tf.random.normal(shape=IMAGE_BATCH_SHAPE)
filters = (4, 4, 8, 8, 16, 16)
encoder = transporter.Encoder(filters=filters,
strides=STRIDES,
kernel_sizes=KERNEL_SIZES)
features = encoder(image_batch, is_training=True)
self.assertEqual(features.shape, (BATCH_SIZE,
IMAGE_H // 4,
IMAGE_W // 4,
filters[-1]))
class KeyPointerTest(tf.test.TestCase):
def test_output_shape(self):
image_batch = tf.random.normal(shape=IMAGE_BATCH_SHAPE)
num_keypoints = 6
gauss_std = 0.1
keypoint_encoder = transporter.Encoder(filters=FILTERS,
strides=STRIDES,
kernel_sizes=KERNEL_SIZES)
keypointer = transporter.KeyPointer(keypoint_encoder=keypoint_encoder,
num_keypoints=num_keypoints,
gauss_std=gauss_std)
keypointer_results = keypointer(image_batch, is_training=True)
self.assertEqual(keypointer_results['centers'].shape,
(BATCH_SIZE, num_keypoints, 2))
self.assertEqual(keypointer_results['heatmaps'].shape,
(BATCH_SIZE, IMAGE_H // 4, IMAGE_W // 4, num_keypoints))
class DecoderTest(tf.test.TestCase):
def test_output_shape(self):
feature_batch = tf.random.normal(shape=(BATCH_SIZE,
IMAGE_H // 4,
IMAGE_W // 4,
64))
decoder = transporter.Decoder(initial_filters=64,
output_size=[IMAGE_H, IMAGE_W],
output_channels=IMAGE_C)
reconstructed_image_batch = decoder(feature_batch, is_training=True)
self.assertEqual(reconstructed_image_batch.shape, IMAGE_BATCH_SHAPE)
def test_encoder_decoder_output_shape(self):
image_batch = tf.random.normal(shape=IMAGE_BATCH_SHAPE)
encoder = transporter.Encoder(filters=FILTERS,
strides=STRIDES,
kernel_sizes=KERNEL_SIZES)
decoder = transporter.Decoder(initial_filters=4,
output_size=[IMAGE_H, IMAGE_W],
output_channels=IMAGE_C)
features = encoder(image_batch, is_training=True)
reconstructed_images = decoder(features, is_training=True)
self.assertEqual(reconstructed_images.shape, IMAGE_BATCH_SHAPE)
if __name__ == '__main__':
tf.test.main()
|
deepmind-research-master
|
transporter/transporter_test.py
|
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transporter architecture in Sonnet/TF 1: https://arxiv.org/abs/1906.11883."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import sonnet as snt
import tensorflow.compat.v1 as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers as contrib_layers
nest = contrib_framework.nest
# Paper submission used BatchNorm, but we have since found that Layer & Instance
# norm can be quite a lot more stable.
_NORMALIZATION_CTORS = {
"layer": snt.LayerNorm,
"instance": functools.partial(snt.LayerNorm, axis=[1, 2]),
"batch": snt.BatchNormV2,
}
def _connect_module_with_kwarg_if_supported(module,
input_tensor,
kwarg_name,
kwarg_value):
"""Connects a module to some input, plus a kwarg= if supported by module."""
if snt.supports_kwargs(module, kwarg_name) == "supported":
kwargs = {kwarg_name: kwarg_value}
else:
kwargs = {}
return module(input_tensor, **kwargs)
class Transporter(snt.AbstractModule):
"""Sonnet module implementing the Transporter architecture."""
def __init__(
self,
encoder,
keypointer,
decoder,
name="transporter"):
"""Initialize the Transporter module.
Args:
encoder: `snt.AbstractModule` mapping images to features (see `Encoder`)
keypointer: `snt.AbstractModule` mapping images to keypoint masks (see
`KeyPointer`)
decoder: `snt.AbstractModule` decoding features to images (see `Decoder`)
name: `str` module name
"""
super(Transporter, self).__init__(name=name)
self._encoder = encoder
self._decoder = decoder
self._keypointer = keypointer
def _build(self, image_a, image_b, is_training):
"""Reconstructs image_b using feature transport from image_a.
    This approach matches the NeurIPS submission.
Args:
image_a: Tensor of shape [B, H, W, C] containing a batch of images.
image_b: Tensor of shape [B, H, W, C] containing a batch of images.
      is_training: `bool` indicating whether the model is in training mode.
Returns:
A dict containing keys:
'reconstructed_image_b': Reconstruction of image_b, with the same shape.
'features_a': Tensor of shape [B, F_h, F_w, N] of the extracted features
for `image_a`.
'features_b': Tensor of shape [B, F_h, F_w, N] of the extracted features
for `image_b`.
'keypoints_a': The result of the keypointer module on image_a, with stop
gradients applied.
'keypoints_b': The result of the keypointer module on image_b.
"""
# Process both images. All gradients related to image_a are stopped.
image_a_features = tf.stop_gradient(
self._encoder(image_a, is_training=is_training))
image_a_keypoints = nest.map_structure(
tf.stop_gradient, self._keypointer(image_a, is_training=is_training))
image_b_features = self._encoder(image_b, is_training=is_training)
image_b_keypoints = self._keypointer(image_b, is_training=is_training)
# Transport features
num_keypoints = image_a_keypoints["heatmaps"].shape[-1]
transported_features = image_a_features
for k in range(num_keypoints):
mask_a = image_a_keypoints["heatmaps"][..., k, None]
mask_b = image_b_keypoints["heatmaps"][..., k, None]
# suppress features from image a, around both keypoint locations.
transported_features = (
(1 - mask_a) * (1 - mask_b) * transported_features)
# copy features from image b around keypoints for image b.
transported_features += (mask_b * image_b_features)
reconstructed_image_b = self._decoder(
transported_features, is_training=is_training)
return {
"reconstructed_image_b": reconstructed_image_b,
"features_a": image_a_features,
"features_b": image_b_features,
"keypoints_a": image_a_keypoints,
"keypoints_b": image_b_keypoints,
}
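# Illustrative sketch (not part of the original module): the transport step
# inside `Transporter._build`, written out for a single keypoint with numpy
# arrays. Features of image a are suppressed around both keypoints and
# replaced by features of image b around b's keypoint. Shapes are toy values.
def _transport_demo():
  import numpy as np
  rng = np.random.RandomState(0)
  feats_a = rng.randn(1, 4, 4, 8)
  feats_b = rng.randn(1, 4, 4, 8)
  mask_a = rng.uniform(size=(1, 4, 4, 1))  # Heatmap of the keypoint in image a.
  mask_b = rng.uniform(size=(1, 4, 4, 1))  # Heatmap of the keypoint in image b.
  transported = (1 - mask_a) * (1 - mask_b) * feats_a + mask_b * feats_b
  return transported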
def reconstruction_loss(image, predicted_image, loss_type="l2"):
"""Returns the reconstruction loss between the image and the predicted_image.
Args:
image: target image tensor of shape [B, H, W, C]
predicted_image: reconstructed image as returned by the model
loss_type: `str` reconstruction loss, either `l2` (default) or `l1`.
Returns:
The reconstruction loss
"""
if loss_type == "l2":
return tf.reduce_mean(tf.square(image - predicted_image))
elif loss_type == "l1":
return tf.reduce_mean(tf.abs(image - predicted_image))
else:
raise ValueError("Unknown loss type: {}".format(loss_type))
class Encoder(snt.AbstractModule):
"""Encoder module mapping an image to features.
The encoder is a standard convolutional network with ReLu activations.
"""
def __init__(
self,
filters=(16, 16, 32, 32),
kernel_sizes=(7, 3, 3, 3),
strides=(1, 1, 2, 1),
norm_type="batch",
name="encoder"):
"""Initialize the Encoder.
Args:
filters: tuple of `int`. The ith layer of the encoder will
consist of `filters[i]` filters.
kernel_sizes: tuple of `int` kernel sizes for each layer
strides: tuple of `int` strides for each layer
norm_type: string, one of 'instance', 'layer', 'batch'.
name: `str` name of the module.
"""
super(Encoder, self).__init__(name=name)
if len({len(filters), len(kernel_sizes), len(strides)}) != 1:
raise ValueError(
"length of filters/kernel_sizes/strides lists must be the same")
self._filters = filters
self._kernels = kernel_sizes
self._strides = strides
self._norm_ctor = _NORMALIZATION_CTORS[norm_type]
def _build(self, image, is_training):
"""Connect the Encoder.
Args:
image: A batch of images of shape [B, H, W, C]
is_training: `bool` indicating if the model is in training mode.
Returns:
      A tensor of features of shape [B, F_h, F_w, N] where F_h and F_w are the
      height and width of the feature map and N = `self._filters[-1]`.
"""
regularizers = {"w": contrib_layers.l2_regularizer(1.0)}
features = image
for l in range(len(self._filters)):
with tf.variable_scope("conv_{}".format(l + 1)):
conv = snt.Conv2D(
self._filters[l],
self._kernels[l],
self._strides[l],
padding=snt.SAME,
regularizers=regularizers,
name="conv_{}".format(l+1))
norm_module = self._norm_ctor(name="normalization")
features = conv(features)
features = _connect_module_with_kwarg_if_supported(
norm_module, features, "is_training", is_training)
features = tf.nn.relu(features)
return features
class KeyPointer(snt.AbstractModule):
"""Module for extracting keypoints from an image."""
def __init__(self,
num_keypoints,
gauss_std,
keypoint_encoder,
custom_getter=None,
name="key_pointer"):
"""Iniitialize the keypointer.
Args:
num_keypoints: `int` number of keypoints to extract
gauss_std: `float` size of the keypoints, relative to the image dimensions
normalized to the range [-1, 1]
keypoint_encoder: sonnet Module which produces a feature map. Must accept
an is_training kwarg. When used in the Transporter, the output spatial
resolution of this encoder should match the output spatial resolution
of the other encoder, although these two encoders should not share
weights.
custom_getter: optional custom getter for variables in this module.
name: `str` name of the module
"""
super(KeyPointer, self).__init__(name=name, custom_getter=custom_getter)
self._num_keypoints = num_keypoints
self._gauss_std = gauss_std
self._keypoint_encoder = keypoint_encoder
def _build(self, image, is_training):
"""Compute the gaussian keypoints for the image.
Args:
image: Image tensor of shape [B, H, W, C]
is_training: `bool` whether the model is in training or evaluation mode
Returns:
a dict with keys:
'centers': A tensor of shape [B, K, 2] of the center locations for each
of the K keypoints.
'heatmaps': A tensor of shape [B, F_h, F_w, K] of gaussian maps over the
keypoints, where [F_h, F_w] is the size of the keypoint_encoder
feature maps.
"""
conv = snt.Conv2D(
self._num_keypoints, [1, 1],
stride=1,
regularizers={"w": contrib_layers.l2_regularizer(1.0)},
name="conv_1/conv_1")
image_features = self._keypoint_encoder(image, is_training=is_training)
keypoint_features = conv(image_features)
return get_keypoint_data_from_feature_map(
keypoint_features, self._gauss_std)
def get_keypoint_data_from_feature_map(feature_map, gauss_std):
"""Returns keypoint information from a feature map.
Args:
feature_map: [B, H, W, K] Tensor, should be activations from a convnet.
gauss_std: float, the standard deviation of the gaussians to be put around
the keypoints.
Returns:
a dict with keys:
'centers': A tensor of shape [B, K, 2] of the center locations for each
of the K keypoints.
'heatmaps': A tensor of shape [B, H, W, K] of gaussian maps over the
keypoints.
"""
gauss_mu = _get_keypoint_mus(feature_map)
map_size = feature_map.shape.as_list()[1:3]
gauss_maps = _get_gaussian_maps(gauss_mu, map_size, 1.0 / gauss_std)
return {
"centers": gauss_mu,
"heatmaps": gauss_maps,
}
def _get_keypoint_mus(keypoint_features):
"""Returns the keypoint center points.
Args:
keypoint_features: A tensor of shape [B, F_h, F_w, K] where K is the number
of keypoints to extract.
Returns:
A tensor of shape [B, K, 2] of the y, x center points of each keypoint. Each
    center point is in the range [-1, 1]^2. Note: the first element is the y
coordinate, the second is the x coordinate.
"""
gauss_y = _get_coord(keypoint_features, 1)
gauss_x = _get_coord(keypoint_features, 2)
gauss_mu = tf.stack([gauss_y, gauss_x], axis=2)
return gauss_mu
def _get_coord(features, axis):
"""Returns the keypoint coordinate encoding for the given axis.
Args:
features: A tensor of shape [B, F_h, F_w, K] where K is the number of
keypoints to extract.
axis: `int` which axis to extract the coordinate for. Has to be axis 1 or 2.
Returns:
A tensor of shape [B, K] containing the keypoint centers along the given
axis. The location is given in the range [-1, 1].
"""
if axis != 1 and axis != 2:
raise ValueError("Axis needs to be 1 or 2.")
other_axis = 1 if axis == 2 else 2
axis_size = features.shape[axis]
# Compute the normalized weight for each row/column along the axis
g_c_prob = tf.reduce_mean(features, axis=other_axis)
g_c_prob = tf.nn.softmax(g_c_prob, axis=1)
# Linear combination of the interval [-1, 1] using the normalized weights to
# give a single coordinate in the same interval [-1, 1]
scale = tf.cast(tf.linspace(-1.0, 1.0, axis_size), tf.float32)
scale = tf.reshape(scale, [1, axis_size, 1])
coordinate = tf.reduce_sum(g_c_prob * scale, axis=1)
return coordinate
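# Illustrative sketch (not part of the original module): `_get_coord` is a
# soft-argmax along one axis. A 1-D numpy analogue with made-up activations:
def _soft_argmax_demo():
  import numpy as np
  activations = np.array([0., 0., 5., 0.])          # Peak at index 2 of 4.
  probs = np.exp(activations) / np.exp(activations).sum()
  scale = np.linspace(-1.0, 1.0, len(activations))  # [-1, -1/3, 1/3, 1]
  return float((probs * scale).sum())               # Close to 1/3.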
def _get_gaussian_maps(mu, map_size, inv_std, power=2):
"""Transforms the keypoint center points to a gaussian masks."""
mu_y, mu_x = mu[:, :, 0:1], mu[:, :, 1:2]
y = tf.cast(tf.linspace(-1.0, 1.0, map_size[0]), tf.float32)
x = tf.cast(tf.linspace(-1.0, 1.0, map_size[1]), tf.float32)
mu_y, mu_x = tf.expand_dims(mu_y, -1), tf.expand_dims(mu_x, -1)
y = tf.reshape(y, [1, 1, map_size[0], 1])
x = tf.reshape(x, [1, 1, 1, map_size[1]])
g_y = tf.pow(y - mu_y, power)
g_x = tf.pow(x - mu_x, power)
dist = (g_y + g_x) * tf.pow(inv_std, power)
g_yx = tf.exp(-dist)
g_yx = tf.transpose(g_yx, perm=[0, 2, 3, 1])
return g_yx
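# Illustrative sketch (not part of the original module): a single keypoint at
# the centre of a toy 5x5 map yields a bump that peaks at the central pixel and
# decays with squared distance scaled by (1 / gauss_std)^2.
def _gaussian_maps_demo():
  mu = tf.zeros([1, 1, 2])  # One keypoint at (y, x) = (0, 0).
  return _get_gaussian_maps(mu, [5, 5], inv_std=1.0 / 0.1)  # Shape [1, 5, 5, 1].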
class Decoder(snt.AbstractModule):
"""Decoder reconstruction network.
The decoder is a standard convolutional network with ReLu activations.
"""
def __init__(self, initial_filters, output_size,
output_channels=3,
norm_type="batch",
name="decoder"):
"""Initialize the decoder.
Args:
initial_filters: `int` number of initial filters used in the decoder
output_size: tuple of `int` height and width of the reconstructed image
output_channels: `int` number of output channels, for RGB use 3 (default)
norm_type: string, one of 'instance', 'layer', 'batch'.
name: `str` name of the module
"""
super(Decoder, self).__init__(name=name)
self._initial_filters = initial_filters
self._output_height = output_size[0]
self._output_width = output_size[1]
self._output_channels = output_channels
self._norm_ctor = _NORMALIZATION_CTORS[norm_type]
def _build(self, features, is_training):
"""Connect the Decoder.
Args:
features: Tensor of shape [B, F_h, F_w, N]
is_training: `bool` whether the module is in training mode.
Returns:
A reconstructed image tensor of shape [B, output_height, output_width,
output_channels]
"""
height, width = features.shape.as_list()[1:3]
filters = self._initial_filters
regularizers = {"w": contrib_layers.l2_regularizer(1.0)}
layer = 0
while height <= self._output_height:
layer += 1
with tf.variable_scope("conv_{}".format(layer)):
conv1 = snt.Conv2D(
filters,
[3, 3],
stride=1,
regularizers=regularizers,
name="conv_{}".format(layer))
norm_module = self._norm_ctor(name="normalization")
features = conv1(features)
features = _connect_module_with_kwarg_if_supported(
norm_module, features, "is_training", is_training)
features = tf.nn.relu(features)
if height == self._output_height:
layer += 1
with tf.variable_scope("conv_{}".format(layer)):
conv2 = snt.Conv2D(
self._output_channels,
[3, 3],
stride=1,
regularizers=regularizers,
name="conv_{}".format(layer))
features = conv2(features)
break
else:
layer += 1
with tf.variable_scope("conv_{}".format(layer)):
conv2 = snt.Conv2D(
filters,
[3, 3],
stride=1,
regularizers=regularizers,
name="conv_{}".format(layer))
norm_module = self._norm_ctor(name="normalization")
features = conv2(features)
features = _connect_module_with_kwarg_if_supported(
norm_module, features, "is_training", is_training)
features = tf.nn.relu(features)
height *= 2
width *= 2
features = tf.image.resize(features, [height, width])
if filters >= 8:
        filters //= 2
assert height == self._output_height
assert width == self._output_width
return features
|
deepmind-research-master
|
transporter/transporter.py
|
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Single file script for doing a quick evaluation of a model.
This script is called by run.sh.
Usage:
user@host:/path/to/deepmind_research$ unsupervised_adversarial_training/run.sh
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl import app
from absl import flags
import cleverhans
from cleverhans import attacks
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
UAT_HUB_URL = ('https://tfhub.dev/deepmind/unsupervised-adversarial-training/'
'cifar10/wrn_106/1')
FLAGS = flags.FLAGS
flags.DEFINE_enum('attack_fn_name', 'fgsm', ['fgsm', 'none'],
'Name of the attack method to use.')
flags.DEFINE_float('epsilon_attack', 8.0 / 255,
'Maximum allowable perturbation size, between 0 and 1.')
flags.DEFINE_integer('num_steps', 20, 'Number of attack iterations.')
flags.DEFINE_integer('num_batches', 100, 'Number of batches to evaluate.')
flags.DEFINE_integer('batch_size', 32, 'Batch size.')
flags.DEFINE_integer('skip_batches', 0,
'Controls index of start image. This can be used to '
'evaluate the model on different subsets of the test set.')
flags.DEFINE_float('learning_rate', 0.003, 'Attack optimizer learning rate.')
def _top_1_accuracy(logits, labels):
return tf.reduce_mean(tf.cast(tf.nn.in_top_k(logits, labels, 1), tf.float32))
def make_classifier():
model = hub.Module(UAT_HUB_URL)
def classifier(x):
x = _cifar_meanstd_normalize(x)
model_input = dict(x=x, decay_rate=0.1, prefix='default')
return model(model_input)
return classifier
def eval_cifar():
"""Evaluate an adversarially trained model."""
attack_fn_name = FLAGS.attack_fn_name
total_batches = FLAGS.num_batches
batch_size = FLAGS.batch_size
# Note that a `classifier` is a function mapping [0,1]-scaled image Tensors
# to a logit Tensor. In particular, it includes *both* the preprocessing
# function, and the neural network.
classifier = make_classifier()
cleverhans_model = cleverhans.model.CallableModelWrapper(classifier, 'logits')
_, data_test = tf.keras.datasets.cifar10.load_data()
data = _build_dataset(data_test, batch_size=batch_size, shuffle=False)
# Generate adversarial images.
if attack_fn_name == 'fgsm':
attack = attacks.MadryEtAl(cleverhans_model)
num_cifar_classes = 10
adv_x = attack.generate(data.image,
eps=FLAGS.epsilon_attack,
eps_iter=FLAGS.learning_rate,
nb_iter=FLAGS.num_steps,
y=tf.one_hot(data.label, depth=num_cifar_classes))
elif attack_fn_name == 'none':
adv_x = data.image
logits = classifier(adv_x)
probs = tf.nn.softmax(logits)
adv_acc = _top_1_accuracy(logits, data.label)
with tf.train.SingularMonitoredSession() as sess:
total_acc = 0.
for _ in range(FLAGS.skip_batches):
sess.run(data.image)
for _ in range(total_batches):
_, _, adv_acc_val = sess.run([probs, data.label, adv_acc])
total_acc += adv_acc_val
print('Batch accuracy: {}'.format(adv_acc_val))
print('Total accuracy against {}: {}'.format(
FLAGS.attack_fn_name, total_acc / total_batches))
########## Utilities ##########
# Defines a dataset sample.
Sample = collections.namedtuple('Sample', ['image', 'label'])
def _build_dataset(raw_data, batch_size=32, shuffle=False):
"""Builds a dataset from raw NumPy tensors.
Args:
raw_data: Pair (images, labels) of numpy arrays. `images` should have shape
(N, H, W, C) with values in [0, 255], and `labels` should have shape
(N,) or (N, 1) indicating class indices.
    batch_size: int, batch size.
    shuffle: bool, whether to shuffle the data (default: False).
  Returns:
    A `Sample` of (image, label) tensors that iterate over the dataset:
    images are (batch_size, H, W, C) tf.float32 Tensors with values in [0, 1]
    and labels are (batch_size,) tf.int64 Tensors of class indices.
"""
images, labels = raw_data
labels = np.squeeze(labels)
samples = Sample(images.astype(np.float32) / 255., labels.astype(np.int64))
data = tf.data.Dataset.from_tensor_slices(samples)
if shuffle:
data = data.shuffle(1000)
return data.repeat().batch(batch_size).make_one_shot_iterator().get_next()
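# Hypothetical usage sketch (not part of the original script): builds a small
# CIFAR-10 test batch with _build_dataset; names below mirror eval_cifar.
def _example_build_dataset():
  _, data_test = tf.keras.datasets.cifar10.load_data()
  batch = _build_dataset(data_test, batch_size=8, shuffle=False)
  # batch.image: [8, 32, 32, 3] tf.float32 in [0, 1]; batch.label: [8] ints.
  return batch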
def _cifar_meanstd_normalize(image):
"""Mean + stddev whitening for CIFAR-10 used in ResNets.
Args:
    image: Numpy array or TF Tensor, with values in [0, 1]
Returns:
image: Numpy array or TF Tensor, shifted and scaled by mean/stdev on
CIFAR-10 dataset.
"""
# Channel-wise means and std devs calculated from the CIFAR-10 training set
cifar_means = [125.3, 123.0, 113.9]
cifar_devs = [63.0, 62.1, 66.7]
rescaled_means = [x / 255. for x in cifar_means]
rescaled_devs = [x / 255. for x in cifar_devs]
image = (image - rescaled_means) / rescaled_devs
return image
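# Hypothetical usage sketch (not part of the original script): the whitening
# above broadcasts per-channel statistics over the trailing axis, so it accepts
# single images [H, W, 3] as well as batches [N, H, W, 3] of [0, 1] values.
def _example_normalize():
  black = np.zeros([2, 32, 32, 3], dtype=np.float32)
  return _cifar_meanstd_normalize(black)  # each pixel ~ [-1.99, -1.98, -1.71]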
def main(unused_argv):
eval_cifar()
if __name__ == '__main__':
app.run(main)
|
deepmind-research-master
|
unsupervised_adversarial_training/quick_eval_cifar.py
|
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Loads images from the 80M@200K training set and saves them in PNG format.
Usage:
cd /path/to/deepmind_research
python -m unsupervised_adversarial_training.save_example_images \
--data_bin_path=/path/to/tiny_images.bin
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import numpy as np
from PIL import Image
DIR_NAME = os.path.dirname(__file__)
FLAGS = flags.FLAGS
flags.DEFINE_string('data_bin_path', None,
'path to 80M Tiny Images data binary')
flags.DEFINE_string('idxs_path', os.path.join(DIR_NAME, 'tiny_200K_idxs.txt'),
'path to file of indices indicating subset of 80M dataset')
flags.DEFINE_string('output_dir', os.path.join(DIR_NAME, 'images'),
'path to output directory for images')
flags.mark_flag_as_required('data_bin_path')
CIFAR_LABEL_IDX_TO_NAME = ['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck']
DATASET_SIZE = 79302017
def _load_dataset_as_array(ds_path):
dataset = np.memmap(filename=ds_path, dtype=np.uint8, mode='r',
shape=(DATASET_SIZE, 3, 32, 32))
return dataset.transpose([0, 3, 2, 1])
def main(unused_argv):
dataset = _load_dataset_as_array(FLAGS.data_bin_path)
# Load the indices and labels of the 80M@200K training set
data_idxs, data_labels = np.loadtxt(
FLAGS.idxs_path,
delimiter=',',
dtype=[('index', np.uint64), ('label', np.uint8)],
unpack=True)
# Save images as PNG files
if not os.path.exists(FLAGS.output_dir):
os.makedirs(FLAGS.output_dir)
for i in range(100):
class_name = CIFAR_LABEL_IDX_TO_NAME[data_labels[i]]
file_name = 'im{}_{}.png'.format(i, class_name)
file_path = os.path.join(FLAGS.output_dir, file_name)
img = dataset[data_idxs[i]]
Image.fromarray(img).save(file_path)
if __name__ == '__main__':
app.run(main)
|
deepmind-research-master
|
unsupervised_adversarial_training/save_example_images.py
|
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the PolyGen open-source version."""
from modules import FaceModel
from modules import VertexModel
import numpy as np
import tensorflow as tf
_BATCH_SIZE = 4
_TRANSFORMER_CONFIG = {
'num_layers': 2,
'hidden_size': 64,
'fc_size': 256
}
_CLASS_CONDITIONAL = True
_NUM_CLASSES = 4
_NUM_INPUT_VERTS = 50
_NUM_PAD_VERTS = 10
_NUM_INPUT_FACE_INDICES = 200
_QUANTIZATION_BITS = 8
_VERTEX_MODEL_USE_DISCRETE_EMBEDDINGS = True
_FACE_MODEL_DECODER_CROSS_ATTENTION = True
_FACE_MODEL_DISCRETE_EMBEDDINGS = True
_MAX_SAMPLE_LENGTH_VERTS = 10
_MAX_SAMPLE_LENGTH_FACES = 10
def _get_vertex_model_batch():
"""Returns batch with placeholders for vertex model inputs."""
return {
'class_label': tf.range(_BATCH_SIZE),
'vertices_flat': tf.placeholder(
dtype=tf.int32, shape=[_BATCH_SIZE, None]),
}
def _get_face_model_batch():
"""Returns batch with placeholders for face model inputs."""
return {
'vertices': tf.placeholder(
dtype=tf.float32, shape=[_BATCH_SIZE, None, 3]),
'vertices_mask': tf.placeholder(
dtype=tf.float32, shape=[_BATCH_SIZE, None]),
'faces': tf.placeholder(
dtype=tf.int32, shape=[_BATCH_SIZE, None]),
}
class VertexModelTest(tf.test.TestCase):
def setUp(self):
"""Defines a vertex model."""
super(VertexModelTest, self).setUp()
self.model = VertexModel(
decoder_config=_TRANSFORMER_CONFIG,
class_conditional=_CLASS_CONDITIONAL,
num_classes=_NUM_CLASSES,
max_num_input_verts=_NUM_INPUT_VERTS,
quantization_bits=_QUANTIZATION_BITS,
use_discrete_embeddings=_VERTEX_MODEL_USE_DISCRETE_EMBEDDINGS)
def test_model_runs(self):
"""Tests if the model runs without crashing."""
batch = _get_vertex_model_batch()
pred_dist = self.model(batch, is_training=False)
logits = pred_dist.logits
with self.session() as sess:
sess.run(tf.global_variables_initializer())
vertices_flat = np.random.randint(
2**_QUANTIZATION_BITS + 1,
size=[_BATCH_SIZE, _NUM_INPUT_VERTS * 3 + 1])
sess.run(logits, {batch['vertices_flat']: vertices_flat})
def test_sample_outputs_range(self):
"""Tests if the model produces samples in the correct range."""
context = {'class_label': tf.zeros((_BATCH_SIZE,), dtype=tf.int32)}
sample_dict = self.model.sample(
_BATCH_SIZE, max_sample_length=_MAX_SAMPLE_LENGTH_VERTS,
context=context)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
sample_dict_np = sess.run(sample_dict)
in_range = np.logical_and(
0 <= sample_dict_np['vertices'],
sample_dict_np['vertices'] <= 2**_QUANTIZATION_BITS).all()
self.assertTrue(in_range)
class FaceModelTest(tf.test.TestCase):
def setUp(self):
"""Defines a face model."""
super(FaceModelTest, self).setUp()
self.model = FaceModel(
encoder_config=_TRANSFORMER_CONFIG,
decoder_config=_TRANSFORMER_CONFIG,
class_conditional=False,
max_seq_length=_NUM_INPUT_FACE_INDICES,
decoder_cross_attention=_FACE_MODEL_DECODER_CROSS_ATTENTION,
use_discrete_vertex_embeddings=_FACE_MODEL_DISCRETE_EMBEDDINGS,
quantization_bits=_QUANTIZATION_BITS)
def test_model_runs(self):
"""Tests if the model runs without crashing."""
batch = _get_face_model_batch()
pred_dist = self.model(batch, is_training=False)
logits = pred_dist.logits
with self.session() as sess:
sess.run(tf.global_variables_initializer())
vertices = np.random.rand(_BATCH_SIZE, _NUM_INPUT_VERTS, 3) - 0.5
vertices_mask = np.ones([_BATCH_SIZE, _NUM_INPUT_VERTS])
faces = np.random.randint(
_NUM_INPUT_VERTS + 2, size=[_BATCH_SIZE, _NUM_INPUT_FACE_INDICES])
sess.run(
logits,
{batch['vertices']: vertices,
batch['vertices_mask']: vertices_mask,
batch['faces']: faces}
)
def test_sample_outputs_range(self):
"""Tests if the model produces samples in the correct range."""
context = _get_face_model_batch()
del context['faces']
sample_dict = self.model.sample(
context, max_sample_length=_MAX_SAMPLE_LENGTH_FACES)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
# Pad the vertices in order to test that the face model only outputs
# vertex indices in the unpadded range
vertices = np.pad(
np.random.rand(_BATCH_SIZE, _NUM_INPUT_VERTS, 3) - 0.5,
[[0, 0], [0, _NUM_PAD_VERTS], [0, 0]], mode='constant')
vertices_mask = np.pad(
np.ones([_BATCH_SIZE, _NUM_INPUT_VERTS]),
[[0, 0], [0, _NUM_PAD_VERTS]], mode='constant')
sample_dict_np = sess.run(
sample_dict,
{context['vertices']: vertices,
context['vertices_mask']: vertices_mask})
in_range = np.logical_and(
0 <= sample_dict_np['faces'],
sample_dict_np['faces'] <= _NUM_INPUT_VERTS + 1).all()
self.assertTrue(in_range)
if __name__ == '__main__':
tf.test.main()
|
deepmind-research-master
|
polygen/model_test.py
|
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['numpy', 'dm-sonnet==1.36', 'tensorflow==1.14',
'tensor2tensor==1.15', 'networkx', 'matplotlib', 'six']
setup(
name='polygen',
version='0.1',
description='A library for PolyGen: An Autoregressive Generative Model of 3D Meshes.',
url='https://github.com/deepmind/deepmind-research/polygen',
author='DeepMind',
author_email='no-reply@google.com',
# Contained modules and scripts.
packages=find_packages(),
install_requires=REQUIRED_PACKAGES,
platforms=['any'],
license='Apache 2.0',
)
|
deepmind-research-master
|
polygen/setup.py
|
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mesh data utilities."""
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d # pylint: disable=unused-import
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import networkx as nx
import numpy as np
import six
from six.moves import range
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
def random_shift(vertices, shift_factor=0.25):
"""Apply random shift to vertices."""
max_shift_pos = tf.cast(255 - tf.reduce_max(vertices, axis=0), tf.float32)
max_shift_pos = tf.maximum(max_shift_pos, 1e-9)
max_shift_neg = tf.cast(tf.reduce_min(vertices, axis=0), tf.float32)
max_shift_neg = tf.maximum(max_shift_neg, 1e-9)
shift = tfd.TruncatedNormal(
tf.zeros([1, 3]), shift_factor*255, -max_shift_neg,
max_shift_pos).sample()
shift = tf.cast(shift, tf.int32)
vertices += shift
return vertices
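# Hypothetical usage sketch (not part of the original file): random_shift
# operates on integer (already quantized) vertices, and the truncated-normal
# shift keeps every coordinate inside the 8-bit [0, 255] range.
def _example_random_shift():
  vertices = tf.constant([[10, 20, 30], [200, 220, 240]], dtype=tf.int32)
  return random_shift(vertices)  # same shape, randomly translated per axis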
def make_vertex_model_dataset(ds, apply_random_shift=False):
"""Prepare dataset for vertex model training."""
def _vertex_model_map_fn(example):
vertices = example['vertices']
# Randomly shift vertices
if apply_random_shift:
vertices = random_shift(vertices)
# Re-order vertex coordinates as (z, y, x).
vertices_permuted = tf.stack(
[vertices[:, 2], vertices[:, 1], vertices[:, 0]], axis=-1)
# Flatten quantized vertices, reindex starting from 1, and pad with a
# zero stopping token.
vertices_flat = tf.reshape(vertices_permuted, [-1])
example['vertices_flat'] = tf.pad(vertices_flat + 1, [[0, 1]])
# Create mask to indicate valid tokens after padding and batching.
example['vertices_flat_mask'] = tf.ones_like(
example['vertices_flat'], dtype=tf.float32)
return example
return ds.map(_vertex_model_map_fn)
def make_face_model_dataset(
ds, apply_random_shift=False, shuffle_vertices=True, quantization_bits=8):
"""Prepare dataset for face model training."""
def _face_model_map_fn(example):
vertices = example['vertices']
# Randomly shift vertices
if apply_random_shift:
vertices = random_shift(vertices)
example['num_vertices'] = tf.shape(vertices)[0]
# Optionally shuffle vertices and re-order faces to match
if shuffle_vertices:
permutation = tf.random_shuffle(tf.range(example['num_vertices']))
vertices = tf.gather(vertices, permutation)
face_permutation = tf.concat(
[tf.constant([0, 1], dtype=tf.int32), tf.argsort(permutation) + 2],
axis=0)
example['faces'] = tf.cast(
tf.gather(face_permutation, example['faces']), tf.int64)
def _dequantize_verts(verts, n_bits):
min_range = -0.5
max_range = 0.5
range_quantize = 2**n_bits - 1
verts = tf.cast(verts, tf.float32)
verts = verts * (max_range - min_range) / range_quantize + min_range
return verts
# Vertices are quantized. So convert to floats for input to face model
example['vertices'] = _dequantize_verts(vertices, quantization_bits)
example['vertices_mask'] = tf.ones_like(
example['vertices'][..., 0], dtype=tf.float32)
example['faces_mask'] = tf.ones_like(example['faces'], dtype=tf.float32)
return example
return ds.map(_face_model_map_fn)
def read_obj_file(obj_file):
"""Read vertices and faces from already opened file."""
vertex_list = []
flat_vertices_list = []
flat_vertices_indices = {}
flat_triangles = []
for line in obj_file:
tokens = line.split()
if not tokens:
continue
line_type = tokens[0]
# We skip lines not starting with v or f.
if line_type == 'v':
vertex_list.append([float(x) for x in tokens[1:]])
elif line_type == 'f':
triangle = []
for i in range(len(tokens) - 1):
vertex_name = tokens[i + 1]
if vertex_name in flat_vertices_indices:
triangle.append(flat_vertices_indices[vertex_name])
continue
flat_vertex = []
for index in six.ensure_str(vertex_name).split('/'):
if not index:
continue
# obj triangle indices are 1 indexed, so subtract 1 here.
flat_vertex += vertex_list[int(index) - 1]
flat_vertex_index = len(flat_vertices_list)
flat_vertices_list.append(flat_vertex)
flat_vertices_indices[vertex_name] = flat_vertex_index
triangle.append(flat_vertex_index)
flat_triangles.append(triangle)
return np.array(flat_vertices_list, dtype=np.float32), flat_triangles
def read_obj(obj_path):
"""Open .obj file from the path provided and read vertices and faces."""
with open(obj_path) as obj_file:
return read_obj_file(obj_file)
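# Hypothetical usage sketch (not part of the original file): parsing a tiny
# in-memory OBJ snippet; OBJ face indices are 1-based, returned ones 0-based.
def _example_read_obj():
  obj_lines = [
      'v 0.0 0.0 0.0',
      'v 1.0 0.0 0.0',
      'v 0.0 1.0 0.0',
      'f 1 2 3',
  ]
  vertices, faces = read_obj_file(obj_lines)
  return vertices, faces  # vertices.shape == (3, 3), faces == [[0, 1, 2]]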
def write_obj(vertices, faces, file_path, transpose=True, scale=1.):
"""Write vertices and faces to obj."""
if transpose:
vertices = vertices[:, [1, 2, 0]]
vertices *= scale
if faces is not None:
if min(min(faces)) == 0:
f_add = 1
else:
f_add = 0
with open(file_path, 'w') as f:
for v in vertices:
f.write('v {} {} {}\n'.format(v[0], v[1], v[2]))
for face in faces:
line = 'f'
for i in face:
line += ' {}'.format(i + f_add)
line += '\n'
f.write(line)
def quantize_verts(verts, n_bits=8):
"""Convert vertices in [-1., 1.] to discrete values in [0, n_bits**2 - 1]."""
min_range = -0.5
max_range = 0.5
range_quantize = 2**n_bits - 1
verts_quantize = (verts - min_range) * range_quantize / (
max_range - min_range)
return verts_quantize.astype('int32')
def dequantize_verts(verts, n_bits=8, add_noise=False):
"""Convert quantized vertices to floats."""
min_range = -0.5
max_range = 0.5
range_quantize = 2**n_bits - 1
verts = verts.astype('float32')
verts = verts * (max_range - min_range) / range_quantize + min_range
if add_noise:
verts += np.random.uniform(size=verts.shape) * (1 / range_quantize)
return verts
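# Hypothetical usage sketch (not part of the original file): round trip through
# the 8-bit quantizers above; dequantized values are approximate.
def _example_quantization_round_trip():
  verts = np.array([[-0.5, 0.0, 0.5]], dtype=np.float32)
  quantized = quantize_verts(verts, n_bits=8)        # [[0, 127, 255]]
  recovered = dequantize_verts(quantized, n_bits=8)  # ~[[-0.5, 0.0, 0.5]]
  return quantized, recovered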
def face_to_cycles(face):
"""Find cycles in face."""
g = nx.Graph()
for v in range(len(face) - 1):
g.add_edge(face[v], face[v + 1])
g.add_edge(face[-1], face[0])
return list(nx.cycle_basis(g))
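# Hypothetical usage sketch (not part of the original file): a face with a
# repeated vertex such as [2, 3, 5, 2, 4] yields a single 3-cycle over
# {2, 3, 5}; the dangling 2-4 edge forms no cycle and is discarded downstream.
def _example_face_to_cycles():
  return face_to_cycles([2, 3, 5, 2, 4])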
def flatten_faces(faces):
"""Converts from list of faces to flat face array with stopping indices."""
if not faces:
return np.array([0])
else:
l = [f + [-1] for f in faces[:-1]]
l += [faces[-1] + [-2]]
return np.array([item for sublist in l for item in sublist]) + 2 # pylint: disable=g-complex-comprehension
def unflatten_faces(flat_faces):
"""Converts from flat face sequence to a list of separate faces."""
def group(seq):
g = []
for el in seq:
if el == 0 or el == -1:
yield g
g = []
else:
g.append(el - 1)
yield g
outputs = list(group(flat_faces - 1))[:-1]
# Remove empty faces
return [o for o in outputs if len(o) > 2]
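# Hypothetical usage sketch (not part of the original file): flatten_faces and
# unflatten_faces invert each other for well-formed faces, with 0 as the stop
# token and 1 as the new-face token in the flat representation.
def _example_face_flattening():
  faces = [[0, 1, 2], [2, 3, 4, 5]]
  flat = flatten_faces(faces)   # array([2, 3, 4, 1, 4, 5, 6, 7, 0])
  return unflatten_faces(flat)  # [[0, 1, 2], [2, 3, 4, 5]]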
def center_vertices(vertices):
"""Translate the vertices so that bounding box is centered at zero."""
vert_min = vertices.min(axis=0)
vert_max = vertices.max(axis=0)
vert_center = 0.5 * (vert_min + vert_max)
return vertices - vert_center
def normalize_vertices_scale(vertices):
"""Scale the vertices so that the long diagonal of the bounding box is one."""
vert_min = vertices.min(axis=0)
vert_max = vertices.max(axis=0)
extents = vert_max - vert_min
scale = np.sqrt(np.sum(extents**2))
return vertices / scale
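# Hypothetical usage sketch (not part of the original file): process_mesh below
# applies these two helpers in sequence, centering the bounding box at the
# origin and scaling its long diagonal to unit length before quantization.
def _example_center_and_scale(vertices):
  return normalize_vertices_scale(center_vertices(vertices))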
def quantize_process_mesh(vertices, faces, tris=None, quantization_bits=8):
"""Quantize vertices, remove resulting duplicates and reindex faces."""
vertices = quantize_verts(vertices, quantization_bits)
vertices, inv = np.unique(vertices, axis=0, return_inverse=True)
# Sort vertices by z then y then x.
sort_inds = np.lexsort(vertices.T)
vertices = vertices[sort_inds]
# Re-index faces and tris to re-ordered vertices.
faces = [np.argsort(sort_inds)[inv[f]] for f in faces]
if tris is not None:
tris = np.array([np.argsort(sort_inds)[inv[t]] for t in tris])
# Merging duplicate vertices and re-indexing the faces causes some faces to
# contain loops (e.g [2, 3, 5, 2, 4]). Split these faces into distinct
# sub-faces.
sub_faces = []
for f in faces:
cliques = face_to_cycles(f)
for c in cliques:
c_length = len(c)
# Only append faces with more than two verts.
if c_length > 2:
d = np.argmin(c)
        # Cyclically permute the face so that the first index is the smallest.
sub_faces.append([c[(d + i) % c_length] for i in range(c_length)])
faces = sub_faces
if tris is not None:
tris = np.array([v for v in tris if len(set(v)) == len(v)])
# Sort faces by lowest vertex indices. If two faces have the same lowest
# index then sort by next lowest and so on.
faces.sort(key=lambda f: tuple(sorted(f)))
if tris is not None:
tris = tris.tolist()
tris.sort(key=lambda f: tuple(sorted(f)))
tris = np.array(tris)
# After removing degenerate faces some vertices are now unreferenced.
# Remove these.
num_verts = vertices.shape[0]
vert_connected = np.equal(
np.arange(num_verts)[:, None], np.hstack(faces)[None]).any(axis=-1)
vertices = vertices[vert_connected]
# Re-index faces and tris to re-ordered vertices.
vert_indices = (
np.arange(num_verts) - np.cumsum(1 - vert_connected.astype('int')))
faces = [vert_indices[f].tolist() for f in faces]
if tris is not None:
tris = np.array([vert_indices[t].tolist() for t in tris])
return vertices, faces, tris
def process_mesh(vertices, faces, quantization_bits=8):
"""Process mesh vertices and faces."""
# Transpose so that z-axis is vertical.
vertices = vertices[:, [2, 0, 1]]
# Translate the vertices so that bounding box is centered at zero.
vertices = center_vertices(vertices)
# Scale the vertices so that the long diagonal of the bounding box is equal
# to one.
vertices = normalize_vertices_scale(vertices)
# Quantize and sort vertices, remove resulting duplicates, sort and reindex
# faces.
vertices, faces, _ = quantize_process_mesh(
vertices, faces, quantization_bits=quantization_bits)
# Flatten faces and add 'new face' = 1 and 'stop' = 0 tokens.
faces = flatten_faces(faces)
# Discard degenerate meshes without faces.
return {
'vertices': vertices,
'faces': faces,
}
def load_process_mesh(mesh_obj_path, quantization_bits=8):
"""Load obj file and process."""
# Load mesh
vertices, faces = read_obj(mesh_obj_path)
return process_mesh(vertices, faces, quantization_bits)
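# Hypothetical usage sketch (not part of the original file): end-to-end
# preprocessing of a single mesh; the path below is only a placeholder.
def _example_load_process_mesh():
  mesh = load_process_mesh('/path/to/mesh.obj', quantization_bits=8)
  # mesh['vertices']: quantized integer vertex array of shape [num_verts, 3].
  # mesh['faces']: flat face array with new-face (1) and stopping (0) tokens.
  return mesh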
def plot_meshes(mesh_list,
ax_lims=0.3,
fig_size=4,
el=30,
rot_start=120,
vert_size=10,
vert_alpha=0.75,
n_cols=4):
"""Plots mesh data using matplotlib."""
n_plot = len(mesh_list)
n_cols = np.minimum(n_plot, n_cols)
n_rows = np.ceil(n_plot / n_cols).astype('int')
fig = plt.figure(figsize=(fig_size * n_cols, fig_size * n_rows))
for p_inc, mesh in enumerate(mesh_list):
for key in [
'vertices', 'faces', 'vertices_conditional', 'pointcloud', 'class_name'
]:
if key not in list(mesh.keys()):
mesh[key] = None
ax = fig.add_subplot(n_rows, n_cols, p_inc + 1, projection='3d')
if mesh['faces'] is not None:
if mesh['vertices_conditional'] is not None:
face_verts = np.concatenate(
[mesh['vertices_conditional'], mesh['vertices']], axis=0)
else:
face_verts = mesh['vertices']
collection = []
for f in mesh['faces']:
collection.append(face_verts[f])
plt_mesh = Poly3DCollection(collection)
plt_mesh.set_edgecolor((0., 0., 0., 0.3))
plt_mesh.set_facecolor((1, 0, 0, 0.2))
ax.add_collection3d(plt_mesh)
if mesh['vertices'] is not None:
ax.scatter3D(
mesh['vertices'][:, 0],
mesh['vertices'][:, 1],
mesh['vertices'][:, 2],
lw=0.,
s=vert_size,
c='g',
alpha=vert_alpha)
if mesh['vertices_conditional'] is not None:
ax.scatter3D(
mesh['vertices_conditional'][:, 0],
mesh['vertices_conditional'][:, 1],
mesh['vertices_conditional'][:, 2],
lw=0.,
s=vert_size,
c='b',
alpha=vert_alpha)
if mesh['pointcloud'] is not None:
ax.scatter3D(
mesh['pointcloud'][:, 0],
mesh['pointcloud'][:, 1],
mesh['pointcloud'][:, 2],
lw=0.,
s=2.5 * vert_size,
c='b',
alpha=1.)
ax.set_xlim(-ax_lims, ax_lims)
ax.set_ylim(-ax_lims, ax_lims)
ax.set_zlim(-ax_lims, ax_lims)
ax.view_init(el, rot_start)
display_string = ''
if mesh['faces'] is not None:
display_string += 'Num. faces: {}\n'.format(len(collection))
if mesh['vertices'] is not None:
num_verts = mesh['vertices'].shape[0]
if mesh['vertices_conditional'] is not None:
num_verts += mesh['vertices_conditional'].shape[0]
display_string += 'Num. verts: {}\n'.format(num_verts)
if mesh['class_name'] is not None:
display_string += 'Synset: {}'.format(mesh['class_name'])
if mesh['pointcloud'] is not None:
display_string += 'Num. pointcloud: {}\n'.format(
mesh['pointcloud'].shape[0])
ax.text2D(0.05, 0.8, display_string, transform=ax.transAxes)
plt.subplots_adjust(
left=0., right=1., bottom=0., top=1., wspace=0.025, hspace=0.025)
plt.show()
|
deepmind-research-master
|
polygen/data_utils.py
|
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modules and networks for mesh generation."""
import sonnet as snt
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_layers
import tensorflow.compat.v1 as tf
from tensorflow.python.framework import function
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
def dequantize_verts(verts, n_bits, add_noise=False):
"""Quantizes vertices and outputs integers with specified n_bits."""
min_range = -0.5
max_range = 0.5
range_quantize = 2**n_bits - 1
verts = tf.cast(verts, tf.float32)
verts = verts * (max_range - min_range) / range_quantize + min_range
if add_noise:
verts += tf.random_uniform(tf.shape(verts)) * (1 / float(range_quantize))
return verts
def quantize_verts(verts, n_bits):
"""Dequantizes integer vertices to floats."""
min_range = -0.5
max_range = 0.5
range_quantize = 2**n_bits - 1
verts_quantize = (
(verts - min_range) * range_quantize / (max_range - min_range))
return tf.cast(verts_quantize, tf.int32)
def top_k_logits(logits, k):
"""Masks logits such that logits not in top-k are small."""
if k == 0:
return logits
else:
values, _ = tf.math.top_k(logits, k=k)
k_largest = tf.reduce_min(values)
logits = tf.where(tf.less_equal(logits, k_largest),
tf.ones_like(logits)*-1e9, logits)
return logits
def top_p_logits(logits, p):
"""Masks logits using nucleus (top-p) sampling."""
if p == 1:
return logits
else:
logit_shape = tf.shape(logits)
seq, dim = logit_shape[1], logit_shape[2]
logits = tf.reshape(logits, [-1, dim])
sort_indices = tf.argsort(logits, axis=-1, direction='DESCENDING')
probs = tf.gather(tf.nn.softmax(logits), sort_indices, batch_dims=1)
cumprobs = tf.cumsum(probs, axis=-1, exclusive=True)
    # The top-1 candidate is never masked (its exclusive cumulative probability
    # is zero), so at least one index is always selected.
sort_mask = tf.cast(tf.greater(cumprobs, p), logits.dtype)
batch_indices = tf.tile(
tf.expand_dims(tf.range(tf.shape(logits)[0]), axis=-1), [1, dim])
top_p_mask = tf.scatter_nd(
tf.stack([batch_indices, sort_indices], axis=-1), sort_mask,
tf.shape(logits))
logits -= top_p_mask * 1e9
return tf.reshape(logits, [-1, seq, dim])
_function_cache = {} # For multihead_self_attention_memory_efficient
def multihead_self_attention_memory_efficient(x,
bias,
num_heads,
head_size=None,
cache=None,
epsilon=1e-6,
forget=True,
test_vars=None,
name=None):
"""Memory-efficient Multihead scaled-dot-product self-attention.
Based on Tensor2Tensor version but adds optional caching.
Returns multihead-self-attention(layer_norm(x))
Computes one attention head at a time to avoid exhausting memory.
If forget=True, then forget all forwards activations and recompute on
the backwards pass.
Args:
x: a Tensor with shape [batch, length, input_size]
bias: an attention bias tensor broadcastable to [batch, 1, length, length]
num_heads: an integer
head_size: an optional integer - defaults to input_size/num_heads
cache: Optional dict containing tensors which are the results of previous
attentions, used for fast decoding. Expects the dict to contain two
keys ('k' and 'v'), for the initial call the values for these keys
should be empty Tensors of the appropriate shape.
'k' [batch_size, 0, key_channels] 'v' [batch_size, 0, value_channels]
epsilon: a float, for layer norm
forget: a boolean - forget forwards activations and recompute on backprop
test_vars: optional tuple of variables for testing purposes
name: an optional string
Returns:
A Tensor.
"""
io_size = x.get_shape().as_list()[-1]
if head_size is None:
assert io_size % num_heads == 0
    head_size = io_size // num_heads
def forward_internal(x, wqkv, wo, attention_bias, norm_scale, norm_bias):
"""Forward function."""
n = common_layers.layer_norm_compute(x, epsilon, norm_scale, norm_bias)
wqkv_split = tf.unstack(wqkv, num=num_heads)
wo_split = tf.unstack(wo, num=num_heads)
y = 0
if cache is not None:
cache_k = []
cache_v = []
for h in range(num_heads):
with tf.control_dependencies([y] if h > 0 else []):
combined = tf.nn.conv1d(n, wqkv_split[h], 1, 'SAME')
q, k, v = tf.split(combined, 3, axis=2)
if cache is not None:
k = tf.concat([cache['k'][:, h], k], axis=1)
v = tf.concat([cache['v'][:, h], v], axis=1)
cache_k.append(k)
cache_v.append(v)
o = common_attention.scaled_dot_product_attention_simple(
q, k, v, attention_bias)
y += tf.nn.conv1d(o, wo_split[h], 1, 'SAME')
if cache is not None:
cache['k'] = tf.stack(cache_k, axis=1)
cache['v'] = tf.stack(cache_v, axis=1)
return y
key = (
'multihead_self_attention_memory_efficient %s %s' % (num_heads, epsilon))
if not forget:
forward_fn = forward_internal
elif key in _function_cache:
forward_fn = _function_cache[key]
else:
@function.Defun(compiled=True)
def grad_fn(x, wqkv, wo, attention_bias, norm_scale, norm_bias, dy):
"""Custom gradient function."""
with tf.control_dependencies([dy]):
n = common_layers.layer_norm_compute(x, epsilon, norm_scale, norm_bias)
wqkv_split = tf.unstack(wqkv, num=num_heads)
wo_split = tf.unstack(wo, num=num_heads)
deps = []
dwqkvs = []
dwos = []
dn = 0
for h in range(num_heads):
with tf.control_dependencies(deps):
combined = tf.nn.conv1d(n, wqkv_split[h], 1, 'SAME')
q, k, v = tf.split(combined, 3, axis=2)
o = common_attention.scaled_dot_product_attention_simple(
q, k, v, attention_bias)
partial_y = tf.nn.conv1d(o, wo_split[h], 1, 'SAME')
pdn, dwqkvh, dwoh = tf.gradients(
ys=[partial_y],
xs=[n, wqkv_split[h], wo_split[h]],
grad_ys=[dy])
dn += pdn
dwqkvs.append(dwqkvh)
dwos.append(dwoh)
deps = [dn, dwqkvh, dwoh]
dwqkv = tf.stack(dwqkvs)
dwo = tf.stack(dwos)
with tf.control_dependencies(deps):
dx, dnorm_scale, dnorm_bias = tf.gradients(
ys=[n], xs=[x, norm_scale, norm_bias], grad_ys=[dn])
return (dx, dwqkv, dwo, tf.zeros_like(attention_bias), dnorm_scale,
dnorm_bias)
@function.Defun(
grad_func=grad_fn, compiled=True, separate_compiled_gradients=True)
def forward_fn(x, wqkv, wo, attention_bias, norm_scale, norm_bias):
return forward_internal(x, wqkv, wo, attention_bias, norm_scale,
norm_bias)
_function_cache[key] = forward_fn
if bias is not None:
bias = tf.squeeze(bias, 1)
with tf.variable_scope(name, default_name='multihead_attention', values=[x]):
if test_vars is not None:
wqkv, wo, norm_scale, norm_bias = list(test_vars)
else:
wqkv = tf.get_variable(
'wqkv', [num_heads, 1, io_size, 3 * head_size],
initializer=tf.random_normal_initializer(stddev=io_size**-0.5))
wo = tf.get_variable(
'wo', [num_heads, 1, head_size, io_size],
initializer=tf.random_normal_initializer(
stddev=(head_size * num_heads)**-0.5))
norm_scale, norm_bias = common_layers.layer_norm_vars(io_size)
y = forward_fn(x, wqkv, wo, bias, norm_scale, norm_bias)
y.set_shape(x.get_shape()) # pytype: disable=attribute-error
return y
class TransformerEncoder(snt.AbstractModule):
"""Transformer encoder.
Sonnet Transformer encoder module as described in Vaswani et al. 2017. Uses
the Tensor2Tensor multihead_attention function for full self attention
(no masking). Layer norm is applied inside the residual path as in sparse
transformers (Child 2019).
This module expects inputs to be already embedded, and does not add position
embeddings.
"""
def __init__(self,
hidden_size=256,
fc_size=1024,
num_heads=4,
layer_norm=True,
num_layers=8,
dropout_rate=0.2,
re_zero=True,
memory_efficient=False,
name='transformer_encoder'):
"""Initializes TransformerEncoder.
Args:
hidden_size: Size of embedding vectors.
fc_size: Size of fully connected layer.
num_heads: Number of attention heads.
layer_norm: If True, apply layer normalization
num_layers: Number of Transformer blocks, where each block contains a
multi-head attention layer and a MLP.
dropout_rate: Dropout rate applied immediately after the ReLU in each
fully-connected layer.
re_zero: If True, alpha scale residuals with zero init.
memory_efficient: If True, recompute gradients for memory savings.
name: Name of variable scope
"""
super(TransformerEncoder, self).__init__(name=name)
self.hidden_size = hidden_size
self.num_heads = num_heads
self.layer_norm = layer_norm
self.fc_size = fc_size
self.num_layers = num_layers
self.dropout_rate = dropout_rate
self.re_zero = re_zero
self.memory_efficient = memory_efficient
def _build(self, inputs, is_training=False):
"""Passes inputs through Transformer encoder network.
Args:
inputs: Tensor of shape [batch_size, sequence_length, embed_size]. Zero
embeddings are masked in self-attention.
is_training: If True, dropout is applied.
Returns:
output: Tensor of shape [batch_size, sequence_length, embed_size].
"""
if is_training:
dropout_rate = self.dropout_rate
else:
dropout_rate = 0.
# Identify elements with all zeros as padding, and create bias to mask
# out padding elements in self attention.
encoder_padding = common_attention.embedding_to_padding(inputs)
encoder_self_attention_bias = (
common_attention.attention_bias_ignore_padding(encoder_padding))
x = inputs
for layer_num in range(self.num_layers):
with tf.variable_scope('layer_{}'.format(layer_num)):
# Multihead self-attention from Tensor2Tensor.
res = x
if self.memory_efficient:
res = multihead_self_attention_memory_efficient(
res,
bias=encoder_self_attention_bias,
num_heads=self.num_heads,
head_size=self.hidden_size // self.num_heads,
forget=True if is_training else False,
name='self_attention'
)
else:
if self.layer_norm:
res = common_layers.layer_norm(res, name='self_attention')
res = common_attention.multihead_attention(
res,
memory_antecedent=None,
bias=encoder_self_attention_bias,
total_key_depth=self.hidden_size,
total_value_depth=self.hidden_size,
output_depth=self.hidden_size,
num_heads=self.num_heads,
dropout_rate=0.,
make_image_summary=False,
name='self_attention')
if self.re_zero:
res *= tf.get_variable('self_attention/alpha', initializer=0.)
if dropout_rate:
res = tf.nn.dropout(res, rate=dropout_rate)
x += res
# MLP
res = x
if self.layer_norm:
res = common_layers.layer_norm(res, name='fc')
res = tf.layers.dense(
res, self.fc_size, activation=tf.nn.relu, name='fc_1')
res = tf.layers.dense(res, self.hidden_size, name='fc_2')
if self.re_zero:
res *= tf.get_variable('fc/alpha', initializer=0.)
if dropout_rate:
res = tf.nn.dropout(res, rate=dropout_rate)
x += res
if self.layer_norm:
output = common_layers.layer_norm(x, name='output')
else:
output = x
return output
class TransformerDecoder(snt.AbstractModule):
"""Transformer decoder.
Sonnet Transformer decoder module as described in Vaswani et al. 2017. Uses
the Tensor2Tensor multihead_attention function for masked self attention, and
non-masked cross attention attention. Layer norm is applied inside the
residual path as in sparse transformers (Child 2019).
This module expects inputs to be already embedded, and does not
add position embeddings.
"""
def __init__(self,
hidden_size=256,
fc_size=1024,
num_heads=4,
layer_norm=True,
num_layers=8,
dropout_rate=0.2,
re_zero=True,
memory_efficient=False,
name='transformer_decoder'):
"""Initializes TransformerDecoder.
Args:
hidden_size: Size of embedding vectors.
fc_size: Size of fully connected layer.
num_heads: Number of attention heads.
      layer_norm: If True, apply layer normalization. If memory_efficient is
        True, then layer norm is always applied.
num_layers: Number of Transformer blocks, where each block contains a
multi-head attention layer and a MLP.
dropout_rate: Dropout rate applied immediately after the ReLU in each
fully-connected layer.
re_zero: If True, alpha scale residuals with zero init.
memory_efficient: If True, recompute gradients for memory savings.
name: Name of variable scope
"""
super(TransformerDecoder, self).__init__(name=name)
self.hidden_size = hidden_size
self.num_heads = num_heads
self.layer_norm = layer_norm
self.fc_size = fc_size
self.num_layers = num_layers
self.dropout_rate = dropout_rate
self.re_zero = re_zero
self.memory_efficient = memory_efficient
def _build(self,
inputs,
sequential_context_embeddings=None,
is_training=False,
cache=None):
"""Passes inputs through Transformer decoder network.
Args:
inputs: Tensor of shape [batch_size, sequence_length, embed_size]. Zero
embeddings are masked in self-attention.
sequential_context_embeddings: Optional tensor with global context
(e.g image embeddings) of shape
[batch_size, context_seq_length, context_embed_size].
is_training: If True, dropout is applied.
cache: Optional dict containing tensors which are the results of previous
attentions, used for fast decoding. Expects the dict to contain two
keys ('k' and 'v'), for the initial call the values for these keys
should be empty Tensors of the appropriate shape.
'k' [batch_size, 0, key_channels] 'v' [batch_size, 0, value_channels]
Returns:
output: Tensor of shape [batch_size, sequence_length, embed_size].
"""
if is_training:
dropout_rate = self.dropout_rate
else:
dropout_rate = 0.
# create bias to mask future elements for causal self-attention.
seq_length = tf.shape(inputs)[1]
decoder_self_attention_bias = common_attention.attention_bias_lower_triangle(
seq_length)
# If using sequential_context, identify elements with all zeros as padding,
# and create bias to mask out padding elements in self attention.
if sequential_context_embeddings is not None:
encoder_padding = common_attention.embedding_to_padding(
sequential_context_embeddings)
encoder_decoder_attention_bias = (
common_attention.attention_bias_ignore_padding(encoder_padding))
x = inputs
for layer_num in range(self.num_layers):
with tf.variable_scope('layer_{}'.format(layer_num)):
# If using cached decoding, access cache for current layer, and create
# bias that enables un-masked attention into the cache
if cache is not None:
layer_cache = cache[layer_num]
layer_decoder_bias = tf.zeros([1, 1, 1, 1])
# Otherwise use standard masked bias
else:
layer_cache = None
layer_decoder_bias = decoder_self_attention_bias
# Multihead self-attention from Tensor2Tensor.
res = x
if self.memory_efficient:
res = multihead_self_attention_memory_efficient(
res,
bias=layer_decoder_bias,
cache=layer_cache,
num_heads=self.num_heads,
head_size=self.hidden_size // self.num_heads,
forget=True if is_training else False,
name='self_attention'
)
else:
if self.layer_norm:
res = common_layers.layer_norm(res, name='self_attention')
res = common_attention.multihead_attention(
res,
memory_antecedent=None,
bias=layer_decoder_bias,
total_key_depth=self.hidden_size,
total_value_depth=self.hidden_size,
output_depth=self.hidden_size,
num_heads=self.num_heads,
cache=layer_cache,
dropout_rate=0.,
make_image_summary=False,
name='self_attention')
if self.re_zero:
res *= tf.get_variable('self_attention/alpha', initializer=0.)
if dropout_rate:
res = tf.nn.dropout(res, rate=dropout_rate)
x += res
# Optional cross attention into sequential context
if sequential_context_embeddings is not None:
res = x
if self.layer_norm:
res = common_layers.layer_norm(res, name='cross_attention')
res = common_attention.multihead_attention(
res,
memory_antecedent=sequential_context_embeddings,
bias=encoder_decoder_attention_bias,
total_key_depth=self.hidden_size,
total_value_depth=self.hidden_size,
output_depth=self.hidden_size,
num_heads=self.num_heads,
dropout_rate=0.,
make_image_summary=False,
name='cross_attention')
if self.re_zero:
res *= tf.get_variable('cross_attention/alpha', initializer=0.)
if dropout_rate:
res = tf.nn.dropout(res, rate=dropout_rate)
x += res
# FC layers
res = x
if self.layer_norm:
res = common_layers.layer_norm(res, name='fc')
res = tf.layers.dense(
res, self.fc_size, activation=tf.nn.relu, name='fc_1')
res = tf.layers.dense(res, self.hidden_size, name='fc_2')
if self.re_zero:
res *= tf.get_variable('fc/alpha', initializer=0.)
if dropout_rate:
res = tf.nn.dropout(res, rate=dropout_rate)
x += res
if self.layer_norm:
output = common_layers.layer_norm(x, name='output')
else:
output = x
return output
def create_init_cache(self, batch_size):
"""Creates empty cache dictionary for use in fast decoding."""
def compute_cache_shape_invariants(tensor):
"""Helper function to get dynamic shapes for cache tensors."""
shape_list = tensor.shape.as_list()
if len(shape_list) == 4:
return tf.TensorShape(
[shape_list[0], shape_list[1], None, shape_list[3]])
elif len(shape_list) == 3:
return tf.TensorShape([shape_list[0], None, shape_list[2]])
# Build cache
k = common_attention.split_heads(
tf.zeros([batch_size, 0, self.hidden_size]), self.num_heads)
v = common_attention.split_heads(
tf.zeros([batch_size, 0, self.hidden_size]), self.num_heads)
cache = [{'k': k, 'v': v} for _ in range(self.num_layers)]
shape_invariants = tf.nest.map_structure(
compute_cache_shape_invariants, cache)
return cache, shape_invariants
def conv_residual_block(inputs,
output_channels=None,
downsample=False,
kernel_size=3,
re_zero=True,
dropout_rate=0.,
name='conv_residual_block'):
"""Convolutional block with residual connections for 2D or 3D inputs.
Args:
inputs: Input tensor of shape [batch_size, height, width, channels] or
[batch_size, height, width, depth, channels].
output_channels: Number of output channels.
downsample: If True, downsample by 1/2 in this block.
kernel_size: Spatial size of convolutional kernels.
re_zero: If True, alpha scale residuals with zero init.
dropout_rate: Dropout rate applied after second ReLU in residual path.
name: Name for variable scope.
Returns:
outputs: Output tensor of shape [batch_size, height, width, output_channels]
or [batch_size, height, width, depth, output_channels].
"""
with tf.variable_scope(name):
input_shape = inputs.get_shape().as_list()
num_dims = len(input_shape) - 2
if num_dims == 2:
conv = tf.layers.conv2d
elif num_dims == 3:
conv = tf.layers.conv3d
input_channels = input_shape[-1]
if output_channels is None:
output_channels = input_channels
if downsample:
shortcut = conv(
inputs,
filters=output_channels,
strides=2,
kernel_size=kernel_size,
padding='same',
name='conv_shortcut')
else:
shortcut = inputs
res = inputs
res = tf.nn.relu(res)
res = conv(
res, filters=input_channels, kernel_size=kernel_size, padding='same',
name='conv_1')
res = tf.nn.relu(res)
if dropout_rate:
res = tf.nn.dropout(res, rate=dropout_rate)
if downsample:
out_strides = 2
else:
out_strides = 1
res = conv(
res,
filters=output_channels,
kernel_size=kernel_size,
padding='same',
strides=out_strides,
name='conv_2')
if re_zero:
res *= tf.get_variable('alpha', initializer=0.)
return shortcut + res
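# Hypothetical usage sketch (not part of the original file): a single 2D
# residual block with downsampling applied to a dummy feature map, which halves
# the spatial resolution and doubles the channel count.
def _example_conv_residual_block():
  features = tf.zeros([2, 32, 32, 16])
  return conv_residual_block(
      features, output_channels=32, downsample=True)  # -> [2, 16, 16, 32]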
class ResNet(snt.AbstractModule):
"""ResNet architecture for 2D image or 3D voxel inputs."""
def __init__(self,
num_dims,
hidden_sizes=(64, 256),
num_blocks=(2, 2),
dropout_rate=0.1,
re_zero=True,
name='res_net'):
"""Initializes ResNet.
Args:
num_dims: Number of spatial dimensions. 2 for images or 3 for voxels.
hidden_sizes: Sizes of hidden layers in resnet blocks.
num_blocks: Number of resnet blocks at each size.
dropout_rate: Dropout rate applied immediately after the ReLU in each
fully-connected layer.
re_zero: If True, alpha scale residuals with zero init.
name: Name of variable scope
"""
super(ResNet, self).__init__(name=name)
self.num_dims = num_dims
self.hidden_sizes = hidden_sizes
self.num_blocks = num_blocks
self.dropout_rate = dropout_rate
self.re_zero = re_zero
def _build(self, inputs, is_training=False):
"""Passes inputs through resnet.
Args:
inputs: Tensor of shape [batch_size, height, width, channels] or
[batch_size, height, width, depth, channels].
is_training: If True, dropout is applied.
Returns:
output: Tensor of shape [batch_size, height, width, depth, output_size].
"""
if is_training:
dropout_rate = self.dropout_rate
else:
dropout_rate = 0.
# Initial projection with large kernel as in original resnet architecture
if self.num_dims == 3:
conv = tf.layers.conv3d
elif self.num_dims == 2:
conv = tf.layers.conv2d
x = conv(
inputs,
filters=self.hidden_sizes[0],
kernel_size=7,
strides=2,
padding='same',
name='conv_input')
if self.num_dims == 2:
x = tf.layers.max_pooling2d(
x, strides=2, pool_size=3, padding='same', name='pool_input')
for d, (hidden_size,
blocks) in enumerate(zip(self.hidden_sizes, self.num_blocks)):
with tf.variable_scope('resolution_{}'.format(d)):
# Downsample at the start of each collection of blocks
x = conv_residual_block(
x,
downsample=False if d == 0 else True,
dropout_rate=dropout_rate,
output_channels=hidden_size,
re_zero=self.re_zero,
name='block_1_downsample')
for i in range(blocks - 1):
x = conv_residual_block(
x,
dropout_rate=dropout_rate,
output_channels=hidden_size,
re_zero=self.re_zero,
name='block_{}'.format(i + 2))
return x
class VertexModel(snt.AbstractModule):
"""Autoregressive generative model of quantized mesh vertices.
Operates on flattened vertex sequences with a stopping token:
  [z_0, y_0, x_0, z_1, y_1, x_1, ..., z_n, y_n, x_n, STOP]
Input vertex coordinates are embedded and tagged with learned coordinate and
position indicators. A transformer decoder outputs logits for a quantized
vertex distribution.
"""
def __init__(self,
decoder_config,
quantization_bits,
class_conditional=False,
num_classes=55,
max_num_input_verts=2500,
use_discrete_embeddings=True,
name='vertex_model'):
"""Initializes VertexModel.
Args:
decoder_config: Dictionary with TransformerDecoder config
quantization_bits: Number of quantization used in mesh preprocessing.
class_conditional: If True, then condition on learned class embeddings.
num_classes: Number of classes to condition on.
max_num_input_verts: Maximum number of vertices. Used for learned position
embeddings.
use_discrete_embeddings: If True, use discrete rather than continuous
vertex embeddings.
name: Name of variable scope
"""
super(VertexModel, self).__init__(name=name)
self.embedding_dim = decoder_config['hidden_size']
self.class_conditional = class_conditional
self.num_classes = num_classes
self.max_num_input_verts = max_num_input_verts
self.quantization_bits = quantization_bits
self.use_discrete_embeddings = use_discrete_embeddings
with self._enter_variable_scope():
self.decoder = TransformerDecoder(**decoder_config)
@snt.reuse_variables
def _embed_class_label(self, labels):
"""Embeds class label with learned embedding matrix."""
init_dict = {'embeddings': tf.glorot_uniform_initializer}
return snt.Embed(
vocab_size=self.num_classes,
embed_dim=self.embedding_dim,
initializers=init_dict,
densify_gradients=True,
name='class_label')(labels)
@snt.reuse_variables
def _prepare_context(self, context, is_training=False):
"""Prepare class label context."""
if self.class_conditional:
global_context_embedding = self._embed_class_label(context['class_label'])
else:
global_context_embedding = None
return global_context_embedding, None
@snt.reuse_variables
def _embed_inputs(self, vertices, global_context_embedding=None):
"""Embeds flat vertices and adds position and coordinate information."""
# Dequantize inputs and get shapes
input_shape = tf.shape(vertices)
batch_size, seq_length = input_shape[0], input_shape[1]
# Coord indicators (x, y, z)
coord_embeddings = snt.Embed(
vocab_size=3,
embed_dim=self.embedding_dim,
initializers={'embeddings': tf.glorot_uniform_initializer},
densify_gradients=True,
name='coord_embeddings')(tf.mod(tf.range(seq_length), 3))
# Position embeddings
pos_embeddings = snt.Embed(
vocab_size=self.max_num_input_verts,
embed_dim=self.embedding_dim,
initializers={'embeddings': tf.glorot_uniform_initializer},
densify_gradients=True,
name='coord_embeddings')(tf.floordiv(tf.range(seq_length), 3))
# Discrete vertex value embeddings
if self.use_discrete_embeddings:
vert_embeddings = snt.Embed(
vocab_size=2**self.quantization_bits + 1,
embed_dim=self.embedding_dim,
initializers={'embeddings': tf.glorot_uniform_initializer},
densify_gradients=True,
name='value_embeddings')(vertices)
# Continuous vertex value embeddings
else:
vert_embeddings = tf.layers.dense(
dequantize_verts(vertices[..., None], self.quantization_bits),
self.embedding_dim,
use_bias=True,
name='value_embeddings')
# Step zero embeddings
if global_context_embedding is None:
zero_embed = tf.get_variable(
'embed_zero', shape=[1, 1, self.embedding_dim])
zero_embed_tiled = tf.tile(zero_embed, [batch_size, 1, 1])
else:
zero_embed_tiled = global_context_embedding[:, None]
# Aggregate embeddings
embeddings = vert_embeddings + (coord_embeddings + pos_embeddings)[None]
embeddings = tf.concat([zero_embed_tiled, embeddings], axis=1)
return embeddings
@snt.reuse_variables
def _project_to_logits(self, inputs):
"""Projects transformer outputs to logits for predictive distribution."""
return tf.layers.dense(
inputs,
2**self.quantization_bits + 1, # + 1 for stopping token
use_bias=True,
kernel_initializer=tf.zeros_initializer(),
name='project_to_logits')
@snt.reuse_variables
def _create_dist(self,
vertices,
global_context_embedding=None,
sequential_context_embeddings=None,
temperature=1.,
top_k=0,
top_p=1.,
is_training=False,
cache=None):
"""Outputs categorical dist for quantized vertex coordinates."""
# Embed inputs
decoder_inputs = self._embed_inputs(vertices, global_context_embedding)
if cache is not None:
decoder_inputs = decoder_inputs[:, -1:]
# pass through decoder
outputs = self.decoder(
decoder_inputs, cache=cache,
sequential_context_embeddings=sequential_context_embeddings,
is_training=is_training)
# Get logits and optionally process for sampling
logits = self._project_to_logits(outputs)
logits /= temperature
logits = top_k_logits(logits, top_k)
logits = top_p_logits(logits, top_p)
cat_dist = tfd.Categorical(logits=logits)
return cat_dist
def _build(self, batch, is_training=False):
"""Pass batch through vertex model and get log probabilities under model.
Args:
batch: Dictionary containing:
'vertices_flat': int32 vertex tensors of shape [batch_size, seq_length].
is_training: If True, use dropout.
Returns:
pred_dist: tfd.Categorical predictive distribution with batch shape
[batch_size, seq_length].
"""
global_context, seq_context = self._prepare_context(
batch, is_training=is_training)
pred_dist = self._create_dist(
batch['vertices_flat'][:, :-1], # Last element not used for preds
global_context_embedding=global_context,
sequential_context_embeddings=seq_context,
is_training=is_training)
return pred_dist
def sample(self,
num_samples,
context=None,
max_sample_length=None,
temperature=1.,
top_k=0,
top_p=1.,
recenter_verts=True,
only_return_complete=True):
"""Autoregressive sampling with caching.
Args:
num_samples: Number of samples to produce.
context: Dictionary of context, such as class labels. See _prepare_context
for details.
max_sample_length: Maximum length of sampled vertex sequences. Sequences
that do not complete are truncated.
temperature: Scalar softmax temperature > 0.
top_k: Number of tokens to keep for top-k sampling.
top_p: Proportion of probability mass to keep for top-p sampling.
recenter_verts: If True, center vertex samples around origin. This should
be used if model is trained using shift augmentations.
only_return_complete: If True, only return completed samples. Otherwise
return all samples along with completed indicator.
Returns:
outputs: Output dictionary with fields:
'completed': Boolean tensor of shape [num_samples]. If True then
corresponding sample completed within max_sample_length.
'vertices': Tensor of samples with shape [num_samples, num_verts, 3].
'num_vertices': Tensor indicating number of vertices for each example
in padded vertex samples.
'vertices_mask': Tensor of shape [num_samples, num_verts] that masks
corresponding invalid elements in 'vertices'.
"""
# Obtain context for decoder
global_context, seq_context = self._prepare_context(
context, is_training=False)
# num_samples is the minimum value of num_samples and the batch size of
# context inputs (if present).
if global_context is not None:
num_samples = tf.minimum(num_samples, tf.shape(global_context)[0])
global_context = global_context[:num_samples]
if seq_context is not None:
seq_context = seq_context[:num_samples]
elif seq_context is not None:
num_samples = tf.minimum(num_samples, tf.shape(seq_context)[0])
seq_context = seq_context[:num_samples]
def _loop_body(i, samples, cache):
"""While-loop body for autoregression calculation."""
cat_dist = self._create_dist(
samples,
global_context_embedding=global_context,
sequential_context_embeddings=seq_context,
cache=cache,
temperature=temperature,
top_k=top_k,
top_p=top_p)
next_sample = cat_dist.sample()
samples = tf.concat([samples, next_sample], axis=1)
return i + 1, samples, cache
def _stopping_cond(i, samples, cache):
"""Stopping condition for sampling while-loop."""
del i, cache # Unused
return tf.reduce_any(tf.reduce_all(tf.not_equal(samples, 0), axis=-1))
# Initial values for loop variables
samples = tf.zeros([num_samples, 0], dtype=tf.int32)
max_sample_length = max_sample_length or self.max_num_input_verts
cache, cache_shape_invariants = self.decoder.create_init_cache(num_samples)
_, v, _ = tf.while_loop(
cond=_stopping_cond,
body=_loop_body,
loop_vars=(0, samples, cache),
shape_invariants=(tf.TensorShape([]), tf.TensorShape([None, None]),
cache_shape_invariants),
maximum_iterations=max_sample_length * 3 + 1,
back_prop=False,
parallel_iterations=1)
# Check if samples completed. Samples are complete if the stopping token
# is produced.
completed = tf.reduce_any(tf.equal(v, 0), axis=-1)
# Get the number of vertices in the sample. This requires finding the
    # index of the stopping token. For complete samples, use argmax to find the
    # index of the first stopping token.
stop_index_completed = tf.argmax(
tf.cast(tf.equal(v, 0), tf.int32), axis=-1, output_type=tf.int32)
# For incomplete samples the stopping index is just the maximum index.
stop_index_incomplete = (
max_sample_length * 3 * tf.ones_like(stop_index_completed))
stop_index = tf.where(
completed, stop_index_completed, stop_index_incomplete)
num_vertices = tf.floordiv(stop_index, 3)
# Convert to 3D vertices by reshaping and re-ordering x -> y -> z
v = v[:, :(tf.reduce_max(num_vertices) * 3)] - 1
verts_dequantized = dequantize_verts(v, self.quantization_bits)
vertices = tf.reshape(verts_dequantized, [num_samples, -1, 3])
vertices = tf.stack(
[vertices[..., 2], vertices[..., 1], vertices[..., 0]], axis=-1)
    # Pad samples to max sample length. This is required in order to
    # concatenate samples across different replicator instances. Pad with
    # stopping tokens for incomplete samples.
pad_size = max_sample_length - tf.shape(vertices)[1]
vertices = tf.pad(vertices, [[0, 0], [0, pad_size], [0, 0]])
# 3D Vertex mask
vertices_mask = tf.cast(
tf.range(max_sample_length)[None] < num_vertices[:, None], tf.float32)
if recenter_verts:
vert_max = tf.reduce_max(
vertices - 1e10 * (1. - vertices_mask)[..., None], axis=1,
keepdims=True)
vert_min = tf.reduce_min(
vertices + 1e10 * (1. - vertices_mask)[..., None], axis=1,
keepdims=True)
vert_centers = 0.5 * (vert_max + vert_min)
vertices -= vert_centers
vertices *= vertices_mask[..., None]
if only_return_complete:
vertices = tf.boolean_mask(vertices, completed)
num_vertices = tf.boolean_mask(num_vertices, completed)
vertices_mask = tf.boolean_mask(vertices_mask, completed)
completed = tf.boolean_mask(completed, completed)
# Outputs
outputs = {
'completed': completed,
'vertices': vertices,
'num_vertices': num_vertices,
'vertices_mask': vertices_mask,
}
return outputs
class ImageToVertexModel(VertexModel):
"""Generative model of quantized mesh vertices with image conditioning.
Operates on flattened vertex sequences with a stopping token:
  [z_0, y_0, x_0, z_1, y_1, x_1, ..., z_n, y_n, x_n, STOP]
Input vertex coordinates are embedded and tagged with learned coordinate and
position indicators. A transformer decoder outputs logits for a quantized
vertex distribution. Image inputs are encoded and used to condition the
vertex decoder.
"""
def __init__(self,
res_net_config,
decoder_config,
quantization_bits,
use_discrete_embeddings=True,
max_num_input_verts=2500,
name='image_to_vertex_model'):
"""Initializes VoxelToVertexModel.
Args:
res_net_config: Dictionary with ResNet config.
decoder_config: Dictionary with TransformerDecoder config.
quantization_bits: Number of quantization used in mesh preprocessing.
use_discrete_embeddings: If True, use discrete rather than continuous
vertex embeddings.
max_num_input_verts: Maximum number of vertices. Used for learned position
embeddings.
name: Name of variable scope
"""
super(ImageToVertexModel, self).__init__(
decoder_config=decoder_config,
quantization_bits=quantization_bits,
max_num_input_verts=max_num_input_verts,
use_discrete_embeddings=use_discrete_embeddings,
name=name)
with self._enter_variable_scope():
self.res_net = ResNet(num_dims=2, **res_net_config)
@snt.reuse_variables
def _prepare_context(self, context, is_training=False):
# Pass images through encoder
image_embeddings = self.res_net(
context['image'] - 0.5, is_training=is_training)
# Add 2D coordinate grid embedding
processed_image_resolution = tf.shape(image_embeddings)[1]
x = tf.linspace(-1., 1., processed_image_resolution)
image_coords = tf.stack(tf.meshgrid(x, x), axis=-1)
image_coord_embeddings = tf.layers.dense(
image_coords,
self.embedding_dim,
use_bias=True,
name='image_coord_embeddings')
image_embeddings += image_coord_embeddings[None]
# Reshape spatial grid to sequence
batch_size = tf.shape(image_embeddings)[0]
sequential_context_embedding = tf.reshape(
image_embeddings, [batch_size, -1, self.embedding_dim])
return None, sequential_context_embedding
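# Illustrative sketch (not part of the original module): the coordinate grid
# added to the image embeddings above tags each spatial location of the encoder
# feature map with its (x, y) position in [-1, 1] before the learned dense layer
# projects it to embedding_dim. A NumPy analogue of the grid:
def _demo_image_coord_grid(resolution=4):
  import numpy as np
  x = np.linspace(-1., 1., resolution)
  return np.stack(np.meshgrid(x, x), axis=-1)  # [resolution, resolution, 2]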
class VoxelToVertexModel(VertexModel):
"""Generative model of quantized mesh vertices with voxel conditioning.
Operates on flattened vertex sequences with a stopping token:
[z_0, y_0, x_0, z_1, y_1, x_1, ..., z_n, y_n, x_n, STOP]
Input vertex coordinates are embedded and tagged with learned coordinate and
position indicators. A transformer decoder outputs logits for a quantized
vertex distribution. Voxel inputs are encoded and used to condition the
vertex decoder.
"""
def __init__(self,
res_net_config,
decoder_config,
quantization_bits,
use_discrete_embeddings=True,
max_num_input_verts=2500,
name='voxel_to_vertex_model'):
"""Initializes VoxelToVertexModel.
Args:
res_net_config: Dictionary with ResNet config.
decoder_config: Dictionary with TransformerDecoder config.
quantization_bits: Integer number of bits used for vertex quantization.
use_discrete_embeddings: If True, use discrete rather than continuous
vertex embeddings.
max_num_input_verts: Maximum number of vertices. Used for learned position
embeddings.
name: Name of variable scope
"""
super(VoxelToVertexModel, self).__init__(
decoder_config=decoder_config,
quantization_bits=quantization_bits,
max_num_input_verts=max_num_input_verts,
use_discrete_embeddings=use_discrete_embeddings,
name=name)
with self._enter_variable_scope():
self.res_net = ResNet(num_dims=3, **res_net_config)
@snt.reuse_variables
def _prepare_context(self, context, is_training=False):
# Embed binary input voxels
voxel_embeddings = snt.Embed(
vocab_size=2,
embed_dim=self.pre_embed_dim,
initializers={'embeddings': tf.glorot_uniform_initializer},
densify_gradients=True,
name='voxel_embeddings')(context['voxels'])
# Pass embedded voxels through voxel encoder
voxel_embeddings = self.res_net(
voxel_embeddings, is_training=is_training)
# Add 3D coordinate grid embedding
processed_voxel_resolution = tf.shape(voxel_embeddings)[1]
x = tf.linspace(-1., 1., processed_voxel_resolution)
voxel_coords = tf.stack(tf.meshgrid(x, x, x), axis=-1)
voxel_coord_embeddings = tf.layers.dense(
voxel_coords,
self.embedding_dim,
use_bias=True,
name='voxel_coord_embeddings')
voxel_embeddings += voxel_coord_embeddings[None]
# Reshape spatial grid to sequence
batch_size = tf.shape(voxel_embeddings)[0]
sequential_context_embedding = tf.reshape(
voxel_embeddings, [batch_size, -1, self.embedding_dim])
return None, sequential_context_embedding
class FaceModel(snt.AbstractModule):
"""Autoregressive generative model of n-gon meshes.
Operates on sets of input vertices as well as flattened face sequences with
new face and stopping tokens:
[f_0^0, f_0^1, f_0^2, NEW, f_1^0, f_1^1, ..., STOP]
Input vertices are encoded using a Transformer encoder.
Input face sequences are embedded and tagged with learned position indicators,
as well as their corresponding vertex embeddings. A transformer decoder
outputs a pointer which is compared to each vertex embedding to obtain a
distribution over vertex indices.
"""
def __init__(self,
encoder_config,
decoder_config,
class_conditional=True,
num_classes=55,
decoder_cross_attention=True,
use_discrete_vertex_embeddings=True,
quantization_bits=8,
max_seq_length=5000,
name='face_model'):
"""Initializes FaceModel.
Args:
encoder_config: Dictionary with TransformerEncoder config.
decoder_config: Dictionary with TransformerDecoder config.
class_conditional: If True, then condition on learned class embeddings.
num_classes: Number of classes to condition on.
decoder_cross_attention: If True, use cross attention from decoder
queries into encoder outputs.
use_discrete_vertex_embeddings: If True, use discrete vertex embeddings.
quantization_bits: Number of quantization bits for discrete vertex
embeddings.
max_seq_length: Maximum face sequence length. Used for learned position
embeddings.
name: Name of variable scope
"""
super(FaceModel, self).__init__(name=name)
self.embedding_dim = decoder_config['hidden_size']
self.class_conditional = class_conditional
self.num_classes = num_classes
self.max_seq_length = max_seq_length
self.decoder_cross_attention = decoder_cross_attention
self.use_discrete_vertex_embeddings = use_discrete_vertex_embeddings
self.quantization_bits = quantization_bits
with self._enter_variable_scope():
self.decoder = TransformerDecoder(**decoder_config)
self.encoder = TransformerEncoder(**encoder_config)
@snt.reuse_variables
def _embed_class_label(self, labels):
"""Embeds class label with learned embedding matrix."""
init_dict = {'embeddings': tf.glorot_uniform_initializer}
return snt.Embed(
vocab_size=self.num_classes,
embed_dim=self.embedding_dim,
initializers=init_dict,
densify_gradients=True,
name='class_label')(labels)
@snt.reuse_variables
def _prepare_context(self, context, is_training=False):
"""Prepare class label and vertex context."""
if self.class_conditional:
global_context_embedding = self._embed_class_label(context['class_label'])
else:
global_context_embedding = None
vertex_embeddings = self._embed_vertices(
context['vertices'], context['vertices_mask'],
is_training=is_training)
if self.decoder_cross_attention:
sequential_context_embeddings = (
vertex_embeddings *
tf.pad(context['vertices_mask'], [[0, 0], [2, 0]],
constant_values=1)[..., None])
else:
sequential_context_embeddings = None
return (vertex_embeddings, global_context_embedding,
sequential_context_embeddings)
@snt.reuse_variables
def _embed_vertices(self, vertices, vertices_mask, is_training=False):
"""Embeds vertices with transformer encoder."""
# num_verts = tf.shape(vertices)[1]
if self.use_discrete_vertex_embeddings:
vertex_embeddings = 0.
verts_quantized = quantize_verts(vertices, self.quantization_bits)
for c in range(3):
vertex_embeddings += snt.Embed(
vocab_size=256,
embed_dim=self.embedding_dim,
initializers={'embeddings': tf.glorot_uniform_initializer},
densify_gradients=True,
name='coord_{}'.format(c))(verts_quantized[..., c])
else:
vertex_embeddings = tf.layers.dense(
vertices, self.embedding_dim, use_bias=True, name='vertex_embeddings')
vertex_embeddings *= vertices_mask[..., None]
# Pad vertex embeddings with learned embeddings for stopping and new face
# tokens
stopping_embeddings = tf.get_variable(
'stopping_embeddings', shape=[1, 2, self.embedding_dim])
stopping_embeddings = tf.tile(stopping_embeddings,
[tf.shape(vertices)[0], 1, 1])
vertex_embeddings = tf.concat(
[stopping_embeddings, vertex_embeddings], axis=1)
# Pass through Transformer encoder
vertex_embeddings = self.encoder(vertex_embeddings, is_training=is_training)
return vertex_embeddings
@snt.reuse_variables
def _embed_inputs(self, faces_long, vertex_embeddings,
global_context_embedding=None):
"""Embeds face sequences and adds within and between face positions."""
# Face value embeddings are gathered vertex embeddings
face_embeddings = tf.gather(vertex_embeddings, faces_long, batch_dims=1)
# Position embeddings
pos_embeddings = snt.Embed(
vocab_size=self.max_seq_length,
embed_dim=self.embedding_dim,
initializers={'embeddings': tf.glorot_uniform_initializer},
densify_gradients=True,
name='coord_embeddings')(tf.range(tf.shape(faces_long)[1]))
# Step zero embeddings
batch_size = tf.shape(face_embeddings)[0]
if global_context_embedding is None:
zero_embed = tf.get_variable(
'embed_zero', shape=[1, 1, self.embedding_dim])
zero_embed_tiled = tf.tile(zero_embed, [batch_size, 1, 1])
else:
zero_embed_tiled = global_context_embedding[:, None]
# Aggregate embeddings
embeddings = face_embeddings + pos_embeddings[None]
embeddings = tf.concat([zero_embed_tiled, embeddings], axis=1)
return embeddings
@snt.reuse_variables
def _project_to_pointers(self, inputs):
"""Projects transformer outputs to pointer vectors."""
return tf.layers.dense(
inputs,
self.embedding_dim,
use_bias=True,
kernel_initializer=tf.zeros_initializer(),
name='project_to_pointers'
)
@snt.reuse_variables
def _create_dist(self,
vertex_embeddings,
vertices_mask,
faces_long,
global_context_embedding=None,
sequential_context_embeddings=None,
temperature=1.,
top_k=0,
top_p=1.,
is_training=False,
cache=None):
"""Outputs categorical dist for vertex indices."""
# Embed inputs
decoder_inputs = self._embed_inputs(
faces_long, vertex_embeddings, global_context_embedding)
# Pass through Transformer decoder
if cache is not None:
decoder_inputs = decoder_inputs[:, -1:]
decoder_outputs = self.decoder(
decoder_inputs,
cache=cache,
sequential_context_embeddings=sequential_context_embeddings,
is_training=is_training)
# Get pointers
pred_pointers = self._project_to_pointers(decoder_outputs)
# Get logits and mask
logits = tf.matmul(pred_pointers, vertex_embeddings, transpose_b=True)
logits /= tf.sqrt(float(self.embedding_dim))
f_verts_mask = tf.pad(
vertices_mask, [[0, 0], [2, 0]], constant_values=1.)[:, None]
logits *= f_verts_mask
logits -= (1. - f_verts_mask) * 1e9
logits /= temperature
logits = top_k_logits(logits, top_k)
logits = top_p_logits(logits, top_p)
return tfd.Categorical(logits=logits)
def _build(self, batch, is_training=False):
"""Pass batch through face model and get log probabilities.
Args:
batch: Dictionary containing:
'vertices_dequantized': Tensor of shape [batch_size, num_vertices, 3].
'faces': int32 tensor of shape [batch_size, seq_length] with flattened
faces.
'vertices_mask': float32 tensor with shape
[batch_size, num_vertices] that masks padded elements in 'vertices'.
is_training: If True, use dropout.
Returns:
pred_dist: tfd.Categorical predictive distribution with batch shape
[batch_size, seq_length].
"""
vertex_embeddings, global_context, seq_context = self._prepare_context(
batch, is_training=is_training)
pred_dist = self._create_dist(
vertex_embeddings,
batch['vertices_mask'],
batch['faces'][:, :-1],
global_context_embedding=global_context,
sequential_context_embeddings=seq_context,
is_training=is_training)
return pred_dist
def sample(self,
context,
max_sample_length=None,
temperature=1.,
top_k=0,
top_p=1.,
only_return_complete=True):
"""Sample from face model using caching.
Args:
context: Dictionary of context, including 'vertices' and 'vertices_mask'.
See _prepare_context for details.
max_sample_length: Maximum length of sampled face sequences. Sequences
that do not complete are truncated.
temperature: Scalar softmax temperature > 0.
top_k: Number of tokens to keep for top-k sampling.
top_p: Proportion of probability mass to keep for top-p sampling.
only_return_complete: If True, only return completed samples. Otherwise
return all samples along with completed indicator.
Returns:
outputs: Output dictionary with fields:
'completed': Boolean tensor of shape [num_samples]. If True then
corresponding sample completed within max_sample_length.
'faces': Tensor of sampled face indices with shape
[num_samples, max_sample_length].
'num_face_indices': Tensor indicating the number of face indices for each
example in the padded face samples.
"""
vertex_embeddings, global_context, seq_context = self._prepare_context(
context, is_training=False)
num_samples = tf.shape(vertex_embeddings)[0]
def _loop_body(i, samples, cache):
"""While-loop body for autoregression calculation."""
pred_dist = self._create_dist(
vertex_embeddings,
context['vertices_mask'],
samples,
global_context_embedding=global_context,
sequential_context_embeddings=seq_context,
cache=cache,
temperature=temperature,
top_k=top_k,
top_p=top_p)
next_sample = pred_dist.sample()[:, -1:]
samples = tf.concat([samples, next_sample], axis=1)
return i + 1, samples, cache
def _stopping_cond(i, samples, cache):
"""Stopping conditions for autoregressive calculation."""
del i, cache # Unused
return tf.reduce_any(tf.reduce_all(tf.not_equal(samples, 0), axis=-1))
# While loop sampling with caching
samples = tf.zeros([num_samples, 0], dtype=tf.int32)
max_sample_length = max_sample_length or self.max_seq_length
cache, cache_shape_invariants = self.decoder.create_init_cache(num_samples)
_, f, _ = tf.while_loop(
cond=_stopping_cond,
body=_loop_body,
loop_vars=(0, samples, cache),
shape_invariants=(tf.TensorShape([]), tf.TensorShape([None, None]),
cache_shape_invariants),
back_prop=False,
parallel_iterations=1,
maximum_iterations=max_sample_length)
# Record completed samples
complete_samples = tf.reduce_any(tf.equal(f, 0), axis=-1)
# Find number of faces
sample_length = tf.shape(f)[-1]
# Get largest new face (1) index as stopping point for incomplete samples.
max_one_ind = tf.reduce_max(
tf.range(sample_length)[None] * tf.cast(tf.equal(f, 1), tf.int32),
axis=-1)
zero_inds = tf.cast(
tf.argmax(tf.cast(tf.equal(f, 0), tf.int32), axis=-1), tf.int32)
num_face_indices = tf.where(complete_samples, zero_inds, max_one_ind) + 1
# Mask faces beyond stopping token with zeros
# This mask has a -1 in order to replace the last new face token with zero
faces_mask = tf.cast(
tf.range(sample_length)[None] < num_face_indices[:, None] - 1, tf.int32)
f *= faces_mask
# This is the real mask
faces_mask = tf.cast(
tf.range(sample_length)[None] < num_face_indices[:, None], tf.int32)
# Pad to maximum size with zeros
pad_size = max_sample_length - sample_length
f = tf.pad(f, [[0, 0], [0, pad_size]])
if only_return_complete:
f = tf.boolean_mask(f, complete_samples)
num_face_indices = tf.boolean_mask(num_face_indices, complete_samples)
context = tf.nest.map_structure(
lambda x: tf.boolean_mask(x, complete_samples), context)
complete_samples = tf.boolean_mask(complete_samples, complete_samples)
# outputs
outputs = {
'context': context,
'completed': complete_samples,
'faces': f,
'num_face_indices': num_face_indices,
}
return outputs
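# Illustrative sketch (not part of the original module): the pointer step from
# `_create_dist` above written out with NumPy for a single (unbatched) example.
# Decoder outputs are projected to pointer vectors and scored against every
# vertex embedding; the first two slots are reserved for the stopping and
# new-face tokens and are always valid.
def _demo_pointer_distribution(pred_pointers, vertex_embeddings, vertices_mask):
  """pred_pointers: [S, D], vertex_embeddings: [V + 2, D], vertices_mask: [V]."""
  import numpy as np
  logits = pred_pointers @ vertex_embeddings.T  # [S, V + 2]
  logits /= np.sqrt(float(pred_pointers.shape[-1]))
  full_mask = np.concatenate([np.ones(2, np.float32), vertices_mask])  # stop/new tokens
  logits = logits * full_mask - (1. - full_mask) * 1e9
  probs = np.exp(logits - logits.max(axis=-1, keepdims=True))
  return probs / probs.sum(axis=-1, keepdims=True)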
|
deepmind-research-master
|
polygen/modules.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Haiku module implementing hierarchical attention over memory."""
import functools
import inspect
from typing import Optional, NamedTuple
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
_EPSILON = 1e-3
class HierarchicalMemory(NamedTuple):
"""Structure of the hierarchical memory.
Where 'B' is batch size, 'M' is number of memories, 'C' is chunk size, and 'D'
is memory dimension.
"""
keys: jnp.ndarray # [B, M, D]
contents: jnp.ndarray # [B, M, C, D]
steps_since_last_write: jnp.ndarray # [B], steps since last memory write
accumulator: jnp.ndarray # [B, C, D], accumulates experiences before write
def sinusoid_position_encoding(
sequence_length: int,
hidden_size: int,
min_timescale: float = 2.,
max_timescale: float = 1e4,
) -> jnp.ndarray:
"""Creates sinusoidal encodings.
Args:
sequence_length: length [L] of sequence to be position encoded.
hidden_size: dimension [D] of the positional encoding vectors.
min_timescale: minimum timescale for the frequency.
max_timescale: maximum timescale for the frequency.
Returns:
An array of shape [L, D]
"""
freqs = np.arange(0, hidden_size, min_timescale)
inv_freq = max_timescale**(-freqs / hidden_size)
pos_seq = np.arange(sequence_length - 1, -1, -1.0)
sinusoid_inp = np.einsum("i,j->ij", pos_seq, inv_freq)
pos_emb = np.concatenate(
[np.sin(sinusoid_inp), np.cos(sinusoid_inp)], axis=-1)
return pos_emb
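# Illustrative usage sketch (not part of the original module): for a chunk of
# length L and memory dimension D the encoding has shape [L, D], so it can be
# broadcast-added onto memory contents of shape [B, M, L, D] as done in
# HierarchicalMemoryAttention below.
def _demo_sinusoid_shapes(chunk_size=8, hidden_size=16):
  pos_emb = sinusoid_position_encoding(chunk_size, hidden_size)
  assert pos_emb.shape == (chunk_size, hidden_size)
  return pos_emb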
class HierarchicalMemoryAttention(hk.Module):
"""Multi-head attention over hierarchical memory."""
def __init__(self,
feature_size: int,
k: int,
num_heads: int = 1,
memory_position_encoding: bool = True,
init_scale: float = 2.,
name: Optional[str] = None) -> None:
"""Constructor.
Args:
feature_size: size of feature dimension of attention-over-memories
embedding.
k: number of memories to sample.
num_heads: number of attention heads.
memory_position_encoding: whether to add positional encodings to memories
during within memory attention.
init_scale: scale factor for Variance weight initializers.
name: module name.
"""
super().__init__(name=name)
self._size = feature_size
self._k = k
self._num_heads = num_heads
self._weights = None
self._memory_position_encoding = memory_position_encoding
self._init_scale = init_scale
@property
def num_heads(self):
return self._num_heads
@hk.transparent
def _singlehead_linear(self,
inputs: jnp.ndarray,
hidden_size: int,
name: str):
linear = hk.Linear(
hidden_size,
with_bias=False,
w_init=hk.initializers.VarianceScaling(scale=self._init_scale),
name=name)
out = linear(inputs)
return out
def __call__(
self,
queries: jnp.ndarray,
hm_memory: HierarchicalMemory,
hm_mask: Optional[jnp.ndarray] = None) -> jnp.ndarray:
"""Do hierarchical attention over the stored memories.
Args:
queries: Tensor [B, Q, E] Query(ies) in, for batch size B, query length
Q, and embedding dimension E.
hm_memory: Hierarchical Memory.
hm_mask: Optional boolean mask tensor of shape [B, Q, M]. Where false,
the corresponding query timepoints cannot attend to the corresponding
memory chunks. This can be used for enforcing causal attention on the
learner, not attending to memories from prior episodes, etc.
Returns:
Value updates for each query slot: [B, Q, D]
"""
# some shape checks
batch_size, query_length, _ = queries.shape
(memory_batch_size, num_memories,
memory_chunk_size, mem_embedding_size) = hm_memory.contents.shape
assert batch_size == memory_batch_size
chex.assert_shape(hm_memory.keys,
(batch_size, num_memories, mem_embedding_size))
chex.assert_shape(hm_memory.accumulator,
(memory_batch_size, memory_chunk_size,
mem_embedding_size))
chex.assert_shape(hm_memory.steps_since_last_write,
(memory_batch_size,))
if hm_mask is not None:
chex.assert_type(hm_mask, bool)
chex.assert_shape(hm_mask,
(batch_size, query_length, num_memories))
query_head = self._singlehead_linear(queries, self._size, "query")
key_head = self._singlehead_linear(
jax.lax.stop_gradient(hm_memory.keys), self._size, "key")
# What times in the input [t] attend to what times in the memories [T].
logits = jnp.einsum("btd,bTd->btT", query_head, key_head)
scaled_logits = logits / np.sqrt(self._size)
# Mask last dimension, replacing invalid logits with large negative values.
# This allows e.g. enforcing causal attention on learner, or blocking
# attention across episodes
if hm_mask is not None:
masked_logits = jnp.where(hm_mask, scaled_logits, -1e6)
else:
masked_logits = scaled_logits
# identify the top-k memories and their relevance weights
top_k_logits, top_k_indices = jax.lax.top_k(masked_logits, self._k)
weights = jax.nn.softmax(top_k_logits)
# set up the within-memory attention
assert self._size % self._num_heads == 0
mha_key_size = self._size // self._num_heads
attention_layer = hk.MultiHeadAttention(
key_size=mha_key_size,
model_size=self._size,
num_heads=self._num_heads,
w_init_scale=self._init_scale,
name="within_mem_attn")
# position encodings
augmented_contents = hm_memory.contents
if self._memory_position_encoding:
position_embs = sinusoid_position_encoding(
memory_chunk_size, mem_embedding_size)
augmented_contents += position_embs[None, None, :, :]
def _within_memory_attention(sub_inputs, sub_memory_contents, sub_weights,
sub_top_k_indices):
top_k_contents = sub_memory_contents[sub_top_k_indices, :, :]
# Now we go deeper, with another vmap over **tokens**, because each token
# can attend to different memories.
def do_attention(sub_sub_inputs, sub_sub_top_k_contents):
tiled_inputs = jnp.tile(sub_sub_inputs[None, None, :],
reps=(self._k, 1, 1))
sub_attention_results = attention_layer(
query=tiled_inputs,
key=sub_sub_top_k_contents,
value=sub_sub_top_k_contents)
return sub_attention_results
do_attention = hk_vmap(do_attention, in_axes=0, split_rng=False)
attention_results = do_attention(sub_inputs, top_k_contents)
attention_results = jnp.squeeze(attention_results, axis=2)
# Now collapse results across k memories
attention_results = sub_weights[:, :, None] * attention_results
attention_results = jnp.sum(attention_results, axis=1)
return attention_results
# vmap across batch
batch_within_memory_attention = hk_vmap(_within_memory_attention,
in_axes=0, split_rng=False)
outputs = batch_within_memory_attention(
queries,
jax.lax.stop_gradient(augmented_contents),
weights,
top_k_indices)
return outputs
@functools.wraps(hk.vmap)
def hk_vmap(*args, **kwargs):
"""Helper function to support older versions of Haiku."""
# Older versions of Haiku did not have split_rng, but the behavior has always
# been equivalent to split_rng=False.
if "split_rng" not in inspect.signature(hk.vmap).parameters:
kwargs.setdefault("split_rng", False)
if kwargs.get("split_rng"):
raise ValueError("The installed version of Haiku only supports "
"`split_rng=False`, please upgrade Haiku.")
del kwargs["split_rng"]
return hk.vmap(*args, **kwargs)
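# Illustrative sketch (not part of the original module): the memory-selection
# step of HierarchicalMemoryAttention above -- scaled dot products between query
# projections and memory-key summaries, followed by a top-k softmax -- written
# out with plain JAX.
def _demo_topk_memory_weights(query_head, key_head, k):
  """query_head: [B, Q, D], key_head: [B, M, D] -> ([B, Q, k] weights, indices)."""
  logits = jnp.einsum("btd,bTd->btT", query_head, key_head)
  scaled_logits = logits / np.sqrt(query_head.shape[-1])
  top_k_logits, top_k_indices = jax.lax.top_k(scaled_logits, k)
  return jax.nn.softmax(top_k_logits), top_k_indices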
|
deepmind-research-master
|
hierarchical_transformer_memory/hierarchical_attention/htm_attention.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for hierarchical_attention.htm_attention."""
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import numpy as np
from hierarchical_transformer_memory.hierarchical_attention import htm_attention
def _build_queries_and_memory(query_length, num_memories, mem_chunk_size,
batch_size=2, embedding_size=12):
"""Builds dummy queries + memory contents for tests."""
queries = np.random.random([batch_size, query_length, embedding_size])
memory_contents = np.random.random(
[batch_size, num_memories, mem_chunk_size, embedding_size])
# summary key = average across chunk
memory_keys = np.mean(memory_contents, axis=2)
# to accumulate newest memories before writing
memory_accumulator = np.zeros_like(memory_contents[:, -1, :, :])
memory = htm_attention.HierarchicalMemory(
keys=memory_keys,
contents=memory_contents,
accumulator=memory_accumulator,
steps_since_last_write=np.zeros([batch_size,], dtype=np.int32))
return queries, memory
class HierarchicalAttentionTest(parameterized.TestCase):
@parameterized.parameters([
{
'query_length': 1,
'num_memories': 7,
'mem_chunk_size': 5,
'mem_k': 4,
},
{
'query_length': 9,
'num_memories': 7,
'mem_chunk_size': 5,
'mem_k': 4,
},
])
@hk.testing.transform_and_run
def test_output_shapes(self, query_length, num_memories, mem_chunk_size,
mem_k):
np.random.seed(0)
batch_size = 2
embedding_size = 12
num_heads = 3
queries, memory = _build_queries_and_memory(
query_length=query_length, num_memories=num_memories,
mem_chunk_size=mem_chunk_size, embedding_size=embedding_size)
hm_att = htm_attention.HierarchicalMemoryAttention(
feature_size=embedding_size,
k=mem_k,
num_heads=num_heads)
results = hm_att(queries, memory)
self.assertEqual(results.shape,
(batch_size, query_length, embedding_size))
self.assertTrue(np.all(np.isfinite(results)))
@hk.testing.transform_and_run
def test_masking(self):
np.random.seed(0)
batch_size = 2
embedding_size = 12
num_heads = 3
query_length = 5
num_memories = 7
mem_chunk_size = 6
mem_k = 4
queries, memory = _build_queries_and_memory(
query_length=query_length, num_memories=num_memories,
mem_chunk_size=mem_chunk_size, embedding_size=embedding_size)
hm_att = htm_attention.HierarchicalMemoryAttention(
feature_size=embedding_size,
k=mem_k,
num_heads=num_heads)
# get a random boolean mask
mask = np.random.binomial(
1, 0.5, [batch_size, query_length, num_memories]).astype(bool)
results = hm_att(queries, memory, hm_mask=mask)
self.assertEqual(results.shape,
(batch_size, query_length, embedding_size))
self.assertTrue(np.all(np.isfinite(results)))
if __name__ == '__main__':
absltest.main()
|
deepmind-research-master
|
hierarchical_transformer_memory/hierarchical_attention/htm_attention_test.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A pycolab environment for going to the ballet.
A pycolab-based environment for testing memory for sequences of events. The
environment contains some number of "dancer" characters in (implicit) 3 x 3
squares within a larger 9 x 9 room. The agent starts in the center of the room.
At the beginning of an episode, the dancers each do a dance solo of a fixed
length, separated by empty time of a fixed length. The agent's actions do
nothing during the dances. After the last dance ends, the agent must go up to a
dancer, identified using language describing the dance. The agent is rewarded +1
for approaching the correct dancer, 0 otherwise.
The room is upsampled at a size of 9 pixels per square to render a view for the
agent, which is cropped in egocentric perspective, i.e. the agent is always in
the center of its view (see https://arxiv.org/abs/1910.00571).
"""
from absl import app
from absl import flags
from absl import logging
import dm_env
import numpy as np
from pycolab import cropping
from hierarchical_transformer_memory.pycolab_ballet import ballet_environment_core as ballet_core
FLAGS = flags.FLAGS
UPSAMPLE_SIZE = 9 # pixels per game square
SCROLL_CROP_SIZE = 11 # in game squares
DANCER_SHAPES = [
"triangle", "empty_square", "plus", "inverse_plus", "ex", "inverse_ex",
"circle", "empty_circle", "tee", "upside_down_tee",
"h", "u", "upside_down_u", "vertical_stripes", "horizontal_stripes"
]
COLORS = {
"red": np.array([255, 0, 0]),
"green": np.array([0, 255, 0]),
"blue": np.array([0, 0, 255]),
"purple": np.array([128, 0, 128]),
"orange": np.array([255, 165, 0]),
"yellow": np.array([255, 255, 0]),
"brown": np.array([128, 64, 0]),
"pink": np.array([255, 64, 255]),
"cyan": np.array([0, 255, 255]),
"dark_green": np.array([0, 100, 0]),
"dark_red": np.array([100, 0, 0]),
"dark_blue": np.array([0, 0, 100]),
"olive": np.array([100, 100, 0]),
"teal": np.array([0, 100, 100]),
"lavender": np.array([215, 200, 255]),
"peach": np.array([255, 210, 170]),
"rose": np.array([255, 205, 230]),
"light_green": np.array([200, 255, 200]),
"light_yellow": np.array([255, 255, 200]),
}
def _generate_template(object_name):
"""Generates a template object image, given a name with color and shape."""
object_color, object_type = object_name.split()
template = np.zeros((UPSAMPLE_SIZE, UPSAMPLE_SIZE))
half = UPSAMPLE_SIZE // 2
if object_type == "triangle":
for i in range(UPSAMPLE_SIZE):
for j in range(UPSAMPLE_SIZE):
if (j <= half and i >= 2 * (half - j)) or (j > half and i >= 2 *
(j - half)):
template[i, j] = 1.
elif object_type == "square":
template[:, :] = 1.
elif object_type == "empty_square":
template[:2, :] = 1.
template[-2:, :] = 1.
template[:, :2] = 1.
template[:, -2:] = 1.
elif object_type == "plus":
template[:, half - 1:half + 2] = 1.
template[half - 1:half + 2, :] = 1.
elif object_type == "inverse_plus":
template[:, :] = 1.
template[:, half - 1:half + 2] = 0.
template[half - 1:half + 2, :] = 0.
elif object_type == "ex":
for i in range(UPSAMPLE_SIZE):
for j in range(UPSAMPLE_SIZE):
if abs(i - j) <= 1 or abs(UPSAMPLE_SIZE - 1 - j - i) <= 1:
template[i, j] = 1.
elif object_type == "inverse_ex":
for i in range(UPSAMPLE_SIZE):
for j in range(UPSAMPLE_SIZE):
if not (abs(i - j) <= 1 or abs(UPSAMPLE_SIZE - 1 - j - i) <= 1):
template[i, j] = 1.
elif object_type == "circle":
for i in range(UPSAMPLE_SIZE):
for j in range(UPSAMPLE_SIZE):
if (i - half)**2 + (j - half)**2 <= half**2:
template[i, j] = 1.
elif object_type == "empty_circle":
for i in range(UPSAMPLE_SIZE):
for j in range(UPSAMPLE_SIZE):
if abs((i - half)**2 + (j - half)**2 - half**2) < 6:
template[i, j] = 1.
elif object_type == "tee":
template[:, half - 1:half + 2] = 1.
template[:3, :] = 1.
elif object_type == "upside_down_tee":
template[:, half - 1:half + 2] = 1.
template[-3:, :] = 1.
elif object_type == "h":
template[:, :3] = 1.
template[:, -3:] = 1.
template[half - 1:half + 2, :] = 1.
elif object_type == "u":
template[:, :3] = 1.
template[:, -3:] = 1.
template[-3:, :] = 1.
elif object_type == "upside_down_u":
template[:, :3] = 1.
template[:, -3:] = 1.
template[:3, :] = 1.
elif object_type == "vertical_stripes":
for j in range(half + UPSAMPLE_SIZE % 2):
template[:, 2*j] = 1.
elif object_type == "horizontal_stripes":
for i in range(half + UPSAMPLE_SIZE % 2):
template[2*i, :] = 1.
else:
raise ValueError("Unknown object: {}".format(object_type))
if object_color not in COLORS:
raise ValueError("Unknown color: {}".format(object_color))
template = np.tensordot(template, COLORS[object_color], axes=0)
return template
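# Illustrative usage sketch (not part of the original file): templates are keyed
# by "<color> <shape>" strings drawn from COLORS and DANCER_SHAPES, e.g. the
# environment test checks against "orange plus".
def _demo_generate_template():
  template = _generate_template("red circle")
  assert template.shape == (UPSAMPLE_SIZE, UPSAMPLE_SIZE, 3)  # RGB sprite
  return template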
# Agent and wall templates
_CHAR_TO_TEMPLATE_BASE = {
ballet_core.AGENT_CHAR:
np.tensordot(
np.ones([UPSAMPLE_SIZE, UPSAMPLE_SIZE]),
np.array([255, 255, 255]),
axes=0),
ballet_core.WALL_CHAR:
np.tensordot(
np.ones([UPSAMPLE_SIZE, UPSAMPLE_SIZE]),
np.array([40, 40, 40]),
axes=0),
}
def get_scrolling_cropper(rows=9, cols=9, crop_pad_char=" "):
return cropping.ScrollingCropper(rows=rows, cols=cols,
to_track=[ballet_core.AGENT_CHAR],
pad_char=crop_pad_char,
scroll_margins=(None, None))
class BalletEnvironment(dm_env.Environment):
"""A Python environment API for pycolab ballet tasks."""
def __init__(self, num_dancers, dance_delay, max_steps, rng=None):
"""Construct a BalletEnvironment that wraps pycolab games for agent use.
This class inherits from dm_env and has all the expected methods and specs.
Args:
num_dancers: The number of dancers to use, between 1 and 8 (inclusive).
dance_delay: How long to delay between the dances.
max_steps: The maximum number of steps to allow in an episode, after which
it will terminate.
rng: An optional numpy Random Generator, to set a fixed seed use e.g.
`rng=np.random.default_rng(seed=...)`
"""
self._num_dancers = num_dancers
self._dance_delay = dance_delay
self._max_steps = max_steps
# internal state
if rng is None:
rng = np.random.default_rng()
self._rng = rng
self._current_game = None # Current pycolab game instance.
self._state = None # Current game step state.
self._game_over = None # Whether the game has ended.
self._char_to_template = None # Mapping of chars to sprite images.
# rendering tools
self._cropper = get_scrolling_cropper(SCROLL_CROP_SIZE, SCROLL_CROP_SIZE,
" ")
def _game_factory(self):
"""Samples dancers and positions, returns a pycolab core game engine."""
target_dancer_index = self._rng.integers(self._num_dancers)
motions = list(ballet_core.DANCE_SEQUENCES.keys())
positions = ballet_core.DANCER_POSITIONS.copy()
colors = list(COLORS.keys())
shapes = DANCER_SHAPES.copy()
self._rng.shuffle(positions)
self._rng.shuffle(motions)
self._rng.shuffle(colors)
self._rng.shuffle(shapes)
dancers_and_properties = []
for dancer_i in range(self._num_dancers):
if dancer_i == target_dancer_index:
value = 1.
else:
value = 0.
dancers_and_properties.append(
(ballet_core.POSSIBLE_DANCER_CHARS[dancer_i],
positions[dancer_i],
motions[dancer_i],
shapes[dancer_i],
colors[dancer_i],
value))
logging.info("Making level with dancers_and_properties: %s",
dancers_and_properties)
return ballet_core.make_game(
dancers_and_properties=dancers_and_properties,
dance_delay=self._dance_delay)
def _render_observation(self, observation):
"""Renders from raw pycolab image observation to agent-usable ones."""
observation = self._cropper.crop(observation)
obs_rows, obs_cols = observation.board.shape
image = np.zeros([obs_rows * UPSAMPLE_SIZE, obs_cols * UPSAMPLE_SIZE, 3],
dtype=np.float32)
for i in range(obs_rows):
for j in range(obs_cols):
this_char = chr(observation.board[i, j])
if this_char != ballet_core.FLOOR_CHAR:
image[
i * UPSAMPLE_SIZE:(i + 1) * UPSAMPLE_SIZE, j *
UPSAMPLE_SIZE:(j + 1) * UPSAMPLE_SIZE] = self._char_to_template[
this_char]
image /= 255.
language = np.array(self._current_game.the_plot["instruction_string"])
full_observation = (image, language)
return full_observation
def reset(self):
"""Start a new episode."""
# Build a new game and retrieve its first set of state/reward/discount.
self._current_game = self._game_factory()
# set up rendering, cropping, and state for current game
self._char_to_template = {
k: _generate_template(v) for k, v in self._current_game.the_plot[
"char_to_color_shape"]}
self._char_to_template.update(_CHAR_TO_TEMPLATE_BASE)
self._cropper.set_engine(self._current_game)
self._state = dm_env.StepType.FIRST
# let's go!
observation, _, _ = self._current_game.its_showtime()
observation = self._render_observation(observation)
return dm_env.TimeStep(
step_type=self._state,
reward=None,
discount=None,
observation=observation)
def step(self, action):
"""Apply action, step the world forward, and return observations."""
# If needed, reset and start new episode.
if self._state == dm_env.StepType.LAST:
self._clear_state()
if self._current_game is None:
return self.reset()
# Execute the action in pycolab.
observation, reward, discount = self._current_game.play(action)
self._game_over = self._is_game_over()
reward = reward if reward is not None else 0.
observation = self._render_observation(observation)
# Check the current status of the game.
if self._game_over:
self._state = dm_env.StepType.LAST
else:
self._state = dm_env.StepType.MID
return dm_env.TimeStep(
step_type=self._state,
reward=reward,
discount=discount,
observation=observation)
@property
def observation_spec(self):
image_shape = (SCROLL_CROP_SIZE * UPSAMPLE_SIZE,
SCROLL_CROP_SIZE * UPSAMPLE_SIZE,
3)
return (
# vision
dm_env.specs.Array(
shape=image_shape, dtype=np.float32, name="image"),
# language
dm_env.specs.Array(
shape=[], dtype=str, name="language"),
)
@property
def action_spec(self):
return dm_env.specs.BoundedArray(
shape=[], dtype="int32",
minimum=0, maximum=7,
name="grid_actions")
def _is_game_over(self):
"""Returns whether it is game over, either from the engine or timeout."""
return (self._current_game.game_over or
(self._current_game.the_plot.frame >= self._max_steps))
def _clear_state(self):
"""Clear all the internal information about the game."""
self._state = None
self._current_game = None
self._char_to_template = None
self._game_over = None
def simple_builder(level_name):
"""Simplifies building from fixed defs.
Args:
level_name: '{num_dancers}_delay{delay_length}', where each variable is an
integer. The levels used in the paper were:
['2_delay16', '4_delay16', '8_delay16',
'2_delay48', '4_delay48', '8_delay48']
Returns:
A BalletEnvironment with the requested settings.
"""
num_dancers, dance_delay = level_name.split("_")
num_dancers = int(num_dancers)
dance_delay = int(dance_delay[5:])
max_steps = 320 if dance_delay == 16 else 1024
level_args = dict(
num_dancers=num_dancers,
dance_delay=dance_delay,
max_steps=max_steps)
return BalletEnvironment(**level_args)
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
env = simple_builder("4_delay16")
for _ in range(3):
obs = env.reset().observation
for _ in range(300):
obs = env.step(0).observation
print(obs)
if __name__ == "__main__":
app.run(main)
|
deepmind-research-master
|
hierarchical_transformer_memory/pycolab_ballet/ballet_environment.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pycolab_ballet.ballet_environment_wrapper."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from hierarchical_transformer_memory.pycolab_ballet import ballet_environment
from hierarchical_transformer_memory.pycolab_ballet import ballet_environment_core
class BalletEnvironmentTest(parameterized.TestCase):
def test_full_wrapper(self):
env = ballet_environment.BalletEnvironment(
num_dancers=1, dance_delay=16, max_steps=200,
rng=np.random.default_rng(seed=0))
result = env.reset()
self.assertIsNone(result.reward)
level_size = ballet_environment_core.ROOM_SIZE
upsample_size = ballet_environment.UPSAMPLE_SIZE
# wait for dance to complete
for i in range(30):
result = env.step(0).observation
self.assertEqual(result[0].shape,
(level_size[0] * upsample_size,
level_size[1] * upsample_size,
3))
self.assertEqual(str(result[1])[:5],
np.array("watch"))
for i in [1, 1, 1, 1]: # first gets eaten before agent can move
result = env.step(i)
self.assertEqual(result.observation[0].shape,
(level_size[0] * upsample_size,
level_size[1] * upsample_size,
3))
self.assertEqual(str(result.observation[1])[:11],
np.array("up_and_down"))
self.assertEqual(result.reward, 1.)
# check egocentric scrolling is working, by checking object is in center
np.testing.assert_array_almost_equal(
result.observation[0][45:54, 45:54],
ballet_environment._generate_template("orange plus") / 255.)
@parameterized.parameters(
"2_delay16",
"4_delay16",
"8_delay48",
)
def test_simple_builder(self, level_name):
dance_delay = int(level_name[-2:])
np.random.seed(0)
env = ballet_environment.simple_builder(level_name)
# check max steps are set to match paper settings
self.assertEqual(env._max_steps,
320 if dance_delay == 16 else 1024)
# test running a few steps of each
env.reset()
level_size = ballet_environment_core.ROOM_SIZE
upsample_size = ballet_environment.UPSAMPLE_SIZE
for i in range(8):
result = env.step(i) # check all 8 movements work
self.assertEqual(result.observation[0].shape,
(level_size[0] * upsample_size,
level_size[1] * upsample_size,
3))
self.assertEqual(str(result.observation[1])[:5],
np.array("watch"))
self.assertEqual(result.reward, 0.)
if __name__ == "__main__":
absltest.main()
|
deepmind-research-master
|
hierarchical_transformer_memory/pycolab_ballet/ballet_environment_test.py
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The pycolab core of the environment for going to the ballet.
This builds the text-based (non-graphical) engine of the environment, and offers
a UI which a human can play (for a fixed level). However, the logic of level
creation, the graphics, and anything that is external to the pycolab engine
itself are contained in ballet_environment.py.
"""
import curses
import enum
from absl import app
from absl import flags
from pycolab import ascii_art
from pycolab import human_ui
from pycolab.prefab_parts import sprites as prefab_sprites
FLAGS = flags.FLAGS
ROOM_SIZE = (11, 11) # one square around edge will be wall.
DANCER_POSITIONS = [(2, 2), (2, 5), (2, 8),
(5, 2), (5, 8), # space in center for agent
(8, 2), (8, 5), (8, 8)]
AGENT_START = (5, 5)
AGENT_CHAR = "A"
WALL_CHAR = "#"
FLOOR_CHAR = " "
RESERVED_CHARS = [AGENT_CHAR, WALL_CHAR, FLOOR_CHAR]
POSSIBLE_DANCER_CHARS = [
chr(i) for i in range(65, 91) if chr(i) not in RESERVED_CHARS
]
DANCE_SEQUENCE_LENGTHS = 16
# movement directions for dancers / actions for agent
class DIRECTIONS(enum.IntEnum):
N = 0
NE = 1
E = 2
SE = 3
S = 4
SW = 5
W = 6
NW = 7
DANCE_SEQUENCES = {
"circle_cw": [
DIRECTIONS.N, DIRECTIONS.E, DIRECTIONS.S, DIRECTIONS.S, DIRECTIONS.W,
DIRECTIONS.W, DIRECTIONS.N, DIRECTIONS.N, DIRECTIONS.E, DIRECTIONS.E,
DIRECTIONS.S, DIRECTIONS.S, DIRECTIONS.W, DIRECTIONS.W, DIRECTIONS.N,
DIRECTIONS.E
],
"circle_ccw": [
DIRECTIONS.N, DIRECTIONS.W, DIRECTIONS.S, DIRECTIONS.S, DIRECTIONS.E,
DIRECTIONS.E, DIRECTIONS.N, DIRECTIONS.N, DIRECTIONS.W, DIRECTIONS.W,
DIRECTIONS.S, DIRECTIONS.S, DIRECTIONS.E, DIRECTIONS.E, DIRECTIONS.N,
DIRECTIONS.W
],
"up_and_down": [
DIRECTIONS.N, DIRECTIONS.S, DIRECTIONS.S, DIRECTIONS.N, DIRECTIONS.N,
DIRECTIONS.S, DIRECTIONS.S, DIRECTIONS.N, DIRECTIONS.N, DIRECTIONS.S,
DIRECTIONS.S, DIRECTIONS.N, DIRECTIONS.N, DIRECTIONS.S, DIRECTIONS.S,
DIRECTIONS.N
],
"left_and_right": [
DIRECTIONS.E, DIRECTIONS.W, DIRECTIONS.W, DIRECTIONS.E, DIRECTIONS.E,
DIRECTIONS.W, DIRECTIONS.W, DIRECTIONS.E, DIRECTIONS.E, DIRECTIONS.W,
DIRECTIONS.W, DIRECTIONS.E, DIRECTIONS.E, DIRECTIONS.W, DIRECTIONS.W,
DIRECTIONS.E
],
"diagonal_uldr": [
DIRECTIONS.NW, DIRECTIONS.SE, DIRECTIONS.SE, DIRECTIONS.NW,
DIRECTIONS.NW, DIRECTIONS.SE, DIRECTIONS.SE, DIRECTIONS.NW,
DIRECTIONS.NW, DIRECTIONS.SE, DIRECTIONS.SE, DIRECTIONS.NW,
DIRECTIONS.NW, DIRECTIONS.SE, DIRECTIONS.SE, DIRECTIONS.NW
],
"diagonal_urdl": [
DIRECTIONS.NE, DIRECTIONS.SW, DIRECTIONS.SW, DIRECTIONS.NE,
DIRECTIONS.NE, DIRECTIONS.SW, DIRECTIONS.SW, DIRECTIONS.NE,
DIRECTIONS.NE, DIRECTIONS.SW, DIRECTIONS.SW, DIRECTIONS.NE,
DIRECTIONS.NE, DIRECTIONS.SW, DIRECTIONS.SW, DIRECTIONS.NE
],
"plus_cw": [
DIRECTIONS.N, DIRECTIONS.S, DIRECTIONS.E, DIRECTIONS.W, DIRECTIONS.S,
DIRECTIONS.N, DIRECTIONS.W, DIRECTIONS.E, DIRECTIONS.N, DIRECTIONS.S,
DIRECTIONS.E, DIRECTIONS.W, DIRECTIONS.S, DIRECTIONS.N, DIRECTIONS.W,
DIRECTIONS.E
],
"plus_ccw": [
DIRECTIONS.N, DIRECTIONS.S, DIRECTIONS.W, DIRECTIONS.E, DIRECTIONS.S,
DIRECTIONS.N, DIRECTIONS.E, DIRECTIONS.W, DIRECTIONS.N, DIRECTIONS.S,
DIRECTIONS.W, DIRECTIONS.E, DIRECTIONS.S, DIRECTIONS.N, DIRECTIONS.E,
DIRECTIONS.W
],
"times_cw": [
DIRECTIONS.NE, DIRECTIONS.SW, DIRECTIONS.SE, DIRECTIONS.NW,
DIRECTIONS.SW, DIRECTIONS.NE, DIRECTIONS.NW, DIRECTIONS.SE,
DIRECTIONS.NE, DIRECTIONS.SW, DIRECTIONS.SE, DIRECTIONS.NW,
DIRECTIONS.SW, DIRECTIONS.NE, DIRECTIONS.NW, DIRECTIONS.SE
],
"times_ccw": [
DIRECTIONS.NW, DIRECTIONS.SE, DIRECTIONS.SW, DIRECTIONS.NE,
DIRECTIONS.SE, DIRECTIONS.NW, DIRECTIONS.NE, DIRECTIONS.SW,
DIRECTIONS.NW, DIRECTIONS.SE, DIRECTIONS.SW, DIRECTIONS.NE,
DIRECTIONS.SE, DIRECTIONS.NW, DIRECTIONS.NE, DIRECTIONS.SW
],
"zee": [
DIRECTIONS.NE, DIRECTIONS.W, DIRECTIONS.W, DIRECTIONS.E, DIRECTIONS.E,
DIRECTIONS.SW, DIRECTIONS.NE, DIRECTIONS.SW, DIRECTIONS.SW,
DIRECTIONS.E, DIRECTIONS.E, DIRECTIONS.W, DIRECTIONS.W, DIRECTIONS.NE,
DIRECTIONS.SW, DIRECTIONS.NE
],
"chevron_down": [
DIRECTIONS.NW, DIRECTIONS.S, DIRECTIONS.SE, DIRECTIONS.NE, DIRECTIONS.N,
DIRECTIONS.SW, DIRECTIONS.NE, DIRECTIONS.SW, DIRECTIONS.NE,
DIRECTIONS.S, DIRECTIONS.SW, DIRECTIONS.NW, DIRECTIONS.N, DIRECTIONS.SE,
DIRECTIONS.NW, DIRECTIONS.SE
],
"chevron_up": [
DIRECTIONS.SE, DIRECTIONS.N, DIRECTIONS.NW, DIRECTIONS.SW, DIRECTIONS.S,
DIRECTIONS.NE, DIRECTIONS.SW, DIRECTIONS.NE, DIRECTIONS.SW,
DIRECTIONS.N, DIRECTIONS.NE, DIRECTIONS.SE, DIRECTIONS.S, DIRECTIONS.NW,
DIRECTIONS.SE, DIRECTIONS.NW
],
}
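# Illustrative check (not part of the original file): every dance solo has the
# same fixed length, DANCE_SEQUENCE_LENGTHS, matching the fixed solo length
# described in ballet_environment.py.
def _demo_check_dance_lengths():
  assert all(len(seq) == DANCE_SEQUENCE_LENGTHS
             for seq in DANCE_SEQUENCES.values())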
class DancerSprite(prefab_sprites.MazeWalker):
"""A `Sprite` for dancers."""
def __init__(self, corner, position, character, motion, color, shape,
value=0.):
super(DancerSprite, self).__init__(
corner, position, character, impassable="#")
self.motion = motion
self.dance_sequence = DANCE_SEQUENCES[motion].copy()
self.color = color
self.shape = shape
self.value = value
self.is_dancing = False
def update(self, actions, board, layers, backdrop, things, the_plot):
if the_plot["task_phase"] == "dance" and self.is_dancing:
if not self.dance_sequence:
raise ValueError(
"Dance sequence is empty! Was this dancer repeated in the order?")
dance_move = self.dance_sequence.pop(0)
if dance_move == DIRECTIONS.N:
self._north(board, the_plot)
elif dance_move == DIRECTIONS.NE:
self._northeast(board, the_plot)
elif dance_move == DIRECTIONS.E:
self._east(board, the_plot)
elif dance_move == DIRECTIONS.SE:
self._southeast(board, the_plot)
elif dance_move == DIRECTIONS.S:
self._south(board, the_plot)
elif dance_move == DIRECTIONS.SW:
self._southwest(board, the_plot)
elif dance_move == DIRECTIONS.W:
self._west(board, the_plot)
elif dance_move == DIRECTIONS.NW:
self._northwest(board, the_plot)
if not self.dance_sequence: # done!
self.is_dancing = False
the_plot["time_until_next_dance"] = the_plot["dance_delay"]
else:
if self.position == things[AGENT_CHAR].position:
# Award the player the appropriate amount of reward, and end episode.
the_plot.add_reward(self.value)
the_plot.terminate_episode()
class PlayerSprite(prefab_sprites.MazeWalker):
"""The player / agent character.
MazeWalker class methods handle basic movement and collision detection.
"""
def __init__(self, corner, position, character):
super(PlayerSprite, self).__init__(
corner, position, character, impassable="#")
def update(self, actions, board, layers, backdrop, things, the_plot):
if the_plot["task_phase"] == "dance":
# agent's actions are ignored, this logic updates the dance phases.
if the_plot["time_until_next_dance"] > 0:
the_plot["time_until_next_dance"] -= 1
if the_plot["time_until_next_dance"] == 0: # next phase time!
if the_plot["dance_order"]: # start the next dance!
next_dancer = the_plot["dance_order"].pop(0)
things[next_dancer].is_dancing = True
else: # choice time!
the_plot["task_phase"] = "choice"
the_plot["instruction_string"] = the_plot[
"choice_instruction_string"]
elif the_plot["task_phase"] == "choice":
# agent can now move and make its choice
if actions == DIRECTIONS.N:
self._north(board, the_plot)
elif actions == DIRECTIONS.NE:
self._northeast(board, the_plot)
elif actions == DIRECTIONS.E:
self._east(board, the_plot)
elif actions == DIRECTIONS.SE:
self._southeast(board, the_plot)
elif actions == DIRECTIONS.S:
self._south(board, the_plot)
elif actions == DIRECTIONS.SW:
self._southwest(board, the_plot)
elif actions == DIRECTIONS.W:
self._west(board, the_plot)
elif actions == DIRECTIONS.NW:
self._northwest(board, the_plot)
def make_game(dancers_and_properties, dance_delay=16):
"""Constructs an ascii map, then uses pycolab to make it a game.
Args:
dancers_and_properties: list of (character, (row, column), motion, shape,
color, value), for placing objects in the world.
dance_delay: how long to wait between dances.
Returns:
this_game: Pycolab engine running the specified game.
"""
num_rows, num_cols = ROOM_SIZE
level_layout = []
# upper wall
level_layout.append("".join([WALL_CHAR] * num_cols))
# room
middle_string = "".join([WALL_CHAR] + [" "] * (num_cols - 2) + [WALL_CHAR])
level_layout.extend([middle_string for _ in range(num_rows - 2)])
# lower wall
level_layout.append("".join([WALL_CHAR] * num_cols))
def _add_to_map(obj, loc):
"""Adds an ascii character to the level at the requested position."""
obj_row = level_layout[loc[0]]
pre_string = obj_row[:loc[1]]
post_string = obj_row[loc[1] + 1:]
level_layout[loc[0]] = pre_string + obj + post_string
_add_to_map(AGENT_CHAR, AGENT_START)
sprites = {AGENT_CHAR: PlayerSprite}
dance_order = []
char_to_color_shape = []
# add dancers to level
for obj, loc, motion, shape, color, value in dancers_and_properties:
_add_to_map(obj, loc)
sprites[obj] = ascii_art.Partial(
DancerSprite, motion=motion, color=color, shape=shape, value=value)
char_to_color_shape.append((obj, color + " " + shape))
dance_order += obj
if value > 0.:
choice_instruction_string = motion
this_game = ascii_art.ascii_art_to_game(
art=level_layout,
what_lies_beneath=" ",
sprites=sprites,
update_schedule=[[AGENT_CHAR],
dance_order])
this_game.the_plot["task_phase"] = "dance"
this_game.the_plot["instruction_string"] = "watch"
this_game.the_plot["choice_instruction_string"] = choice_instruction_string
this_game.the_plot["dance_order"] = dance_order
this_game.the_plot["dance_delay"] = dance_delay
this_game.the_plot["time_until_next_dance"] = 1
this_game.the_plot["char_to_color_shape"] = char_to_color_shape
return this_game
def main(argv):
del argv # unused
these_dancers_and_properties = [
(POSSIBLE_DANCER_CHARS[1], (2, 2), "chevron_up", "triangle", "red", 1),
(POSSIBLE_DANCER_CHARS[2], (2, 5), "circle_ccw", "triangle", "red", 0),
(POSSIBLE_DANCER_CHARS[3], (2, 8), "plus_cw", "triangle", "red", 0),
(POSSIBLE_DANCER_CHARS[4], (5, 2), "plus_ccw", "triangle", "red", 0),
(POSSIBLE_DANCER_CHARS[5], (5, 8), "times_cw", "triangle", "red", 0),
(POSSIBLE_DANCER_CHARS[6], (8, 2), "up_and_down", "plus", "blue", 0),
(POSSIBLE_DANCER_CHARS[7], (8, 5), "left_and_right", "plus", "blue", 0),
(POSSIBLE_DANCER_CHARS[8], (8, 8), "zee", "plus", "blue", 0),
]
game = make_game(dancers_and_properties=these_dancers_and_properties)
# Note that these colors are only for human UI
fg_colours = {
AGENT_CHAR: (999, 999, 999), # Agent is white
WALL_CHAR: (300, 300, 300), # Wall, dark grey
FLOOR_CHAR: (0, 0, 0), # Floor
}
for (c, _, _, _, col, _) in these_dancers_and_properties:
fg_colours[c] = (999, 0, 0) if col == "red" else (0, 0, 999)
bg_colours = {
c: (0, 0, 0) for c in RESERVED_CHARS + POSSIBLE_DANCER_CHARS[1:8]
}
ui = human_ui.CursesUi(
keys_to_actions={
# Basic movement.
curses.KEY_UP: DIRECTIONS.N,
curses.KEY_DOWN: DIRECTIONS.S,
curses.KEY_LEFT: DIRECTIONS.W,
curses.KEY_RIGHT: DIRECTIONS.E,
-1: 8, # Do nothing.
},
delay=500,
colour_fg=fg_colours,
colour_bg=bg_colours)
ui.play(game)
if __name__ == "__main__":
app.run(main)
|
deepmind-research-master
|
hierarchical_transformer_memory/pycolab_ballet/ballet_environment_core.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Visualize physical planning games in Domain Explorer.
"""
import functools
from absl import app
from absl import flags
from dm_control import composer
from dm_control import viewer
from dm_control.locomotion import walkers
from physics_planning_games import board_games
from physics_planning_games.mujoban.boxoban import boxoban_level_generator
from physics_planning_games.mujoban.mujoban import Mujoban
from physics_planning_games.mujoban.mujoban_level import MujobanLevel
flags.DEFINE_enum('environment_name', 'mujoban', [
'mujoban', 'go_7x7', 'tic_tac_toe_markers_features',
'tic_tac_toe_mixture_opponent_markers_features',
'tic_tac_toe_optimal_opponent_markers_features'],
'Name of an environment to load.')
FLAGS = flags.FLAGS
TIME_LIMIT = 1000
CONTROL_TIMESTEP = .1
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
environment_name = FLAGS.environment_name
if environment_name == 'mujoban':
walker = walkers.JumpingBallWithHead(add_ears=True, camera_height=0.25)
arena = MujobanLevel(boxoban_level_generator)
task = Mujoban(
walker=walker,
maze=arena,
control_timestep=CONTROL_TIMESTEP,
top_camera_height=64,
top_camera_width=48)
env = composer.Environment(
time_limit=TIME_LIMIT, task=task, strip_singleton_obs_buffer_dim=True)
else:
env = functools.partial(
board_games.load, environment_name=environment_name)
viewer.launch(env)
if __name__ == '__main__':
app.run(main)
|
deepmind-research-master
|
physics_planning_games/explore.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Mujoban."""
from absl.testing import absltest
from dm_control import composer
from dm_control.locomotion import walkers
import dm_env as environment
import numpy as np
from physics_planning_games.mujoban.mujoban import Mujoban
from physics_planning_games.mujoban.mujoban_level import MujobanLevel
TIME_LIMIT = 5
CONTROL_TIMESTEP = .1
class MujobanTest(absltest.TestCase):
def test(self):
walker = walkers.JumpingBallWithHead(add_ears=True, camera_height=0.25)
arena = MujobanLevel()
task = Mujoban(
walker=walker,
maze=arena,
control_timestep=CONTROL_TIMESTEP,
top_camera_height=64,
top_camera_width=48)
env = composer.Environment(
time_limit=TIME_LIMIT,
task=task,
strip_singleton_obs_buffer_dim=True)
time_step = env.reset()
self.assertEqual(
set([
'pixel_layer', 'full_entity_layer', 'top_camera',
'walker/body_height', 'walker/end_effectors_pos',
'walker/joints_pos', 'walker/joints_vel',
'walker/sensors_accelerometer', 'walker/sensors_gyro',
'walker/sensors_touch', 'walker/sensors_velocimeter',
'walker/world_zaxis', 'walker/orientation',
]), set(time_step.observation.keys()))
top_camera = time_step.observation['top_camera']
self.assertEqual(np.uint8, top_camera.dtype)
self.assertEqual((64, 48, 3), top_camera.shape)
all_step_types = []
# Run enough actions that we are guaranteed to have restarted the
# episode at least once.
for _ in range(int(2*TIME_LIMIT/CONTROL_TIMESTEP)):
action = 2*np.random.random(env.action_spec().shape) - 1
time_step = env.step(action)
all_step_types.append(time_step.step_type)
self.assertEqual(set([environment.StepType.FIRST,
environment.StepType.MID,
environment.StepType.LAST]),
set(all_step_types))
if __name__ == '__main__':
absltest.main()
|
deepmind-research-master
|
physics_planning_games/mujoban/mujoban_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A floor pad that is activated through touch."""
import weakref
from dm_control import composer
from dm_control import mjcf
import numpy as np
def _get_activator_box(pad_xpos, pad_size, boxes, tolerance=0.0):
"""Returns the activator box, if any. Otherwise returns None."""
# Ignore the height
pad_min = pad_xpos[0:2] - pad_size[0:2]
pad_max = pad_xpos[0:2] + pad_size[0:2]
for box in boxes:
box_xpos = np.array(box.xpos[0:2])
box_size = np.array(box.size[0:2])
min_ = pad_min + box_size - tolerance
max_ = pad_max - box_size + tolerance
in_range = np.logical_and(box_xpos >= min_, box_xpos <= max_).all()
if in_range:
return box
# No activator box was found
return None
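# Illustrative sketch (not part of the original file): the containment test
# above only needs objects exposing `xpos` and `size`, so it can be exercised
# with simple stand-ins instead of physics-bound geoms.
def _demo_activator_box():
  import collections
  FakeBox = collections.namedtuple('FakeBox', ['xpos', 'size'])
  on_pad = FakeBox(xpos=[0., 0., 0.1], size=[0.1, 0.1, 0.1])
  off_pad = FakeBox(xpos=[2., 2., 0.1], size=[0.1, 0.1, 0.1])
  pad_xpos = np.zeros(3)
  pad_size = np.array([0.5, 0.5, 0.02])
  return _get_activator_box(pad_xpos, pad_size, [off_pad, on_pad])  # -> on_pad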
class MujobanPad(composer.Entity):
"""A less sensitive floor pad for Mujoban."""
def _build(self, rgba=None, pressed_rgba=None,
size=1, height=0.02, detection_tolerance=0.0, name='mujoban_pad'):
rgba = tuple(rgba or (1, 0, 0, 1))
pressed_rgba = tuple(pressed_rgba or (0.2, 0, 0, 1))
self._mjcf_root = mjcf.RootElement(model=name)
self._site = self._mjcf_root.worldbody.add(
'site', type='box', name='site',
pos=[0, 0, (height / 2 or -0.001)],
size=[size / 2, size / 2, (height / 2 or 0.001)], rgba=rgba)
self._activated = False
self._rgba = np.array(rgba, dtype=float)
self._pressed_rgba = np.array(pressed_rgba, dtype=float)
self._activator = None
self._detection_tolerance = detection_tolerance
self._boxes = []
@property
def rgba(self):
return self._rgba
@property
def pressed_rgba(self):
return self._pressed_rgba
def register_box(self, box_entity):
self._boxes.append(weakref.proxy(box_entity))
@property
def site(self):
return self._site
@property
def boxes(self):
return self._boxes
@property
def activator(self):
return self._activator if self._activated else None
@property
def mjcf_model(self):
return self._mjcf_root
def initialize_episode_mjcf(self, unused_random_state):
self._activated = False
def initialize_episode(self, physics, unused_random_state):
self._update_activation(physics)
def _update_activation(self, physics):
# Note: we get the physically bound box, not an object from self._boxes.
# That's because the generator expression below generates bound objects.
box = _get_activator_box(
pad_xpos=np.array(physics.bind(self._site).xpos),
pad_size=np.array(physics.bind(self._site).size),
boxes=(physics.bind(box.geom) for box in self._boxes),
tolerance=self._detection_tolerance,)
if box:
self._activated = True
self._activator = box
else:
self._activated = False
self._activator = None
if self._activated:
physics.bind(self._site).rgba = self._pressed_rgba
else:
physics.bind(self._site).rgba = self._rgba
def before_step(self, physics, unused_random_state):
self._update_activation(physics)
def after_substep(self, physics, unused_random_state):
self._update_activation(physics)
@property
def activated(self):
"""Whether this floor pad is pressed at the moment."""
return self._activated
def reset(self, physics):
self._activated = False
physics.bind(self._site).rgba = self._rgba
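# Illustrative usage sketch (not part of the original module), mirroring how
# mujoban.py wires pads into the arena:
#   pad = MujobanPad(size=1, height=0, detection_tolerance=0.0)
#   arena.attach(pad)
#   pad.register_box(box)  # `box` is a composer entity exposing a `geom`.
#   ...
#   pad.activated  # True while a registered box covers the pad.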
|
deepmind-research-master
|
physics_planning_games/mujoban/mujoban_pad.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module for generating Mujoban level.
"""
import labmaze
BOX_CHAR = 'B'
TARGET_CHAR = labmaze.defaults.OBJECT_TOKEN
_DEFAULT_LEVEL = """
#####
# #
#### # #
# . .# #
# . #
# .## $##
## #$$ #
## $@#
## ###
####"""
# The meaning of the symbols here is the same as defined in
# http://sneezingtiger.com/sokoban/levels/sasquatch5Text.html. These are the
# same symbols as used by the Sokoban community.
EMPTY_CELL = ' '
GOAL = '.'
PLAYER = '@'
PLAYER_ON_GOAL = '+'
BOX = '$'
BOX_ON_GOAL = '*'
WALL = '#'
_SOKOBAN_SYMBOLS = [
EMPTY_CELL, GOAL, PLAYER, PLAYER_ON_GOAL, BOX, BOX_ON_GOAL, WALL
]
def single_level_generator(level=_DEFAULT_LEVEL):
while True:
yield level
def _ascii_to_text_grid_level(ascii_level):
"""Goes from official Sokoban ASCII art to string understood by Mujoban.
Args:
ascii_level: a multiline string; each character is a location in a
gridworld.
Returns:
A string.
"""
level = ascii_level
if level.startswith('\n'):
level = level[1:]
level = level.replace('$', BOX_CHAR)
level = level.replace('.', TARGET_CHAR)
level = level.replace(' ', '.')
level = level.replace('#', '*')
level = level.replace('@', 'P')
if level[-1] == '\n':
level = level[:-1]
# Pad
all_rows = level.split('\n')
width = max(len(row) for row in all_rows)
padded_rows = []
for row in all_rows:
row += '*' * (width - len(row))
padded_rows.append(row)
level = '\n'.join(padded_rows)
return level + '\n'
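# For example (illustrative, not part of the original module), assuming
# labmaze.defaults.OBJECT_TOKEN == 'G':
#   _ascii_to_text_grid_level('\n#@ $.#\n###')
# returns '*P.BG*\n******\n' (walls '#' -> '*', player '@' -> 'P', boxes '$'
# -> 'B', targets '.' -> 'G', empty ' ' -> '.', short rows padded with '*').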
class MujobanLevel(labmaze.BaseMaze):
"""A maze that represents a level in Mujoban."""
def __init__(self, ascii_level_generator=single_level_generator):
"""Constructor.
Args:
      ascii_level_generator: a callable returning a Python generator. At each
        iteration, the generator should yield a string representing a level.
        The symbols in the string should be those of
        http://sneezingtiger.com/sokoban/levels/sasquatch5Text.html. These are
        the same symbols as used by the Sokoban community.
"""
self._level_iterator = ascii_level_generator()
self.regenerate()
def regenerate(self):
"""Regenerates the maze if required."""
level = next(self._level_iterator)
self._entity_layer = labmaze.TextGrid(_ascii_to_text_grid_level(level))
self._variation_layer = self._entity_layer.copy()
self._variation_layer[:] = '.'
self._num_boxes = (self._entity_layer == BOX_CHAR).sum()
num_targets = (self._entity_layer == TARGET_CHAR).sum()
if num_targets != self._num_boxes:
raise ValueError('Number of targets {} should equal number of boxes {}.'
.format(num_targets, self._num_boxes))
@property
def num_boxes(self):
return self._num_boxes
@property
def num_targets(self):
return self._num_boxes
@property
def entity_layer(self):
return self._entity_layer
@property
def variations_layer(self):
return self._variation_layer
@property
def height(self):
return self._entity_layer.shape[0]
@property
def width(self):
return self._entity_layer.shape[1]
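if __name__ == '__main__':
  # Minimal usage sketch (not part of the original module): build the default
  # level and print its grid representation and box/target counts.
  maze = MujobanLevel()
  print(maze.entity_layer)
  print('boxes:', maze.num_boxes, 'targets:', maze.num_targets)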
|
deepmind-research-master
|
physics_planning_games/mujoban/mujoban_level.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Top-level module definitions for mujoban."""
from physics_planning_games.mujoban.mujoban import Mujoban
from physics_planning_games.mujoban.mujoban_level import MujobanLevel
|
deepmind-research-master
|
physics_planning_games/mujoban/__init__.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for mujoban_level."""
from absl.testing import absltest
from physics_planning_games.mujoban import mujoban_level
_LEVEL = """
#####
# @####
# $. #
###$.# #
# $.# #
# #$. #
# ###
######"""
_GRID_LEVEL = """********
*..P****
*..BG..*
***BG*.*
*..BG*.*
*.*BG..*
*....***
********
"""
class MujobanLevelTest(absltest.TestCase):
def test_ascii_to_text_grid_level(self):
grid_level = mujoban_level._ascii_to_text_grid_level(_LEVEL)
self.assertEqual(_GRID_LEVEL, grid_level)
if __name__ == '__main__':
absltest.main()
|
deepmind-research-master
|
physics_planning_games/mujoban/mujoban_level_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Box props used in Mujoban that the agent pushes.
"""
import itertools
from dm_control import composer
from dm_control.entities import props
class Box(props.Primitive):
"""A class representing a box prop."""
def _build(self, half_lengths=None, mass=None, name='box'):
half_lengths = half_lengths or [0.05, 0.1, 0.15]
super(Box, self)._build(geom_type='box',
size=half_lengths,
mass=mass,
name=name)
class BoxWithSites(Box):
"""A class representing a box prop with sites on the corners."""
def _build(self, half_lengths=None, mass=None, name='box'):
half_lengths = half_lengths or [0.05, 0.1, 0.15]
super(BoxWithSites, self)._build(half_lengths=half_lengths, mass=mass,
name=name)
corner_positions = itertools.product([half_lengths[0], -half_lengths[0]],
[half_lengths[1], -half_lengths[1]],
[half_lengths[2], -half_lengths[2]])
corner_sites = []
for i, corner_pos in enumerate(corner_positions):
corner_sites.append(
self.mjcf_model.worldbody.add(
'site',
type='sphere',
name='corner_{}'.format(i),
size=[0.1],
pos=corner_pos,
rgba=[1, 0, 0, 1.0],
group=composer.SENSOR_SITES_GROUP))
self._corner_sites = tuple(corner_sites)
@property
def corner_sites(self):
return self._corner_sites
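# Illustrative usage sketch (not part of the original module): Mujoban's task
# code (see mujoban.py) constructs its pushable boxes roughly as
#   box = BoxWithSites(half_lengths=[0.4, 0.4, 0.15])
# and then attaches them to the arena with slide joints.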
|
deepmind-research-master
|
physics_planning_games/mujoban/props.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MuJoban task.
Mujoban is a single player puzzle-solving game embedded in the MuJoCo
simulation environment. The puzzle is based on the 2D game of Sokoban,
where an agent situated on a grid has to push boxes onto target locations.
"""
import collections
from dm_control import composer
from dm_control.composer.observation import observable as observable_lib
from dm_control.locomotion.arenas import labmaze_textures
from dm_control.locomotion.arenas.mazes import MazeWithTargets
from dm_env import specs
import numpy as np
from six.moves import range
from six.moves import zip
from physics_planning_games.mujoban import mujoban_level
from physics_planning_games.mujoban.mujoban_pad import MujobanPad
from physics_planning_games.mujoban.props import BoxWithSites
_FLOOR_GAP_CHAR = '#'
_AMBIENT_HEADLIGHT = 0.8
_BOX_SIZE = 0.4
_BOX_HEIGHT = 0.15
_BOX_MASS = 2.5
_BOX_FRICTION = [0.5, 0.005, 0.0001]
_BOX_RGBA = [173. / 255., 179. / 255., 60. / 255., 1.]
_BOX_PRESSED_RGBA = [0, 0, 1, 1]
_TARGET_RGBA = [1.0, 0., 0., 1.]
_PRESSED_TARGET_RGBA = [0., 1., 0., 1.]
_PEG_SIZE = 0.05
_PEG_HEIGHT = 0.25
_PEG_RGBA = [0.5, 0.5, 0.5, 1]
_PEG_ANGLE = np.pi / 4
# Aliveness in [-1., 0.].
_ALIVE_THRESHOLD = -0.5
# Constants used by the full entity layer
_WALL_LAYER = 0
_TARGET_LAYER = 1
_SOKOBAN_LAYER = 2
_BOX_LAYER = 3
def _round_positions(boxes, walker, last_round_walker):
"""Round float positions to snap objects to grid."""
round_walker = np.round(walker).astype('int32')
round_boxes = [np.round(box).astype('int32') for box in boxes]
for box in round_boxes:
if np.array_equal(box, round_walker):
round_walker = last_round_walker
return round_boxes, round_walker
class Mujoban(composer.Task):
"""Requires objects to be moved onto matching-colored floor pads.
  The agent only receives instantaneous rewards of +1 for the
  timestep in which a box first enters a target, and -1 for the
  timestep in which a box leaves the target. There is an additional reward of
  +10 when all the boxes are put on targets, at which point the episode
  terminates.
  """
def __init__(self,
walker,
maze,
target_height=0,
box_prop=None,
box_size=None,
box_mass=None,
with_grid_pegs=False,
detection_tolerance=0.0,
physics_timestep=0.001,
control_timestep=0.025,
top_camera_height=128,
top_camera_width=128,
box_on_target_reward=1.0,
level_solved_reward=10.0):
"""Initializes this task.
Args:
walker: A `Walker` object.
maze: A `BaseMaze` object.
target_height: The height of the target pads above the ground, in meters.
box_prop: An optional `Primitive` prop to use as the box.
box_size: An optional three element sequence defining the half lengths of
the sides of the box.
box_mass: Box mass. If this is a list or tuple, a random value is sampled
from the truncated exponential distribution in [a, b) where a =
box_mass[0] and b = box_mass[1], with scale factor box_mass[2] * (b -
a).
with_grid_pegs: Whether to add solid pegs at the corners of the maze
grid cells. This helps to enforce the usual Sokoban rules where
diagonal movements are forbidden.
detection_tolerance: A maximum length scale (in metres) within which a
box is allowed to stick outside a target pad while still activating it.
For example, if this is set to 0.1 then a box will activate a pad if it
sticks out of the pad by no more than 10 centimetres.
physics_timestep: The time step of the physics simulation.
control_timestep: Should be an integer multiple of the physics time step.
top_camera_height: An int; the height of the top camera in the
observation. Setting this to 0 will disable the top camera.
top_camera_width: An int; the width of the top camera in the observation.
Setting this to 0 will disable the top camera.
box_on_target_reward: A float; reward for putting a box on a target.
level_solved_reward: A float: reward for solving the level.
"""
skybox_texture = labmaze_textures.SkyBox(style='sky_03')
wall_textures = labmaze_textures.WallTextures(style='style_01')
floor_textures = labmaze_textures.FloorTextures(style='style_01')
self._detection_tolerance = detection_tolerance
self._box_prop = box_prop
self._box_on_target_reward = box_on_target_reward
self._level_solved_reward = level_solved_reward
self._maze = maze
self._arena = MazeWithTargets(
maze=maze,
xy_scale=1,
z_height=1,
skybox_texture=skybox_texture,
wall_textures=wall_textures,
floor_textures=floor_textures)
self._walker = walker
self._arena.mjcf_model.visual.headlight.ambient = [_AMBIENT_HEADLIGHT] * 3
self._arena.text_maze_regenerated_hook = self._regenerate_positions
self._first_step = True
# Targets.
self._targets = []
self._target_positions = []
# Boxes.
self._box_size = box_size or [_BOX_SIZE] * 2 + [_BOX_HEIGHT]
self._box_mass = box_mass or _BOX_MASS
self._boxes = []
self._box_positions = []
self._with_grid_pegs = with_grid_pegs
self._peg_body = None
self._last_walker_position = None
# Create walkers and corresponding observables.
self._walker.create_root_joints(self._arena.attach(self._walker))
enabled_observables = [self._walker.observables.sensors_touch,
self._walker.observables.orientation]
enabled_observables += self._walker.observables.proprioception
enabled_observables += self._walker.observables.kinematic_sensors
for observable in enabled_observables:
observable.enabled = True
if top_camera_width and top_camera_height:
self._arena.observables.top_camera.enabled = True
self._arena.observables.top_camera.width = top_camera_width
self._arena.observables.top_camera.height = top_camera_height
    # Symbolic entity representation in labyrinth (labmaze) format.
self._entity_layer = self._maze.entity_layer
    # The pixel layer is the same as a pixel rendering of symbolic Sokoban.
self._pixel_layer = np.zeros(self._entity_layer.shape + (3,), dtype='uint8')
self._full_entity_layer = np.zeros(self._entity_layer.shape + (4,),
dtype='bool')
pixel_layer_obs = observable_lib.Generic(lambda _: self._pixel_layer)
pixel_layer_obs.enabled = True
full_entity_layer_obs = observable_lib.Generic(
lambda _: self._full_entity_layer)
full_entity_layer_obs.enabled = True
self._task_observables = collections.OrderedDict({
'pixel_layer': pixel_layer_obs,
'full_entity_layer': full_entity_layer_obs,
})
# Set time steps.
self.set_timesteps(
physics_timestep=physics_timestep, control_timestep=control_timestep)
self._discount = 1.
@property
def name(self):
return 'Mujoban'
@property
def root_entity(self):
return self._arena
def _regenerate_positions(self):
self._object_positions = self._arena.find_token_grid_positions(
[mujoban_level.TARGET_CHAR, mujoban_level.BOX_CHAR])
self._box_positions = self._arena.grid_to_world_positions(
self._object_positions[mujoban_level.BOX_CHAR])
target_grid_positions = self._object_positions[mujoban_level.TARGET_CHAR]
self._target_positions = self._arena.grid_to_world_positions(
target_grid_positions)
for idx in range(len(self._target_positions)):
target_grid_position = target_grid_positions[idx]
grid_y, grid_x = target_grid_position
self._arena.maze.variations_layer[grid_y, grid_x] = _FLOOR_GAP_CHAR
def initialize_episode_mjcf(self, random_state):
self._arena.regenerate()
# Clear existing targets and boxes
for target in self._targets:
target.detach()
self._targets = []
for box in self._boxes:
box.detach()
self._boxes = []
self._arena.mjcf_model.contact.remove('pair')
for _ in range(self._maze.num_targets):
target = MujobanPad(
size=self._arena.xy_scale,
height=0,
detection_tolerance=self._detection_tolerance)
self._arena.attach(target)
self._targets.append(target)
for _ in range(self._maze.num_boxes):
box = self._box_prop
if not box:
box = BoxWithSites(half_lengths=self._box_size)
box.geom.mass = _BOX_MASS
box.geom.rgba = [0, 0, 0, 1] # Will be randomized for each episode.
frame = self._arena.attach(box)
frame.add('joint', type='slide', axis=[1, 0, 0], name='x_slider')
frame.add('joint', type='slide', axis=[0, 1, 0], name='y_slider')
frame.add('joint', type='slide', axis=[0, 0, 1], name='z_slider')
self._boxes.append(box)
for target in self._targets:
target.register_box(box)
# Reduce the friction between box and ground.
ground_geom = self._arena.mjcf_model.find('geom', 'ground')
self._arena.mjcf_model.contact.add(
'pair',
geom1=box.geom,
geom2=ground_geom,
condim=6,
friction=[
_BOX_FRICTION[0], _BOX_FRICTION[0], _BOX_FRICTION[1],
_BOX_FRICTION[2], _BOX_FRICTION[2]
])
# Set box masses.
for box in self._boxes:
box.geom.mass = _BOX_MASS
box.geom.rgba[:] = _BOX_RGBA
for target in self._targets:
target.rgba[:] = _TARGET_RGBA
target.pressed_rgba[:] = _PRESSED_TARGET_RGBA
if self._with_grid_pegs:
if self._peg_body is not None:
self._peg_body.remove()
self._peg_body = self._arena.mjcf_model.worldbody.add('body')
for y in range(self._arena.maze.height - 1):
for x in range(self._arena.maze.width - 1):
peg_x, peg_y, _ = self._arena.grid_to_world_positions(
[[x + 0.5, y + 0.5]])[0]
self._peg_body.add(
'geom', type='box',
size=[_PEG_SIZE / np.sqrt(2),
_PEG_SIZE / np.sqrt(2),
_PEG_HEIGHT / 2],
pos=[peg_x, peg_y, _PEG_HEIGHT / 2],
quat=[np.cos(_PEG_ANGLE / 2), 0, 0, np.sin(_PEG_ANGLE / 2)],
rgba=_PEG_RGBA)
def initialize_episode(self, physics, random_state):
self._first_step = True
self._was_activated = [False] * len(self._targets)
self._is_solved = False
self._discount = 1.
self._walker.reinitialize_pose(physics, random_state)
spawn_position = self._arena.spawn_positions[0]
spawn_rotation = random_state.uniform(-np.pi, np.pi)
spawn_quat = np.array(
[np.cos(spawn_rotation / 2), 0, 0,
np.sin(spawn_rotation / 2)])
self._walker.shift_pose(
physics, [spawn_position[0], spawn_position[1], 0.0], spawn_quat)
for box, box_xy_position in zip(self._boxes, self._box_positions):
# Position at the middle of a maze cell.
box_position = np.array(
[box_xy_position[0], box_xy_position[1], self._box_size[2]])
# Commit the box's final pose.
box.set_pose(physics, position=box_position, quaternion=[1., 0., 0., 0.])
for target, target_position in zip(self._targets, self._target_positions):
target.set_pose(physics, position=target_position)
target.reset(physics)
self._update_entity_pixel_layers(physics)
def before_step(self, physics, actions, random_state):
if isinstance(actions, list):
actions = np.concatenate(actions)
super(Mujoban, self).before_step(physics, actions, random_state)
if self._first_step:
self._first_step = False
else:
self._was_activated = [target.activated for target in self._targets]
def _get_object_positions_in_grid(self, physics):
box_positions = self._arena.world_to_grid_positions(
[physics.bind(box.geom).xpos for box in self._boxes])
walker_position = self._arena.world_to_grid_positions(
[physics.bind(self._walker.root_body).xpos])[0]
return box_positions, walker_position
def _update_entity_pixel_layers(self, physics):
"""Updates the pixel observation and both layered representations.
Mujoban offers 3 grid representations of the world:
    * the pixel layer: this is a grid representation with an RGB value at
      each grid point;
    * the entity layer: this is a grid representation with a character at
      each grid point. This representation hides information since if Sokoban
      or a box is over a target, then the target is occluded. This is the
      official entity layer used by arenas, which is based on dm_control
      labmaze;
    * the full entity layer: this is a grid representation with a boolean
      vector of length 4 at each grid point. The first value is `True` iff
      there is a wall at this location. The second value is `True` iff there
      is a target at this location. The third value is for Sokoban, and the
      fourth value is for boxes. Note that this is not a one-hot encoding
      since Sokoban or a box can share the same location as a target.
Args:
physics: a Mujoco physics object.
Raises:
RuntimeError: if a box or walker are overlapping with a wall.
"""
# The entity layer from the maze is a string that shows the maze at the
# *beginning* of the level. This is fixed throughout an episode.
entity_layer = self._maze.entity_layer.copy()
box_positions, walker_position = self._get_object_positions_in_grid(physics)
# round positions to snap to grid.
box_positions, walker_position = _round_positions(
box_positions, walker_position, self._last_walker_position)
# setup pixel layer
map_size = entity_layer.shape
pixel_layer = np.ndarray(map_size + (3,), dtype='uint8')
pixel_layer.fill(128)
# setup full entity layer
full_entity_layer = np.zeros(map_size + (4,), dtype='bool')
# remove boxes and agent
entity_layer[entity_layer == mujoban_level.BOX_CHAR] = '.'
entity_layer[entity_layer == 'P'] = '.'
# draw empty space and goals
pixel_layer[entity_layer == '.'] = [0, 0, 0]
pixel_layer[entity_layer == 'G'] = [255, 0, 0]
full_entity_layer[:, :, _WALL_LAYER] = True
full_entity_layer[:, :, _WALL_LAYER][entity_layer == '.'] = False
full_entity_layer[:, :, _WALL_LAYER][entity_layer == 'G'] = False
full_entity_layer[:, :, _TARGET_LAYER][entity_layer == 'G'] = True
# update boxes
for pos in box_positions:
# to ensure we are not changing the walls.
if entity_layer[pos[0], pos[1]] == '*':
        raise RuntimeError('Box and wall positions are overlapping and this '
                           'should not happen. It requires investigation and '
                           'fixing.')
# the entity layer has no representation of box on goal.
entity_layer[pos[0], pos[1]] = mujoban_level.BOX_CHAR
if np.array_equal(pixel_layer[pos[0], pos[1]], [255, 0, 0]):
pixel_layer[pos[0], pos[1]] = [0, 255, 0] # box on goal
else:
pixel_layer[pos[0], pos[1]] = [255, 255, 0]
full_entity_layer[pos[0], pos[1], _BOX_LAYER] = True
# update player
if entity_layer[walker_position[0], walker_position[1]] == '*':
      raise RuntimeError('Walker and wall positions are overlapping and this '
                         'should not happen. It requires investigation and '
                         'fixing.')
entity_layer[walker_position[0], walker_position[1]] = 'P'
pixel_layer[walker_position[0], walker_position[1]] = 0, 0, 255
full_entity_layer[
walker_position[0], walker_position[1], _SOKOBAN_LAYER] = True
self._last_walker_position = walker_position
self._entity_layer = entity_layer
self._pixel_layer = pixel_layer
self._full_entity_layer = full_entity_layer
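  # Color legend for the pixel layer built above (derived from the code, for
  # reference): wall = (128, 128, 128), empty floor = (0, 0, 0),
  # target = (255, 0, 0), box = (255, 255, 0), box on target = (0, 255, 0),
  # walker = (0, 0, 255).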
def after_step(self, physics, random_state):
super(Mujoban, self).after_step(physics, random_state)
for box in self._boxes:
physics.bind(box.geom).rgba = _BOX_RGBA
for target in self._targets:
if target.activated:
target.activator.rgba = _BOX_PRESSED_RGBA
self._update_entity_pixel_layers(physics)
self._is_solved = all([target.activated for target in self._targets])
if self._is_solved:
self._discount = 0.
def get_reward(self, physics):
reward = 0.0
for target, was_activated in zip(self._targets, self._was_activated):
if target.activated and not was_activated:
reward += self._box_on_target_reward
elif was_activated and not target.activated:
reward -= self._box_on_target_reward
if self._is_solved:
reward += self._level_solved_reward
return reward
def get_discount(self, physics):
return self._discount
def should_terminate_episode(self, physics):
is_dead = self._walker.aliveness(physics) < _ALIVE_THRESHOLD
return self._is_solved or is_dead
def get_reward_spec(self):
return specs.ArraySpec(shape=[], dtype=np.float32)
@property
def task_observables(self):
return self._task_observables
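if __name__ == '__main__':
  # Minimal usage sketch (not part of the original module), mirroring
  # mujoban_test.py. The imports below are only needed for this demo.
  from dm_control.locomotion import walkers
  from physics_planning_games.mujoban.mujoban_level import MujobanLevel
  demo_walker = walkers.JumpingBallWithHead(add_ears=True, camera_height=0.25)
  demo_task = Mujoban(walker=demo_walker, maze=MujobanLevel(),
                      control_timestep=0.1)
  demo_env = composer.Environment(task=demo_task, time_limit=5,
                                  strip_singleton_obs_buffer_dim=True)
  print(sorted(demo_env.reset().observation.keys()))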
|
deepmind-research-master
|
physics_planning_games/mujoban/mujoban.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Level generator for Mujoban based on levels from follwing dataset.
https://github.com/deepmind/boxoban-levels/
"""
import glob
import os
import zipfile
import numpy as np
import requests
BOXOBAN_URL = "https://github.com/deepmind/boxoban-levels/archive/master.zip"
def boxoban_level_generator(levels_set="unfiltered", data_split="valid"):
env = Boxoban(levels_set=levels_set, data_split=data_split)
while True:
index = np.random.randint(0, env.num_levels-1)
yield env.levels[index]
class Boxoban(object):
"""Class for loading and generatting Boxoban levels."""
def __init__(self,
levels_set="unfiltered",
data_split="valid"):
self._levels_set = levels_set
self._data_split = data_split
self._levels = []
data_file_path_local = os.path.join(os.path.dirname(__file__),
"boxoban_cache",
"{}_{}.npz".format(self._levels_set,
self._data_split))
data_file_path_global = os.path.join("/tmp/boxoban_cache",
"{}_{}.npz".format(self._levels_set,
self._data_split))
if os.path.exists(data_file_path_local):
self.levels = np.load(data_file_path_local)["levels"]
elif os.path.exists(data_file_path_global):
self.levels = np.load(data_file_path_global)["levels"]
else:
self.levels = self.get_data()
self.num_levels = len(self.levels)
def get_data(self):
"""Downloads and cache the data."""
try:
cache_path = os.path.join(
os.path.dirname(__file__), "boxoban_cache")
os.makedirs(cache_path, exist_ok=True)
except PermissionError:
cache_path = os.path.join("/tmp/boxoban_cache")
if not os.path.exists(cache_path):
os.makedirs(cache_path, exist_ok=True)
# Get the zip file
zip_file_path = os.path.join(cache_path, "master.zip")
if not os.path.exists(zip_file_path):
response = requests.get(BOXOBAN_URL, stream=True)
handle = open(zip_file_path, "wb")
for chunk in response.iter_content(chunk_size=512):
if chunk:
handle.write(chunk)
handle.close()
with zipfile.ZipFile(zip_file_path, "r") as zipref:
zipref.extractall(cache_path)
# convert to npz
path = os.path.join(cache_path, "boxoban-levels-master",
self._levels_set,
self._data_split)
files = glob.glob(path + "/*.txt")
levels = "".join([open(f, "r").read() for f in files])
levels = levels.split("\n;")
levels = ["\n".join(item.split("\n")[1:]) for item in levels]
levels = np.asarray(levels)
data_file_path = os.path.join(
cache_path, "{}_{}.npz".format(self._levels_set, self._data_split))
np.savez(data_file_path, levels=levels)
return levels
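if __name__ == "__main__":
  # Illustrative sketch (not part of the original module): feed Boxoban levels
  # into a MujobanLevel maze. Note that this downloads and caches the Boxoban
  # level dataset on first use.
  import functools
  from physics_planning_games.mujoban.mujoban_level import MujobanLevel
  generator_fn = functools.partial(
      boxoban_level_generator, levels_set="unfiltered", data_split="valid")
  maze = MujobanLevel(ascii_level_generator=generator_fn)
  print(maze.entity_layer)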
|
deepmind-research-master
|
physics_planning_games/mujoban/boxoban.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from physics_planning_games.board_games import tic_tac_toe_logic
class TicTacToeGameLogicTest(parameterized.TestCase):
def setUp(self):
super(TicTacToeGameLogicTest, self).setUp()
self.logic = tic_tac_toe_logic.TicTacToeGameLogic()
self.expected_board_state = np.zeros((3, 3, 3), dtype=bool)
self.expected_board_state[..., 0] = True # All positions initially empty.
def test_valid_move_sequence(self):
np.testing.assert_array_equal(self.logic.get_board_state(),
self.expected_board_state)
action = tic_tac_toe_logic.SingleMarkerAction(col=1, row=2)
self.assertTrue(self.logic.apply(player=0, action=action),
msg='Invalid action: {}'.format(action))
self.expected_board_state[action.row, action.col, 0] = False
self.expected_board_state[action.row, action.col, 1] = True
np.testing.assert_array_equal(self.logic.get_board_state(),
self.expected_board_state)
action = tic_tac_toe_logic.SingleMarkerAction(col=0, row=1)
self.assertTrue(self.logic.apply(player=1, action=action),
msg='Invalid action: {}'.format(action))
self.expected_board_state[action.row, action.col, 0] = False
self.expected_board_state[action.row, action.col, 2] = True
np.testing.assert_array_equal(self.logic.get_board_state(),
self.expected_board_state)
def test_invalid_move_sequence(self):
np.testing.assert_array_equal(self.logic.get_board_state(),
self.expected_board_state)
action = tic_tac_toe_logic.SingleMarkerAction(col=1, row=2)
self.assertTrue(self.logic.apply(player=0, action=action),
msg='Invalid action: {}'.format(action))
self.expected_board_state[action.row, action.col, 0] = False
self.expected_board_state[action.row, action.col, 1] = True
np.testing.assert_array_equal(self.logic.get_board_state(),
self.expected_board_state)
# Player 0 tries to move again in the same location.
action = tic_tac_toe_logic.SingleMarkerAction(col=1, row=2)
self.assertFalse(self.logic.apply(player=0, action=action),
msg='Invalid action was accepted: {}'.format(action))
# Player 1 tries to move in the same location as player 0.
self.assertFalse(self.logic.apply(player=1, action=action),
msg='Invalid action was accepted: {}'.format(action))
# The board state should not have changed as a result of invalid actions.
np.testing.assert_array_equal(self.logic.get_board_state(),
self.expected_board_state)
@parameterized.named_parameters([
dict(testcase_name='player_0_win',
move_sequence=((0, 0, 0),
(1, 0, 1),
(0, 1, 0),
(1, 2, 1),
(0, 2, 0)),
winner_id=0),
dict(testcase_name='player_1_win',
move_sequence=((0, 0, 0),
(1, 0, 2),
(0, 1, 0),
(1, 1, 1),
(0, 0, 1),
(1, 2, 0)),
winner_id=1),
dict(testcase_name='draw',
move_sequence=((0, 0, 0),
(1, 1, 1),
(0, 1, 0),
(1, 2, 0),
(0, 0, 2),
(1, 0, 1),
(0, 2, 1),
(1, 2, 2),
(0, 1, 2)),
winner_id=None)])
def test_reward_and_termination(self, move_sequence, winner_id):
for (player_id, row, col) in move_sequence:
self.assertFalse(self.logic.is_game_over)
self.assertDictEqual(self.logic.get_reward, {0: 0.0, 1: 0.0})
action = tic_tac_toe_logic.SingleMarkerAction(col=col, row=row)
self.assertTrue(self.logic.apply(player=player_id, action=action),
msg='Invalid action: {}'.format(action))
self.assertTrue(self.logic.is_game_over)
rewards = self.logic.get_reward
if winner_id is not None:
loser_id = 1 - winner_id
self.assertDictEqual(rewards, {winner_id: 1.0, loser_id: 0.0})
else: # Draw
self.assertDictEqual(rewards, {0: 0.5, 1: 0.5})
def test_random_opponent_vs_optimal(self):
"""Play random v optimal opponents and check that optimal largely wins.
"""
rand_state = np.random.RandomState(42)
optimal_opponent = tic_tac_toe_logic.TicTacToeOptimalOpponent()
random_opponent = tic_tac_toe_logic.TicTacToeRandomOpponent()
players = [optimal_opponent, random_opponent]
optimal_returns = []
random_returns = []
for _ in range(20):
logic = tic_tac_toe_logic.TicTacToeGameLogic()
optimal_opponent.reset()
random_opponent.reset()
rand_state.shuffle(players)
current_player_idx = 0
while not logic.is_game_over:
current_player = players[current_player_idx]
action = current_player.policy(logic, rand_state)
self.assertTrue(logic.apply(current_player_idx, action),
msg='Opponent {} selected invalid action {}'.format(
current_player, action))
current_player_idx = (current_player_idx + 1) % 2
# Record the winner.
reward = logic.get_reward
if players[0] == optimal_opponent:
optimal_return = reward[0]
random_return = reward[1]
else:
optimal_return = reward[1]
random_return = reward[0]
optimal_returns.append(optimal_return)
random_returns.append(random_return)
mean_optimal_returns = np.mean(optimal_returns)
mean_random_returns = np.mean(random_returns)
self.assertGreater(mean_optimal_returns, 0.9)
self.assertLess(mean_random_returns, 0.1)
@parameterized.named_parameters([
dict(testcase_name='pos0',
move_sequence=((0, 0, 1),
(1, 1, 1),
(0, 0, 2),
(1, 1, 2)),
optimal_move=(0, 0)),
dict(testcase_name='pos1',
move_sequence=((0, 0, 1),
(1, 1, 2),
(0, 0, 2),
(1, 1, 1)),
optimal_move=(0, 0)),
dict(testcase_name='pos2',
move_sequence=((0, 2, 1),
(1, 1, 2),
(0, 2, 2),
(1, 1, 1)),
optimal_move=(2, 0)),
])
def test_minimax_policy(self, move_sequence, optimal_move):
rand_state = np.random.RandomState(42)
for (player_id, row, col) in move_sequence:
action = tic_tac_toe_logic.SingleMarkerAction(col=col, row=row)
self.assertTrue(self.logic.apply(player=player_id, action=action),
msg='Invalid action: {}'.format(action))
state = self.logic.open_spiel_state
planner_action = tic_tac_toe_logic.tic_tac_toe_minimax(state,
rand_state)
self.assertEqual(planner_action, optimal_move)
# Do the same but with np array as input
self.logic = tic_tac_toe_logic.TicTacToeGameLogic()
for (player_id, row, col) in move_sequence:
action = tic_tac_toe_logic.SingleMarkerAction(col=col, row=row)
self.assertTrue(self.logic.apply(player=player_id, action=action),
msg='Invalid action: {}'.format(action))
board = self.logic.get_board_state()
planner_action = tic_tac_toe_logic.tic_tac_toe_minimax(board,
rand_state)
self.assertEqual(planner_action, optimal_move)
if __name__ == '__main__':
absltest.main()
|
deepmind-research-master
|
physics_planning_games/board_games/tic_tac_toe_logic_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TicTacToe logic wrapper for use in manipulation tasks."""
import collections
import itertools
import numpy as np
from physics_planning_games.board_games import logic_base
from open_spiel.python.algorithms import minimax
import pyspiel
SingleMarkerAction = collections.namedtuple('SingleMarkerAction',
['row', 'col'])
force_random_start_position = False
class TicTacToeGameLogic(logic_base.OpenSpielBasedLogic):
"""Logic for TicTacToe game."""
def __init__(self):
self.reset()
def reset(self):
"""Resets the game state."""
# For now we always assume we are the starting player.
game = pyspiel.load_game('tic_tac_toe')
self._open_spiel_state = game.new_initial_state()
if force_random_start_position:
# For debugging purposes only, force some random moves
rand_state = np.random.RandomState(46)
rand_player = TicTacToeRandomOpponent()
num_moves = 4
for _ in range(num_moves):
action = rand_player.policy(self, rand_state)
action_1d = np.ravel_multi_index(action, (3, 3))
self._open_spiel_state.apply_action(action_1d)
def get_board_state(self):
"""Returns the logical board state as a numpy array.
Returns:
A boolean array of shape (H, W, C), where H=3, W=3 (height and width
of the board) and C=3 for the 3 planes. The 3 planes are, in order,
unmarked squares, x's (player 0) and y's (player 1).
"""
board_state = np.reshape(
np.array(self._open_spiel_state.observation_tensor(0), dtype=bool),
[3, 3, 3])
board_state = np.transpose(board_state, [1, 2, 0])
board_state = board_state[:, :, [0, 2, 1]]
return board_state
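  # For example (illustrative): on a fresh board get_board_state()[..., 0] is
  # all True; after player 0 marks (row=2, col=1), element [2, 1, 1] becomes
  # True and [2, 1, 0] becomes False (see tic_tac_toe_logic_test.py).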
def apply(self, player, action):
"""Checks whether action is valid, and if so applies it to the game state.
Args:
player: Integer specifying the player ID; either 0 or 1.
action: A `SingleMarkerAction` instance.
Returns:
True if the action was valid, else False.
"""
action_value = np.ravel_multi_index((action.row, action.col), (3, 3))
if self._open_spiel_state.current_player() != player:
return False
try:
self._open_spiel_state.apply_action(action_value)
was_valid_move = True
except RuntimeError:
was_valid_move = False
return was_valid_move
class TicTacToeRandomOpponent(logic_base.Opponent):
"""An easy opponent for TicTacToe."""
def __init__(self):
pass
def reset(self):
"""Resets the opponent's internal state (not implemented)."""
pass
def policy(self, game_logic, random_state):
"""Return a random, valid move.
Args:
game_logic: TicTacToeGameLogic state of the game.
random_state: An instance of `np.random.RandomState`
Returns:
SingleMarkerAction of opponent.
"""
if game_logic.is_game_over:
return None
valid_moves = game_logic.open_spiel_state.legal_actions()
assert valid_moves
move = random_state.choice(valid_moves)
row, col = np.unravel_index(move, shape=(3, 3))
return SingleMarkerAction(row=row, col=col)
class TicTacToeMixtureOpponent(logic_base.Opponent):
"""A TicTacToe opponent which makes a mixture of optimal and random moves.
The optimal mixture component uses minimax search.
"""
def __init__(self, mixture_p):
"""Initialize the mixture opponent.
Args:
mixture_p: The mixture probability. We choose moves from the random
opponent with probability mixture_p and moves from the optimal
opponent with probability 1 - mixture_p.
"""
self._random_opponent = TicTacToeRandomOpponent()
self._optimal_opponent = TicTacToeOptimalOpponent()
self._mixture_p = mixture_p
def reset(self):
pass
def policy(self, game_logic, random_state):
if random_state.rand() < self._mixture_p:
return self._random_opponent.policy(game_logic, random_state)
else:
return self._optimal_opponent.policy(game_logic, random_state)
class TicTacToeOptimalOpponent(logic_base.Opponent):
"""A TicTacToe opponent which makes perfect moves.
Uses minimax search.
"""
def __init__(self):
pass
def reset(self):
pass
def policy(self, game_logic, random_state):
action = tic_tac_toe_minimax(game_logic.open_spiel_state, random_state)
return action
def numpy_array_to_open_spiel_state(board_state):
"""Take a numpy observation [3x3x3] bool area and create an OpenSpiel state.
Args:
    board_state: 3x3x3 bool array with [col, row, c], where c indexes, in
      order, empty squares, x moves and y moves.
Returns:
open_spiel_state: OpenSpiel state of this position.
"""
game = pyspiel.load_game('tic_tac_toe')
open_spiel_state = game.new_initial_state()
x_moves = np.flatnonzero(board_state[:, :, 1])
y_moves = np.flatnonzero(board_state[:, :, 2])
for x_m, y_m in itertools.zip_longest(x_moves, y_moves):
if open_spiel_state.is_terminal():
break
open_spiel_state.apply_action(x_m)
if open_spiel_state.is_terminal():
break
if y_m is not None:
open_spiel_state.apply_action(y_m)
return open_spiel_state
def open_spiel_move_to_single_marker_action(action):
row, col = np.unravel_index(action, shape=(3, 3))
return SingleMarkerAction(row=row, col=col)
def tic_tac_toe_random_move(state, random_state):
"""Returns a legal move at random from current state.
Args:
state: World state of the game. Either an OpenSpiel state
or a numpy encoding of the board.
random_state: numpy random state used for choosing randomly if there is more
than one optimal action.
Returns:
action: SingleMarkerAction of a random move.
"""
if isinstance(state, np.ndarray):
spiel_state = numpy_array_to_open_spiel_state(state)
else:
spiel_state = state
if spiel_state.is_terminal():
return False
legal_actions = spiel_state.legal_actions()
action = random_state.choice(legal_actions)
return open_spiel_move_to_single_marker_action(action)
def tic_tac_toe_minimax(state, random_state):
"""Tree search from the world_state in order to find the optimal action.
Args:
state: World state of the game. Either an OpenSpiel state
or a numpy encoding of the board.
random_state: numpy random state used for choosing randomly if there is more
than one optimal action.
Returns:
action: SingleMarkerAction of an optimal move.
"""
if isinstance(state, np.ndarray):
spiel_state = numpy_array_to_open_spiel_state(state)
else:
spiel_state = state
if spiel_state.is_terminal():
return False
current_player = spiel_state.current_player()
legal_actions = spiel_state.legal_actions()
best_actions = []
best_value = -100
for action in legal_actions:
state_after_action = spiel_state.clone()
state_after_action.apply_action(action)
value, _ = minimax.expectiminimax(state_after_action, 100, None,
current_player)
if value > best_value:
best_value = value
best_actions = [action]
elif value == best_value:
best_actions.append(action)
assert best_actions
action = random_state.choice(best_actions)
return open_spiel_move_to_single_marker_action(action)
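if __name__ == '__main__':
  # Minimal usage sketch (not part of the original module): let the optimal
  # opponent play both sides of one game; perfect play should end in a draw.
  demo_rand_state = np.random.RandomState(0)
  demo_logic = TicTacToeGameLogic()
  demo_opponent = TicTacToeOptimalOpponent()
  demo_player = 0
  while not demo_logic.is_game_over:
    demo_move = demo_opponent.policy(demo_logic, demo_rand_state)
    assert demo_logic.apply(demo_player, demo_move)
    demo_player = 1 - demo_player
  print(demo_logic.get_reward)  # Expected: {0: 0.5, 1: 0.5}.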
|
deepmind-research-master
|
physics_planning_games/board_games/tic_tac_toe_logic.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Abstract base classes and utility functions for logical aspects of the games.
"""
import abc
ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()})
class GameLogic(ABC):
"""Define the abstrat game logic class.
"""
@abc.abstractmethod
def __init__(self):
pass
@abc.abstractmethod
def reset(self):
pass
@abc.abstractproperty
def is_game_over(self):
"""Boolean specifying whether the current game has ended."""
@abc.abstractproperty
def get_reward(self):
pass
@abc.abstractmethod
def get_board_state(self):
"""Returns the logical board state as a numpy array."""
@abc.abstractmethod
def apply(self, player, action):
"""Checks whether action is valid, and if so applies it to the game state.
Args:
player: Integer specifying the player ID; either 0 or 1.
action: A `GoMarkerAction` instance.
Returns:
True if the action was valid, else False.
"""
class OpenSpielBasedLogic(GameLogic):
"""GameLogic using OpenSpiel for tracking game state.
"""
@property
def is_game_over(self):
"""Boolean specifying whether the current game has ended."""
return self._open_spiel_state.is_terminal()
@property
def get_reward(self):
"""Returns a dictionary that maps from `{player_id: player_reward}`."""
if self.is_game_over:
player0_return = self._open_spiel_state.player_return(0)
      # Translate OpenSpiel player-0 returns (+1 win, -1 loss, 0 draw) into
      # rewards: 1. for a win, 0. for a loss, and 0.5 each for a draw.
if player0_return == 0.:
reward = {0: 0.5, 1: 0.5}
elif player0_return == 1.:
reward = {0: 1., 1: 0.}
else:
assert player0_return == -1.
reward = {0: 0., 1: 1.}
else:
reward = {0: 0.,
1: 0.}
return reward
@property
def open_spiel_state(self):
"""OpenSpiel object representing the underlying game state."""
return self._open_spiel_state
class Opponent(ABC):
"""Abstract Opponent class."""
@abc.abstractmethod
def __init__(self):
pass
@abc.abstractmethod
def reset(self):
pass
@abc.abstractmethod
def policy(self, game_logic, random_state):
"""Return policy action.
Args:
game_logic: Go game logic state.
random_state: Numpy random state object.
Returns:
NamedTuple indicating opponent move.
"""
|
deepmind-research-master
|
physics_planning_games/board_games/logic_base.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Shared base class for two-player Jaco arm board games.
"""
import functools
from dm_control import composer
from dm_control.composer import initializers
from dm_control.composer.observation import observable
from dm_control.composer.variation import distributions
from dm_control.composer.variation import rotations
from dm_control.entities.manipulators import base
from dm_control.entities.manipulators import kinova
import numpy as np
from physics_planning_games.board_games._internal import arenas
from physics_planning_games.board_games._internal import observations
_ARM_Y_OFFSET = 0.4
_TCP_LOWER_BOUNDS = (-0.1, -0.1, 0.2)
_TCP_UPPER_BOUNDS = (0.1, 0.1, 0.4)
# Player IDs
SELF = 0
OPPONENT = 1
def _uniform_downward_rotation():
angle = distributions.Uniform(-np.pi, np.pi, single_sample=True)
quaternion = rotations.QuaternionFromAxisAngle(axis=(0., 0., 1.), angle=angle)
return functools.partial(rotations.QuaternionPreMultiply(quaternion),
initial_value=base.DOWN_QUATERNION)
class JacoArmBoardGame(composer.Task):
"""Base class for two-player checker-like board games."""
def __init__(self, observation_settings, opponent, game_logic, board,
markers):
"""Initializes the task.
Args:
observation_settings: An `observations.ObservationSettings` namedtuple
specifying configuration options for each category of observation.
opponent: Opponent used for generating opponent moves.
game_logic: Logic for keeping track of the logical state of the board.
board: Board to use.
markers: Markers to use.
"""
self._game_logic = game_logic
self._game_opponent = opponent
arena = arenas.Standard(observable_options=observations.make_options(
observation_settings, observations.ARENA_OBSERVABLES))
arena.attach(board)
arm = kinova.JacoArm(observable_options=observations.make_options(
observation_settings, observations.JACO_ARM_OBSERVABLES))
hand = kinova.JacoHand(observable_options=observations.make_options(
observation_settings, observations.JACO_HAND_OBSERVABLES))
arm.attach(hand)
arena.attach_offset(arm, offset=(0, _ARM_Y_OFFSET, 0))
arena.attach(markers)
# Geoms belonging to the arm and hand are placed in a custom group in order
# to disable their visibility to the top-down camera. NB: we assume that
# there are no other geoms in ROBOT_GEOM_GROUP that don't belong to the
# robot (this is usually the case since the default geom group is 0). If
# there are then these will also be invisible to the top-down camera.
for robot_geom in arm.mjcf_model.find_all('geom'):
robot_geom.group = arenas.ROBOT_GEOM_GROUP
self._arena = arena
self._board = board
self._arm = arm
self._hand = hand
self._markers = markers
self._tcp_initializer = initializers.ToolCenterPointInitializer(
hand=hand, arm=arm,
position=distributions.Uniform(_TCP_LOWER_BOUNDS, _TCP_UPPER_BOUNDS),
quaternion=_uniform_downward_rotation())
# Add an observable exposing the logical state of the board.
board_state_observable = observable.Generic(
lambda physics: self._game_logic.get_board_state())
board_state_observable.configure(
**observation_settings.board_state._asdict())
self._task_observables = {'board_state': board_state_observable}
@property
def root_entity(self):
return self._arena
@property
def arm(self):
return self._arm
@property
def hand(self):
return self._hand
@property
def task_observables(self):
return self._task_observables
def get_reward(self, physics):
del physics # Unused.
return self._game_logic.get_reward[SELF]
def should_terminate_episode(self, physics):
return self._game_logic.is_game_over
def initialize_episode(self, physics, random_state):
self._tcp_initializer(physics, random_state)
self._game_logic.reset()
self._game_opponent.reset()
def before_step(self, physics, action, random_state):
super(JacoArmBoardGame, self).before_step(physics, action, random_state)
self._made_move_this_step = False
def after_substep(self, physics, random_state):
raise NotImplementedError('Subclass must implement after_substep.')
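# Note (illustrative, not part of the original module): concrete games in this
# package (e.g. the tic_tac_toe and go tasks imported by
# physics_planning_games/board_games/__init__.py) subclass JacoArmBoardGame and
# override `after_substep` to translate physical interactions into logical
# moves on `self._game_logic`.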
|
deepmind-research-master
|
physics_planning_games/board_games/jaco_arm_board_game.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Basic smoke test for board_games environments."""
from absl.testing import absltest
from dm_env import test_utils
from physics_planning_games import board_games
class GoTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
return board_games.load(environment_name='go_7x7', seed=0)
class TicTacToeTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
return board_games.load(
environment_name='tic_tac_toe_mixture_opponent_markers_features',
seed=0)
if __name__ == '__main__':
absltest.main()
|
deepmind-research-master
|
physics_planning_games/board_games/board_games_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Physically-grounded board game environments."""
from dm_control import composer as _composer
from physics_planning_games.board_games import go as _go
from physics_planning_games.board_games import tic_tac_toe as _tic_tac_toe
from physics_planning_games.board_games._internal import registry as _registry
_registry.done_importing_tasks()
ALL = tuple(_registry.get_all_names())
TAGS = tuple(_registry.get_tags())
def get_environments_by_tag(tag):
"""Returns the names of all environments matching a given tag.
Args:
tag: A string from `TAGS`.
Returns:
A tuple of environment names.
"""
return tuple(_registry.get_names_by_tag(tag))
def load(environment_name,
env_kwargs=None,
seed=None,
time_limit=float('inf'),
strip_singleton_obs_buffer_dim=False):
"""Loads an environment from board_games.
Args:
environment_name: String, the name of the environment to load. Must be in
`ALL`.
env_kwargs: extra params to pass to task creation.
seed: Optional, either an int seed or an `np.random.RandomState`
object. If None (default), the random number generator will self-seed
from a platform-dependent source of entropy.
time_limit: (optional) A float, the time limit in seconds beyond which an
episode is forced to terminate.
strip_singleton_obs_buffer_dim: (optional) A boolean, if `True`,
the array shape of observations with `buffer_size == 1` will not have a
leading buffer dimension.
Returns:
An instance of `composer.Environment`.
"""
if env_kwargs is not None:
task = _registry.get_constructor(environment_name)(**env_kwargs)
else:
task = _registry.get_constructor(environment_name)()
return _composer.Environment(
task=task,
time_limit=time_limit,
strip_singleton_obs_buffer_dim=strip_singleton_obs_buffer_dim,
random_state=seed)
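if __name__ == '__main__':
  # Minimal usage sketch (not part of the original module), mirroring
  # board_games_test.py. The go environments additionally require the gnugo
  # binary (see go_logic.py).
  env = load(environment_name='go_7x7', seed=0)
  timestep = env.reset()
  print(sorted(timestep.observation.keys()))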
|
deepmind-research-master
|
physics_planning_games/board_games/__init__.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Logic for the Go Game."""
import abc
import collections
import enum
import shutil
import subprocess
from absl import logging
import numpy as np
from dm_control.utils import io as resources
from physics_planning_games.board_games import logic_base
import pyspiel
GNUGO_PATH = '/usr/games/gnugo'
GoMarkerAction = collections.namedtuple('GoMarkerAction',
['row', 'col', 'pass_action'])
# Note that there is no 'i' in these Go board coordinates
# (cf https://senseis.xmp.net/?Coordinates)
_X_CHARS = 'abcdefghjklmnopqrstuvwxyz'
_X_MAP = {c: x for c, x in zip(_X_CHARS, range(len(_X_CHARS)))}
def _go_marker_to_int(go_marker, board_size):
"""Convert GoMarkerAction into GoPoint integer encoding of move.
Args:
go_marker: GoMarkerAction.
board_size: Board size of the go board (e.g. 9 or 19).
Returns:
GoPoint int value.
"""
if go_marker.pass_action:
return board_size * board_size
else:
return int((go_marker.row) * board_size + go_marker.col)
def _int_to_go_marker(move_int, board_size):
"""Decode the integer move encoding to a GoMarkerAction.
Args:
move_int: Integer encoding the go move.
board_size: Board size of the go board (e.g. 9 or 19).
Returns:
GoMarkerAction encoding of move.
"""
if move_int == board_size * board_size:
go_marker_action = GoMarkerAction(row=-1, col=-1, pass_action=True)
else:
row = move_int // board_size
col = move_int % board_size
go_marker_action = GoMarkerAction(row=row, col=col, pass_action=False)
return go_marker_action
def _go_marker_to_str(go_marker):
if go_marker.pass_action:
return 'PASS'
else:
move_str = _X_CHARS[go_marker.col] + str(go_marker.row + 1)
return move_str
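# Examples (illustrative, not part of the original module), for a 9x9 board:
#   _go_marker_to_int(GoMarkerAction(row=2, col=4, pass_action=False), 9) == 22
#   _int_to_go_marker(81, 9) == GoMarkerAction(row=-1, col=-1, pass_action=True)
#   _go_marker_to_str(GoMarkerAction(row=2, col=4, pass_action=False)) == 'e3'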
def _str_to_go_marker(move_str):
"""Convert from a 2-letter Go move str (e.g.
a3) to a GoMarker.
Args:
move_str: String describing the move (e.g. a3).
Returns:
GoMarkerAction encoding of move.
"""
move_str = move_str.lower()
if move_str == 'pass':
action = GoMarkerAction(row=-1, col=-1, pass_action=True)
elif move_str == 'resign':
raise NotImplementedError('Not dealing with resign')
else:
assert len(move_str) == 2
col, row = move_str[0], move_str[1]
col = _X_MAP[col]
row = int(row) - 1
action = GoMarkerAction(row=row, col=col, pass_action=False)
return action
def _get_gnugo_ref_config(level=1, binary_path=None):
"""Reference config for GnuGo.
Args:
level: GnuGo level
binary_path: string pointing to GnuGo binary
Returns:
Config dict that can be passed to gtp engine
"""
try:
gnugo_binary_path = resources.GetResourceFilename(binary_path)
except FileNotFoundError:
gnugo_binary_path = shutil.which('gnugo')
if not gnugo_binary_path:
      raise FileNotFoundError('Not able to locate the gnugo binary. '
                              'Try installing it with: apt install gnugo')
gnugo_extra_flags = ['--mode', 'gtp']
gnugo_extra_flags += ['--chinese-rules', '--capture-all-dead']
gtp_player_cfg = {
'name': 'gnugo',
'binary_path': gnugo_binary_path,
'level': level,
'extra_flags': gnugo_extra_flags,
}
return gtp_player_cfg
class Stone(enum.Enum):
EMPTY = 1
WHITE = 2
BLACK = 3
def __lt__(self, other):
value = int(self.value)
return value < other.value
def gtp_to_sgf_point(gtp_point, board_size):
"""Format a GTP point according to the SGF format."""
if gtp_point.lower() == 'pass' or gtp_point.lower() == 'resign':
return 'tt'
column, row = gtp_point[0], gtp_point[1:]
# GTP doesn't use i, but SGF does, so we need to convert.
gtp_columns = 'abcdefghjklmnopqrstuvwxyz'
sgf_columns = 'abcdefghijklmnopqrstuvwxyz'
x = gtp_columns.find(column.lower())
y = board_size - int(row)
return '%s%s' % (sgf_columns[x], sgf_columns[y])
class Gtp(object):
"""Wrapper around Go playing program that communicates using GTP."""
__metaclass__ = abc.ABCMeta
def __init__(self, checkpoint_file=None):
self.stones = {
'.': Stone.EMPTY,
'+': Stone.EMPTY,
'O': Stone.WHITE,
'X': Stone.BLACK
}
self.moves = []
self.comments = []
self.handicap = 0
self.board_size = 19
self.komi = 0
self.free_handicap = None
self.byo_yomi_time = None
self.checkpoint_file = checkpoint_file
self.stderr = None
def set_board_size(self, size):
self.board_size = size
self.gtp_command('boardsize %d' % size)
self.gtp_command('clear_board')
def set_komi(self, komi):
self.komi = komi
self.gtp_command('komi %s' % komi)
def set_free_handicap(self, vertices):
self.free_handicap = vertices
self.gtp_command('set_free_handicap %s' % vertices)
def place_free_handicap(self, n):
self.free_handicap = self.gtp_command('place_free_handicap %d' % n)
return self.free_handicap
def make_move(self, move, record=True):
self.gtp_command('play %s' % move)
if record:
self._record_move(move)
def set_byo_yomi_time(self, t):
self.byo_yomi_time = t
def num_moves(self):
return len(self.moves)
def clear_board(self):
self.moves = []
self.comments = []
self.gtp_command('clear_board')
def generate_move(self, color):
if self.byo_yomi_time is not None:
self.gtp_command('time_left %s %d 1' % (color, self.byo_yomi_time))
move = '%s %s' % (color, self.gtp_command(
'genmove %s' % color).split(' ')[-1].lower())
self._record_move(move, stderr=self.stderr)
return move
def board(self):
raw_board = self.gtp_command('showboard', log=False)[1:].strip()
rows = [line.strip().split(' ')[0] for line in raw_board.split('\n')][1:-1]
rows = [''.join(row.split(' ')[1:-1]) for row in rows]
return [[self.stones[cell] for cell in row] for row in rows]
def quit(self):
self.gtp_command('quit')
def final_status(self, status):
return self.gtp_command('final_status_list %s' % status)[2:].replace(
'\n', ' ').split(' ')
def fixed_handicap(self, handicap):
self.handicap = handicap
self.gtp_command('fixed_handicap %d' % handicap)
def undo(self, num_moves):
self.gtp_command('gg-undo %d' % num_moves)
for _ in range(num_moves):
self.moves.pop()
self.comments.pop()
def _record_move(self, move, stderr=None):
self.moves.append(move)
self.comments.append(stderr)
if self.checkpoint_file:
with open(self.checkpoint_file, 'w') as f:
f.write(self.to_sgf())
def to_sgf(self):
sgf = '(;PB[Black]PW[White]KM[%.1f]HA[%d]SZ[19]' % (self.komi,
self.handicap)
for i, move in enumerate(self.moves):
sgf += '\n;' + self._format_sgf_move(move)
if self.comments[i]:
sgf += 'C[' + self._sgf_escape(self.comments[i]) + ']'
return sgf + ')'
def _format_sgf_move(self, move):
"""Format a move according to the SGF format."""
color, vertex = str(move).split(' ')
return '%s[%s]' % (color[0].upper(),
gtp_to_sgf_point(vertex, self.board_size))
def _sgf_escape(self, text):
return ''.join(['\\' + t if t == ']' or t == '\\' else t for t in text])
@abc.abstractmethod
def gtp_command(self, command, log=True):
"""Executes a GTP command and returns its response.
Args:
command: The GTP command to run, no trailing newline.
log: Whether to log command and response to INFO.
Returns:
The GTP response.
Raises:
GtpError: if the response is not ok (doesn't start with '=').
"""
pass
class GtpError(Exception):
def __init__(self, response):
super(GtpError, self).__init__()
self.response = response
def __str__(self):
return self.response
class GoEngine(Gtp):
"""GTP-based Go engine.
Supports at least GnuGo and Pachi.
For GnuGo, at least specify ['--mode', 'gtp'] in extra_flags.
"""
def __init__(self, command='', checkpoint_file=None, extra_flags=None):
super(GoEngine, self).__init__(checkpoint_file)
if extra_flags:
command = [command] + extra_flags
self.p = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
def gtp_command(self, command, log=True):
if log:
logging.info('GTP: %s', command)
self.p.stdin.write(command)
self.p.stdin.write('\n')
self.p.stdin.flush()
response = [self.p.stdout.readline()]
while response[-1] != '\n':
response.append(self.p.stdout.readline())
response = ''.join(response).strip()
if log:
logging.info('GTP: %s', response)
    if not response.startswith('='):
raise GtpError(response)
return response
class GoGameLogic(logic_base.OpenSpielBasedLogic):
"""Logic for Go game."""
def __init__(self, board_size, gnugo_level=1, komi=5.5):
self._board_size = board_size
self._komi = komi
gtp_player_cfg = _get_gnugo_ref_config(
level=gnugo_level,
binary_path=GNUGO_PATH)
self._gtp_player = GoEngine(
command=gtp_player_cfg['binary_path'],
extra_flags=gtp_player_cfg['extra_flags'])
self._gtp_player.set_board_size(board_size)
self.reset()
def board_size(self):
return self._board_size
def get_gtp_player(self):
return self._gtp_player
def reset(self):
"""Resets the game state."""
# For now we always assume we are the starting player and use a random
# opponent.
self._gtp_player.gtp_command('clear_board', log=False)
self._gtp_player.set_board_size(self._board_size)
self._gtp_player.set_komi(self._komi)
game = pyspiel.load_game('go', {'board_size': self._board_size})
self._open_spiel_state = game.new_initial_state()
self._moves = np.ones(
(self._board_size * self._board_size * 2,), dtype=np.int32) * -1
self._move_id = 0
def show_board(self):
self._gtp_player.gtp_command('showboard')
def get_gtp_reward(self):
self._gtp_player.gtp_command('final_score')
def get_board_state(self):
"""Returns the logical board state as a numpy array.
    Returns: A boolean array of shape (H, W, C), where H = W = board_size (the
      height and width of the board) and C = 4 for the 4 planes. The 4 planes
      are, in order: unmarked, black (player 0), white (player 1) and komi
      (this plane is constant, indicating whether white is to play).
"""
board_state = np.reshape(
np.array(self._open_spiel_state.observation_tensor(0), dtype=bool),
[4, self._board_size, self._board_size])
board_state = np.transpose(board_state, [1, 2, 0])
board_state = board_state[:, :, [2, 0, 1, 3]]
return board_state
def set_state_from_history(self, move_history):
self.reset()
move_history = np.squeeze(move_history.numpy())
for t in range(move_history.size):
if move_history[t] < 0:
break
else:
self.apply(t % 2, move_history[t])
# self.show_board()
def get_move_history(self):
"""Returns the move history as padded numpy array."""
return self._moves
def apply(self, player, action):
"""Checks whether action is valid, and if so applies it to the game state.
Args:
player: Integer specifying the player ID; either 0 or 1.
action: A `GoMarkerAction` instance (or numpy.int32) which represent the
action in the board of size `board_size`.
Returns:
True if the action was valid, else False.
"""
if isinstance(action, GoMarkerAction):
action = _go_marker_to_int(action, self._board_size)
if self._open_spiel_state.current_player() != player:
return False
legal_actions = self._open_spiel_state.legal_actions()
if np.isin(action, legal_actions):
self._open_spiel_state.apply_action(action)
was_valid_move = True
else:
was_valid_move = False
if not was_valid_move:
return False
self._moves[self._move_id] = action
self._move_id += 1
# Apply to the Go program
player_color = 'B' if player == 0 else 'W'
action_str = _go_marker_to_str(_int_to_go_marker(action, self._board_size))
self._gtp_player.gtp_command('play {} {}'.format(player_color, action_str))
return was_valid_move
def gen_move(game_logic, player):
"""Generate move from GTP player and game state defined in game_logic."""
player_color = 'B' if player == 0 else 'W'
gtp_player = game_logic.get_gtp_player()
move_str = gtp_player.gtp_command(
'reg_genmove {}'.format(player_color), log=True)
move_str = move_str[2:].lower()
action = _str_to_go_marker(move_str)
return action
def gen_random_move(game_logic, random_state):
"""Generate random move for current state in game logic."""
if game_logic.is_game_over:
return None
valid_moves = game_logic.open_spiel_state.legal_actions()
assert valid_moves
move = random_state.choice(valid_moves)
go_action = _int_to_go_marker(move, board_size=game_logic.board_size())
return go_action
class GoGTPOpponent(logic_base.Opponent):
"""Use external binary Pachi to generate opponent moves."""
def __init__(self, board_size, mixture_p=0.0):
"""Initialize Go opponent.
Args:
board_size: Go board size (int)
mixture_p: Probability of playing a random move (amongst legal moves).
"""
self._board_size = board_size
self._mixture_p = mixture_p
def reset(self):
pass
def policy(self, game_logic, player, random_state):
"""Return policy action.
Args:
game_logic: Go game logic state.
player: Integer specifying the player ID; either 0 or 1.
random_state: Numpy random state object.
Returns:
GoMarkerAction indicating opponent move.
"""
if random_state.rand() < self._mixture_p:
return gen_random_move(game_logic, random_state)
else:
return gen_move(game_logic, player)
class GoRandomOpponent(logic_base.Opponent):
"""An easy opponent for Go."""
def __init__(self, board_size):
self._board_size = board_size
def reset(self):
"""Resets the opponent's internal state (not implemented)."""
pass
def policy(self, game_logic, player, random_state):
"""Return a random, valid move.
Args:
game_logic: TicTacToeGameLogic state of the game.
player: Integer specifying the player ID; either 0 or 1.
random_state: An instance of `np.random.RandomState`
Returns:
GoMarkerAction of opponent.
"""
return gen_random_move(game_logic, random_state)
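# --- Illustrative sketch (not part of the original go_logic.py) ---
# A minimal, self-contained check of the pure move-encoding helpers defined
# above; it needs neither GnuGo nor MuJoCo. The helper name and values are
# purely for illustration.
def _example_marker_round_trip():
  marker = GoMarkerAction(row=2, col=1, pass_action=False)
  encoded = _go_marker_to_int(marker, board_size=9)  # 2 * 9 + 1 == 19.
  assert _int_to_go_marker(encoded, board_size=9) == marker
  assert _go_marker_to_str(marker) == 'b3'  # col 1 -> 'b', row 2 -> '3'.
  assert _str_to_go_marker('b3') == marker
  pass_marker = GoMarkerAction(row=-1, col=-1, pass_action=True)
  assert _go_marker_to_int(pass_marker, board_size=9) == 81  # Pass encoding.
  return encoded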
|
deepmind-research-master
|
physics_planning_games/board_games/go_logic.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from physics_planning_games.board_games import go_logic
class GoGameLogicTest(parameterized.TestCase):
def setUp(self):
super(GoGameLogicTest, self).setUp()
self.logic = go_logic.GoGameLogic(board_size=5)
self.expected_board_state = np.zeros((5, 5, 4), dtype=bool)
self.expected_board_state[:, :, 0] = True
def test_valid_move_sequence(self):
np.testing.assert_array_equal(self.logic.get_board_state(),
self.expected_board_state)
action = go_logic.GoMarkerAction(col=1, row=2, pass_action=False)
self.assertTrue(self.logic.apply(player=0, action=action),
msg='Invalid action: {}'.format(action))
def test_pass(self):
action = go_logic.GoMarkerAction(col=0, row=0, pass_action=True)
self.assertTrue(self.logic.apply(player=0, action=action),
msg='Invalid action: {}'.format(action))
self.expected_board_state[:, :, 3] = True
np.testing.assert_array_equal(self.logic.get_board_state(),
self.expected_board_state)
def test_invalid_move_sequence(self):
np.testing.assert_array_equal(self.logic.get_board_state(),
self.expected_board_state)
action = go_logic.GoMarkerAction(col=1, row=2, pass_action=False)
self.assertTrue(self.logic.apply(player=0, action=action),
msg='Invalid action: {}'.format(action))
self.expected_board_state[action.row, action.col, 0] = False
self.expected_board_state[action.row, action.col, 1] = True
self.expected_board_state[:, :, 3] = True
np.testing.assert_array_equal(self.logic.get_board_state(),
self.expected_board_state)
action = go_logic.GoMarkerAction(col=1, row=2, pass_action=False)
self.assertFalse(self.logic.apply(player=0, action=action),
msg='Invalid action was accepted: {}'.format(action))
# Player 1 tries to move in the same location as player 0.
self.assertFalse(self.logic.apply(player=1, action=action),
msg='Invalid action was accepted: {}'.format(action))
# The board state should not have changed as a result of invalid actions.
np.testing.assert_array_equal(self.logic.get_board_state(),
self.expected_board_state)
def test_random_opponent_vs_gnugo(self):
"""Play random v gnugo opponents and check that optimal largely wins.
"""
board_size = 9
rand_state = np.random.RandomState(42)
pachi_opponent = go_logic.GoGTPOpponent(board_size)
random_opponent = go_logic.GoRandomOpponent(board_size)
players = [pachi_opponent, random_opponent]
pachi_returns = []
random_returns = []
for _ in range(3):
logic = go_logic.GoGameLogic(board_size)
pachi_opponent.reset()
random_opponent.reset()
rand_state.shuffle(players)
current_player_idx = 0
while not logic.is_game_over:
current_player = players[current_player_idx]
action = current_player.policy(logic, current_player_idx, rand_state)
valid_action = logic.apply(current_player_idx, action)
self.assertTrue(valid_action,
msg='Opponent {} selected invalid action {}'.format(
current_player, action))
current_player_idx = (current_player_idx + 1) % 2
# Record the winner.
reward = logic.get_reward
if players[0] == pachi_opponent:
pachi_return = reward[0]
random_return = reward[1]
else:
pachi_return = reward[1]
random_return = reward[0]
pachi_returns.append(pachi_return)
random_returns.append(random_return)
mean_pachi_returns = np.mean(pachi_returns)
mean_random_returns = np.mean(random_returns)
self.assertGreater(mean_pachi_returns, 0.95)
self.assertLess(mean_random_returns, 0.05)
@parameterized.named_parameters([
dict(testcase_name='00',
row=0, col=0),
dict(testcase_name='01',
row=1, col=0)])
def test_go_marker_to_int(self, row, col):
go_marker = go_logic.GoMarkerAction(row=row, col=col, pass_action=False)
int_action = go_logic._go_marker_to_int(go_marker, board_size=19)
recovered_go_marker = go_logic._int_to_go_marker(int_action, board_size=19)
self.assertEqual(go_marker, recovered_go_marker,
msg='Initial go marker {}, recovered {}'.format(
go_marker, recovered_go_marker))
@parameterized.named_parameters([
dict(testcase_name='00',
row=0, col=0),
dict(testcase_name='01',
row=1, col=0)])
def test_go_marker_to_str(self, row, col):
go_marker = go_logic.GoMarkerAction(row=row, col=col, pass_action=False)
str_action = go_logic._go_marker_to_str(go_marker)
recovered_go_marker = go_logic._str_to_go_marker(str_action)
self.assertEqual(go_marker,
recovered_go_marker,
msg='Initial go marker {}, recovered {}, '
'str_action {}'.format(go_marker, recovered_go_marker,
str_action))
if __name__ == '__main__':
absltest.main()
|
deepmind-research-master
|
physics_planning_games/board_games/go_logic_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A Go board game."""
from dm_control.composer.observation import observable
import numpy as np
from physics_planning_games.board_games import go_logic
from physics_planning_games.board_games import jaco_arm_board_game
from physics_planning_games.board_games._internal import boards
from physics_planning_games.board_games._internal import observations
from physics_planning_games.board_games._internal import pieces
from physics_planning_games.board_games._internal import registry
from physics_planning_games.board_games._internal import tags
_BLACK = (0., 0., 0., 0.75)
_WHITE = (1., 1., 1., 0.75)
_GO_PIECE_SIZE = 0.04
_DEFAULT_OPPONENT_MIXTURE = 0.2
class Go(jaco_arm_board_game.JacoArmBoardGame):
"""Single-player Go of configurable size."""
def __init__(self, board_size, observation_settings, opponent=None,
reset_arm_after_move=True):
"""Initializes a `Go` task.
Args:
board_size: board size
observation_settings: An `observations.ObservationSettings` namedtuple
specifying configuration options for each category of observation.
opponent: Go opponent to use for the opponent player actions.
reset_arm_after_move: Whether to reset arm to random position after every
piece being placed on the board.
"""
game_logic = go_logic.GoGameLogic(board_size=board_size)
if opponent is None:
opponent = go_logic.GoGTPOpponent(board_size=board_size,
mixture_p=_DEFAULT_OPPONENT_MIXTURE)
self._last_valid_move_is_pass = False
super(Go, self).__init__(observation_settings=observation_settings,
opponent=opponent,
game_logic=game_logic,
board=boards.GoBoard(boardsize=board_size),
markers=pieces.Markers(
player_colors=(_BLACK, _WHITE),
halfwidth=_GO_PIECE_SIZE,
num_per_player=board_size*board_size*2,
observable_options=observations.make_options(
observation_settings,
observations.MARKER_OBSERVABLES),
board_size=board_size))
self._reset_arm_after_move = reset_arm_after_move
# Add an observable exposing the move history (to reconstruct game states)
move_history_observable = observable.Generic(
lambda physics: self._game_logic.get_move_history())
move_history_observable.configure(
**observation_settings.board_state._asdict())
self._task_observables['move_history'] = move_history_observable
@property
def name(self):
return 'Go'
@property
def control_timestep(self):
return 0.05
def after_substep(self, physics, random_state):
if not self._made_move_this_step:
      # Determine which board square received the most contact pressure.
indices = self._board.get_contact_indices(physics)
if not indices:
return
row, col = indices
# Makes sure that contact with that board square involved a finger
finger_touch = self._board.validate_finger_touch(physics,
row, col, self._hand)
if not finger_touch:
return
      pass_action = (row == -1 and col == -1)
      if pass_action and self._last_valid_move_is_pass:
        # Don't allow two passes in a row (otherwise it is hard to pass only
        # once).
valid_move = False
else:
valid_move = self._game_logic.apply(
player=jaco_arm_board_game.SELF,
action=go_logic.GoMarkerAction(row=int(row), col=int(col),
pass_action=pass_action))
if valid_move:
self._made_move_this_step = True
if not pass_action:
self._last_valid_move_is_pass = False
marker_pos = self._board.get_contact_pos(
physics=physics, row=row, col=col)
self._markers.mark(physics=physics,
player_id=jaco_arm_board_game.SELF,
pos=marker_pos,
bpos=(row, col))
else:
self._last_valid_move_is_pass = True
if not self._game_logic.is_game_over:
opponent_move = self._game_opponent.policy(
game_logic=self._game_logic, player=jaco_arm_board_game.OPPONENT,
random_state=random_state)
assert opponent_move
assert self._game_logic.apply(player=jaco_arm_board_game.OPPONENT,
action=opponent_move)
marker_pos = self._board.sample_pos_inside_touch_sensor(
physics=physics,
random_state=random_state,
row=opponent_move.row,
col=opponent_move.col)
self._markers.mark(physics=physics,
player_id=jaco_arm_board_game.OPPONENT,
pos=marker_pos,
bpos=(opponent_move.row,
opponent_move.col))
if self._reset_arm_after_move:
self._tcp_initializer(physics, random_state)
# Redraw all markers that are on the board (after captures)
self._markers.make_all_invisible(physics)
board = self._game_logic.get_board_state()
black_stones = np.transpose(np.nonzero(board[:, :, 1]))
white_stones = np.transpose(np.nonzero(board[:, :, 2]))
if black_stones.size > 0:
self._markers.make_visible_by_bpos(physics, 0, black_stones)
if white_stones.size > 0:
self._markers.make_visible_by_bpos(physics, 1, white_stones)
@registry.add(tags.EASY, tags.FEATURES)
def go_7x7():
return Go(board_size=7,
observation_settings=observations.PERFECT_FEATURES)
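# --- Illustrative sketch (not part of the original go.py) ---
# Shows how the registered go_7x7 task could be wrapped in a
# composer.Environment, mirroring the generic loader in board_games/__init__.py.
# Assumes dm_control, OpenSpiel and a local GnuGo binary are available.
def _example_go_environment(time_limit=float('inf'), seed=None):
  from dm_control import composer  # Deferred import; only needed here.
  return composer.Environment(
      task=go_7x7(), time_limit=time_limit, random_state=seed)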
|
deepmind-research-master
|
physics_planning_games/board_games/go.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A Tic Tac Toe task."""
from physics_planning_games.board_games import jaco_arm_board_game
from physics_planning_games.board_games import tic_tac_toe_logic
from physics_planning_games.board_games._internal import boards
from physics_planning_games.board_games._internal import observations
from physics_planning_games.board_games._internal import pieces
from physics_planning_games.board_games._internal import registry
from physics_planning_games.board_games._internal import tags
class TicTacToe(jaco_arm_board_game.JacoArmBoardGame):
"""Single-player Tic Tac Toe."""
def __init__(self, observation_settings, opponent=None,
reset_arm_after_move=True):
"""Initializes a `TicTacToe` task.
Args:
observation_settings: An `observations.ObservationSettings` namedtuple
specifying configuration options for each category of observation.
opponent: TicTacToeOpponent used for generating opponent moves.
reset_arm_after_move: Whether to reset arm to random position after every
piece being placed on the board.
"""
game_logic = tic_tac_toe_logic.TicTacToeGameLogic()
if opponent is None:
opponent = tic_tac_toe_logic.TicTacToeRandomOpponent()
markers = pieces.Markers(num_per_player=5,
observable_options=observations.make_options(
observation_settings,
observations.MARKER_OBSERVABLES))
self._reset_arm_after_move = reset_arm_after_move
super(TicTacToe, self).__init__(observation_settings=observation_settings,
opponent=opponent,
game_logic=game_logic,
board=boards.CheckerBoard(),
markers=markers)
@property
def control_timestep(self):
return 0.05
def after_substep(self, physics, random_state):
if not self._made_move_this_step:
indices = self._board.get_contact_indices(physics)
if not indices:
return
row, col = indices
valid_move = self._game_logic.apply(
player=jaco_arm_board_game.SELF,
action=tic_tac_toe_logic.SingleMarkerAction(row=row, col=col))
if valid_move:
self._made_move_this_step = True
marker_pos = self._board.get_contact_pos(
physics=physics, row=row, col=col)
self._markers.mark(physics=physics, player_id=jaco_arm_board_game.SELF,
pos=marker_pos)
if not self._game_logic.is_game_over:
opponent_move = self._game_opponent.policy(
game_logic=self._game_logic, random_state=random_state)
assert opponent_move
assert self._game_logic.apply(player=jaco_arm_board_game.OPPONENT,
action=opponent_move)
marker_pos = self._board.sample_pos_inside_touch_sensor(
physics=physics,
random_state=random_state,
row=opponent_move.row,
col=opponent_move.col)
self._markers.mark(physics=physics,
player_id=jaco_arm_board_game.OPPONENT,
pos=marker_pos)
if self._reset_arm_after_move:
self._tcp_initializer(physics, random_state)
@registry.add(tags.EASY, tags.FEATURES)
def tic_tac_toe_markers_features(**unused_kwargs):
return TicTacToe(observation_settings=observations.PERFECT_FEATURES)
@registry.add(tags.MED, tags.FEATURES)
def tic_tac_toe_mixture_opponent_markers_features(mixture_p=0.25):
print('Creating tictactoe task with random/optimal opponent mixture, p={}'
.format(mixture_p))
return TicTacToe(
observation_settings=observations.PERFECT_FEATURES,
opponent=tic_tac_toe_logic.TicTacToeMixtureOpponent(mixture_p))
@registry.add(tags.HARD, tags.FEATURES)
def tic_tac_toe_optimal_opponent_markers_features(**unused_kwargs):
return TicTacToe(observation_settings=observations.PERFECT_FEATURES,
opponent=tic_tac_toe_logic.TicTacToeOptimalOpponent())
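# --- Illustrative sketch (not part of the original tic_tac_toe.py) ---
# Builds the easy task directly and steps it with a zero action via the
# standard dm_env interface exposed by composer.Environment. Assumes dm_control
# is installed; the helper name is purely for illustration.
def _example_tic_tac_toe_rollout(num_steps=5):
  import numpy as np  # Local import; the module does not otherwise need numpy.
  from dm_control import composer
  env = composer.Environment(task=tic_tac_toe_markers_features())
  timestep = env.reset()
  zero_action = np.zeros(env.action_spec().shape)
  for _ in range(num_steps):
    timestep = env.step(zero_action)
  return timestep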
|
deepmind-research-master
|
physics_planning_games/board_games/tic_tac_toe.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""String constants used to annotate task constructors."""
FEATURES = 'features'
VISION = 'vision'
EASY = 'easy'
MED = 'medium'
HARD = 'hard'
|
deepmind-research-master
|
physics_planning_games/board_games/_internal/tags.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A global registry of constructors for board game environments."""
from dm_control.utils import containers
_ALL_CONSTRUCTORS = containers.TaggedTasks(allow_overriding_keys=False)
add = _ALL_CONSTRUCTORS.add
get_constructor = _ALL_CONSTRUCTORS.__getitem__
get_all_names = _ALL_CONSTRUCTORS.keys
get_tags = _ALL_CONSTRUCTORS.tags
get_names_by_tag = _ALL_CONSTRUCTORS.tagged
# This disables the check that prevents the same task constructor name from
# being added to the container more than once. This is done in order to allow
# individual task modules to be reloaded without also reloading `registry.py`
# first (e.g. when "hot-reloading" environments in domain explorer).
def done_importing_tasks():
_ALL_CONSTRUCTORS.allow_overriding_keys = True
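# --- Illustrative sketch (not part of the original registry.py) ---
# Shows how task modules use this registry: the decorator stores a constructor
# under its function name and tags, and consumers look it up by name or tag.
# The demo constructor is hypothetical; calling this helper mutates the
# module-level registry, so it is for illustration only.
def _registry_usage_example():
  @add('easy', 'features')
  def demo_task():  # Hypothetical stand-in for a real task constructor.
    return {'name': 'demo_task'}
  constructor = get_constructor('demo_task')
  assert constructor() == {'name': 'demo_task'}
  assert 'demo_task' in get_all_names()
  return get_names_by_tag('easy')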
|
deepmind-research-master
|
physics_planning_games/board_games/_internal/registry.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Configuration for observations."""
import collections
import numpy as np
class ObservableSpec(collections.namedtuple(
'ObservableSpec',
['enabled', 'update_interval', 'buffer_size', 'delay', 'aggregator',
'corruptor'])):
"""Configuration options for generic observables."""
__slots__ = ()
class CameraObservableSpec(collections.namedtuple(
'CameraObservableSpec', ('height', 'width') + ObservableSpec._fields)):
"""Configuration options for camera observables."""
__slots__ = ()
class ObservationSettings(collections.namedtuple(
'ObservationSettings',
['proprio', 'ftt', 'prop_pose', 'board_state', 'camera'])):
"""Container of `ObservableSpecs` grouped by category."""
__slots__ = ()
class ObservableNames(collections.namedtuple(
'ObservableNames',
['proprio', 'ftt', 'prop_pose', 'board_state', 'camera'])):
"""Container that groups the names of observables by category."""
__slots__ = ()
def __new__(cls, proprio=(), ftt=(), prop_pose=(), board_state=(), camera=()):
return super(ObservableNames, cls).__new__(
cls,
proprio=proprio,
ftt=ftt,
prop_pose=prop_pose,
board_state=board_state,
camera=camera)
# Global defaults for "feature" observables (i.e. anything that isn't a camera).
_DISABLED_FEATURE = ObservableSpec(
enabled=False,
update_interval=1,
buffer_size=1,
delay=0,
aggregator=None,
corruptor=None)
_ENABLED_FEATURE = _DISABLED_FEATURE._replace(enabled=True)
# Force, torque and touch-sensor readings are scaled using a symmetric
# logarithmic transformation that handles 0 and negative values.
_symlog1p = lambda x, random_state: np.sign(x) * np.log1p(abs(x))
_DISABLED_FTT = _DISABLED_FEATURE._replace(corruptor=_symlog1p)
_ENABLED_FTT = _ENABLED_FEATURE._replace(corruptor=_symlog1p)
# Global defaults for camera observables.
_DISABLED_CAMERA = CameraObservableSpec(
height=84,
width=84,
enabled=False,
update_interval=1,
buffer_size=1,
delay=0,
aggregator=None,
corruptor=None)
_ENABLED_CAMERA = _DISABLED_CAMERA._replace(enabled=True)
# Predefined sets of configurations options to apply to each category of
# observable.
PERFECT_FEATURES = ObservationSettings(
proprio=_ENABLED_FEATURE,
ftt=_ENABLED_FTT,
prop_pose=_ENABLED_FEATURE,
board_state=_ENABLED_FEATURE,
camera=_ENABLED_CAMERA)
ARENA_OBSERVABLES = ObservableNames(camera=['front_camera', 'front_camera_2'])
JACO_ARM_OBSERVABLES = ObservableNames(
proprio=['joints_pos', 'joints_vel'], ftt=['joints_torque'])
JACO_HAND_OBSERVABLES = ObservableNames(
proprio=['joints_pos', 'joints_vel', 'pinch_site_pos', 'pinch_site_rmat'])
MARKER_OBSERVABLES = ObservableNames(prop_pose=['position'])
def make_options(obs_settings, obs_names):
"""Constructs a dict of configuration options for a set of named observables.
Args:
obs_settings: An `ObservationSettings` instance.
obs_names: An `ObservableNames` instance.
Returns:
A nested dict containing `{observable_name: {option_name: value}}`.
"""
observable_options = {}
for category, spec in obs_settings._asdict().items():
for observable_name in getattr(obs_names, category):
observable_options[observable_name] = spec._asdict()
return observable_options
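# --- Illustrative sketch (not part of the original observations.py) ---
# Uses only names defined in this module to show the options dict produced by
# make_options for the marker observables.
def _example_marker_options():
  options = make_options(PERFECT_FEATURES, MARKER_OBSERVABLES)
  # MARKER_OBSERVABLES only names the 'position' prop_pose observable, so the
  # result maps that single name to the enabled-feature spec as a dict.
  assert set(options) == {'position'}
  assert options['position']['enabled']
  return options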
|
deepmind-research-master
|
physics_planning_games/board_games/_internal/observations.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for physics_planning_games.board_games._internal.pieces.py."""
from absl.testing import absltest
from dm_control import mjcf
import numpy as np
from physics_planning_games.board_games._internal import pieces
class MarkersTest(absltest.TestCase):
def test_position_observable(self):
num_per_player = 3
markers = pieces.Markers(num_per_player=num_per_player)
physics = mjcf.Physics.from_mjcf_model(markers.mjcf_model)
all_positions = [
[(0, 1, 2), (3, 4, 5), (6, 7, 8)], # Player 0
[(-1, 2, -3), (4, -5, 6)], # Player 1
]
for player_id, positions in enumerate(all_positions):
for marker_pos in positions:
markers.mark(physics=physics, player_id=player_id, pos=marker_pos)
expected_positions = np.zeros((2, num_per_player, 3), dtype=np.double)
expected_positions[0, :len(all_positions[0])] = all_positions[0]
expected_positions[1, :len(all_positions[1])] = all_positions[1]
observed_positions = markers.observables.position(physics)
np.testing.assert_array_equal(
expected_positions.reshape(-1, 3), observed_positions)
def test_invalid_player_id(self):
markers = pieces.Markers(num_per_player=5)
physics = mjcf.Physics.from_mjcf_model(markers.mjcf_model)
invalid_player_id = 99
with self.assertRaisesWithLiteralMatch(
ValueError, pieces._INVALID_PLAYER_ID.format(1, 99)):
markers.mark(physics=physics, player_id=invalid_player_id, pos=(1, 2, 3))
def test_too_many_moves(self):
num_per_player = 5
player_id = 0
markers = pieces.Markers(num_per_player=num_per_player)
physics = mjcf.Physics.from_mjcf_model(markers.mjcf_model)
for _ in range(num_per_player):
markers.mark(physics=physics, player_id=player_id, pos=(1, 2, 3))
with self.assertRaisesWithLiteralMatch(
RuntimeError,
pieces._NO_MORE_MARKERS_AVAILABLE.format(num_per_player, player_id)):
markers.mark(physics=physics, player_id=player_id, pos=(1, 2, 3))
if __name__ == '__main__':
absltest.main()
|
deepmind-research-master
|
physics_planning_games/board_games/_internal/pieces_test.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Entities representing board game pieces."""
import itertools
from dm_control import composer
from dm_control import mjcf
from dm_control.composer.observation import observable
import numpy as np
_VISIBLE_SITE_GROUP = 0
_INVISIBLE_SITE_GROUP = 3
_RED = (1., 0., 0., 0.5)
_BLUE = (0., 0, 1., 0.5)
_INVALID_PLAYER_ID = '`player_id` must be between 0 and {}, got {}.'
_NO_MORE_MARKERS_AVAILABLE = (
'All {} markers for player {} have already been placed.')
class Markers(composer.Entity):
"""A collection of non-physical entities for marking board positions."""
def _build(self,
num_per_player,
player_colors=(_RED, _BLUE),
halfwidth=0.025,
height=0.01,
board_size=7):
"""Builds a `Markers` entity.
Args:
num_per_player: Integer, the total number of markers to create per player.
player_colors: Sequence of (R, G, B, A) values specifying the marker
colors for each player.
halfwidth: Scalar, the halfwidth of each marker.
height: Scalar, height of each marker.
      board_size: Integer, the board size; only needed when markers are placed
        by board position (the `bpos` argument to `mark`).
"""
root = mjcf.RootElement(model='markers')
root.default.site.set_attributes(type='cylinder', size=(halfwidth, height))
all_markers = []
for i, color in enumerate(player_colors):
player_name = 'player_{}'.format(i)
# TODO(alimuldal): Would look cool if these were textured.
material = root.asset.add('material', name=player_name, rgba=color)
player_markers = []
for j in range(num_per_player):
player_markers.append(
root.worldbody.add(
'site',
name='player_{}_move_{}'.format(i, j),
material=material))
all_markers.append(player_markers)
self._num_players = len(player_colors)
self._mjcf_model = root
self._all_markers = all_markers
self._move_counts = [0] * self._num_players
# To go from integer position to marker index in the all_markers array
self._marker_ids = np.zeros((2, board_size, board_size))
self._board_size = board_size
def _build_observables(self):
return MarkersObservables(self)
@property
def mjcf_model(self):
"""`mjcf.RootElement` for this entity."""
return self._mjcf_model
@property
def markers(self):
"""Marker sites belonging to all players.
Returns:
A nested list, where `markers[i][j]` contains the `mjcf.Element`
corresponding to player i's jth marker.
"""
return self._all_markers
def initialize_episode(self, physics, random_state):
"""Resets the markers at the start of an episode."""
del random_state # Unused.
self._reset(physics)
def _reset(self, physics):
for player_markers in self._all_markers:
for marker in player_markers:
bound_marker = physics.bind(marker)
bound_marker.pos = 0. # Markers are initially placed at the origin.
bound_marker.group = _INVISIBLE_SITE_GROUP
self._move_counts = [0] * self._num_players
self._marker_ids = np.zeros((2, self._board_size, self._board_size),
dtype=np.int32)
def make_all_invisible(self, physics):
for player_markers in self._all_markers:
for marker in player_markers:
bound_marker = physics.bind(marker)
bound_marker.group = _INVISIBLE_SITE_GROUP
def make_visible_by_bpos(self, physics, player_id, all_bpos):
for bpos in all_bpos:
marker_id = self._marker_ids[player_id][bpos[0]][bpos[1]]
marker = self._all_markers[player_id][marker_id]
bound_marker = physics.bind(marker)
bound_marker.group = _VISIBLE_SITE_GROUP
def mark(self, physics, player_id, pos, bpos=None):
"""Enables the visibility of a marker, moves it to the specified position.
Args:
physics: `mjcf.Physics` instance.
player_id: Integer specifying the ID of the player whose marker to use.
pos: Array-like object specifying the cartesian position of the marker.
bpos: Board position, optional integer coordinates to index the markers.
Raises:
ValueError: If `player_id` is invalid.
RuntimeError: If `player_id` has no more available markers.
"""
if not 0 <= player_id < self._num_players:
raise ValueError(
_INVALID_PLAYER_ID.format(self._num_players - 1, player_id))
markers = self._all_markers[player_id]
move_count = self._move_counts[player_id]
if move_count >= len(markers):
raise RuntimeError(
_NO_MORE_MARKERS_AVAILABLE.format(move_count, player_id))
bound_marker = physics.bind(markers[move_count])
bound_marker.pos = pos
# TODO(alimuldal): Set orientation as well (random? same as contact frame?)
bound_marker.group = _VISIBLE_SITE_GROUP
self._move_counts[player_id] += 1
if bpos:
self._marker_ids[player_id][bpos[0]][bpos[1]] = move_count
class MarkersObservables(composer.Observables):
"""Observables for a `Markers` entity."""
@composer.observable
def position(self):
"""Cartesian positions of all marker sites.
Returns:
An `observable.MJCFFeature` instance. When called with an instance of
`physics` as the argument, this will return a numpy float64 array of shape
(num_players * num_markers, 3) where each row contains the cartesian
position of a marker. Unplaced markers will have position (0, 0, 0).
"""
return observable.MJCFFeature(
'xpos', list(itertools.chain.from_iterable(self._entity.markers)))
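# --- Illustrative sketch (not part of the original pieces.py) ---
# Places a marker and reads back the observed positions, mirroring the usage in
# pieces_test.py. Requires MuJoCo via dm_control.mjcf.
def _example_markers_usage():
  markers = Markers(num_per_player=2)
  physics = mjcf.Physics.from_mjcf_model(markers.mjcf_model)
  markers.mark(physics=physics, player_id=0, pos=(0.1, 0.2, 0.3))
  # One row per marker site: shape (num_players * num_per_player, 3).
  return markers.observables.position(physics)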
|
deepmind-research-master
|
physics_planning_games/board_games/_internal/pieces.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Composer entities corresponding to game boards."""
import copy
import os
from dm_control import composer
from dm_control import mjcf
import numpy as np
from dm_control.utils import io as resources
_TOUCH_THRESHOLD = 1e-3 # Activation threshold for touch sensors (N).
# Whether to display the underlying sensors for the Go board (useful for
# aligning the board texture).
_SHOW_DEBUG_GRID = False
_TEXTURE_PATH = os.path.join(os.path.dirname(__file__), 'goboard_7x7.png')
def _make_checkerboard(rows,
columns,
square_halfwidth,
height=0.01,
sensor_size=0.7,
name='checkerboard'):
"""Builds a checkerboard with touch sensors centered on each square."""
root = mjcf.RootElement(model=name)
black_mat = root.asset.add('material', name='black', rgba=(0.2, 0.2, 0.2, 1))
white_mat = root.asset.add('material', name='white', rgba=(0.8, 0.8, 0.8, 1))
sensor_mat = root.asset.add('material', name='sensor', rgba=(0, 1, 0, 0.3))
root.default.geom.set_attributes(
type='box', size=(square_halfwidth, square_halfwidth, height))
root.default.site.set_attributes(
type='box',
size=(sensor_size * square_halfwidth,) * 2 + (0.5 * height,),
material=sensor_mat, group=composer.SENSOR_SITES_GROUP)
xpos = (np.arange(columns) - 0.5*(columns - 1)) * 2 * square_halfwidth
ypos = (np.arange(rows) - 0.5*(rows - 1)) * 2 * square_halfwidth
geoms = []
touch_sensors = []
for i in range(rows):
for j in range(columns):
geom_mat = black_mat if ((i % 2) == (j % 2)) else white_mat
name = '{}_{}'.format(i, j)
geoms.append(
root.worldbody.add(
'geom',
pos=(xpos[j], ypos[i], height),
name=name,
material=geom_mat))
site = root.worldbody.add('site', pos=(xpos[j], ypos[i], 2*height),
name=name)
touch_sensors.append(root.sensor.add('touch', site=site, name=name))
return root, geoms, touch_sensors
def _make_goboard(boardsize,
square_halfwidth,
height=0.01,
sensor_size=0.7,
name='goboard'):
"""Builds a Go with touch sensors centered on each intersection."""
y_offset = -0.08
rows = boardsize
columns = boardsize
root = mjcf.RootElement(model=name)
if _SHOW_DEBUG_GRID:
black_mat = root.asset.add('material', name='black',
rgba=(0.2, 0.2, 0.2, 0.5))
white_mat = root.asset.add('material', name='white',
rgba=(0.8, 0.8, 0.8, 0.5))
else:
transparent_mat = root.asset.add('material', name='intersection',
rgba=(0, 1, 0, 0.0))
sensor_mat = root.asset.add('material', name='sensor', rgba=(0, 1, 0, 0.3))
contents = resources.GetResource(_TEXTURE_PATH)
root.asset.add('texture', name='goboard', type='2d',
file=mjcf.Asset(contents, '.png'))
board_mat = root.asset.add(
'material', name='goboard', texture='goboard',
texrepeat=[0.97, 0.97])
root.default.geom.set_attributes(
type='box', size=(square_halfwidth, square_halfwidth, height))
root.default.site.set_attributes(
type='box',
size=(sensor_size * square_halfwidth,) * 2 + (0.5 * height,),
material=sensor_mat, group=composer.SENSOR_SITES_GROUP)
board_height = height
if _SHOW_DEBUG_GRID:
board_height = 0.5*height
root.worldbody.add(
'geom',
pos=(0, 0+y_offset, height),
type='box',
size=(square_halfwidth * boardsize,) * 2 + (board_height,),
name=name,
material=board_mat)
xpos = (np.arange(columns) - 0.5*(columns - 1)) * 2 * square_halfwidth
ypos = (np.arange(rows) - 0.5*(rows - 1)) * 2 * square_halfwidth + y_offset
geoms = []
touch_sensors = []
for i in range(rows):
for j in range(columns):
name = '{}_{}'.format(i, j)
if _SHOW_DEBUG_GRID:
transparent_mat = black_mat if ((i % 2) == (j % 2)) else white_mat
geoms.append(
root.worldbody.add(
'geom',
pos=(xpos[j], ypos[i], height),
name=name,
material=transparent_mat))
site = root.worldbody.add('site', pos=(xpos[j], ypos[i], 2*height),
name=name)
touch_sensors.append(root.sensor.add('touch', site=site, name=name))
pass_geom = root.worldbody.add(
'geom',
pos=(0, y_offset, 0.0),
size=(square_halfwidth*boardsize*2,
square_halfwidth*boardsize) + (0.5 * height,),
name='pass',
material=transparent_mat)
site = root.worldbody.add('site', pos=(0, y_offset, 0.0),
size=(square_halfwidth*boardsize*2,
square_halfwidth*boardsize) + (0.5 * height,),
name='pass')
pass_sensor = root.sensor.add('touch', site=site, name='pass')
return root, geoms, touch_sensors, pass_geom, pass_sensor
class CheckerBoard(composer.Entity):
"""An entity representing a checkerboard."""
def __init__(self, *args, **kwargs):
super(CheckerBoard, self).__init__(*args, **kwargs)
self._contact_from_before_substep = None
def _build(self, rows=3, columns=3, square_halfwidth=0.05):
"""Builds a `CheckerBoard` entity.
Args:
rows: Integer, the number of rows.
columns: Integer, the number of columns.
square_halfwidth: Float, the halfwidth of the squares on the board.
"""
root, geoms, touch_sensors = _make_checkerboard(
rows=rows, columns=columns, square_halfwidth=square_halfwidth)
self._mjcf_model = root
self._geoms = np.array(geoms).reshape(rows, columns)
self._touch_sensors = np.array(touch_sensors).reshape(rows, columns)
@property
def mjcf_model(self):
return self._mjcf_model
def before_substep(self, physics, random_state):
del random_state # Unused.
# Cache a copy of the array of active contacts before each substep.
self._contact_from_before_substep = [
copy.copy(c) for c in physics.data.contact
]
def validate_finger_touch(self, physics, row, col, hand):
# Geom for the board square
geom_id = physics.bind(self._geoms[row, col]).element_id
# finger geoms
finger_geoms_ids = set(physics.bind(hand.finger_geoms).element_id)
contacts = self._contact_from_before_substep
set1, set2 = set([geom_id]), finger_geoms_ids
for contact in contacts:
finger_tile_contact = ((contact.geom1 in set1 and
contact.geom2 in set2) or
(contact.geom1 in set2 and contact.geom2 in set1))
if finger_tile_contact:
return True
return False
def get_contact_pos(self, physics, row, col):
geom_id = physics.bind(self._geoms[row, col]).element_id
# Here we use the array of active contacts from the previous substep, rather
# than the current values in `physics.data.contact`. This is because we use
# touch sensors to detect when a square on the board is being pressed, and
# the pressure readings are based on forces that were calculated at the end
# of the previous substep. It's possible that `physics.data.contact` no
# longer contains any active contacts involving the board geoms, even though
# the touch sensors are telling us that one of the squares on the board is
# being pressed.
contacts = self._contact_from_before_substep
relevant_contacts = [
c for c in contacts if c.geom1 == geom_id or c.geom2 == geom_id
]
if relevant_contacts:
# If there are multiple contacts involving this square of the board, just
# pick the first one.
return relevant_contacts[0].pos.copy()
else:
print("Touch sensor at ({},{}) doesn't have any active contacts!".format(
row, col))
return False
def get_contact_indices(self, physics):
pressures = physics.bind(self._touch_sensors.ravel()).sensordata
# If any of the touch sensors exceed the threshold, return the (row, col)
# indices of the most strongly activated sensor.
if np.any(pressures > _TOUCH_THRESHOLD):
return np.unravel_index(np.argmax(pressures), self._touch_sensors.shape)
else:
return None
def sample_pos_inside_touch_sensor(self, physics, random_state, row, col):
bound_site = physics.bind(self._touch_sensors[row, col].site)
jitter = bound_site.size * np.array([1., 1., 0.])
return bound_site.xpos + random_state.uniform(-jitter, jitter)
class GoBoard(CheckerBoard):
"""An entity representing a Goboard."""
def _build(self, boardsize=7, square_halfwidth=0.05): # pytype: disable=signature-mismatch # overriding-default-value-checks
"""Builds a `GoBoard` entity.
Args:
boardsize: Integer, the size of the board (boardsize x boardsize).
square_halfwidth: Float, the halfwidth of the squares on the board.
"""
if boardsize != 7:
raise ValueError('Only boardsize of 7x7 is implemented at the moment')
root, geoms, touch_sensors, pass_geom, pass_sensor = _make_goboard(
boardsize=boardsize, square_halfwidth=square_halfwidth)
self._mjcf_model = root
self._geoms = np.array(geoms).reshape(boardsize, boardsize)
self._touch_sensors = np.array(touch_sensors).reshape(boardsize, boardsize)
self._pass_geom = pass_geom
self._pass_sensor = pass_sensor
def get_contact_indices(self, physics):
pressures = physics.bind(self._touch_sensors.ravel()).sensordata
# Deal with pass first
pass_pressure = physics.bind(self._pass_sensor).sensordata
if pass_pressure > np.max(pressures) and pass_pressure > _TOUCH_THRESHOLD:
return -1, -1
# If any of the other touch sensors exceed the threshold, return the
# (row, col) indices of the most strongly activated sensor.
if np.any(pressures > _TOUCH_THRESHOLD):
return np.unravel_index(np.argmax(pressures), self._touch_sensors.shape)
else:
return None
def validate_finger_touch(self, physics, row, col, hand):
# Geom for the board square
if row == -1 and col == -1:
geom_id = physics.bind(self._pass_geom).element_id
else:
geom_id = physics.bind(self._geoms[row, col]).element_id
# finger geoms
finger_geoms_ids = set(physics.bind(hand.finger_geoms).element_id)
contacts = self._contact_from_before_substep
set1, set2 = set([geom_id]), finger_geoms_ids
for contact in contacts:
finger_tile_contact = ((contact.geom1 in set1 and
contact.geom2 in set2) or
(contact.geom1 in set2 and contact.geom2 in set1))
if finger_tile_contact:
return True
return False
def sample_pos_inside_touch_sensor(self, physics, random_state, row, col):
bound_site = physics.bind(self._touch_sensors[row, col].site)
jitter = bound_site.size * np.array([0.25, 0.25, 0.])
return bound_site.xpos + random_state.uniform(-jitter, jitter)
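# --- Illustrative sketch (not part of the original boards.py) ---
# Compiles a board in isolation and queries its touch sensors. With no active
# contacts the sensor readings are zero, so get_contact_indices returns None.
# Requires MuJoCo via dm_control.mjcf.
def _example_checkerboard_usage():
  board = CheckerBoard(rows=3, columns=3, square_halfwidth=0.05)
  physics = mjcf.Physics.from_mjcf_model(board.mjcf_model)
  assert board.get_contact_indices(physics) is None
  return board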
|
deepmind-research-master
|
physics_planning_games/board_games/_internal/boards.py
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Board game-specific arena classes."""
from dm_control import composer
from dm_control.composer.observation import observable
from dm_control.mujoco import wrapper
# Robot geoms will be assigned to this group in order to disable their
# visibility to the top-down camera.
ROBOT_GEOM_GROUP = 1
class Standard(composer.Arena):
""""Board game-specific arena class."""
def _build(self, name=None):
"""Initializes this arena.
Args:
name: (optional) A string, the name of this arena. If `None`, use the
model name defined in the MJCF file.
"""
super(Standard, self)._build(name=name)
# Add visual assets.
self.mjcf_model.asset.add(
'texture',
type='skybox',
builtin='gradient',
rgb1=(0.4, 0.6, 0.8),
rgb2=(0., 0., 0.),
width=100,
height=100)
groundplane_texture = self.mjcf_model.asset.add(
'texture',
name='groundplane',
type='2d',
builtin='checker',
rgb1=(0.2, 0.3, 0.4),
rgb2=(0.1, 0.2, 0.3),
width=300,
height=300,
mark='edge',
markrgb=(.8, .8, .8))
groundplane_material = self.mjcf_model.asset.add(
'material',
name='groundplane',
texture=groundplane_texture,
texrepeat=(5, 5),
texuniform='true',
reflectance=0.2)
# Add ground plane.
self.mjcf_model.worldbody.add(
'geom',
name='ground',
type='plane',
material=groundplane_material,
size=(1, 1, 0.1),
friction=(0.4,),
solimp=(0.95, 0.99, 0.001),
solref=(0.002, 1))
# Add lighting
self.mjcf_model.worldbody.add(
'light',
pos=(0, 0, 1.5),
dir=(0, 0, -1),
diffuse=(0.7, 0.7, 0.7),
specular=(.3, .3, .3),
directional='false',
castshadow='true')
# Add some fixed cameras to the arena.
self._front_camera = self.mjcf_model.worldbody.add(
'camera',
name='front',
pos=(0., -0.6, 0.75),
xyaxes=(1., 0., 0., 0., 0.7, 0.75))
# Ensures a 7x7 go board fits into the view from camera
self._front_camera_2 = self.mjcf_model.worldbody.add(
'camera',
name='front_2',
pos=(0., -0.65, 0.85),
xyaxes=(1., 0., 0., 0., 0.85, 0.6))
self._top_down_camera = self.mjcf_model.worldbody.add(
'camera',
name='top_down',
pos=(0., 0., 0.5),
xyaxes=(1., 0., 0., 0., 1., 0.))
# Always initialize the free camera so that it points at the origin.
self.mjcf_model.statistic.center = (0., 0., 0.)
def _build_observables(self):
return ArenaObservables(self)
@property
def front_camera(self):
return self._front_camera
@property
def front_camera_2(self):
return self._front_camera_2
@property
def top_down_camera(self):
return self._top_down_camera
def attach_offset(self, entity, offset, attach_site=None):
"""Attaches another entity at a position offset from the attachment site.
Args:
entity: The `Entity` to attach.
offset: A length 3 array-like object representing the XYZ offset.
attach_site: (optional) The site to which to attach the entity's model.
If not set, defaults to self.attachment_site.
Returns:
The frame of the attached model.
"""
frame = self.attach(entity, attach_site=attach_site)
frame.pos = offset
return frame
class ArenaObservables(composer.Observables):
"""Observables belonging to the arena."""
@composer.observable
def front_camera(self):
return observable.MJCFCamera(mjcf_element=self._entity.front_camera)
@composer.observable
def front_camera_2(self):
return observable.MJCFCamera(mjcf_element=self._entity.front_camera_2)
@composer.observable
def top_down_camera(self):
return observable.MJCFCamera(mjcf_element=self._entity.top_down_camera)
@composer.observable
def top_down_camera_invisible_robot(self):
# Custom scene options for making robot geoms invisible.
robot_geoms_invisible = wrapper.MjvOption()
robot_geoms_invisible.geomgroup[ROBOT_GEOM_GROUP] = 0
return observable.MJCFCamera(mjcf_element=self._entity.top_down_camera,
scene_option=robot_geoms_invisible)
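# --- Illustrative sketch (not part of the original arenas.py) ---
# Attaches another composer entity (here the Markers entity from
# _internal/pieces.py, chosen purely as an example) slightly above the arena's
# default attachment site using attach_offset defined above.
def _example_attach_markers():
  from physics_planning_games.board_games._internal import pieces
  arena = Standard(name='example_arena')
  markers = pieces.Markers(num_per_player=3)
  arena.attach_offset(markers, offset=(0., 0., 0.05))
  return arena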
|
deepmind-research-master
|
physics_planning_games/board_games/_internal/arenas.py
|
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Model for CylinderFlow."""
import sonnet as snt
import tensorflow.compat.v1 as tf
from meshgraphnets import common
from meshgraphnets import core_model
from meshgraphnets import normalization
class Model(snt.AbstractModule):
"""Model for fluid simulation."""
def __init__(self, learned_model, name='Model'):
super(Model, self).__init__(name=name)
with self._enter_variable_scope():
self._learned_model = learned_model
self._output_normalizer = normalization.Normalizer(
size=2, name='output_normalizer')
self._node_normalizer = normalization.Normalizer(
size=2+common.NodeType.SIZE, name='node_normalizer')
self._edge_normalizer = normalization.Normalizer(
size=3, name='edge_normalizer') # 2D coord + length
def _build_graph(self, inputs, is_training):
"""Builds input graph."""
# construct graph nodes
node_type = tf.one_hot(inputs['node_type'][:, 0], common.NodeType.SIZE)
node_features = tf.concat([inputs['velocity'], node_type], axis=-1)
# construct graph edges
senders, receivers = common.triangles_to_edges(inputs['cells'])
relative_mesh_pos = (tf.gather(inputs['mesh_pos'], senders) -
tf.gather(inputs['mesh_pos'], receivers))
edge_features = tf.concat([
relative_mesh_pos,
tf.norm(relative_mesh_pos, axis=-1, keepdims=True)], axis=-1)
mesh_edges = core_model.EdgeSet(
name='mesh_edges',
features=self._edge_normalizer(edge_features, is_training),
receivers=receivers,
senders=senders)
return core_model.MultiGraph(
node_features=self._node_normalizer(node_features, is_training),
edge_sets=[mesh_edges])
def _build(self, inputs):
graph = self._build_graph(inputs, is_training=False)
per_node_network_output = self._learned_model(graph)
return self._update(inputs, per_node_network_output)
@snt.reuse_variables
def loss(self, inputs):
"""L2 loss on velocity."""
graph = self._build_graph(inputs, is_training=True)
network_output = self._learned_model(graph)
# build target velocity change
cur_velocity = inputs['velocity']
target_velocity = inputs['target|velocity']
target_velocity_change = target_velocity - cur_velocity
target_normalized = self._output_normalizer(target_velocity_change)
# build loss
node_type = inputs['node_type'][:, 0]
loss_mask = tf.logical_or(tf.equal(node_type, common.NodeType.NORMAL),
tf.equal(node_type, common.NodeType.OUTFLOW))
error = tf.reduce_sum((target_normalized - network_output)**2, axis=1)
loss = tf.reduce_mean(error[loss_mask])
return loss
def _update(self, inputs, per_node_network_output):
"""Integrate model outputs."""
velocity_update = self._output_normalizer.inverse(per_node_network_output)
# integrate forward
cur_velocity = inputs['velocity']
return cur_velocity + velocity_update
|
deepmind-research-master
|
meshgraphnets/cfd_model.py
|
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Model for FlagSimple."""
import sonnet as snt
import tensorflow.compat.v1 as tf
from meshgraphnets import common
from meshgraphnets import core_model
from meshgraphnets import normalization
class Model(snt.AbstractModule):
"""Model for static cloth simulation."""
def __init__(self, learned_model, name='Model'):
super(Model, self).__init__(name=name)
with self._enter_variable_scope():
self._learned_model = learned_model
self._output_normalizer = normalization.Normalizer(
size=3, name='output_normalizer')
self._node_normalizer = normalization.Normalizer(
size=3+common.NodeType.SIZE, name='node_normalizer')
self._edge_normalizer = normalization.Normalizer(
size=7, name='edge_normalizer') # 3D world coord + its norm + 2D mesh coord + its norm = 7
def _build_graph(self, inputs, is_training):
"""Builds input graph."""
# construct graph nodes
velocity = inputs['world_pos'] - inputs['prev|world_pos']
node_type = tf.one_hot(inputs['node_type'][:, 0], common.NodeType.SIZE)
node_features = tf.concat([velocity, node_type], axis=-1)
# construct graph edges
senders, receivers = common.triangles_to_edges(inputs['cells'])
relative_world_pos = (tf.gather(inputs['world_pos'], senders) -
tf.gather(inputs['world_pos'], receivers))
relative_mesh_pos = (tf.gather(inputs['mesh_pos'], senders) -
tf.gather(inputs['mesh_pos'], receivers))
edge_features = tf.concat([
relative_world_pos,
tf.norm(relative_world_pos, axis=-1, keepdims=True),
relative_mesh_pos,
tf.norm(relative_mesh_pos, axis=-1, keepdims=True)], axis=-1)
mesh_edges = core_model.EdgeSet(
name='mesh_edges',
features=self._edge_normalizer(edge_features, is_training),
receivers=receivers,
senders=senders)
return core_model.MultiGraph(
node_features=self._node_normalizer(node_features, is_training),
edge_sets=[mesh_edges])
def _build(self, inputs):
graph = self._build_graph(inputs, is_training=False)
per_node_network_output = self._learned_model(graph)
return self._update(inputs, per_node_network_output)
@snt.reuse_variables
def loss(self, inputs):
"""L2 loss on position."""
graph = self._build_graph(inputs, is_training=True)
network_output = self._learned_model(graph)
# build target acceleration
cur_position = inputs['world_pos']
prev_position = inputs['prev|world_pos']
target_position = inputs['target|world_pos']
target_acceleration = target_position - 2*cur_position + prev_position
target_normalized = self._output_normalizer(target_acceleration)
# build loss
loss_mask = tf.equal(inputs['node_type'][:, 0], common.NodeType.NORMAL)
error = tf.reduce_sum((target_normalized - network_output)**2, axis=1)
loss = tf.reduce_mean(error[loss_mask])
return loss
def _update(self, inputs, per_node_network_output):
"""Integrate model outputs."""
acceleration = self._output_normalizer.inverse(per_node_network_output)
# integrate forward
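# position update: x_{t+1} = 2*x_t - x_{t-1} + a (Verlet-style second-order integration)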
cur_position = inputs['world_pos']
prev_position = inputs['prev|world_pos']
position = 2*cur_position + acceleration - prev_position
return position
|
deepmind-research-master
|
meshgraphnets/cloth_model.py
|
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Runs the learner/evaluator."""
import pickle
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from meshgraphnets import cfd_eval
from meshgraphnets import cfd_model
from meshgraphnets import cloth_eval
from meshgraphnets import cloth_model
from meshgraphnets import core_model
from meshgraphnets import dataset
FLAGS = flags.FLAGS
flags.DEFINE_enum('mode', 'train', ['train', 'eval'],
'Train model, or run evaluation.')
flags.DEFINE_enum('model', None, ['cfd', 'cloth'],
'Select model to run.')
flags.DEFINE_string('checkpoint_dir', None, 'Directory to save checkpoint')
flags.DEFINE_string('dataset_dir', None, 'Directory to load dataset from.')
flags.DEFINE_string('rollout_path', None,
'Pickle file to save eval trajectories')
flags.DEFINE_enum('rollout_split', 'valid', ['train', 'test', 'valid'],
'Dataset split to use for rollouts.')
flags.DEFINE_integer('num_rollouts', 10, 'No. of rollout trajectories')
flags.DEFINE_integer('num_training_steps', int(10e6), 'No. of training steps')
PARAMETERS = {
'cfd': dict(noise=0.02, gamma=1.0, field='velocity', history=False,
size=2, batch=2, model=cfd_model, evaluator=cfd_eval),
'cloth': dict(noise=0.003, gamma=0.1, field='world_pos', history=True,
size=3, batch=1, model=cloth_model, evaluator=cloth_eval)
}
def learner(model, params):
"""Run a learner job."""
ds = dataset.load_dataset(FLAGS.dataset_dir, 'train')
ds = dataset.add_targets(ds, [params['field']], add_history=params['history'])
ds = dataset.split_and_preprocess(ds, noise_field=params['field'],
noise_scale=params['noise'],
noise_gamma=params['gamma'])
inputs = tf.data.make_one_shot_iterator(ds).get_next()
loss_op = model.loss(inputs)
global_step = tf.train.create_global_step()
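# Learning rate decays exponentially from 1e-4 by a factor of 10 over 5e6 steps, with a 1e-6 offset as a floor.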
lr = tf.train.exponential_decay(learning_rate=1e-4,
global_step=global_step,
decay_steps=int(5e6),
decay_rate=0.1) + 1e-6
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
train_op = optimizer.minimize(loss_op, global_step=global_step)
# Don't train for the first few steps, just accumulate normalization stats
train_op = tf.cond(tf.less(global_step, 1000),
lambda: tf.group(tf.assign_add(global_step, 1)),
lambda: tf.group(train_op))
with tf.train.MonitoredTrainingSession(
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.num_training_steps)],
checkpoint_dir=FLAGS.checkpoint_dir,
save_checkpoint_secs=600) as sess:
while not sess.should_stop():
_, step, loss = sess.run([train_op, global_step, loss_op])
if step % 1000 == 0:
logging.info('Step %d: Loss %g', step, loss)
logging.info('Training complete.')
def evaluator(model, params):
"""Run a model rollout trajectory."""
ds = dataset.load_dataset(FLAGS.dataset_dir, FLAGS.rollout_split)
ds = dataset.add_targets(ds, [params['field']], add_history=params['history'])
inputs = tf.data.make_one_shot_iterator(ds).get_next()
scalar_op, traj_ops = params['evaluator'].evaluate(model, inputs)
tf.train.create_global_step()
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.checkpoint_dir,
save_checkpoint_secs=None,
save_checkpoint_steps=None) as sess:
trajectories = []
scalars = []
for traj_idx in range(FLAGS.num_rollouts):
logging.info('Rollout trajectory %d', traj_idx)
scalar_data, traj_data = sess.run([scalar_op, traj_ops])
trajectories.append(traj_data)
scalars.append(scalar_data)
for key in scalars[0]:
logging.info('%s: %g', key, np.mean([x[key] for x in scalars]))
with open(FLAGS.rollout_path, 'wb') as fp:
pickle.dump(trajectories, fp)
def main(argv):
del argv
tf.enable_resource_variables()
tf.disable_eager_execution()
params = PARAMETERS[FLAGS.model]
learned_model = core_model.EncodeProcessDecode(
output_size=params['size'],
latent_size=128,
num_layers=2,
message_passing_steps=15)
model = params['model'].Model(learned_model)
if FLAGS.mode == 'train':
learner(model, params)
elif FLAGS.mode == 'eval':
evaluator(model, params)
if __name__ == '__main__':
app.run(main)
|
deepmind-research-master
|
meshgraphnets/run_model.py
|
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Core learned graph net model."""
import collections
import functools
import sonnet as snt
import tensorflow.compat.v1 as tf
EdgeSet = collections.namedtuple('EdgeSet', ['name', 'features', 'senders',
'receivers'])
MultiGraph = collections.namedtuple('Graph', ['node_features', 'edge_sets'])
class GraphNetBlock(snt.AbstractModule):
"""Multi-Edge Interaction Network with residual connections."""
def __init__(self, model_fn, name='GraphNetBlock'):
super(GraphNetBlock, self).__init__(name=name)
self._model_fn = model_fn
def _update_edge_features(self, node_features, edge_set):
"""Aggregrates node features, and applies edge function."""
sender_features = tf.gather(node_features, edge_set.senders)
receiver_features = tf.gather(node_features, edge_set.receivers)
features = [sender_features, receiver_features, edge_set.features]
with tf.variable_scope(edge_set.name+'_edge_fn'):
return self._model_fn()(tf.concat(features, axis=-1))
def _update_node_features(self, node_features, edge_sets):
"""Aggregrates edge features, and applies node function."""
num_nodes = tf.shape(node_features)[0]
features = [node_features]
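# Sum incoming edge features per receiver node, one aggregate per edge set.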
for edge_set in edge_sets:
features.append(tf.math.unsorted_segment_sum(edge_set.features,
edge_set.receivers,
num_nodes))
with tf.variable_scope('node_fn'):
return self._model_fn()(tf.concat(features, axis=-1))
def _build(self, graph):
"""Applies GraphNetBlock and returns updated MultiGraph."""
# apply edge functions
new_edge_sets = []
for edge_set in graph.edge_sets:
updated_features = self._update_edge_features(graph.node_features,
edge_set)
new_edge_sets.append(edge_set._replace(features=updated_features))
# apply node function
new_node_features = self._update_node_features(graph.node_features,
new_edge_sets)
# add residual connections
new_node_features += graph.node_features
new_edge_sets = [es._replace(features=es.features + old_es.features)
for es, old_es in zip(new_edge_sets, graph.edge_sets)]
return MultiGraph(new_node_features, new_edge_sets)
class EncodeProcessDecode(snt.AbstractModule):
"""Encode-Process-Decode GraphNet model."""
def __init__(self,
output_size,
latent_size,
num_layers,
message_passing_steps,
name='EncodeProcessDecode'):
super(EncodeProcessDecode, self).__init__(name=name)
self._latent_size = latent_size
self._output_size = output_size
self._num_layers = num_layers
self._message_passing_steps = message_passing_steps
def _make_mlp(self, output_size, layer_norm=True):
"""Builds an MLP."""
widths = [self._latent_size] * self._num_layers + [output_size]
network = snt.nets.MLP(widths, activate_final=False)
if layer_norm:
network = snt.Sequential([network, snt.LayerNorm()])
return network
def _encoder(self, graph):
"""Encodes node and edge features into latent features."""
with tf.variable_scope('encoder'):
node_latents = self._make_mlp(self._latent_size)(graph.node_features)
new_edges_sets = []
for edge_set in graph.edge_sets:
latent = self._make_mlp(self._latent_size)(edge_set.features)
new_edges_sets.append(edge_set._replace(features=latent))
return MultiGraph(node_latents, new_edges_sets)
def _decoder(self, graph):
"""Decodes node features from graph."""
with tf.variable_scope('decoder'):
decoder = self._make_mlp(self._output_size, layer_norm=False)
return decoder(graph.node_features)
def _build(self, graph):
"""Encodes and processes a multigraph, and returns node features."""
model_fn = functools.partial(self._make_mlp, output_size=self._latent_size)
latent_graph = self._encoder(graph)
for _ in range(self._message_passing_steps):
latent_graph = GraphNetBlock(model_fn)(latent_graph)
return self._decoder(latent_graph)
|
deepmind-research-master
|
meshgraphnets/core_model.py
|
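A minimal usage sketch for the EncodeProcessDecode network defined in meshgraphnets/core_model.py above. The toy graph, zero-filled features, and sizes are illustrative assumptions, not part of the original code; it assumes the TF1/Sonnet-1 graph-mode environment the repository targets.
import tensorflow.compat.v1 as tf
from meshgraphnets import core_model
tf.disable_eager_execution()
# Toy graph: 3 nodes with 11-dim features, 3 directed edges forming a cycle.
edges = core_model.EdgeSet(
    name='mesh_edges',
    features=tf.zeros([3, 3]),
    senders=tf.constant([0, 1, 2], dtype=tf.int32),
    receivers=tf.constant([1, 2, 0], dtype=tf.int32))
graph = core_model.MultiGraph(node_features=tf.zeros([3, 11]), edge_sets=[edges])
net = core_model.EncodeProcessDecode(
    output_size=2, latent_size=128, num_layers=2, message_passing_steps=15)
node_output = net(graph)  # Tensor of shape [3, 2]: one prediction per node.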
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility functions for reading the datasets."""
import functools
import json
import os
import tensorflow.compat.v1 as tf
from meshgraphnets.common import NodeType
def _parse(proto, meta):
"""Parses a trajectory from tf.Example."""
feature_lists = {k: tf.io.VarLenFeature(tf.string)
for k in meta['field_names']}
features = tf.io.parse_single_example(proto, feature_lists)
out = {}
for key, field in meta['features'].items():
data = tf.io.decode_raw(features[key].values, getattr(tf, field['dtype']))
data = tf.reshape(data, field['shape'])
if field['type'] == 'static':
data = tf.tile(data, [meta['trajectory_length'], 1, 1])
elif field['type'] == 'dynamic_varlen':
length = tf.io.decode_raw(features['length_'+key].values, tf.int32)
length = tf.reshape(length, [-1])
data = tf.RaggedTensor.from_row_lengths(data, row_lengths=length)
elif field['type'] != 'dynamic':
raise ValueError('invalid data format')
out[key] = data
return out
def load_dataset(path, split):
"""Load dataset."""
with open(os.path.join(path, 'meta.json'), 'r') as fp:
meta = json.loads(fp.read())
ds = tf.data.TFRecordDataset(os.path.join(path, split+'.tfrecord'))
ds = ds.map(functools.partial(_parse, meta=meta), num_parallel_calls=8)
ds = ds.prefetch(1)
return ds
def add_targets(ds, fields, add_history):
"""Adds target and optionally history fields to dataframe."""
def fn(trajectory):
out = {}
for key, val in trajectory.items():
out[key] = val[1:-1]
if key in fields:
if add_history:
out['prev|'+key] = val[0:-2]
out['target|'+key] = val[2:]
return out
return ds.map(fn, num_parallel_calls=8)
def split_and_preprocess(ds, noise_field, noise_scale, noise_gamma):
"""Splits trajectories into frames, and adds training noise."""
def add_noise(frame):
noise = tf.random.normal(tf.shape(frame[noise_field]),
stddev=noise_scale, dtype=tf.float32)
# don't apply noise to boundary nodes
mask = tf.equal(frame['node_type'], NodeType.NORMAL)[:, 0]
noise = tf.where(mask, noise, tf.zeros_like(noise))
frame[noise_field] += noise
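# Shift the target by (1 - gamma) * noise so the model learns to correct a gamma fraction of the injected noise.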
frame['target|'+noise_field] += (1.0 - noise_gamma) * noise
return frame
ds = ds.flat_map(tf.data.Dataset.from_tensor_slices)
ds = ds.map(add_noise, num_parallel_calls=8)
ds = ds.shuffle(10000)
ds = ds.repeat(None)
return ds.prefetch(10)
def batch_dataset(ds, batch_size):
"""Batches input datasets."""
shapes = ds.output_shapes
types = ds.output_types
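# Graphs are batched by concatenating node tensors and offsetting cell indices, forming one large disconnected mesh per batch.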
def renumber(buffer, frame):
nodes, cells = buffer
new_nodes, new_cells = frame
return nodes + new_nodes, tf.concat([cells, new_cells+nodes], axis=0)
def batch_accumulate(ds_window):
out = {}
for key, ds_val in ds_window.items():
initial = tf.zeros((0, shapes[key][1]), dtype=types[key])
if key == 'cells':
# renumber node indices in cells
num_nodes = ds_window['node_type'].map(lambda x: tf.shape(x)[0])
cells = tf.data.Dataset.zip((num_nodes, ds_val))
initial = (tf.constant(0, tf.int32), initial)
_, out[key] = cells.reduce(initial, renumber)
else:
merge = lambda prev, cur: tf.concat([prev, cur], axis=0)
out[key] = ds_val.reduce(initial, merge)
return out
if batch_size > 1:
ds = ds.window(batch_size, drop_remainder=True)
ds = ds.map(batch_accumulate, num_parallel_calls=8)
return ds
|
deepmind-research-master
|
meshgraphnets/dataset.py
|
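A hedged sketch of how the dataset helpers above compose into a training input pipeline. The dataset directory is a placeholder; the field and noise settings mirror the cloth configuration in meshgraphnets/run_model.py.
import tensorflow.compat.v1 as tf
from meshgraphnets import dataset
ds = dataset.load_dataset('/path/to/flag_simple', 'train')  # placeholder path
ds = dataset.add_targets(ds, ['world_pos'], add_history=True)
ds = dataset.split_and_preprocess(ds, noise_field='world_pos',
                                  noise_scale=0.003, noise_gamma=0.1)
inputs = tf.data.make_one_shot_iterator(ds).get_next()
# 'inputs' now contains per-frame tensors, including 'prev|world_pos' and
# 'target|world_pos', ready to be fed to cloth_model.Model.loss().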
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Plots a cloth trajectory rollout."""
import pickle
from absl import app
from absl import flags
from matplotlib import animation
import matplotlib.pyplot as plt
FLAGS = flags.FLAGS
flags.DEFINE_string('rollout_path', None, 'Path to rollout pickle file')
def main(unused_argv):
with open(FLAGS.rollout_path, 'rb') as fp:
rollout_data = pickle.load(fp)
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(111, projection='3d')
skip = 10
num_steps = rollout_data[0]['gt_pos'].shape[0]
num_frames = len(rollout_data) * num_steps // skip
# compute bounds
bounds = []
for trajectory in rollout_data:
bb_min = trajectory['gt_pos'].min(axis=(0, 1))
bb_max = trajectory['gt_pos'].max(axis=(0, 1))
bounds.append((bb_min, bb_max))
def animate(num):
step = (num*skip) % num_steps
traj = (num*skip) // num_steps
ax.cla()
bound = bounds[traj]
ax.set_xlim([bound[0][0], bound[1][0]])
ax.set_ylim([bound[0][1], bound[1][1]])
ax.set_zlim([bound[0][2], bound[1][2]])
pos = rollout_data[traj]['pred_pos'][step]
faces = rollout_data[traj]['faces'][step]
ax.plot_trisurf(pos[:, 0], pos[:, 1], faces, pos[:, 2], shade=True)
ax.set_title('Trajectory %d Step %d' % (traj, step))
return fig,
_ = animation.FuncAnimation(fig, animate, frames=num_frames, interval=100)
plt.show(block=True)
if __name__ == '__main__':
app.run(main)
|
deepmind-research-master
|
meshgraphnets/plot_cloth.py
|
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Commonly used data structures and functions."""
import enum
import tensorflow.compat.v1 as tf
class NodeType(enum.IntEnum):
NORMAL = 0
OBSTACLE = 1
AIRFOIL = 2
HANDLE = 3
INFLOW = 4
OUTFLOW = 5
WALL_BOUNDARY = 6
SIZE = 9
def triangles_to_edges(faces):
"""Computes mesh edges from triangles."""
# collect edges from triangles
edges = tf.concat([faces[:, 0:2],
faces[:, 1:3],
tf.stack([faces[:, 2], faces[:, 0]], axis=1)], axis=0)
# those edges are sometimes duplicated (within the mesh) and sometimes
# single (at the mesh boundary).
# sort & pack edges as single tf.int64
receivers = tf.reduce_min(edges, axis=1)
senders = tf.reduce_max(edges, axis=1)
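# Ordering each edge as (max, min) makes (a, b) and (b, a) identical, so duplicates collapse in the next step.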
packed_edges = tf.bitcast(tf.stack([senders, receivers], axis=1), tf.int64)
# remove duplicates and unpack
unique_edges = tf.bitcast(tf.unique(packed_edges)[0], tf.int32)
senders, receivers = tf.unstack(unique_edges, axis=1)
# create two-way connectivity
return (tf.concat([senders, receivers], axis=0),
tf.concat([receivers, senders], axis=0))
|
deepmind-research-master
|
meshgraphnets/common.py
|
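A small worked example for triangles_to_edges from meshgraphnets/common.py; the single-triangle mesh is illustrative.
import tensorflow.compat.v1 as tf
from meshgraphnets import common
faces = tf.constant([[0, 1, 2]], dtype=tf.int32)  # one triangle
senders, receivers = common.triangles_to_edges(faces)
# The undirected edges {0,1}, {1,2}, {0,2} are each returned once per direction,
# giving six (sender, receiver) pairs in total.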
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Functions to build evaluation metrics for cloth data."""
import tensorflow.compat.v1 as tf
from meshgraphnets.common import NodeType
def _rollout(model, initial_state, num_steps):
"""Rolls out a model trajectory."""
mask = tf.equal(initial_state['node_type'][:, 0], NodeType.NORMAL)
def step_fn(step, prev_pos, cur_pos, trajectory):
prediction = model({**initial_state,
'prev|world_pos': prev_pos,
'world_pos': cur_pos})
# don't update kinematic nodes
next_pos = tf.where(mask, prediction, cur_pos)
trajectory = trajectory.write(step, cur_pos)
return step+1, cur_pos, next_pos, trajectory
_, _, _, output = tf.while_loop(
cond=lambda step, last, cur, traj: tf.less(step, num_steps),
body=step_fn,
loop_vars=(0, initial_state['prev|world_pos'], initial_state['world_pos'],
tf.TensorArray(tf.float32, num_steps)),
parallel_iterations=1)
return output.stack()
def evaluate(model, inputs):
"""Performs model rollouts and create stats."""
initial_state = {k: v[0] for k, v in inputs.items()}
num_steps = inputs['cells'].shape[0]
prediction = _rollout(model, initial_state, num_steps)
error = tf.reduce_mean((prediction - inputs['world_pos'])**2, axis=-1)
scalars = {'mse_%d_steps' % horizon: tf.reduce_mean(error[1:horizon+1])
for horizon in [1, 10, 20, 50, 100, 200]}
traj_ops = {
'faces': inputs['cells'],
'mesh_pos': inputs['mesh_pos'],
'gt_pos': inputs['world_pos'],
'pred_pos': prediction
}
return scalars, traj_ops
|
deepmind-research-master
|
meshgraphnets/cloth_eval.py
|
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Plots a CFD trajectory rollout."""
import pickle
from absl import app
from absl import flags
from matplotlib import animation
from matplotlib import tri as mtri
import matplotlib.pyplot as plt
FLAGS = flags.FLAGS
flags.DEFINE_string('rollout_path', None, 'Path to rollout pickle file')
def main(unused_argv):
with open(FLAGS.rollout_path, 'rb') as fp:
rollout_data = pickle.load(fp)
fig, ax = plt.subplots(1, 1, figsize=(12, 8))
skip = 10
num_steps = rollout_data[0]['gt_velocity'].shape[0]
num_frames = len(rollout_data) * num_steps // skip
# compute bounds
bounds = []
for trajectory in rollout_data:
bb_min = trajectory['gt_velocity'].min(axis=(0, 1))
bb_max = trajectory['gt_velocity'].max(axis=(0, 1))
bounds.append((bb_min, bb_max))
def animate(num):
step = (num*skip) % num_steps
traj = (num*skip) // num_steps
ax.cla()
ax.set_aspect('equal')
ax.set_axis_off()
vmin, vmax = bounds[traj]
pos = rollout_data[traj]['mesh_pos'][step]
faces = rollout_data[traj]['faces'][step]
velocity = rollout_data[traj]['pred_velocity'][step]
triang = mtri.Triangulation(pos[:, 0], pos[:, 1], faces)
ax.tripcolor(triang, velocity[:, 0], vmin=vmin[0], vmax=vmax[0])
ax.triplot(triang, 'ko-', ms=0.5, lw=0.3)
ax.set_title('Trajectory %d Step %d' % (traj, step))
return fig,
_ = animation.FuncAnimation(fig, animate, frames=num_frames, interval=100)
plt.show(block=True)
if __name__ == '__main__':
app.run(main)
|
deepmind-research-master
|
meshgraphnets/plot_cfd.py
|